diff --git a/.github/dockerfiles/docker_tag b/.github/dockerfiles/docker_tag
index bcfa07fb5c24b3..1dc77e89521bfe 100644
--- a/.github/dockerfiles/docker_tag
+++ b/.github/dockerfiles/docker_tag
@@ -1 +1 @@
-pr-27597
+pr-27882
diff --git a/.github/dockerfiles/ov_build/fedora_29/Dockerfile b/.github/dockerfiles/ov_build/fedora_29/Dockerfile
index e5f400e2915e9c..0b9911ac707b13 100644
--- a/.github/dockerfiles/ov_build/fedora_29/Dockerfile
+++ b/.github/dockerfiles/ov_build/fedora_29/Dockerfile
@@ -3,7 +3,11 @@ FROM ${REGISTRY}/library/fedora:29
 
 USER root
 
-RUN yum update -y && yum install -y \
+# dnf configuration
+RUN echo "timeout=60" >> /etc/dnf/dnf.conf && \
+    echo "retries=10" >> /etc/dnf/dnf.conf
+
+RUN dnf update -y && dnf install -y \
     git \
     curl \
     python3 \
diff --git a/.github/dockerfiles/ov_test/fedora_33/Dockerfile b/.github/dockerfiles/ov_test/fedora_33/Dockerfile
index 6e0fcc7d35156b..4c5b2037e60578 100644
--- a/.github/dockerfiles/ov_test/fedora_33/Dockerfile
+++ b/.github/dockerfiles/ov_test/fedora_33/Dockerfile
@@ -3,7 +3,11 @@ FROM ${REGISTRY}/library/fedora:33
 
 USER root
 
-RUN yum update -y && yum install -y \
+# dnf configuration
+RUN echo "timeout=60" >> /etc/dnf/dnf.conf && \
+    echo "retries=10" >> /etc/dnf/dnf.conf
+
+RUN dnf update -y && dnf install -y \
     git \
     curl \
     python3 \
diff --git a/.github/workflows/fedora_29.yml b/.github/workflows/fedora_29.yml
index 0dd101225dc533..6d128f33fca274 100644
--- a/.github/workflows/fedora_29.yml
+++ b/.github/workflows/fedora_29.yml
@@ -131,10 +131,10 @@ jobs:
           # install previous release version
           mv /tmp/openvino-2023.repo /etc/yum.repos.d
-          yum install -y openvino
+          dnf install -y openvino
 
           # install current version
-          yum install --allowerasing -y *.rpm
+          dnf install --allowerasing -y *.rpm
         working-directory: ${{ env.RPM_PACKAGES_DIR }}
 
       - name: Test RPM packages
diff --git a/samples/js/node/package-lock.json b/samples/js/node/package-lock.json
index 020cec71ea3103..eb7369b10a7578 100644
--- a/samples/js/node/package-lock.json
+++ b/samples/js/node/package-lock.json
@@ -15,7 +15,7 @@
         "args": "^5.0.3",
         "eslint": "^8.39.0",
         "https-proxy-agent": "^7.0.2",
-        "openvino-node": "^2024.5.0-0"
+        "openvino-node": "^2024.6.0"
       },
       "engines": {
         "node": ">=21.0.0"
@@ -1920,9 +1920,9 @@
       }
     },
     "node_modules/openvino-node": {
-      "version": "2024.5.0-0",
-      "resolved": "https://registry.npmjs.org/openvino-node/-/openvino-node-2024.5.0-0.tgz",
-      "integrity": "sha512-SgvHH3OdOXyMu5iZx0oBFWn7yIu3uB54IIfmXFKlyhHbSjO+3ph+DauUdlUkp2DGETR7bzq7+cPyyroeOF7qqQ==",
+      "version": "2024.6.0",
+      "resolved": "https://registry.npmjs.org/openvino-node/-/openvino-node-2024.6.0.tgz",
+      "integrity": "sha512-EQ0kdklsac3rfJTv6jUc9UIR0IG/YyIMOeq40+EYS0wozQ0mp4aQGBJRsT30SaEM4Ct797F9Mq+v9PjHxlJvcw==",
       "dev": true,
       "hasInstallScript": true,
       "license": "Apache-2.0",
diff --git a/samples/js/node/package.json b/samples/js/node/package.json
index b3e12a265f0c77..8198d13c80e6a5 100644
--- a/samples/js/node/package.json
+++ b/samples/js/node/package.json
@@ -8,7 +8,7 @@
     "args": "^5.0.3",
     "eslint": "^8.39.0",
     "https-proxy-agent": "^7.0.2",
-    "openvino-node": "^2024.5.0-0",
+    "openvino-node": "^2024.6.0",
     "@napi-rs/canvas": "^0.1.59"
   },
   "scripts": {
diff --git a/src/bindings/js/node/package-lock.json b/src/bindings/js/node/package-lock.json
index 27f426968e5b54..c202a824c37556 100644
--- a/src/bindings/js/node/package-lock.json
+++ b/src/bindings/js/node/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "openvino-node",
-  "version": "2024.5.0-0",
+  "version": "2024.6.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "openvino-node",
-      "version": "2024.5.0-0",
+      "version": "2024.6.0",
       "hasInstallScript": true,
       "license": "Apache-2.0",
       "os": [
diff --git a/src/bindings/js/node/package.json b/src/bindings/js/node/package.json
index c0e4e03ddc4df6..0dab709718ae9b 100644
--- a/src/bindings/js/node/package.json
+++ b/src/bindings/js/node/package.json
@@ -1,6 +1,6 @@
 {
   "name": "openvino-node",
-  "version": "2024.5.0-0",
+  "version": "2024.6.0",
   "description": "OpenVINO™ utils for using from Node.js environment",
   "repository": {
     "url": "git+https://github.com/openvinotoolkit/openvino.git",
@@ -44,7 +44,6 @@
     "tar-fs": "^3.0.4"
   },
   "binary": {
-    "version": "2024.5.0",
     "module_path": "./bin/",
     "remote_path": "./repositories/openvino/nodejs_bindings/{version}/{platform}/",
     "package_name": "openvino_nodejs_bindings_{platform}_{version}_{arch}.tar.gz",
diff --git a/src/bindings/python/src/openvino/_ov_api.py b/src/bindings/python/src/openvino/_ov_api.py
index 972ab4a9eb81c0..da31fab4c95d8e 100644
--- a/src/bindings/python/src/openvino/_ov_api.py
+++ b/src/bindings/python/src/openvino/_ov_api.py
@@ -2,7 +2,8 @@
 # Copyright (C) 2018-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import Any, Iterable, Union, Optional, Dict
+from types import TracebackType
+from typing import Any, Iterable, Union, Optional, Dict, Type
 
 from pathlib import Path
 
@@ -21,22 +22,30 @@
 )
 
 
-class Model(ModelBase):
+class Model:
     def __init__(self, *args: Any, **kwargs: Any) -> None:
         if args and not kwargs:
             if isinstance(args[0], ModelBase):
-                super().__init__(args[0])
+                self.__model = ModelBase(args[0])
             elif isinstance(args[0], Node):
-                super().__init__(*args)
+                self.__model = ModelBase(*args)
             else:
-                super().__init__(*args)
+                self.__model = ModelBase(*args)
         if args and kwargs:
-            super().__init__(*args, **kwargs)
+            self.__model = ModelBase(*args, **kwargs)
        if kwargs and not args:
-            super().__init__(**kwargs)
+            self.__model = ModelBase(**kwargs)
+
+    def __getattr__(self, name: str) -> Any:
+        if self.__model is None:
+            raise AttributeError(f"'Model' object has no attribute '{name}' or attribute is no longer accessible.")
+        return getattr(self.__model, name)
 
     def clone(self) -> "Model":
-        return Model(super().clone())
+        return Model(self.__model.clone())
+
+    def __copy__(self) -> "Model":
+        raise TypeError("Cannot copy 'openvino.runtime.Model'. Please, use deepcopy instead.")
 
     def __deepcopy__(self, memo: Dict) -> "Model":
         """Returns a deepcopy of Model.
@@ -44,7 +53,17 @@ def __deepcopy__(self, memo: Dict) -> "Model":
         :return: A copy of Model.
         :rtype: openvino.runtime.Model
         """
-        return Model(super().clone())
+        return Model(self.__model.clone())
+
+    def __enter__(self) -> "Model":
+        return self
+
+    def __exit__(self, exc_type: Type[BaseException], exc_value: BaseException, traceback: TracebackType) -> None:
+        del self.__model
+        self.__model = None
+
+    def __repr__(self) -> str:
+        return self.__model.__repr__()
 
 
 class InferRequest(_InferRequestWrapper):
@@ -500,6 +519,8 @@ def read_model(
         config: Optional[dict] = None
     ) -> Model:
         config = {} if config is None else config
+        if isinstance(model, Model):
+            model = model._Model__model
 
         if isinstance(weights, Tensor):
             return Model(super().read_model(model, weights))
@@ -543,6 +564,8 @@ def compile_model(
         :return: A compiled model.
         :rtype: openvino.runtime.CompiledModel
         """
+        if isinstance(model, Model):
+            model = model._Model__model
         if weights is None:
             if device_name is None:
                 return CompiledModel(
@@ -562,6 +585,16 @@ def compile_model(
             weights=weights,
         )
 
+    def query_model(
+        self,
+        model: Model,
+        device_name: str,
+        config: Optional[dict] = None,
+    ) -> dict:
+        return super().query_model(model._Model__model,
+                                   device_name,
+                                   {} if config is None else config, )
+
     def import_model(
         self,
         model_stream: bytes,
@@ -637,4 +670,6 @@ def compile_model(
     """
     core = Core()
 
+    if isinstance(model, Model):
+        model = model._Model__model
     return core.compile_model(model, device_name, {} if config is None else config)
diff --git a/src/bindings/python/src/openvino/frontend/frontend.py b/src/bindings/python/src/openvino/frontend/frontend.py
index 4d549d24b4ef7c..6a16d5a573b7d7 100644
--- a/src/bindings/python/src/openvino/frontend/frontend.py
+++ b/src/bindings/python/src/openvino/frontend/frontend.py
@@ -7,7 +7,7 @@
 from openvino._pyopenvino import FrontEnd as FrontEndBase
 from openvino._pyopenvino import FrontEndManager as FrontEndManagerBase
 from openvino._pyopenvino import InputModel
-from openvino.runtime import Model
+from openvino import Model
 
 
 class FrontEnd(FrontEndBase):
diff --git a/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py b/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py
index 914f6b2e2ee548..9072598f824939 100644
--- a/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py
+++ b/src/bindings/python/src/openvino/frontend/jax/jaxpr_decoder.py
@@ -6,7 +6,7 @@
 import jax.core
 from openvino.frontend.jax.py_jax_frontend import _FrontEndJaxDecoder as Decoder
-from openvino.runtime import PartialShape, Type as OVType, OVAny
+from openvino import PartialShape, Type as OVType, OVAny
 from openvino.frontend.jax.utils import jax_array_to_ov_const, get_ov_type_for_value, \
     ivalue_to_constant, param_to_constants
diff --git a/src/bindings/python/src/openvino/frontend/jax/utils.py b/src/bindings/python/src/openvino/frontend/jax/utils.py
index 4535265d6de082..659677b11d5af8 100644
--- a/src/bindings/python/src/openvino/frontend/jax/utils.py
+++ b/src/bindings/python/src/openvino/frontend/jax/utils.py
@@ -8,7 +8,7 @@
 import jax.numpy as jnp
 import numpy as np
 from openvino.frontend.jax.passes import filter_element, filter_ivalue, filter_param
-from openvino.runtime import op, Type as OVType, Shape, OVAny
+from openvino import op, Type as OVType, Shape, OVAny
 
 numpy_to_ov_type_map = {
     np.float32: OVType.f32,
diff --git a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py
index c448571f1ac17a..81a2764ee1188d 100644
--- a/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py
+++ b/src/bindings/python/src/openvino/frontend/pytorch/fx_decoder.py
@@ -10,7 +10,7 @@
 from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder
 from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType
-from openvino.runtime import PartialShape, Type as OVType, OVAny, Shape
+from openvino import PartialShape, Type as OVType, OVAny, Shape
 from openvino.frontend.pytorch.utils import make_constant, fetch_attr, pt_to_ov_type_map, torch_tensor_to_ov_const
 
 logger = logging.getLogger(__name__)
diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py
index 9f2ef019769875..a9a65781dcb254
100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend.py @@ -18,7 +18,7 @@ from torch._decomp import decomposition_table, get_decompositions from openvino.frontend import FrontEndManager -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder from openvino.frontend.pytorch.torchdynamo import decompositions from openvino.frontend.pytorch.torchdynamo.decompositions import get_aot_decomposition_list, get_inf_decomposition_list @@ -27,7 +27,7 @@ from openvino.frontend.pytorch.torchdynamo.compile import cached_model_name, openvino_compile_cached_model from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_model_caching, _get_decompositions, _get_aot_autograd -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape logger = logging.getLogger(__name__) logger.setLevel(logging.WARNING) diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py index 47b3b82806b18b..c9a772b3feac42 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/backend_utils.py @@ -5,7 +5,7 @@ # mypy: ignore-errors from typing import Optional, Any -from openvino.runtime import Core +from openvino import Core def _get_device(options) -> Optional[Any]: diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py index fa446893a05d07..ca8d5478e76c15 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/compile.py @@ -14,7 +14,7 @@ from openvino.frontend import FrontEndManager from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder -from openvino.runtime import Core, Type, PartialShape, serialize +from openvino import Core, Type, PartialShape, serialize from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_config, _is_cache_dir_in_config from typing import Callable, Optional diff --git a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py index 4f41f7b5a6a9de..7527ad7acb37a4 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/torchdynamo/execute.py @@ -20,7 +20,7 @@ from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder from openvino.frontend.pytorch.torchdynamo.partition import Partitioner from openvino.frontend.pytorch.torchdynamo.compile import openvino_compile -from openvino.runtime import Core, Type, PartialShape +from openvino import Core, Type, PartialShape from openvino.frontend.pytorch.torchdynamo.backend_utils import _get_cache_dir, _get_device, _get_aot_autograd from typing import Callable, Optional, Any diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 6d8fdb1658793e..7bb8073167a654 100644 --- 
a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -6,7 +6,7 @@ from openvino.frontend.pytorch.py_pytorch_frontend import _FrontEndPytorchDecoder as Decoder from openvino.frontend.pytorch.py_pytorch_frontend import _Type as DecoderType -from openvino.runtime import op, PartialShape, Type as OVType, OVAny +from openvino import op, PartialShape, Type as OVType, OVAny from openvino.frontend.pytorch.utils import ( ivalue_to_constant, get_value_from_getattr, @@ -15,7 +15,7 @@ convert_quantized_tensor, graph_has_ops, ) -from openvino.runtime import opset11 as ops +from openvino import opset11 as ops from openvino.frontend.pytorch import quantized, patch_model from openvino.frontend.pytorch.module_extension import ModuleExtension diff --git a/src/bindings/python/src/openvino/frontend/pytorch/utils.py b/src/bindings/python/src/openvino/frontend/pytorch/utils.py index 826d766505fa79..9ba36707037c9e 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/utils.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/utils.py @@ -7,8 +7,8 @@ import torch import numpy as np -from openvino.runtime import op, Type as OVType, Shape, Tensor -from openvino.runtime import opset11 as ops +from openvino import op, Type as OVType, Shape, Tensor +from openvino import opset11 as ops def make_constant(*args, **kwargs): diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py index fcedd7a74c2b51..d15262cbc30366 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py @@ -7,7 +7,7 @@ import numpy as np import tensorflow as tf from openvino.frontend.tensorflow.py_tensorflow_frontend import _FrontEndDecoderBase as DecoderBase -from openvino.runtime import PartialShape, Type, OVAny, Tensor +from openvino import PartialShape, Type, OVAny, Tensor def tf_type_to_ov_type(tf_type_int): diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py index 74c0dfff92297e..7de5dc950be53e 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/utils.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/utils.py @@ -8,7 +8,7 @@ import logging as log import numpy as np import sys -from openvino.runtime import PartialShape, Dimension, Type +from openvino import PartialShape, Dimension, Type from packaging.version import parse, Version from typing import List, Dict, Union diff --git a/src/bindings/python/src/openvino/helpers/packing.py b/src/bindings/python/src/openvino/helpers/packing.py index 796af87402f3a6..d0956e09fc6261 100644 --- a/src/bindings/python/src/openvino/helpers/packing.py +++ b/src/bindings/python/src/openvino/helpers/packing.py @@ -5,7 +5,7 @@ import numpy as np from typing import Union -from openvino.runtime import Type, Shape +from openvino import Type, Shape def pack_data(array: np.ndarray, type: Type) -> np.ndarray: diff --git a/src/bindings/python/src/openvino/opset1/ops.py b/src/bindings/python/src/openvino/opset1/ops.py index edca6c62a0b246..e264aea304fb1f 100644 --- a/src/bindings/python/src/openvino/opset1/ops.py +++ b/src/bindings/python/src/openvino/opset1/ops.py @@ -8,17 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, PartialShape, Type +from openvino import Node, 
PartialShape, Type from openvino.op import Constant, Parameter, tensor_iterator -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset10/ops.py b/src/bindings/python/src/openvino/opset10/ops.py index c7b75777484a59..d0bc3cbf1cba4a 100644 --- a/src/bindings/python/src/openvino/opset10/ops.py +++ b/src/bindings/python/src/openvino/opset10/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import List, Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/opset11/ops.py b/src/bindings/python/src/openvino/opset11/ops.py index 575c99501d2d6c..95767b4800db1c 100644 --- a/src/bindings/python/src/openvino/opset11/ops.py +++ b/src/bindings/python/src/openvino/opset11/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import List, Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, ) diff --git a/src/bindings/python/src/openvino/opset12/ops.py b/src/bindings/python/src/openvino/opset12/ops.py index 928bf4f71a9773..4b354b1fcff973 100644 --- a/src/bindings/python/src/openvino/opset12/ops.py +++ b/src/bindings/python/src/openvino/opset12/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import Optional -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/opset13/ops.py b/src/bindings/python/src/openvino/opset13/ops.py index 12f0d06b1a28e6..5c6863740120f8 100644 --- a/src/bindings/python/src/openvino/opset13/ops.py +++ b/src/bindings/python/src/openvino/opset13/ops.py @@ -11,12 +11,12 @@ log = logging.getLogger(__name__) -from openvino.runtime import Node, Shape, Type, Output, Tensor +from openvino import Node, Shape, Type, Output, Tensor from openvino.op import Constant, Result from openvino.opset1 import convert_like -from openvino.runtime.opset_utils import 
_get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op, overloading -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op, unary_op, overloading +from openvino.utils.types import ( NumericData, NodeInput, NumericType, diff --git a/src/bindings/python/src/openvino/opset14/ops.py b/src/bindings/python/src/openvino/opset14/ops.py index fa872d24eb7f1a..59e1bfd3e89c6f 100644 --- a/src/bindings/python/src/openvino/opset14/ops.py +++ b/src/bindings/python/src/openvino/opset14/ops.py @@ -7,11 +7,11 @@ from typing import Union, Optional, List -from openvino.runtime import Node, Type -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.types import TensorShape -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import NodeInput, as_node, as_nodes +from openvino import Node, Type +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.types import TensorShape +from openvino.utils.decorators import nameable_op +from openvino.utils.types import NodeInput, as_node, as_nodes _get_node_factory_opset14 = partial(_get_node_factory, "opset14") diff --git a/src/bindings/python/src/openvino/opset15/ops.py b/src/bindings/python/src/openvino/opset15/ops.py index 8e6b8bd46d5f7c..97d4419fc4834b 100644 --- a/src/bindings/python/src/openvino/opset15/ops.py +++ b/src/bindings/python/src/openvino/opset15/ops.py @@ -7,12 +7,12 @@ from typing import List, Literal, Optional import numpy as np -from openvino.runtime import Node, Type +from openvino import Node, Type from openvino.opset1 import convert_like from openvino.opset14 import constant -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op -from openvino.runtime.utils.types import NodeInput, as_nodes +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import binary_op, nameable_op +from openvino.utils.types import NodeInput, as_nodes _get_node_factory_opset15 = partial(_get_node_factory, "opset15") diff --git a/src/bindings/python/src/openvino/opset16/ops.py b/src/bindings/python/src/openvino/opset16/ops.py index 60656f6d993b6a..e5ebdc7a2a11d6 100644 --- a/src/bindings/python/src/openvino/opset16/ops.py +++ b/src/bindings/python/src/openvino/opset16/ops.py @@ -6,10 +6,10 @@ from functools import partial from typing import Optional -from openvino.runtime import Node -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.types import NodeInput, as_nodes +from openvino import Node +from openvino.utils.decorators import nameable_op +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.types import NodeInput, as_nodes _get_node_factory_opset16 = partial(_get_node_factory, "opset16") diff --git a/src/bindings/python/src/openvino/opset2/ops.py b/src/bindings/python/src/openvino/opset2/ops.py index 45b33f5bc0288b..f76f608fe9a5c7 100644 --- a/src/bindings/python/src/openvino/opset2/ops.py +++ b/src/bindings/python/src/openvino/opset2/ops.py @@ -9,18 +9,17 @@ from functools import partial import warnings -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from 
openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset3/ops.py b/src/bindings/python/src/openvino/opset3/ops.py index 989f5819acb685..1c2c7e309fe919 100644 --- a/src/bindings/python/src/openvino/opset3/ops.py +++ b/src/bindings/python/src/openvino/opset3/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset4/ops.py b/src/bindings/python/src/openvino/opset4/ops.py index 4f6ba016852b02..e6f3a3a1550937 100644 --- a/src/bindings/python/src/openvino/opset4/ops.py +++ b/src/bindings/python/src/openvino/opset4/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset5/ops.py b/src/bindings/python/src/openvino/opset5/ops.py index 20057b78c7c31d..9217830752b1d8 100644 --- a/src/bindings/python/src/openvino/opset5/ops.py +++ b/src/bindings/python/src/openvino/opset5/ops.py @@ -8,18 +8,17 @@ import numpy as np from functools import partial -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter, loop -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( 
assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset6/ops.py b/src/bindings/python/src/openvino/opset6/ops.py index 8020715f20dea3..340d0405b4ba23 100644 --- a/src/bindings/python/src/openvino/opset6/ops.py +++ b/src/bindings/python/src/openvino/opset6/ops.py @@ -9,13 +9,13 @@ from functools import partial, singledispatch -from openvino.runtime import Node, Type, PartialShape, Output, Shape +from openvino import Node, Type, PartialShape, Output, Shape from openvino.op import assign, Constant, Parameter from openvino.op import read_value as _read_value from openvino.op.util import VariableInfo, Variable -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op, overloading -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op, overloading +from openvino.utils.types import ( NodeInput, NumericType, TensorShape, diff --git a/src/bindings/python/src/openvino/opset7/ops.py b/src/bindings/python/src/openvino/opset7/ops.py index 59e09b64888eb1..e33d266debedf1 100644 --- a/src/bindings/python/src/openvino/opset7/ops.py +++ b/src/bindings/python/src/openvino/opset7/ops.py @@ -7,18 +7,17 @@ from typing import Callable, Iterable, List, Optional, Set, Union import numpy as np -from openvino.runtime import Node, Shape +from openvino import Node, Shape from openvino.op import Constant, Parameter -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import binary_op, nameable_op, unary_op -from openvino.runtime.utils.input_validation import ( +from openvino.utils.decorators import binary_op, nameable_op, unary_op +from openvino.utils.input_validation import ( assert_list_of_ints, check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.node_factory import NodeFactory -from openvino.runtime.utils.types import ( +from openvino.utils.node_factory import NodeFactory, _get_node_factory +from openvino.utils.types import ( NodeInput, NumericData, NumericType, diff --git a/src/bindings/python/src/openvino/opset8/ops.py b/src/bindings/python/src/openvino/opset8/ops.py index 6995d55a28a776..a9a868e7b541d8 100644 --- a/src/bindings/python/src/openvino/opset8/ops.py +++ b/src/bindings/python/src/openvino/opset8/ops.py @@ -9,15 +9,15 @@ import numpy as np from openvino.exceptions import UserInputError from openvino.op import Constant, Parameter, if_op -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.input_validation import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.input_validation import ( check_valid_attributes, is_non_negative_value, is_positive_value, ) -from openvino.runtime.utils.types import ( +from openvino.utils.types import ( NodeInput, TensorShape, as_node, diff --git a/src/bindings/python/src/openvino/opset9/ops.py b/src/bindings/python/src/openvino/opset9/ops.py index 
a6d45cfd0be2cc..e2264845e058dc 100644 --- a/src/bindings/python/src/openvino/opset9/ops.py +++ b/src/bindings/python/src/openvino/opset9/ops.py @@ -7,10 +7,10 @@ from typing import Optional import numpy as np -from openvino.runtime import Node -from openvino.runtime.opset_utils import _get_node_factory -from openvino.runtime.utils.decorators import nameable_op -from openvino.runtime.utils.types import ( +from openvino import Node +from openvino.utils.node_factory import _get_node_factory +from openvino.utils.decorators import nameable_op +from openvino.utils.types import ( NodeInput, as_nodes, as_node, diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py index c14635cc118208..717e945217468c 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/preprocess_converter.py @@ -5,7 +5,7 @@ from typing import Callable, Any, Union import logging -import openvino.runtime as ov +import openvino as ov class PreprocessConverter(): diff --git a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py index f8b51afd546f57..5dad42b47da44a 100644 --- a/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py +++ b/src/bindings/python/src/openvino/preprocess/torchvision/torchvision_preprocessing.py @@ -20,10 +20,10 @@ import torchvision.transforms as transforms from torchvision.transforms import InterpolationMode -import openvino.runtime as ov -import openvino.runtime.opset11 as ops -from openvino.runtime import Layout, Type -from openvino.runtime.utils.decorators import custom_preprocess_function +import openvino as ov +import openvino.opset11 as ops +from openvino import Layout, Type +from openvino.utils.decorators import custom_preprocess_function from openvino.preprocess import PrePostProcessor, ResizeAlgorithm, ColorFormat diff --git a/src/bindings/python/src/openvino/properties/_properties.py b/src/bindings/python/src/openvino/properties/_properties.py index a3d9e2076ad072..ee0a612583431c 100644 --- a/src/bindings/python/src/openvino/properties/_properties.py +++ b/src/bindings/python/src/openvino/properties/_properties.py @@ -16,6 +16,9 @@ def __new__(cls, prop: Callable[..., Any]): # type: ignore def __call__(self, *args: Any) -> Callable[..., Any]: if args is not None: + from openvino import Model + if args and isinstance(args[0], Model): + return self.prop(args[0]._Model__model) return self.prop(*args) return self.prop() diff --git a/src/bindings/python/src/openvino/test_utils/__init__.py b/src/bindings/python/src/openvino/test_utils/__init__.py index e25fa9e67be800..bca79f8a4e2729 100644 --- a/src/bindings/python/src/openvino/test_utils/__init__.py +++ b/src/bindings/python/src/openvino/test_utils/__init__.py @@ -2,4 +2,4 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from .test_utils_api import compare_functions +from .test_api import compare_functions diff --git a/src/bindings/python/src/openvino/test_utils/test_api.py b/src/bindings/python/src/openvino/test_utils/test_api.py new file mode 100644 index 00000000000000..ce65eb9dcd820e --- /dev/null +++ b/src/bindings/python/src/openvino/test_utils/test_api.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (C) 2018-2024 Intel Corporation +# 
SPDX-License-Identifier: Apache-2.0 + +from .test_utils_api import compare_functions as compare_functions_base +from openvino.runtime import Model + + +def compare_functions(lhs: Model, rhs: Model, compare_tensor_names: bool = True) -> tuple: + return compare_functions_base(lhs._Model__model, rhs._Model__model, compare_tensor_names) diff --git a/src/bindings/python/src/openvino/utils/broadcasting.py b/src/bindings/python/src/openvino/utils/broadcasting.py index a950aea9bba820..01549625e2c628 100644 --- a/src/bindings/python/src/openvino/utils/broadcasting.py +++ b/src/bindings/python/src/openvino/utils/broadcasting.py @@ -5,7 +5,7 @@ import logging from typing import Optional -from openvino.runtime import AxisSet +from openvino import AxisSet from openvino.utils.types import ( TensorShape, ) diff --git a/src/bindings/python/src/openvino/utils/decorators.py b/src/bindings/python/src/openvino/utils/decorators.py index 604c0745860bf6..9418c359d129e8 100644 --- a/src/bindings/python/src/openvino/utils/decorators.py +++ b/src/bindings/python/src/openvino/utils/decorators.py @@ -6,7 +6,7 @@ from inspect import signature from typing import Any, Callable, Dict, Optional, Union, get_origin, get_args -from openvino.runtime import Node, Output +from openvino import Node, Output from openvino.utils.types import NodeInput, as_node, as_nodes diff --git a/src/bindings/python/src/openvino/utils/input_validation.py b/src/bindings/python/src/openvino/utils/input_validation.py index e79a16c48581b1..1de08452e1da9f 100644 --- a/src/bindings/python/src/openvino/utils/input_validation.py +++ b/src/bindings/python/src/openvino/utils/input_validation.py @@ -9,7 +9,7 @@ import numpy as np -from openvino.runtime.exceptions import UserInputError +from openvino.exceptions import UserInputError log = logging.getLogger(__name__) diff --git a/src/bindings/python/src/openvino/utils/node_factory.py b/src/bindings/python/src/openvino/utils/node_factory.py index 9841daaea4e818..e999ae6988814a 100644 --- a/src/bindings/python/src/openvino/utils/node_factory.py +++ b/src/bindings/python/src/openvino/utils/node_factory.py @@ -9,9 +9,9 @@ from openvino._pyopenvino import NodeFactory as _NodeFactory -from openvino.runtime import Node, Output, Extension +from openvino import Node, Output, Extension -from openvino.runtime.exceptions import UserInputError +from openvino.exceptions import UserInputError DEFAULT_OPSET = "opset13" diff --git a/src/bindings/python/src/openvino/utils/reduction.py b/src/bindings/python/src/openvino/utils/reduction.py index 71d0af8de7376e..e6be6d0ac9a104 100644 --- a/src/bindings/python/src/openvino/utils/reduction.py +++ b/src/bindings/python/src/openvino/utils/reduction.py @@ -4,7 +4,7 @@ from typing import Iterable, Optional -from openvino.runtime import Node +from openvino import Node def get_reduction_axes(node: Node, reduction_axes: Optional[Iterable[int]]) -> Iterable[int]: diff --git a/src/bindings/python/src/openvino/utils/types.py b/src/bindings/python/src/openvino/utils/types.py index 854cc0c7f6411d..b3543739741d94 100644 --- a/src/bindings/python/src/openvino/utils/types.py +++ b/src/bindings/python/src/openvino/utils/types.py @@ -9,8 +9,8 @@ import numpy as np -from openvino.runtime.exceptions import OVTypeError -from openvino.runtime import Node, Shape, Output, Type +from openvino.exceptions import OVTypeError +from openvino import Node, Shape, Output, Type from openvino.op import Constant log = logging.getLogger(__name__) diff --git 
a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp index 641893cdd267a2..90aece1803f4b4 100644 --- a/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp +++ b/src/bindings/python/src/pyopenvino/core/offline_transformations.cpp @@ -23,6 +23,7 @@ #include "openvino/pass/low_latency.hpp" #include "openvino/pass/manager.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -34,7 +35,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_moc_transformations", - [](std::shared_ptr model, bool cf, bool smart_reshape) { + [](py::object& ie_api_model, bool cf, bool smart_reshape) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; if (smart_reshape) manager.register_pass(); @@ -48,7 +50,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_moc_legacy_transformations", - [](std::shared_ptr model, const std::vector& params_with_custom_types) { + [](py::object& ie_api_model, const std::vector& params_with_custom_types) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(params_with_custom_types); manager.run_passes(model); @@ -58,7 +61,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_low_latency_transformation", - [](std::shared_ptr model, bool use_const_initializer = true) { + [](py::object& ie_api_model, bool use_const_initializer = true) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(use_const_initializer); manager.run_passes(model); @@ -68,7 +72,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_pruning_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -77,7 +82,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_make_stateful_transformation", - [](std::shared_ptr model, const std::map& param_res_names) { + [](py::object& ie_api_model, const std::map& param_res_names) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(param_res_names); manager.run_passes(model); @@ -87,7 +93,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_make_stateful_transformation", - [](std::shared_ptr model, const ov::pass::MakeStateful::ParamResPairs& pairs_to_replace) { + [](py::object& ie_api_model, const ov::pass::MakeStateful::ParamResPairs& pairs_to_replace) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(pairs_to_replace); manager.run_passes(model); @@ -97,7 +104,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "compress_model_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); bool postponed = false; return ov::pass::compress_model_to_f16(model, postponed); }, @@ -105,7 +113,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( 
"compress_quantize_weights_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -114,7 +123,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "convert_sequence_to_tensor_iterator_transformation", - [](std::shared_ptr model) { + [](py::object ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -123,7 +133,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "apply_fused_names_cleanup", - [](std::shared_ptr model) { + [](py::object ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); @@ -132,7 +143,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "paged_attention_transformation", - [](std::shared_ptr model, bool use_block_indices_inputs, bool use_score_outputs) { + [](py::object& ie_api_model, bool use_block_indices_inputs, bool use_score_outputs) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(use_block_indices_inputs, use_score_outputs); manager.run_passes(model); @@ -143,7 +155,8 @@ void regmodule_offline_transformations(py::module m) { m_offline_transformations.def( "stateful_to_stateless_transformation", - [](std::shared_ptr model) { + [](py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::pass::Manager manager; manager.register_pass(); manager.run_passes(model); diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 758fb505f5f885..52707b0b8248ce 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -113,10 +113,13 @@ void regclass_frontend_FrontEnd(py::module m) { :rtype: openvino.runtime.Model )"); - fem.def("convert", - static_cast&) const>(&FrontEnd::convert), - py::arg("model"), - R"( + fem.def( + "convert", + [](FrontEnd& self, const py::object& ie_api_model) { + return self.convert(Common::utils::convert_to_model(ie_api_model)); + }, + py::arg("model"), + R"( Completely convert the remaining, not converted part of a function. :param model: Partially converted OpenVINO model. @@ -153,10 +156,13 @@ void regclass_frontend_FrontEnd(py::module m) { :rtype: openvino.runtime.Model )"); - fem.def("normalize", - &FrontEnd::normalize, - py::arg("model"), - R"( + fem.def( + "normalize", + [](FrontEnd& self, const py::object& ie_api_model) { + self.normalize(Common::utils::convert_to_model(ie_api_model)); + }, + py::arg("model"), + R"( Runs normalization passes on function that was loaded with partial conversion. :param model : Partially converted OpenVINO model. 
diff --git a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp index 587d3906b02607..40a603977159a5 100644 --- a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp @@ -35,6 +35,7 @@ void regclass_graph_AttributeVisitor(py::module m) { "on_attributes", [](ov::AttributeVisitor* self, py::dict& attributes) { py::object float_32_type = py::module_::import("numpy").attr("float32"); + py::object model = py::module_::import("openvino.runtime").attr("Model"); for (const auto& attribute : attributes) { if (py::isinstance(attribute.second)) { visit_attribute(attributes, attribute, self); @@ -48,6 +49,10 @@ void regclass_graph_AttributeVisitor(py::module m) { visit_attribute(attributes, attribute, self); } else if (py::isinstance(attribute.second)) { visit_attribute>(attributes, attribute, self); + } else if (py::isinstance(attribute.second, model)) { + auto attr_casted = attribute.second.attr("_Model__model").cast>(); + self->on_attribute>(attribute.first.cast(), attr_casted); + attributes[attribute.first] = std::move(attr_casted); } else if (py::isinstance(attribute.second)) { visit_attribute(attributes, attribute, self); } else if (py::isinstance(attribute.second)) { diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index e3c648c0f4cfcb..a482ba55e46e74 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -1328,10 +1328,6 @@ void regclass_graph_Model(py::module m) { outputs_str + "\n]>"; }); - model.def("__copy__", [](ov::Model& self) { - throw py::type_error("Cannot copy 'openvino.runtime.Model. Please, use deepcopy instead."); - }); - model.def("get_rt_info", (PyRTMap & (ov::Model::*)()) & ov::Model::get_rt_info, py::return_value_policy::reference_internal, diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index c452e2fe4ac849..8cd52099436d2b 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -12,6 +12,7 @@ #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/if.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -77,10 +78,14 @@ void regclass_graph_op_If(py::module m) { :rtype: openvino.Model )"); - cls.def("set_then_body", - &ov::op::v8::If::set_then_body, - py::arg("body"), - R"( + cls.def( + "set_then_body", + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + return self->set_then_body(body); + }, + py::arg("body"), + R"( Sets new Model object as new then_body. :param body: new body for 'then' branch. @@ -89,10 +94,14 @@ void regclass_graph_op_If(py::module m) { :rtype: None )"); - cls.def("set_else_body", - &ov::op::v8::If::set_else_body, - py::arg("body"), - R"( + cls.def( + "set_else_body", + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + return self->set_else_body(body); + }, + py::arg("body"), + R"( Sets new Model object as new else_body. :param body: new body for 'else' branch. 
@@ -156,11 +165,15 @@ void regclass_graph_op_If(py::module m) { :rtype: openvino.Model )"); - cls.def("set_function", - &ov::op::util::MultiSubGraphOp::set_function, - py::arg("index"), - py::arg("func"), - R"( + cls.def( + "set_function", + [](const std::shared_ptr& self, int index, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); + self->set_function(index, func); + }, + py::arg("index"), + py::arg("func"), + R"( Adds sub-graph to MultiSubGraphOp. :param index: index of new sub-graph. diff --git a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp index 536d97d17273ab..069a1376eba758 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp @@ -11,6 +11,7 @@ #include "openvino/util/log.hpp" #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -91,7 +92,8 @@ void regclass_graph_op_Loop(py::module m) { cls.def( "set_function", - [](const std::shared_ptr& self, const std::shared_ptr& func) { + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); self->set_function(func); }, py::arg("func")); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp index 5932656c3eccb9..3039aa90008f29 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp @@ -9,6 +9,7 @@ #include "openvino/op/util/sub_graph_base.hpp" #include "pyopenvino/core/common.hpp" #include "pyopenvino/graph/ops/util/multisubgraph.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -18,7 +19,13 @@ void regclass_graph_op_TensorIterator(py::module m) { "tensor_iterator"); cls.doc() = "openvino.impl.op.TensorIterator wraps ov::op::v0::TensorIterator"; cls.def(py::init<>()); - cls.def("set_body", &ov::op::v0::TensorIterator::set_body, py::arg("body")); + cls.def( + "set_body", + [](const std::shared_ptr& self, py::object& ie_api_model) { + const auto body = Common::utils::convert_to_model(ie_api_model); + self->set_body(body); + }, + py::arg("body")); cls.def("set_invariant_input", &ov::op::v0::TensorIterator::set_invariant_input, py::arg("body_parameter"), @@ -68,7 +75,8 @@ void regclass_graph_op_TensorIterator(py::module m) { cls.def( "set_function", - [](const std::shared_ptr& self, const std::shared_ptr& func) { + [](const std::shared_ptr& self, const py::object& ie_api_model) { + const auto func = Common::utils::convert_to_model(ie_api_model); self->set_function(func); }, py::arg("func")); diff --git a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp index 9bd2833308db41..5fb4ddb4bd6dc8 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp @@ -35,10 +35,14 @@ void regclass_passes_Manager(py::module m) { :type new_state: bool )"); - manager.def("run_passes", - &ov::pass::Manager::run_passes, - py::arg("model"), - R"( + manager.def( + "run_passes", + [](ov::pass::Manager& self, const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + self.run_passes(model); + }, + 
py::arg("model"), + R"( Executes sequence of transformations on given Model. :param model: openvino.runtime.Model to be transformed. diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index a19f2b2f482337..25fdd7b007a297 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -11,6 +11,7 @@ #include "openvino/core/node.hpp" #include "openvino/core/preprocess/pre_post_process.hpp" #include "pyopenvino/core/common.hpp" +#include "pyopenvino/utils/utils.hpp" namespace py = pybind11; @@ -553,7 +554,14 @@ void regclass_graph_PrePostProcessor(py::module m) { "PrePostProcessor"); proc.doc() = "openvino.runtime.preprocess.PrePostProcessor wraps ov::preprocess::PrePostProcessor"; - proc.def(py::init&>(), py::arg("model")); + proc.def(py::init([](const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + return std::make_shared(model); + }), + py::arg("model"), + R"( + It creates PrePostProcessor. + )"); proc.def("input", [](ov::preprocess::PrePostProcessor& self) { return &self.input(); @@ -591,7 +599,15 @@ void regclass_graph_PrePostProcessor(py::module m) { }, py::arg("output_index")); - proc.def("build", &ov::preprocess::PrePostProcessor::build, py::call_guard()); + proc.def("build", [](ov::preprocess::PrePostProcessor& self) { + std::shared_ptr model; + { + py::gil_scoped_release release; + model = self.build(); + } + py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + return model_class(py::cast(model)); + }); proc.def("__str__", [](const ov::preprocess::PrePostProcessor& self) -> std::string { std::stringstream ss; diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index ee3ef1c8b8144e..c385e5467224c0 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -98,10 +98,25 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def("get_version", &get_version); m.def("get_batch", &ov::get_batch); - m.def("set_batch", &ov::set_batch); + m.def( + "get_batch", + [](const py::object& ie_api_model) { + const auto model = Common::utils::convert_to_model(ie_api_model); + return ov::get_batch(model); + }, + py::arg("model")); + m.def( + "set_batch", + [](const py::object& ie_api_model, ov::Dimension value) { + auto model = Common::utils::convert_to_model(ie_api_model); + ov::set_batch(model, value); + }, + py::arg("model"), + py::arg("dimension")); m.def( "set_batch", - [](const std::shared_ptr& model, int64_t value) { + [](const py::object& ie_api_model, int64_t value) { + auto model = Common::utils::convert_to_model(ie_api_model); ov::set_batch(model, ov::Dimension(value)); }, py::arg("model"), @@ -109,10 +124,11 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def( "serialize", - [](std::shared_ptr& model, + [](py::object& ie_api_model, const py::object& xml_path, const py::object& bin_path, const std::string& version) { + const auto model = Common::utils::convert_to_model(ie_api_model); ov::serialize(model, Common::utils::convert_path_to_string(xml_path), Common::utils::convert_path_to_string(bin_path), @@ -173,15 +189,9 @@ PYBIND11_MODULE(_pyopenvino, m) { m.def( "save_model", - [](std::shared_ptr& model, - const py::object& xml_path, - bool compress_to_fp16) { - if (model == nullptr) { - throw 
py::attribute_error("'model' argument is required and cannot be None."); - } - ov::save_model(model, - Common::utils::convert_path_to_string(xml_path), - compress_to_fp16); + [](py::object& ie_api_model, const py::object& xml_path, bool compress_to_fp16) { + const auto model = Common::utils::convert_to_model(ie_api_model); + ov::save_model(model, Common::utils::convert_path_to_string(xml_path), compress_to_fp16); }, py::arg("model"), py::arg("output_model"), diff --git a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt index 94a1e62b7e1809..81d993b93f95a4 100644 --- a/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt +++ b/src/bindings/python/src/pyopenvino/test_utils/CMakeLists.txt @@ -39,7 +39,7 @@ endif() # perform copy add_custom_command(TARGET ${TARGET_NAME} POST_BUILD - COMMAND ${CMAKE_COMMAND} -E copy ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py + COMMAND ${CMAKE_COMMAND} -E copy_directory ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} ) ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME} @@ -53,7 +53,7 @@ install(TARGETS ${TARGET_NAME} LIBRARY DESTINATION tests/${OV_CPACK_PYTHONDIR}/openvino/test_utils COMPONENT tests EXCLUDE_FROM_ALL) -install(PROGRAMS ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils/__init__.py - DESTINATION tests/${OV_CPACK_PYTHONDIR}/openvino/test_utils +install(DIRECTORY ${OpenVINOPython_SOURCE_DIR}/src/openvino/test_utils + DESTINATION tests/${OV_CPACK_PYTHONDIR}/openvino COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/src/bindings/python/src/pyopenvino/utils/utils.cpp b/src/bindings/python/src/pyopenvino/utils/utils.cpp index c747e2d3b81166..bd1520119bd8a9 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.cpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.cpp @@ -311,6 +311,18 @@ std::string convert_path_to_string(const py::object& path) { OPENVINO_THROW(str.str()); } +std::shared_ptr convert_to_model(const py::object& obj) { + if (!py::isinstance(obj, py::module_::import("openvino").attr("Model"))) { + throw py::type_error("Incompatible `model` argument. Please provide a valid openvino.Model instance."); + } + auto model = obj.attr("_Model__model").cast>(); + if (model == nullptr) { + throw py::attribute_error("Invalid openvino.Model instance. It cannot be None. 
" + "Please make sure it is not used outside of its context."); + } + return model; +} + Version convert_to_version(const std::string& version) { if (version == "UNSPECIFIED") return Version::UNSPECIFIED; diff --git a/src/bindings/python/src/pyopenvino/utils/utils.hpp b/src/bindings/python/src/pyopenvino/utils/utils.hpp index 2a7b6505269535..224b70bb1fa176 100644 --- a/src/bindings/python/src/pyopenvino/utils/utils.hpp +++ b/src/bindings/python/src/pyopenvino/utils/utils.hpp @@ -81,6 +81,8 @@ class MemoryBuffer : public std::streambuf { std::string convert_path_to_string(const py::object& path); + std::shared_ptr convert_to_model(const py::object& obj); + void deprecation_warning(const std::string& function_name, const std::string& version = std::string(), const std::string& message = std::string(), int stacklevel=2); void raise_not_implemented(); diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 0ae592b2d1dff5..425cdb97129c69 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -3,11 +3,13 @@ # SPDX-License-Identifier: Apache-2.0 import os +import sys import numpy as np import pytest import math from contextlib import nullcontext as does_not_raise from copy import copy +import tempfile import openvino.runtime.opset13 as ops from openvino import ( @@ -801,13 +803,49 @@ def test_model_add_remove_variable(): def test_save_model_with_none(): - with pytest.raises(AttributeError) as e: + with pytest.raises(TypeError) as e: save_model(model=None, output_model="model.xml") - assert "'model' argument is required and cannot be None." in str(e.value) + assert "Please provide a valid openvino.Model instance." in str(e.value) def test_copy_failed(): model = generate_add_model() with pytest.raises(TypeError) as e: copy(model) - assert "Cannot copy 'openvino.runtime.Model. Please, use deepcopy instead." in str(e.value) + assert "Cannot copy 'openvino.runtime.Model'. Please, use deepcopy instead." 
diff --git a/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp b/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp index 2caf346bfc5da6..5d82af51847081 100644 --- a/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp +++ b/src/common/low_precision_transformations/include/low_precision/align_quantization_intervals.hpp @@ -30,7 +30,7 @@ class LP_TRANSFORMATIONS_API AlignQuantizationIntervals; */ class ov::pass::low_precision::AlignQuantizationIntervals : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignQuantizationIntervals", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::AlignQuantizationIntervals"); AlignQuantizationIntervals(const std::vector<ov::element::Type>& defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp b/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp index 89c9bf59747860..ce6db1c397522f 100644 --- a/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp +++ b/src/common/low_precision_transformations/include/low_precision/align_quantization_parameters.hpp @@ -31,7 +31,7 @@ class LP_TRANSFORMATIONS_API AlignQuantizationParameters; */ class ov::pass::low_precision::AlignQuantizationParameters : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignQuantizationParameters", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::AlignQuantizationParameters"); AlignQuantizationParameters(const std::vector<ov::element::Type> defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp index
d6bddd3643a4f6..b3b92340303ced 100644 --- a/src/common/low_precision_transformations/include/low_precision/low_precision.hpp +++ b/src/common/low_precision_transformations/include/low_precision/low_precision.hpp @@ -42,7 +42,7 @@ class LP_TRANSFORMATIONS_API LowPrecision; class ov::pass::low_precision::MarkupOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupOptimizations"); MarkupOptimizations( const std::vector& precisionRestrictions, const std::vector& quantizationRestrictions, @@ -62,7 +62,7 @@ class ov::pass::low_precision::TypeRelaxedReplacer : public ov::pass::GraphRewri class LP_TRANSFORMATIONS_API ov::pass::low_precision::LowPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("LowPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::LowPrecision"); LowPrecision( const std::vector& precisionRestrictions = {}, const std::vector& quantizationRestrictions = {}, diff --git a/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp b/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp index da3605b4ee4ba5..4e65b9f414f9fc 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_avg_pool_precision_preserved.hpp @@ -29,7 +29,7 @@ class LP_TRANSFORMATIONS_API MarkupAvgPoolPrecisionPreserved; */ class ov::pass::low_precision::MarkupAvgPoolPrecisionPreserved : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupAvgPoolPrecisionPreserved", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupAvgPoolPrecisionPreserved"); MarkupAvgPoolPrecisionPreserved(const std::vector defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support()); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp b/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp index f886e69f2088c7..7359a6617c96b6 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_can_be_quantized.hpp @@ -30,7 +30,7 @@ class LP_TRANSFORMATIONS_API MarkupCanBeQuantized; */ class ov::pass::low_precision::MarkupCanBeQuantized : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkupCanBeQuantized", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupCanBeQuantized"); MarkupCanBeQuantized(const std::vector defaultPrecisions = { ov::element::u8, ov::element::i8 }); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp b/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp index 92ea93b2c3e5b1..56e926101581bf 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_precisions.hpp @@ -36,6 +36,7 @@ class LP_TRANSFORMATIONS_API MarkupPrecisions; */ class ov::pass::low_precision::MarkupPrecisions : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupPrecisions"); class Restriction { public: class RestrictionByVersion { @@ 
-65,7 +66,6 @@ class ov::pass::low_precision::MarkupPrecisions : public ov::pass::ModelPass { std::unordered_map precisionsByVersion; }; - OPENVINO_RTTI("MarkupPrecisions", "0"); explicit MarkupPrecisions(const std::vector& restrictions = {}, const std::vector& defaultPrecisions = { ov::element::u8, ov::element::i8 }); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp b/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp index a61473cf263963..aa0ee4b4191de0 100644 --- a/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp +++ b/src/common/low_precision_transformations/include/low_precision/markup_quantization_granularity.hpp @@ -34,6 +34,8 @@ class LP_TRANSFORMATIONS_API MarkupQuantizationGranularity; */ class ov::pass::low_precision::MarkupQuantizationGranularity : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::MarkupQuantizationGranularity"); + class PerTensorQuantization { public: explicit PerTensorQuantization(const bool versionIsRequired) : versionIsRequired(versionIsRequired) {} @@ -45,7 +47,6 @@ class ov::pass::low_precision::MarkupQuantizationGranularity : public ov::pass:: std::unordered_map> portsByVersion; }; - OPENVINO_RTTI("MarkupPerTensorQuantization", "0"); explicit MarkupQuantizationGranularity(const std::vector& restrictions = {}); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp index c17fa2faded476..98f91bc5155d7b 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_precisions.hpp @@ -31,7 +31,7 @@ class LP_TRANSFORMATIONS_API PropagatePrecisions; */ class ov::pass::low_precision::PropagatePrecisions : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PropagatePrecisions", "0"); + OPENVINO_MODEL_PASS_RTTI("low_precision::PropagatePrecisions"); PropagatePrecisions(const AttributeParameters& params = AttributeParameters()); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp index 16a014cda6ec04..9254e2fc007dab 100644 --- a/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/propagate_shared_value.hpp @@ -38,6 +38,7 @@ class LP_TRANSFORMATIONS_API PropagateSharedValue; template class ov::pass::low_precision::PropagateSharedValue : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("low_precision::PropagateSharedValue"); bool run_on_model(const std::shared_ptr& f) override { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "PropagateSharedValue"); diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp index 7593361f8dd71a..36cf9747f6d76b 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp +++ 
b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.hpp @@ -16,6 +16,7 @@ class SimpleLowPrecisionTransformer : public ov::pass::ModelPass{ public: + OPENVINO_MODEL_PASS_RTTI("SimpleLowPrecisionTransformer"); SimpleLowPrecisionTransformer( const std::vector& precisionRestrictions = {}, const std::vector& quantizationRestrictions = {}, diff --git a/src/common/offline_transformations/include/pruning.hpp b/src/common/offline_transformations/include/pruning.hpp index dd5374d7477ded..e573108a89eb86 100644 --- a/src/common/offline_transformations/include/pruning.hpp +++ b/src/common/offline_transformations/include/pruning.hpp @@ -67,7 +67,7 @@ class ov::pass::PropagateMasks : public ov::pass::GraphRewrite { */ class ov::pass::ShrinkWeights : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ShrinkWeights", "0"); + OPENVINO_MODEL_PASS_RTTI("ShrinkWeights"); bool run_on_model(const std::shared_ptr&) override; }; @@ -77,6 +77,6 @@ class ov::pass::ShrinkWeights : public ov::pass::ModelPass { */ class ov::pass::Pruning : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Pruning", "0"); + OPENVINO_MODEL_PASS_RTTI("Pruning"); bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/common/snippets/include/snippets/pass/align_element_types.hpp b/src/common/snippets/include/snippets/pass/align_element_types.hpp index 1d5a1fa9256c88..6261b2b87b2a65 100644 --- a/src/common/snippets/include/snippets/pass/align_element_types.hpp +++ b/src/common/snippets/include/snippets/pass/align_element_types.hpp @@ -19,7 +19,7 @@ namespace pass { */ class AlignElementTypes: public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignElementTypes"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::AlignElementTypes"); AlignElementTypes(std::vector input_precisions, std::vector output_precisions); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp b/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp index 4367567c9df1c7..9d23e462ddff94 100644 --- a/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp +++ b/src/common/snippets/include/snippets/pass/analyze_broadcastable_inputs.hpp @@ -21,7 +21,7 @@ namespace pass { */ class AnalyzeBroadcastableInputs : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AnalyzeBroadcastableInputs"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::AnalyzeBroadcastableInputs"); // [Index of Parameter -> Index of broadcastable dimension from end] using BroadcastableInputsMap = std::map; AnalyzeBroadcastableInputs(BroadcastableInputsMap& map); diff --git a/src/common/snippets/include/snippets/pass/canonicalization.hpp b/src/common/snippets/include/snippets/pass/canonicalization.hpp index 5c7acaa781d2b8..645184a55609ba 100644 --- a/src/common/snippets/include/snippets/pass/canonicalization.hpp +++ b/src/common/snippets/include/snippets/pass/canonicalization.hpp @@ -22,7 +22,7 @@ namespace pass { */ class Canonicalization: public ov::pass::ModelPass { public: - OPENVINO_RTTI("Canonicalization"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Canonicalization"); using BlockedShapeVector = op::Subgraph::BlockedShapeVector; using Layout = std::vector; explicit Canonicalization(const BlockedShapeVector& blocked_input_shapes); diff --git a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp index 1e4af6c04e22fa..9f2b0ab7cc4bf9 100644 --- 
a/src/common/snippets/include/snippets/pass/fq_decomposition.hpp +++ b/src/common/snippets/include/snippets/pass/fq_decomposition.hpp @@ -80,6 +80,8 @@ class FakeQuantizeDecomposition : public ov::pass::MatcherPass { */ class CommonFakeQuantizeDecomposition: public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("snippets::pass::CommonFakeQuantizeDecomposition"); + bool run_on_model(const std::shared_ptr& m) override; static bool is_supported_fq(const std::shared_ptr& fq); diff --git a/src/common/snippets/include/snippets/pass/hash.hpp b/src/common/snippets/include/snippets/pass/hash.hpp index 66126cd21bbe6d..fc3bd9a64d0bdc 100644 --- a/src/common/snippets/include/snippets/pass/hash.hpp +++ b/src/common/snippets/include/snippets/pass/hash.hpp @@ -18,7 +18,7 @@ namespace pass { */ class Hash : public ov::pass::ModelPass { public: - OPENVINO_RTTI("HashPass", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Hash"); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/snippets/include/snippets/pass/propagate_precision.hpp b/src/common/snippets/include/snippets/pass/propagate_precision.hpp index 6f805cb1b68808..66d0b28430dd5f 100644 --- a/src/common/snippets/include/snippets/pass/propagate_precision.hpp +++ b/src/common/snippets/include/snippets/pass/propagate_precision.hpp @@ -19,7 +19,7 @@ namespace pass { */ class PropagatePrecision: public ov::pass::ModelPass { public: - OPENVINO_RTTI("PropagatePrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::PropagatePrecision"); PropagatePrecision(const std::shared_ptr& target_machine); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/tokenization.hpp b/src/common/snippets/include/snippets/pass/tokenization.hpp index 24efcceec71a24..fc7ed8aace6d64 100644 --- a/src/common/snippets/include/snippets/pass/tokenization.hpp +++ b/src/common/snippets/include/snippets/pass/tokenization.hpp @@ -37,7 +37,7 @@ int64_t GetTopologicalOrder(const std::shared_ptr&); */ class EnumerateNodes : public ov::pass::ModelPass { public: - OPENVINO_RTTI("EnumerateNodes", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::EnumerateNodes"); EnumerateNodes() : ModelPass() {} bool run_on_model(const std::shared_ptr&) override; }; @@ -59,6 +59,8 @@ class EnumerateNodes : public ov::pass::ModelPass { */ class SnippetsTokenization : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("snippets::pass::SnippetsTokenization"); + /** * @interface Config * @brief Allow to adjust tokenization passes @@ -123,7 +125,6 @@ class SnippetsTokenization : public ov::pass::ModelPass { std::set m_mha_supported_transpose_ranks = { 3, 4 }; }; - OPENVINO_RTTI("SnippetsTokenization", "0"); SnippetsTokenization(const Config& config) : m_config(config) {} bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/common/snippets/include/snippets/pass/validate.hpp b/src/common/snippets/include/snippets/pass/validate.hpp index 1fedf8a2ce08d5..21d748eaa3afbb 100644 --- a/src/common/snippets/include/snippets/pass/validate.hpp +++ b/src/common/snippets/include/snippets/pass/validate.hpp @@ -17,7 +17,7 @@ namespace pass { */ class Validate: public ov::pass::ModelPass { public: - OPENVINO_RTTI("Validate", "0"); + OPENVINO_MODEL_PASS_RTTI("snippets::pass::Validate"); Validate(const std::shared_ptr& pass_config) : m_pass_config(pass_config) {} bool run_on_model(const std::shared_ptr& m) override; diff --git 
a/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp b/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp index d2ff059d5974ef..023937b79df48a 100644 --- a/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/change_placeholder_types.hpp @@ -20,7 +20,7 @@ class TRANSFORMATIONS_API ChangePlaceholderTypes; */ class ChangePlaceholderTypes : public ModelPass { public: - OPENVINO_RTTI("ChangePlaceholderTypes", "0"); + OPENVINO_MODEL_PASS_RTTI("ChangePlaceholderTypes"); explicit ChangePlaceholderTypes(const std::vector& params_with_custom_types) : m_params_with_custom_types(params_with_custom_types) {} bool run_on_model(const std::shared_ptr& model) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp b/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp index 7540275ce74d0f..35017f5984f9a1 100644 --- a/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/common_optimizations.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API CommonOptimizations; class ov::pass::CommonOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("CommonOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("CommonOptimizations"); bool run_on_model(const std::shared_ptr& f) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp index d1ec2069b3f621..ca176057972eaa 100644 --- a/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/dimension_tracking.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API FindBatch; class ov::pass::FindBatch : public ov::pass::ModelPass { public: - OPENVINO_RTTI("FindBatch"); + OPENVINO_MODEL_PASS_RTTI("FindBatch"); FindBatch(bool detach_detection_output = false, bool track = true) : track(track), detach_do(detach_detection_output) {} diff --git a/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp b/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp index 8058a01811d9bc..fd7ce8defba920 100644 --- a/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/fused_names_cleanup.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API FusedNamesCleanup; */ class ov::pass::FusedNamesCleanup : public ov::pass::ModelPass { public: - OPENVINO_RTTI("FusedNamesCleanup", "0"); + OPENVINO_MODEL_PASS_RTTI("FusedNamesCleanup"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp index 05e73456d46ee0..3e20e7535f8fed 100644 --- 
a/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/mark_precision_sensitive_shapeof_subgraphs.hpp @@ -26,7 +26,7 @@ class TRANSFORMATIONS_API MarkShapeOfSubgraphs; */ class ov::pass::MarkPrecisionSensitiveShapeOfSubgraphs : public ModelPass { public: - OPENVINO_RTTI("MarkPrecisionSensitiveShapeOfSubgraphs", "0"); + OPENVINO_MODEL_PASS_RTTI("MarkPrecisionSensitiveShapeOfSubgraphs"); MarkPrecisionSensitiveShapeOfSubgraphs(); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp b/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp index 833ce83e6cf065..0f40a4d81a1bde 100644 --- a/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/moc_legacy_transformations.hpp @@ -32,7 +32,7 @@ namespace pass { class MOCLegacyTransformations : public ModelPass { public: - OPENVINO_RTTI("MOCLegacyTransformations", "0"); + OPENVINO_MODEL_PASS_RTTI("MOCLegacyTransformations"); explicit MOCLegacyTransformations(const std::vector& params_with_custom_types) : m_params_with_custom_types(params_with_custom_types) {} bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp b/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp index 49893dfb220de6..b65c9b84456ff8 100644 --- a/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/moc_transformations.hpp @@ -24,11 +24,9 @@ class TRANSFORMATIONS_API MOCTransformations; */ class ov::pass::MOCTransformations : public ov::pass::ModelPass { - bool m_use_shapes; - bool m_low_precision_enabled; - public: - OPENVINO_RTTI("MOCTransformations", "0"); + OPENVINO_MODEL_PASS_RTTI("MOCTransformations"); + /** * use_shapes = True enables transformations which are depends on shapes and also it * enables ConstantFolding for all ShapeOf operations. 
@@ -41,4 +39,8 @@ class ov::pass::MOCTransformations : public ov::pass::ModelPass { m_low_precision_enabled(low_precision_enabled) {} bool run_on_model(const std::shared_ptr& m) override; + +private: + bool m_use_shapes; + bool m_low_precision_enabled; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp index 454378a0e9bbd1..cb642795254791 100644 --- a/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/optimize_strided_slice.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API SliceSequenceToSingleSlice; */ class ov::pass::UselessSliceEraser : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UselessSliceEraser", "0"); + OPENVINO_MODEL_PASS_RTTI("UselessSliceEraser"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -41,7 +41,7 @@ class ov::pass::UselessSliceEraser : public ov::pass::ModelPass { */ class ov::pass::GroupedStridedSliceOptimizer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("GroupedStridedSliceOptimizer", "0"); + OPENVINO_MODEL_PASS_RTTI("GroupedStridedSliceOptimizer"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -53,7 +53,7 @@ class ov::pass::GroupedStridedSliceOptimizer : public ov::pass::ModelPass { */ class ov::pass::GroupedSliceToVSplitOptimization : public ov::pass::ModelPass { public: - OPENVINO_RTTI("GroupedSliceToVSplitOptimization", "0"); + OPENVINO_MODEL_PASS_RTTI("GroupedSliceToVSplitOptimization"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -82,9 +82,9 @@ class ov::pass::SliceSequenceToSingleSlice : public ov::pass::MatcherPass { */ class ov::pass::StridedSliceOptimization : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("StridedSliceOptimization"); StridedSliceOptimization(bool use_shapes = true); - OPENVINO_RTTI("StridedSliceOptimization", "0"); bool run_on_model(const std::shared_ptr& m) override; private: diff --git a/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp b/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp index e571b9a41869f2..085da725a64233 100644 --- a/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/push_constant_to_subgraph.hpp @@ -17,7 +17,7 @@ namespace pass { */ class TRANSFORMATIONS_API PushConstantToSubgraph : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PushConstantToSubgraph", "0"); + OPENVINO_MODEL_PASS_RTTI("PushConstantToSubgraph"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp b/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp index 359d68f7c980c2..b5b35e44364e46 100644 --- a/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/remove_multi_subgraph_op_dangling_params.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API RemoveMultiSubGraphOpDanglingParamsResults; class 
ov::pass::RemoveMultiSubGraphOpDanglingParamsResults : public ov::pass::ModelPass { public: - OPENVINO_RTTI("RemoveMultiSubGraphOpDanglingParamsResults", "0"); + OPENVINO_MODEL_PASS_RTTI("RemoveMultiSubGraphOpDanglingParamsResults"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp b/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp index cfc5d789d9c07e..f0f9fe269e6206 100644 --- a/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/reverse_shape_and_type_infer.hpp @@ -20,7 +20,7 @@ class TRANSFORMATIONS_API ReverseShapeAndTypeInfer; */ class ov::pass::ReverseShapeAndTypeInfer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ReverseShapeAndTypeInfer", "0"); + OPENVINO_MODEL_PASS_RTTI("ReverseShapeAndTypeInfer"); bool run_on_model(const std::shared_ptr& f) override; private: diff --git a/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp b/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp index c91c27a464cada..1c56d61bb615c9 100644 --- a/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/ric_fusion.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API ReverseInputChannelsFusion; class ov::pass::ReverseInputChannelsFusion : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ReverseInputChannelsFusion", "0"); + OPENVINO_MODEL_PASS_RTTI("ReverseInputChannelsFusion"); bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp b/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp index 8e2a87502ebcc9..6ff8f611564db2 100644 --- a/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/shared_ops_optimization.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API SharedOpOptimization; */ class ov::pass::SharedOpOptimization : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SharedOpOptimization", "0"); + OPENVINO_MODEL_PASS_RTTI("SharedOpOptimization"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp index 509047578bd489..79e0ffd789bf7c 100644 --- a/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp +++ b/src/common/transformations/include/transformations/common_optimizations/simplify_shape_of_sub_graph.hpp @@ -41,7 +41,7 @@ class ov::pass::GroupedGatherElimination : public ov::pass::MatcherPass { */ class ov::pass::SimplifyShapeOfSubGraph : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SimplifyShapeOfSubGraph", "0"); + OPENVINO_MODEL_PASS_RTTI("SimplifyShapeOfSubGraph"); explicit SimplifyShapeOfSubGraph(bool use_shapes = true) : m_use_shapes(use_shapes){}; bool run_on_model(const std::shared_ptr& m) override; diff --git 
a/src/common/transformations/include/transformations/control_flow/unroll_if.hpp b/src/common/transformations/include/transformations/control_flow/unroll_if.hpp index e59b60106e2536..5dc95ee3eeb85a 100644 --- a/src/common/transformations/include/transformations/control_flow/unroll_if.hpp +++ b/src/common/transformations/include/transformations/control_flow/unroll_if.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API UnrollIf; class ov::pass::UnrollIf : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UnrollIf", "0"); + OPENVINO_MODEL_PASS_RTTI("UnrollIf"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp b/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp index 282aecab5c7f05..ce8b6419e6734e 100644 --- a/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp +++ b/src/common/transformations/include/transformations/control_flow/unroll_tensor_iterator.hpp @@ -28,6 +28,6 @@ class TRANSFORMATIONS_API UnrollTensorIterator; class ov::pass::UnrollTensorIterator : public ov::pass::ModelPass { public: - OPENVINO_RTTI("UnrollTensorIterator", "0"); + OPENVINO_MODEL_PASS_RTTI("UnrollTensorIterator"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/convert_precision.hpp b/src/common/transformations/include/transformations/convert_precision.hpp index 30f773da9e3be4..b411629ccbce77 100644 --- a/src/common/transformations/include/transformations/convert_precision.hpp +++ b/src/common/transformations/include/transformations/convert_precision.hpp @@ -79,7 +79,7 @@ using type_to_fuse_map = class ov::pass::ConvertPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertPrecision"); ConvertPrecision(ov::element::Type_t from, ov::element::Type_t to, type_to_fuse_map additional_type_to_fuse_map = {}, diff --git a/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp b/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp index 4fd93d7742ab67..e890b89794d862 100644 --- a/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/align_mixed_fp32_fp16_types.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API AlignMixedFP32FP16Types; */ class ov::pass::AlignMixedFP32FP16Types : public ov::pass::ModelPass { public: - OPENVINO_RTTI("AlignMixedFP32FP16Types", "0"); + OPENVINO_MODEL_PASS_RTTI("AlignMixedFP32FP16Types"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp b/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp index fbac44f637a2a2..c7c8e408da2209 100644 --- a/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/convert_compression_only_to_legacy.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertCompressedOnlyToLegacy; */ class ov::pass::ConvertCompressedOnlyToLegacy : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertCompressedOnlyToLegacy", "0"); + 
OPENVINO_MODEL_PASS_RTTI("ConvertCompressedOnlyToLegacy"); bool run_on_model(const std::shared_ptr& f) override; }; diff --git a/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp b/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp index 0ffce989f0d83c..d7f4e2991a3d07 100644 --- a/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp +++ b/src/common/transformations/include/transformations/fp16_compression/mark_subgraphs_to_keep_in_mixed_precision.hpp @@ -25,6 +25,6 @@ constexpr auto float16_min_normalized = float16::from_bits(0x0400); */ class ov::pass::MarkSugraphsToKeepInMixedPrecision : public ov::pass::ModelPass { public: - OPENVINO_RTTI("MarkSugraphsToKeepInMixedPrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("MarkSugraphsToKeepInMixedPrecision"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/hash.hpp b/src/common/transformations/include/transformations/hash.hpp index 73668c7e53548e..3d6cf8f46076dc 100644 --- a/src/common/transformations/include/transformations/hash.hpp +++ b/src/common/transformations/include/transformations/hash.hpp @@ -18,7 +18,7 @@ namespace pass { */ class TRANSFORMATIONS_API Hash : public ov::pass::ModelPass { public: - OPENVINO_RTTI("HashPass"); + OPENVINO_MODEL_PASS_RTTI("HashPass"); bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/common/transformations/include/transformations/init_node_info.hpp b/src/common/transformations/include/transformations/init_node_info.hpp index e458bcfdcd4bd4..9474edc00cf9e0 100644 --- a/src/common/transformations/include/transformations/init_node_info.hpp +++ b/src/common/transformations/include/transformations/init_node_info.hpp @@ -35,6 +35,6 @@ class TRANSFORMATIONS_API InitNodeInfo; */ class ov::pass::InitNodeInfo : public ov::pass::ModelPass { public: - OPENVINO_RTTI("InitNodeInfo", "0"); + OPENVINO_MODEL_PASS_RTTI("InitNodeInfo"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp index 9f4399804c50ff..362b946554e17d 100644 --- a/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/batch_norm_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API BatchNormDecomposition; class ov::pass::BatchNormDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BatchNormDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BatchNormDecomposition"); BatchNormDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp index 2a1234ef7c041e..f74f08a9b8c061 100644 --- a/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/bidirectional_sequences_decomposition.hpp @@ -31,7 +31,7 @@ class TRANSFORMATIONS_API BidirectionalRNNSequenceDecomposition; class ov::pass::BidirectionalLSTMSequenceDecomposition : public 
ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalLSTMSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalLSTMSequenceDecomposition"); BidirectionalLSTMSequenceDecomposition(); }; @@ -43,7 +43,7 @@ class ov::pass::BidirectionalLSTMSequenceDecomposition : public ov::pass::Matche class ov::pass::BidirectionalGRUSequenceDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalGRUSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalGRUSequenceDecomposition"); BidirectionalGRUSequenceDecomposition(); }; @@ -55,7 +55,7 @@ class ov::pass::BidirectionalGRUSequenceDecomposition : public ov::pass::Matcher class ov::pass::BidirectionalRNNSequenceDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BidirectionalRNNSequenceDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("BidirectionalRNNSequenceDecomposition"); BidirectionalRNNSequenceDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp index 40f4e32b60af7e..1db9706fb8c776 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_avgpool_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertAvgPool14ToAvgPool1 : public MatcherPass { public: - OPENVINO_RTTI("ConvertAvgPool14ToAvgPool1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertAvgPool14ToAvgPool1"); ConvertAvgPool14ToAvgPool1(); }; } // namespace pass diff --git a/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp b/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp index f2792b467a96e0..2ecc2b8f9ced75 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_batch_to_space.hpp @@ -33,7 +33,7 @@ class TRANSFORMATIONS_API ConvertBatchToSpace; class ov::pass::ConvertBatchToSpace : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBatchToSpace", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBatchToSpace"); explicit ConvertBatchToSpace(bool convert_by_elements = true) : MatcherPass() { if (convert_by_elements) convert_batch_to_space_by_elements(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp index 897bf9ea70fac0..a5e130e2389af2 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_bitwise_to_logical_bool.hpp @@ -18,22 +18,22 @@ class TRANSFORMATIONS_API ConvertBitwiseXorToLogicalXor; class ov::pass::ConvertBitwiseAndToLogicalAnd : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseAndToLogicalAnd", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseAndToLogicalAnd"); ConvertBitwiseAndToLogicalAnd(); }; class ov::pass::ConvertBitwiseNotToLogicalNot : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseNotToLogicalNot", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseNotToLogicalNot"); 
ConvertBitwiseNotToLogicalNot(); }; class ov::pass::ConvertBitwiseOrToLogicalOr : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseOrToLogicalOr", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseOrToLogicalOr"); ConvertBitwiseOrToLogicalOr(); }; class ov::pass::ConvertBitwiseXorToLogicalXor : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBitwiseXorToLogicalXor", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBitwiseXorToLogicalXor"); ConvertBitwiseXorToLogicalXor(); }; /** diff --git a/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp index 06687b9e07ba01..7518f26f5d0cbc 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_broadcast3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertBroadcast3; class ov::pass::ConvertBroadcast3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcast3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcast3"); ConvertBroadcast3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp b/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp index 5d5934b33e8216..5c1f374bcb724c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_broadcast_to_tiles.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertBroadcastToTiles; class ov::pass::ConvertBroadcastToTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcastToTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcastToTiles"); ConvertBroadcastToTiles(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp index dfe5e5e7424d90..5952fc114b76fd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertConvertLike; class ov::pass::ConvertConvertLike : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertConvertLike", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConvertLike"); ConvertConvertLike(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp index c4d95f1211bea5..bb6a593b588ffc 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertpromotetypes.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertConvertPromoteTypes; /// element type. 
class ov::pass::ConvertConvertPromoteTypes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertConvertPromoteTypes", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConvertPromoteTypes"); ConvertConvertPromoteTypes(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp b/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp index 7db239e18d265a..37cf1935d85fbe 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_deformable_conv_v8_to_v1.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertDeformableConv8To1; */ class ov::pass::ConvertDeformableConv8To1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDeformableConv8To1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDeformableConv8To1"); ConvertDeformableConv8To1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp b/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp index 481006b7a05822..dc6124c4f6be2e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_depth_to_space.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertDepthToSpace; class ov::pass::ConvertDepthToSpace : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDepthToSpace", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDepthToSpace"); ConvertDepthToSpace(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp b/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp index 66442bbdc123da..e0526bfd815745 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_divide.hpp @@ -21,12 +21,12 @@ class TRANSFORMATIONS_API ConvertDivideWithConstant; class ov::pass::ConvertDivide : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDivide", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDivide"); ConvertDivide(); }; class ov::pass::ConvertDivideWithConstant : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDivideWithConstant", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDivideWithConstant"); ConvertDivideWithConstant(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp index 6ddbff4b7991b6..cbe39419438c52 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_offsets15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3"); ConvertEmbeddingBagOffsets15ToEmbeddingBagOffsetsSum3(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp index a925928a28a3d8..eb8b5ddf93435c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_embedding_bag_packed15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3"); ConvertEmbeddingBagPacked15ToEmbeddingBagPackedSum3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp index 1b6fcfb2bb3684..388de0610ce4fa 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_compressed.hpp @@ -18,10 +18,11 @@ class TRANSFORMATIONS_API ConvertFullyConnectedToFullyConnectedCompressed; class ov::pass::ConvertFullyConnectedToFullyConnectedCompressed : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("ConvertFullyConnectedToFullyConnectedCompressed"); + using SupportsPredicate = std::function&, size_t, size_t, size_t)>; - OPENVINO_RTTI("ConvertFullyConnectedToFullyConnectedCompressed", "0"); ConvertFullyConnectedToFullyConnectedCompressed(const std::vector& supported_activation_types, const std::vector& supported_weights_types, SupportsPredicate supports_config = nullptr, diff --git a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp index 88990f92cb573c..b0f86055a4da17 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_fc_to_quantized_legacy.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertFCToFCQuantizedLegacy; class ov::pass::ConvertFCToFCQuantizedLegacy : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertFullyConnectedToFullyConnectedQuantized", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertFCToFCQuantizedLegacy"); ConvertFCToFCQuantizedLegacy(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp index 75f9dd967d48c8..9a1798e4319fe1 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_0d.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConvertGather0D; */ class ov::pass::ConvertGather0D : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather0D", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather0D"); ConvertGather0D(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp 
b/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp index dfdbed915679b2..966175079ad30d 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_downgrade.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API ConvertGather8ToGather7; */ class ov::pass::ConvertGather7ToGather1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather7ToGather1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather7ToGather1"); ConvertGather7ToGather1(); }; @@ -32,6 +32,6 @@ class ov::pass::ConvertGather7ToGather1 : public ov::pass::MatcherPass { */ class ov::pass::ConvertGather8ToGather7 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather8ToGather7", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather8ToGather7"); ConvertGather8ToGather7(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp index a916e9a4b91a44..edfdd3d5a07146 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_to_compressed.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertGatherToGatherCompressed; class ov::pass::ConvertGatherToGatherCompressed : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGatherToGatherCompressed", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGatherToGatherCompressed"); ConvertGatherToGatherCompressed(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp index ec8f8be61c3015..1c04190f4d7d0f 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gather_upgrade.hpp @@ -23,7 +23,7 @@ class TRANSFORMATIONS_API ConvertGather7ToGather8; */ class ov::pass::ConvertGather1ToGather7 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather1ToGather7", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather1ToGather7"); ConvertGather1ToGather7(); }; @@ -33,6 +33,6 @@ class ov::pass::ConvertGather1ToGather7 : public ov::pass::MatcherPass { */ class ov::pass::ConvertGather7ToGather8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGather7ToGather8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGather7ToGather8"); ConvertGather7ToGather8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp b/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp index 498872814f9cbb..dd9334381b3d8e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gelu.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertGELU; class ov::pass::ConvertGELU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGELU", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGELU"); ConvertGELU(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp 
b/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp index 9fe62aad7fbd8c..a2c82137387172 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_gp9_to_gp_ie_internal.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertGP9ToGPIEInternal; class ov::pass::ConvertGP9ToGPIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGP9ToGPIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGP9ToGPIEInternal"); ConvertGP9ToGPIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp index 8c136aa918f5f0..edca5bee3f215b 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_interpolate11_downgrade.hpp @@ -16,7 +16,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertInterpolate11ToInterpolate4 : public MatcherPass { public: - OPENVINO_RTTI("ConvertInterpolate11ToInterpolate4", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInterpolate11ToInterpolate4"); ConvertInterpolate11ToInterpolate4(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp b/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp index f3b07c36962ccd..48822472ecbfd9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_interpolate1_to_interpolate4.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API ConvertInterpolate1ToInterpolate4; */ class ov::pass::ConvertInterpolate1ToInterpolate4 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertInterpolate1ToInterpolate4", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInterpolate1ToInterpolate4"); ConvertInterpolate1ToInterpolate4(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp b/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp index 6d414139ad6f57..ea52625c9df1ae 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_matrix_nms_to_matrix_nms_ie.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertMatrixNmsToMatrixNmsIE; class ov::pass::ConvertMatrixNmsToMatrixNmsIE : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMatrixNmsToMatrixNmsIE", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMatrixNmsToMatrixNmsIE"); ConvertMatrixNmsToMatrixNmsIE(bool force_i32_output_type = true); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp index 953cbab8a801a7..fc778773825c5c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_downgrade.hpp @@ -22,7 +22,7 @@ class TRANSFORMATIONS_API ConvertMaxPool14ToMaxPool8; */ class ov::pass::ConvertMaxPool8ToMaxPool1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool8ToMaxPool1"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool8ToMaxPool1"); ConvertMaxPool8ToMaxPool1(); }; @@ -32,6 +32,6 @@ class ov::pass::ConvertMaxPool8ToMaxPool1 : public ov::pass::MatcherPass { */ class ov::pass::ConvertMaxPool14ToMaxPool8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool14ToMaxPool8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool14ToMaxPool8"); ConvertMaxPool14ToMaxPool8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp index 6e7eed21342584..538b04bc9e254c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_maxpool_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertMaxPool1ToMaxPool8; class ov::pass::ConvertMaxPool1ToMaxPool8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMaxPool1ToMaxPool8"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMaxPool1ToMaxPool8"); ConvertMaxPool1ToMaxPool8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp b/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp index d092ffec29d8c4..95e9a8e4f171db 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_minimum_to_power_and_max.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMinimum; class ov::pass::ConvertMinimum : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMinimum", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMinimum"); ConvertMinimum(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp b/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp index 0fbd3bba723ecb..8c6cedbd67d635 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_mod.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMod; class ov::pass::ConvertMod : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMod", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMod"); ConvertMod(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp index 361d2352928b7e..8f003a05f874dd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_to_multiclass_nms_ie.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertMulticlassNmsToMulticlassNmsIE; class ov::pass::ConvertMulticlassNmsToMulticlassNmsIE : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMulticlassNmsToMulticlassNmsIE", "0"); +
OPENVINO_MATCHER_PASS_RTTI("ConvertMulticlassNmsToMulticlassNmsIE"); ConvertMulticlassNmsToMulticlassNmsIE(bool force_i32_output_type = true); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp index 1f3e9e9b5caf08..425ec26e78ccad 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_multiclass_nms_upgrade.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertMulticlassNms8ToMulticlassNms9; class ov::pass::ConvertMulticlassNms8ToMulticlassNms9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMulticlassNms8ToMulticlassNms9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMulticlassNms8ToMulticlassNms9"); ConvertMulticlassNms8ToMulticlassNms9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp b/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp index d958d166f7270d..9af05fa1f05891 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_mvn1_to_mvn6.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertMVN1ToMVN6; */ class ov::pass::ConvertMVN1ToMVN6 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMVN1ToMVN6", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMVN1ToMVN6"); ConvertMVN1ToMVN6(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp b/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp index b985f4bfe7f639..71df6767812c35 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_negative.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertNegative; class ov::pass::ConvertNegative : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNegative", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNegative"); ConvertNegative(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp index fee970b6b44bfc..5bfd769e122e6b 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_nms9_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMS9ToNMSIEInternal; class ov::pass::ConvertNMS9ToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS9ToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS9ToNMSIEInternal"); ConvertNMS9ToNMSIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp index dcee03e513b38e..bf06c81e08e197 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp +++ 
b/src/common/transformations/include/transformations/op_conversions/convert_nms_rotated_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMSRotatedToNMSIEInternal; class ov::pass::ConvertNMSRotatedToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMSRotatedToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMSRotatedToNMSIEInternal"); ConvertNMSRotatedToNMSIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp b/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp index ca205fe9078f7a..e8e34ce249d241 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_nms_to_nms_ie_internal.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertNMSToNMSIEInternal; class ov::pass::ConvertNMSToNMSIEInternal : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMSToNMSIEInternal", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMSToNMSIEInternal"); ConvertNMSToNMSIEInternal(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp index 36ca9112b07829..263c71fb83dc76 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_pad12_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertPad12ToPad1 : public MatcherPass { public: - OPENVINO_RTTI("ConvertPad12ToPad1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPad12ToPad1"); ConvertPad12ToPad1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp b/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp index a89386bd7048cb..7c3f44a439ee6e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_pad_to_group_conv.hpp @@ -30,6 +30,6 @@ class TRANSFORMATIONS_API ConvertPadToGroupConvolution; class ov::pass::ConvertPadToGroupConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertPadToGroupConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPadToGroupConvolution"); ConvertPadToGroupConvolution(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp index 8a4c0ee3110c1b..bc0d79220a5474 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_5.hpp @@ -22,18 +22,18 @@ class TRANSFORMATIONS_API ConvertNMS4ToNMS5; class ov::pass::ConvertNMS1ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS1ToNMS5", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS1ToNMS5"); ConvertNMS1ToNMS5(); }; class ov::pass::ConvertNMS3ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS3ToNMS5", "0"); + 
OPENVINO_MATCHER_PASS_RTTI("ConvertNMS3ToNMS5"); ConvertNMS3ToNMS5(); }; class ov::pass::ConvertNMS4ToNMS5 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS4ToNMS5", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS4ToNMS5"); ConvertNMS4ToNMS5(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp index f77db7a03f8606..feff6577d6cb07 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_previous_nms_to_nms_9.hpp @@ -23,24 +23,24 @@ class TRANSFORMATIONS_API ConvertNMS5ToNMS9; class ov::pass::ConvertNMS1ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS1ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS1ToNMS9"); ConvertNMS1ToNMS9(); }; class ov::pass::ConvertNMS3ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS3ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS3ToNMS9"); ConvertNMS3ToNMS9(); }; class ov::pass::ConvertNMS4ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS4ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS4ToNMS9"); ConvertNMS4ToNMS9(); }; class ov::pass::ConvertNMS5ToNMS9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertNMS5ToNMS9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertNMS5ToNMS9"); ConvertNMS5ToNMS9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp b/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp index 2725d789a83a70..435a96b8e1cbc7 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_prior_box_v8_to_v0.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertPriorBox8To0; */ class ov::pass::ConvertPriorBox8To0 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertPriorBox8To0", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertPriorBox8To0"); ConvertPriorBox8To0(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp index 662660b926aa52..36d2b052243382 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_pooling.hpp @@ -35,6 +35,8 @@ class TRANSFORMATIONS_API ConvertReduceSumToPooling; class ConvertReduceBase : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("ConvertReduceBase"); + template ov::matcher_pass_callback convert_reduce_to_pooling(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp index 15e303f0c26493..f020e768be2feb 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_reduce_to_reshape.hpp @@ -32,6 +32,8 @@ class TRANSFORMATIONS_API ConvertReduceLogicalOrToReshape; class 
CvtReduceBase : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("CvtReduceBase"); + template <class T> ov::matcher_pass_callback convert_reduce_to_reshape(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp index 71f6becff0ba26..77c99e37b66533 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v3_to_v9.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertROIAlign3To9; */ class ov::pass::ConvertROIAlign3To9 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertROIAlign3To9", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertROIAlign3To9"); ConvertROIAlign3To9(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp index d06dc424ff436e..11b9567e78eb3e 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_roi_align_v9_to_v3.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertROIAlign9To3; */ class ov::pass::ConvertROIAlign9To3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertROIAlign9To3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertROIAlign9To3"); ConvertROIAlign9To3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp index d0a738c14fab9c..f8a2eb828e97b9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_to_scatter.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ConvertScatterElementsToScatter; */ class ov::pass::ConvertScatterElementsToScatter : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertScatterElementsToScatter", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertScatterElementsToScatter"); ConvertScatterElementsToScatter(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp index 4af4e18c706e93..7e6d4613f109a8 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_elements_update12_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertScatterElementsUpdate12ToScatterElementsUpdate3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertScatterElementsUpdate12ToScatterElementsUpdate3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertScatterElementsUpdate12ToScatterElementsUpdate3"); ConvertScatterElementsUpdate12ToScatterElementsUpdate3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp
b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp index dfaab66e22501c..4af9172e6351cb 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp @@ -16,7 +16,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertScatterNDUpdate15ToScatterNDUpdate3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertScatterNDUpdate15ToScatterNDUpdate3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertScatterNDUpdate15ToScatterNDUpdate3"); ConvertScatterNDUpdate15ToScatterNDUpdate3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp index 44502b42174de6..46a7e8ff0317e9 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp @@ -29,7 +29,7 @@ class TRANSFORMATIONS_API ConvertSequenceToTensorIterator; class ov::pass::ConvertRNNSequenceToTensorIterator : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertRNNSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertRNNSequenceToTensorIterator"); ConvertRNNSequenceToTensorIterator(); }; @@ -41,7 +41,7 @@ class ov::pass::ConvertRNNSequenceToTensorIterator : public ov::pass::MatcherPas class ov::pass::ConvertGRUSequenceToTensorIterator : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGRUSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGRUSequenceToTensorIterator"); ConvertGRUSequenceToTensorIterator(); }; @@ -53,7 +53,7 @@ class ov::pass::ConvertGRUSequenceToTensorIterator : public ov::pass::MatcherPas class ov::pass::ConvertLSTMSequenceToTensorIterator : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLSTMSequenceToTensorIterator", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertLSTMSequenceToTensorIterator"); ConvertLSTMSequenceToTensorIterator(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp index 0aceb9e99614fb..831ba981cb16d6 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_shapeof3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertShapeOf3; class ov::pass::ConvertShapeOf3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertShapeOf3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertShapeOf3"); ConvertShapeOf3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp index 05b2d2607464b8..ac03068aa78298 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_shuffle_channels3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertShuffleChannels3; class ov::pass::ConvertShuffleChannels3 : public ov::pass::MatcherPass { public: - 
OPENVINO_RTTI("ConvertShuffleChannels3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertShuffleChannels3"); ConvertShuffleChannels3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp b/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp index 8d396bca6ccd1e..b32c277b1a2b23 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_slice_to_strided_slice.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API SliceToStridedSlice; */ class ov::pass::SliceToStridedSlice : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SliceToStridedSlice", "0"); + OPENVINO_MATCHER_PASS_RTTI("SliceToStridedSlice"); SliceToStridedSlice(bool use_shapes); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp b/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp index 020b4e236fcac5..58dd6dbc39ac49 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_slicescatter.hpp @@ -17,6 +17,6 @@ class TRANSFORMATIONS_API ConvertSliceScatter; class ov::pass::ConvertSliceScatter : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSliceScatter", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSliceScatter"); ConvertSliceScatter(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp index 701f2cb94e9857..b8cd2907f82cdd 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_softmax_downgrade.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertSoftMax8ToSoftMax1; */ class ov::pass::ConvertSoftMax8ToSoftMax1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSoftMax8ToSoftMax1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSoftMax8ToSoftMax1"); ConvertSoftMax8ToSoftMax1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp index 2164eac6052384..a7a0ef5b01aee1 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_softmax_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertSoftMax1ToSoftMax8; class ov::pass::ConvertSoftMax1ToSoftMax8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSoftMax1ToSoftMax8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSoftMax1ToSoftMax8"); ConvertSoftMax1ToSoftMax8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp b/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp index 97bda0273c522f..235a56b728876a 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_space_to_batch.hpp @@ -33,7 +33,7 @@ 
class TRANSFORMATIONS_API ConvertSpaceToBatch; class ov::pass::ConvertSpaceToBatch : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSpaceToBatch", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSpaceToBatch"); explicit ConvertSpaceToBatch(bool convert_by_elements = true) : MatcherPass() { if (convert_by_elements) convert_space_to_batch_by_elements(); diff --git a/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp b/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp index da97add26411a8..6edf57f4c254fe 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_space_to_depth.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertSpaceToDepth; class ov::pass::ConvertSpaceToDepth : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSpaceToDepth", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSpaceToDepth"); ConvertSpaceToDepth(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp index c2ebfbc0f3138b..d35858ce10b3f4 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_squeeze15_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertSqueeze15ToSqueeze0 : public MatcherPass { public: - OPENVINO_RTTI("ConvertSqueeze15ToSqueeze0", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSqueeze15ToSqueeze0"); ConvertSqueeze15ToSqueeze0(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp b/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp index 5b2a5d0c36abdf..c6baf673efe95c 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_subtract.hpp @@ -21,12 +21,12 @@ class TRANSFORMATIONS_API ConvertSubtractWithConstant; class ov::pass::ConvertSubtract : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSubtract", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSubtract"); ConvertSubtract(); }; class ov::pass::ConvertSubtractWithConstant : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertSubtractWithConstant", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertSubtractWithConstant"); ConvertSubtractWithConstant(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp index e729d735c652d1..fb53cc81743ec4 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_ti_to_sequences.hpp @@ -36,7 +36,7 @@ class TRANSFORMATIONS_API FuseLSTMSequencesToBidirectionalLSTMSequence; class ov::pass::ConvertTensorIteratorToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToLSTMSequence"); ConvertTensorIteratorToLSTMSequence(); }; @@ -48,7 +48,7 @@ class 
ov::pass::ConvertTensorIteratorToLSTMSequence : public ov::pass::MatcherPa class ov::pass::ConvertTensorIteratorToRNNSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToRNNSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToRNNSequence"); ConvertTensorIteratorToRNNSequence(); }; @@ -60,7 +60,7 @@ class ov::pass::ConvertTensorIteratorToRNNSequence : public ov::pass::MatcherPas class ov::pass::ConvertTensorIteratorToGRUSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTensorIteratorToGRUSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTensorIteratorToGRUSequence"); ConvertTensorIteratorToGRUSequence(); }; @@ -72,13 +72,13 @@ class ov::pass::ConvertTensorIteratorToSequence : public GraphRewrite { class ov::pass::ConvertLoopWithSlicedInputConcatOutputToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLoopWithSlicedInputConcatOutputToLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertLoopWithSlicedInputConcatOutputToLSTMSequence"); ConvertLoopWithSlicedInputConcatOutputToLSTMSequence(); }; class ov::pass::ConvertLoopWithScatterUpdateToLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertLoopWithScatterUpdateToLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertLoopWithScatterUpdateToLSTMSequence"); ConvertLoopWithScatterUpdateToLSTMSequence(); }; @@ -101,7 +101,7 @@ class ov::pass::ConvertLoopToLSTMSequence : public ov::pass::GraphRewrite { */ class ov::pass::FuseReverseLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseReverseLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseReverseLSTMSequence"); FuseReverseLSTMSequence(); }; @@ -111,6 +111,6 @@ class ov::pass::FuseReverseLSTMSequence : public ov::pass::MatcherPass { */ class ov::pass::FuseLSTMSequencesToBidirectionalLSTMSequence : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseLSTMSequencesToBidirectionalLSTMSequence", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseLSTMSequencesToBidirectionalLSTMSequence"); FuseLSTMSequencesToBidirectionalLSTMSequence(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp index e6e8340e45df94..fd5be9a10c10ef 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_topk11_downgrade.hpp @@ -15,7 +15,7 @@ namespace pass { */ class TRANSFORMATIONS_API ConvertTopK11ToTopK3 : public MatcherPass { public: - OPENVINO_RTTI("ConvertTopK11ToTopK3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTopK11ToTopK3"); ConvertTopK11ToTopK3(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp b/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp index 0b61bf85bff28f..0f3f6ea160f825 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_topk3.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API ConvertTopK3; class ov::pass::ConvertTopK3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTopK3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTopK3"); ConvertTopK3(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp b/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp index ee7bf0f55615bd..0940f5f1a67b51 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_xor_to_logical_xor.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API ConvertXorToLogicalXor; */ class ov::pass::ConvertXorToLogicalXor : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertXorToLogicalXor", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertXorToLogicalXor"); ConvertXorToLogicalXor(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp index 8f4e6ad237a6b0..b730f78b2291e8 100644 --- a/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/detection_output_downgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertDetectionOutput8ToDetectionOutput1; */ class ov::pass::ConvertDetectionOutput8ToDetectionOutput1 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDetectionOutput8ToDetectionOutput1", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDetectionOutput8ToDetectionOutput1"); ConvertDetectionOutput8ToDetectionOutput1(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp b/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp index 8693fb206ed2cd..de92f382af92b3 100644 --- a/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/detection_output_upgrade.hpp @@ -22,6 +22,6 @@ class TRANSFORMATIONS_API ConvertDetectionOutput1ToDetectionOutput8; */ class ov::pass::ConvertDetectionOutput1ToDetectionOutput8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertDetectionOutput1ToDetectionOutput8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertDetectionOutput1ToDetectionOutput8"); ConvertDetectionOutput1ToDetectionOutput8(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp index 78accf3f0b4877..e67367a2e93bab 100644 --- a/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/einsum_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API EinsumDecomposition; */ class ov::pass::EinsumDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EinsumDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("EinsumDecomposition"); EinsumDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp index 15c53bc9cf6f30..29913d429b8462 100644 --- a/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/eye_decomposition.hpp @@ -21,6 +21,6 @@ class 
TRANSFORMATIONS_API EyeDecomposition; */ class ov::pass::EyeDecomposition : public MatcherPass { public: - OPENVINO_RTTI("EyeDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("EyeDecomposition"); EyeDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp index d938a9b70687e2..d099c268d2d7b0 100644 --- a/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/fq_decomposition.hpp @@ -44,6 +44,6 @@ class TRANSFORMATIONS_API FakeQuantizeDecomposition; class ov::pass::FakeQuantizeDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FakeQuantizeDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("FakeQuantizeDecomposition"); FakeQuantizeDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp index d08c3877de26a1..be26ea4c14625c 100644 --- a/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/gelu7_downgrade.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API Gelu7Downgrade; */ class ov::pass::Gelu7Downgrade : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("Gelu7Downgrade", "0"); + OPENVINO_MATCHER_PASS_RTTI("Gelu7Downgrade"); Gelu7Downgrade(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp index 8df9c220a9de75..47c908eb8ac6f5 100644 --- a/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/group_normalization_decomposition.hpp @@ -18,6 +18,6 @@ class TRANSFORMATIONS_API GroupNormalizationDecomposition; // This transformation expresses GroupNormalization with a sub-graph of OpenVINO operations class ov::pass::GroupNormalizationDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupNormalizationDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("GroupNormalizationDecomposition"); GroupNormalizationDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp index ce4387293f97ad..1060d266b7fd0a 100644 --- a/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/gru_cell_decomposition.hpp @@ -36,6 +36,6 @@ class TRANSFORMATIONS_API GRUCellDecomposition; class ov::pass::GRUCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GRUCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("GRUCellDecomposition"); GRUCellDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp index b635a08350922b..b7636aeb5d6d68 100644 --- 
a/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hard_sigmoid_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HardSigmoidDecomposition; */ class ov::pass::HardSigmoidDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HardSigmoidDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HardSigmoidDecomposition"); HardSigmoidDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp index 5a455c37253afe..e597bc75f8600d 100644 --- a/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hsigmoid_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HSigmoidDecomposition; */ class ov::pass::HSigmoidDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSigmoidDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSigmoidDecomposition"); HSigmoidDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp index e6168109d3c89b..8d05edc3afa650 100644 --- a/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/hswish_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API HSwishDecomposition; */ class ov::pass::HSwishDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("HSwishDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("HSwishDecomposition"); HSwishDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp index f972a0cdd8fa76..84e444c12fccec 100644 --- a/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/log_softmax_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API LogSoftmaxDecomposition; */ class ov::pass::LogSoftmaxDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LogSoftmaxDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("LogSoftmaxDecomposition"); LogSoftmaxDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp index 08b109533738cf..94077e21b472cb 100644 --- a/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/lstm_cell_decomposition.hpp @@ -37,6 +37,6 @@ class TRANSFORMATIONS_API LSTMCellDecomposition; class ov::pass::LSTMCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("LSTMCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("LSTMCellDecomposition"); LSTMCellDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp 
b/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp index f8335f5f4546f2..abacd721fa23a7 100644 --- a/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/mvn6_decomposition.hpp @@ -23,6 +23,6 @@ class TRANSFORMATIONS_API MVN6Decomposition; */ class ov::pass::MVN6Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MVN6Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("MVN6Decomposition"); MVN6Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp index ae058a3e8cba2f..18f071f8f6fb03 100644 --- a/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/normalize_l2_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API NormalizeL2Decomposition; */ class ov::pass::NormalizeL2Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NormalizeL2Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("NormalizeL2Decomposition"); NormalizeL2Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp index 506853e003e33a..1d8cc22089a93d 100644 --- a/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/reduce_l1_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ReduceL1Decomposition; */ class ov::pass::ReduceL1Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReduceL1Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceL1Decomposition"); ReduceL1Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp index dab02ff58f2f5c..8bf9955d523593 100644 --- a/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/reduce_l2_decomposition.hpp @@ -24,6 +24,6 @@ class TRANSFORMATIONS_API ReduceL2Decomposition; */ class ov::pass::ReduceL2Decomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ReduceL2Decomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ReduceL2Decomposition"); ReduceL2Decomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp index ce651a47510577..475f2bf1cdf73e 100644 --- a/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/rnn_cell_decomposition.hpp @@ -31,6 +31,6 @@ class TRANSFORMATIONS_API RNNCellDecomposition; class ov::pass::RNNCellDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RNNCellDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("RNNCellDecomposition"); RNNCellDecomposition(); }; diff --git 
a/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp index e52c6ba46838b9..f86175c617c0fc 100644 --- a/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/scaled_dot_product_attention_decomposition.hpp @@ -18,7 +18,7 @@ class TRANSFORMATIONS_API ScaledDotProductAttentionDecomposition; class ov::pass::ScaledDotProductAttentionDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ScaledDotProductAttentionDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("ScaledDotProductAttentionDecomposition"); ScaledDotProductAttentionDecomposition(); std::shared_ptr<ov::Node> decompose(std::shared_ptr<ov::Node> node); }; diff --git a/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp b/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp index 70cd9bbe0162c5..e900f3c3d213f7 100644 --- a/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp +++ b/src/common/transformations/include/transformations/op_conversions/simplify_ctc_greedy_decoder_seq_len.hpp @@ -37,6 +37,6 @@ class TRANSFORMATIONS_API SimplifyCTCGreedyDecoderSeqLen; */ class ov::pass::SimplifyCTCGreedyDecoderSeqLen : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SimplifyCTCGreedyDecoderSeqLen", "0"); + OPENVINO_MATCHER_PASS_RTTI("SimplifyCTCGreedyDecoderSeqLen"); SimplifyCTCGreedyDecoderSeqLen(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp index a1ab4be69fdc62..0e13fd516b13e7 100644 --- a/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/softmax_decomposition.hpp @@ -69,6 +69,6 @@ class TRANSFORMATIONS_API SoftmaxDecomposition; class ov::pass::SoftmaxDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftmaxDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftmaxDecomposition"); SoftmaxDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp index 3a7cca9ff5c8b1..ef52b8ab922d3b 100644 --- a/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/softplus_decomposition.hpp @@ -25,6 +25,6 @@ class TRANSFORMATIONS_API SoftPlusDecomposition; */ class ov::pass::SoftPlusDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftPlusDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftPlusDecomposition"); SoftPlusDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp index bfd026f21eb111..300761470b73ce 100644 --- a/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp +++
b/src/common/transformations/include/transformations/op_conversions/softsign_decomposition.hpp @@ -40,6 +40,6 @@ class TRANSFORMATIONS_API SoftSignDecomposition; class ov::pass::SoftSignDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SoftSignDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("SoftSignDecomposition"); SoftSignDecomposition(); }; diff --git a/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp b/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp index 02f889e7d3122a..6169a0a512e4f3 100644 --- a/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp +++ b/src/common/transformations/include/transformations/op_conversions/unique_decomposition.hpp @@ -21,6 +21,6 @@ class TRANSFORMATIONS_API UniqueDecomposition; // This transformation expresses Unique with a sub-graph of OpenVINO operations class ov::pass::UniqueDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UniqueDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("UniqueDecomposition"); UniqueDecomposition(); }; diff --git a/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp b/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp index 8b523bc663912d..e3d511d396c510 100644 --- a/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp +++ b/src/common/transformations/include/transformations/opset_conversions/convert_opset2_to_opset1.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertOpSet2ToOpSet1; class ov::pass::ConvertOpSet2ToOpSet1 : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertOpSet2ToOpSet1", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertOpSet2ToOpSet1"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; diff --git a/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp b/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp index 8a44b0d24ad6e4..f5e4faeeca93f8 100644 --- a/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp +++ b/src/common/transformations/include/transformations/opset_conversions/convert_opset3_to_opset2.hpp @@ -19,6 +19,6 @@ class TRANSFORMATIONS_API ConvertOpSet3ToOpSet2; class ov::pass::ConvertOpSet3ToOpSet2 : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ConvertOpSet3ToOpSet2", "0"); + OPENVINO_MODEL_PASS_RTTI("ConvertOpSet3ToOpSet2"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; diff --git a/src/common/transformations/include/transformations/resolve_names_collisions.hpp b/src/common/transformations/include/transformations/resolve_names_collisions.hpp index f1d1f18a505441..8af93bd055043f 100644 --- a/src/common/transformations/include/transformations/resolve_names_collisions.hpp +++ b/src/common/transformations/include/transformations/resolve_names_collisions.hpp @@ -24,7 +24,7 @@ namespace pass { */ class TRANSFORMATIONS_API ResolveNameCollisions : public ModelPass { public: - OPENVINO_RTTI("ResolveNameCollisions", "0"); + OPENVINO_MODEL_PASS_RTTI("ResolveNameCollisions"); ResolveNameCollisions() = default; explicit ResolveNameCollisions(bool resolve_all_names) : m_resolve_all_names(resolve_all_names) {} bool run_on_model(const std::shared_ptr<ov::Model>& model) override; diff --git
a/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp b/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp index 55f80ace7812e3..6b93dd465c3ecd 100644 --- a/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/lstm_states_broadcast.hpp @@ -26,6 +26,6 @@ class TRANSFORMATIONS_API LSTMStatesBroadcast; class ov::pass::LSTMStatesBroadcast : public ov::pass::ModelPass { public: - OPENVINO_RTTI("LSTMStatesBroadcast", "0"); + OPENVINO_MODEL_PASS_RTTI("LSTMStatesBroadcast"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; diff --git a/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp b/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp index 55bdf523762d91..970d64447c798a 100644 --- a/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp +++ b/src/common/transformations/include/transformations/smart_reshape/smart_reshape.hpp @@ -20,6 +20,6 @@ class TRANSFORMATIONS_API SmartReshape; class ov::pass::SmartReshape : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SmartReshape", "0"); + OPENVINO_MODEL_PASS_RTTI("SmartReshape"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp b/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp index 179bc7d6cfcf52..323a1218bfdf4a 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/symbol_optimization.hpp @@ -21,7 +21,7 @@ class TRANSFORMATIONS_API OptimizeSymbolsUsedAsValues; */ class ov::pass::ApplySymbolEquivalence : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ApplySymbolEquivalence", "0"); + OPENVINO_MODEL_PASS_RTTI("ApplySymbolEquivalence"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; @@ -32,6 +32,6 @@ class ov::pass::ApplySymbolEquivalence : public ov::pass::ModelPass { */ class ov::pass::OptimizeSymbolsUsedAsValues : public ov::pass::ModelPass { public: - OPENVINO_RTTI("OptimizeSymbolsUsedAsValues", "0"); + OPENVINO_MODEL_PASS_RTTI("OptimizeSymbolsUsedAsValues"); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; }; diff --git a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp index 17a5d8503cbcb9..c6a99c90122544 100644 --- a/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp +++ b/src/common/transformations/include/transformations/symbolic_transformations/symbolic_optimizations.hpp @@ -24,7 +24,7 @@ class TRANSFORMATIONS_API LabelResolvingThroughSelect; */ class ov::pass::SymbolicOptimizations : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SymbolicOptimizations", "0"); + OPENVINO_MODEL_PASS_RTTI("SymbolicOptimizations"); explicit SymbolicOptimizations(bool full_run = true); bool run_on_model(const std::shared_ptr<ov::Model>& m) override; std::shared_ptr<ov::pass::Manager> get_manager() { @@ -42,7 +42,7 @@ class ov::pass::SymbolicOptimizations : public ov::pass::ModelPass { */ class ov::pass::SymbolicPropagation : public ov::pass::ModelPass { public: -
OPENVINO_RTTI("SymbolicPropagation"); + OPENVINO_MODEL_PASS_RTTI("SymbolicPropagation"); bool run_on_model(const std::shared_ptr& m) override; }; @@ -60,4 +60,4 @@ class ov::pass::LabelResolvingThroughSelect : public ov::pass::MatcherPass { public: OPENVINO_RTTI("LabelResolvingThroughSelect", "0"); LabelResolvingThroughSelect(); -}; \ No newline at end of file +}; diff --git a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp index c794608798a4a4..09d4d5819322a9 100644 --- a/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp +++ b/src/common/transformations/include/transformations/transpose_sinking/ts_general.hpp @@ -52,6 +52,6 @@ class ov::pass::transpose_sinking::TSGeneralBackward : public ov::pass::GraphRew */ class ov::pass::transpose_sinking::TSGeneral : public ov::pass::ModelPass { public: - OPENVINO_RTTI("TSGeneral", "0"); + OPENVINO_MODEL_PASS_RTTI("TSGeneral"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/common/transformations/include/transformations/utils/print_model.hpp b/src/common/transformations/include/transformations/utils/print_model.hpp index 29340d60727c82..0829cd7e320e88 100644 --- a/src/common/transformations/include/transformations/utils/print_model.hpp +++ b/src/common/transformations/include/transformations/utils/print_model.hpp @@ -390,7 +390,7 @@ void dump_cpp_style(std::ostream& os, const std::shared_ptr& model) { class OPENVINO_API PrintModel : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::pass::PrintModel"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::PrintModel"); PrintModel(std::string file_name) { static int dump_index = 0; diff --git a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp index 53f26e00136ee9..4e9715883ec9f8 100644 --- a/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/ric_fusion.cpp @@ -804,7 +804,7 @@ class ConvertPassThrough : public ov::pass::MatcherPass { class Constant : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Constant", "0"); + OPENVINO_MODEL_PASS_RTTI("Constant"); Constant() = default; bool run_on_model(const std::shared_ptr& model) override { RUN_ON_FUNCTION_SCOPE(Constant); diff --git a/src/core/include/openvino/pass/constant_folding.hpp b/src/core/include/openvino/pass/constant_folding.hpp index 134f02c590974f..6b2e763e49cb1a 100644 --- a/src/core/include/openvino/pass/constant_folding.hpp +++ b/src/core/include/openvino/pass/constant_folding.hpp @@ -18,7 +18,7 @@ namespace pass { */ class OPENVINO_API ConstantFolding : public ModelPass { public: - OPENVINO_RTTI("ConstantFolding"); + OPENVINO_MODEL_PASS_RTTI("ConstantFolding"); bool run_on_model(const std::shared_ptr& model) override; protected: diff --git a/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp b/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp index ae9bb25efb4a87..ebae49b7a1fa05 100644 --- a/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp +++ b/src/core/include/openvino/pass/convert_fp32_to_fp16.hpp @@ -14,7 +14,7 @@ namespace pass { */ class OPENVINO_API ConvertFP32ToFP16 : public ModelPass { public: - OPENVINO_RTTI("ConvertFP32ToFP16"); + OPENVINO_MODEL_PASS_RTTI("ConvertFP32ToFP16"); bool run_on_model(const std::shared_ptr&) 
override; }; } // namespace pass diff --git a/src/core/include/openvino/pass/graph_rewrite.hpp b/src/core/include/openvino/pass/graph_rewrite.hpp index 3fd801235c31a2..ec8e1339912513 100644 --- a/src/core/include/openvino/pass/graph_rewrite.hpp +++ b/src/core/include/openvino/pass/graph_rewrite.hpp @@ -34,7 +34,7 @@ namespace pass { /// \ingroup ov_pass_cpp_api class OPENVINO_API GraphRewrite : public ModelPass { public: - OPENVINO_RTTI("ov::pass::GraphRewrite"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::GraphRewrite"); GraphRewrite() = default; diff --git a/src/core/include/openvino/pass/low_latency.hpp b/src/core/include/openvino/pass/low_latency.hpp index b3a8d38f755d42..d7bead4972c39b 100644 --- a/src/core/include/openvino/pass/low_latency.hpp +++ b/src/core/include/openvino/pass/low_latency.hpp @@ -36,7 +36,7 @@ namespace pass { */ class OPENVINO_API LowLatency2 : public ModelPass { public: - OPENVINO_RTTI("LowLatency2"); + OPENVINO_MODEL_PASS_RTTI("LowLatency2"); explicit LowLatency2(bool use_const_initializer = true) : m_use_const_initializer(use_const_initializer) {} diff --git a/src/core/include/openvino/pass/make_stateful.hpp b/src/core/include/openvino/pass/make_stateful.hpp index a4132589fe6a41..064f3fdea6aad1 100644 --- a/src/core/include/openvino/pass/make_stateful.hpp +++ b/src/core/include/openvino/pass/make_stateful.hpp @@ -18,7 +18,7 @@ namespace pass { */ class OPENVINO_API MakeStateful : public ModelPass { public: - OPENVINO_RTTI("MakeStateful"); + OPENVINO_MODEL_PASS_RTTI("MakeStateful"); using ParamResPairs = std::vector, std::shared_ptr>>; diff --git a/src/core/include/openvino/pass/pass.hpp b/src/core/include/openvino/pass/pass.hpp index 5c27df8aed4a0d..3927e7542aa886 100644 --- a/src/core/include/openvino/pass/pass.hpp +++ b/src/core/include/openvino/pass/pass.hpp @@ -4,16 +4,26 @@ #pragma once -#include #include -#include +#include #include "openvino/core/core_visibility.hpp" #include "openvino/core/enum_mask.hpp" #include "openvino/core/model.hpp" #include "openvino/core/node.hpp" +#include "openvino/core/rtti.hpp" #include "openvino/pass/pass_config.hpp" +#define _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE(TYPE_NAME) _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, "0") + +#define _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION(TYPE_NAME, VERSION_NAME) \ + _OPENVINO_RTTI_WITH_TYPE_VERSION_PARENT(TYPE_NAME, VERSION_NAME, ::ov::pass::ModelPass) + +#define OPENVINO_MODEL_PASS_RTTI(...) 
\ + _OPENVINO_RTTI_EXPAND(_OPENVINO_RTTI_DEFINITION_SELECTOR_2(__VA_ARGS__, \ + _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE_VERSION, \ + _OPENVINO_MODEL_PASS_RTTI_WITH_TYPE)(__VA_ARGS__)) + namespace ov { namespace pass { enum class PassProperty : uint32_t { diff --git a/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp b/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp index a0dd403818b462..74aeacb0719cee 100644 --- a/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp +++ b/src/core/include/openvino/pass/sdpa_to_paged_attention.hpp @@ -17,7 +17,7 @@ namespace pass { */ class OPENVINO_API SDPAToPagedAttention : public ModelPass { public: - OPENVINO_RTTI("SDPAToPagedAttention"); + OPENVINO_MODEL_PASS_RTTI("SDPAToPagedAttention"); SDPAToPagedAttention(bool use_block_indices_inputs = false, bool use_score_outputs = false); bool run_on_model(const std::shared_ptr& model) override; diff --git a/src/core/include/openvino/pass/serialize.hpp b/src/core/include/openvino/pass/serialize.hpp index d0eaadde346bf6..ff99a59c70b556 100644 --- a/src/core/include/openvino/pass/serialize.hpp +++ b/src/core/include/openvino/pass/serialize.hpp @@ -26,7 +26,7 @@ namespace pass { */ class OPENVINO_API Serialize : public ov::pass::ModelPass { public: - OPENVINO_RTTI("Serialize"); + OPENVINO_MODEL_PASS_RTTI("Serialize"); enum class Version : uint8_t { UNSPECIFIED = 0, // Use the latest or function version @@ -63,7 +63,7 @@ class OPENVINO_API Serialize : public ov::pass::ModelPass { */ class OPENVINO_API StreamSerialize : public ov::pass::ModelPass { public: - OPENVINO_RTTI("StreamSerialize"); + OPENVINO_MODEL_PASS_RTTI("StreamSerialize"); struct DataHeader { size_t custom_data_offset; diff --git a/src/core/include/openvino/pass/stateful_to_stateless.hpp b/src/core/include/openvino/pass/stateful_to_stateless.hpp index 90fd6b9e6e7901..551c9315c20f72 100644 --- a/src/core/include/openvino/pass/stateful_to_stateless.hpp +++ b/src/core/include/openvino/pass/stateful_to_stateless.hpp @@ -14,7 +14,7 @@ namespace pass { */ class OPENVINO_API StatefulToStateless : public ModelPass { public: - OPENVINO_RTTI("StatefulToStateless"); + OPENVINO_MODEL_PASS_RTTI("StatefulToStateless"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/core/include/openvino/pass/validate.hpp b/src/core/include/openvino/pass/validate.hpp index 23cfe9909f707b..dce6967131d33d 100644 --- a/src/core/include/openvino/pass/validate.hpp +++ b/src/core/include/openvino/pass/validate.hpp @@ -24,7 +24,7 @@ namespace pass { /// \ingroup ov_pass_cpp_api class OPENVINO_API Validate : public ModelPass { public: - OPENVINO_RTTI("ov::pass::Validate"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::Validate"); Validate() : ModelPass() {} bool run_on_model(const std::shared_ptr& f) override; diff --git a/src/core/include/openvino/pass/visualize_tree.hpp b/src/core/include/openvino/pass/visualize_tree.hpp index 065cec96e27220..2207270cd57d3d 100644 --- a/src/core/include/openvino/pass/visualize_tree.hpp +++ b/src/core/include/openvino/pass/visualize_tree.hpp @@ -28,7 +28,7 @@ namespace pass { */ class OPENVINO_API VisualizeTree : public ModelPass { public: - OPENVINO_RTTI("ov::pass::VisualizeTree"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::VisualizeTree"); using node_modifiers_t = std::function& attributes)>; VisualizeTree(const std::string& file_name, node_modifiers_t nm = nullptr, bool dot_only = false); diff --git a/src/core/tests/frontend/decoder_transformation_extension.cpp 
b/src/core/tests/frontend/decoder_transformation_extension.cpp index 714dd4b9fafb18..d1110041392bf7 100644 --- a/src/core/tests/frontend/decoder_transformation_extension.cpp +++ b/src/core/tests/frontend/decoder_transformation_extension.cpp @@ -37,7 +37,7 @@ TEST(DecoderTransformation, FunctionPass) { namespace _decoder_transformation_test { class TestPass : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::pass::TestPass"); + OPENVINO_MODEL_PASS_RTTI("ov::pass::TestPass"); TestPass() = default; TestPass(const TestPass& tp) = default; bool run_on_model(const std::shared_ptr&) override { diff --git a/src/core/tests/pass_config.cpp b/src/core/tests/pass_config.cpp index 15ebc71eef10a6..053cb2b62aff32 100644 --- a/src/core/tests/pass_config.cpp +++ b/src/core/tests/pass_config.cpp @@ -51,7 +51,7 @@ class RenameSigmoid : public ov::pass::MatcherPass { class TestModelPass : public pass::ModelPass { public: - OPENVINO_RTTI("TestModelPass"); + OPENVINO_MODEL_PASS_RTTI("TestModelPass"); bool run_on_model(const std::shared_ptr& f) override { pass::Manager manager(get_pass_config()); diff --git a/src/frontends/common/src/extension/decoder_transformation.cpp b/src/frontends/common/src/extension/decoder_transformation.cpp index 4533fb89d85651..940131479ca9e0 100644 --- a/src/frontends/common/src/extension/decoder_transformation.cpp +++ b/src/frontends/common/src/extension/decoder_transformation.cpp @@ -12,6 +12,7 @@ using namespace ov::frontend; /// \brief Helper class to register user function as a FunctionPass class CustomModelPass : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("frontend::CustomModelPass"); explicit CustomModelPass(std::function)> pass) : m_pass(std::move(pass)) {} bool run_on_model(const std::shared_ptr& f) override { diff --git a/src/frontends/onnx/frontend/src/core/tensor.cpp b/src/frontends/onnx/frontend/src/core/tensor.cpp index b23f6c55253ac1..1c3a943e6481d1 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.cpp +++ b/src/frontends/onnx/frontend/src/core/tensor.cpp @@ -266,6 +266,102 @@ std::vector Tensor::get_data() const { ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "STRING"); } +std::shared_ptr Tensor::get_ov_constant() const { + if (m_tensor_proto->has_segment()) { + FRONT_END_THROW("Loading segments isn't supported"); + } + std::shared_ptr constant{nullptr}; + ov::element::Type ov_type = get_ov_type(); + size_t element_count = get_data_size(); + if (ov::element::is_nibble_type(ov_type)) { + element_count *= 2; // Each byte contains 2 data items + } + if (has_external_data()) { + const auto ext_data = detail::TensorExternalData(*m_tensor_proto); + if (m_mmap_cache) { + constant = + std::make_shared(ov_type, + m_shape, + ext_data.load_external_mmap_data(m_model_dir, m_mmap_cache)); + } else { + constant = + std::make_shared(ov_type, m_shape, ext_data.load_external_data(m_model_dir)); + } + // ext_data.size() might be zero, need to recalc by using info about actually read data (for byte-size) + element_count = constant->get_byte_size() / ov_type.size(); + if (ov::element::is_nibble_type(ov_type)) { + element_count *= 2; // Each byte contains 2 data items, so byte size must be multiplied + } + if (element_count != ov::shape_size(m_shape) || + (ext_data.size() != 0 && constant->get_byte_size() != ext_data.size())) { + throw error::invalid_external_data( + "The size of the external data file does not match the byte size of an initializer '" + get_name() + + "' in the model"); + } + } else if (element_count == shape_size(m_shape)) { +
switch (m_tensor_proto->data_type()) { + case TensorProto_DataType::TensorProto_DataType_FLOAT: + case TensorProto_DataType::TensorProto_DataType_DOUBLE: + case TensorProto_DataType::TensorProto_DataType_INT32: + case TensorProto_DataType::TensorProto_DataType_INT64: + case TensorProto_DataType::TensorProto_DataType_UINT32: + case TensorProto_DataType::TensorProto_DataType_UINT64: + constant = std::make_shared(ov_type, m_shape, get_data_ptr()); + break; + case TensorProto_DataType::TensorProto_DataType_INT4: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_INT8: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_INT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT4: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT8: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_UINT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_BOOL: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT16: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + case TensorProto_DataType::TensorProto_DataType_STRING: + constant = std::make_shared(ov_type, m_shape, get_data().data()); + break; + default: + ONNX_UNSUPPORTED_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " + "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); + } + } else if (element_count == 0 && m_shape.size() == 0) { + constant = common::make_failsafe_constant(ov_type); + } else { + FRONT_END_THROW("Tensor shape doesn't match data size"); + } + + if (m_tensor_proto->has_name()) { + constant->set_friendly_name(get_name()); + } + return constant; +} + } // namespace onnx } // namespace frontend } // namespace ov diff --git a/src/frontends/onnx/frontend/src/core/tensor.hpp b/src/frontends/onnx/frontend/src/core/tensor.hpp index a63cdfd1906bb0..7321311e4b4775 100644 --- a/src/frontends/onnx/frontend/src/core/tensor.hpp +++ b/src/frontends/onnx/frontend/src/core/tensor.hpp @@ -186,119 +186,9 @@ class Tensor { return static_cast(m_tensor_proto->data_type()); } - std::shared_ptr get_ov_constant() const { - if (m_tensor_proto->has_segment()) { - FRONT_END_THROW("Loading segments isn't supported"); - } - switch (m_tensor_proto->data_type()) { - case TensorProto_DataType::TensorProto_DataType_BOOL: - return make_ov_constant(ov::element::boolean); - case TensorProto_DataType::TensorProto_DataType_FLOAT: - return make_ov_constant(ov::element::f32); - case TensorProto_DataType::TensorProto_DataType_FLOAT16: - return make_ov_constant(ov::element::f16); - case 
TensorProto_DataType::TensorProto_DataType_DOUBLE: - return make_ov_constant(ov::element::f64); - case TensorProto_DataType::TensorProto_DataType_INT4: - return make_ov_constant(ov::element::i4); - case TensorProto_DataType::TensorProto_DataType_INT8: - return make_ov_constant(ov::element::i8); - case TensorProto_DataType::TensorProto_DataType_INT16: - return make_ov_constant(ov::element::i16); - case TensorProto_DataType::TensorProto_DataType_INT32: - return make_ov_constant(ov::element::i32); - case TensorProto_DataType::TensorProto_DataType_INT64: - return make_ov_constant(ov::element::i64); - case TensorProto_DataType::TensorProto_DataType_UINT4: - return make_ov_constant(ov::element::u4); - case TensorProto_DataType::TensorProto_DataType_UINT8: - return make_ov_constant(ov::element::u8); - case TensorProto_DataType::TensorProto_DataType_UINT16: - return make_ov_constant(ov::element::u16); - case TensorProto_DataType::TensorProto_DataType_UINT32: - return make_ov_constant(ov::element::u32); - case TensorProto_DataType::TensorProto_DataType_UINT64: - return make_ov_constant(ov::element::u64); - case TensorProto_DataType::TensorProto_DataType_BFLOAT16: - return make_ov_constant(ov::element::bf16); - case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: - return make_ov_constant(ov::element::f8e4m3); - case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: - return make_ov_constant(ov::element::f8e5m2); - case TensorProto_DataType::TensorProto_DataType_STRING: - return make_ov_constant(ov::element::string); - default: - ONNX_UNSUPPORTED_DATA_TYPE( - m_tensor_proto->data_type(), - "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " - "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); - } - } + std::shared_ptr get_ov_constant() const; private: - template ::value || std::is_same::value || - std::is_same::value || std::is_same::value || - std::is_same::value, - bool>::type = true> - std::shared_ptr make_ov_constant(const ov::element::Type& type) const { - std::shared_ptr constant{nullptr}; - size_t data_size = get_data_size(); - if (has_external_data()) { - const auto ext_data = detail::TensorExternalData(*m_tensor_proto); - if (m_mmap_cache) { - constant = - std::make_shared(type, - m_shape, - ext_data.load_external_mmap_data(m_model_dir, m_mmap_cache)); - } else { - constant = - std::make_shared(type, m_shape, ext_data.load_external_data(m_model_dir)); - } - if (constant->get_byte_size() != ov::shape_size(m_shape) * type.size()) { - throw error::invalid_external_data( - "The size of the external data file does not match the byte size of an initializer '" + get_name() + - "' in the model"); - } - } else if (data_size == shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, get_data_ptr()); - } else if (data_size == 0 && m_shape.size() == 0) { - constant = common::make_failsafe_constant(type); - } else { - FRONT_END_THROW("Tensor shape doesn't match data size"); - } - - if (m_tensor_proto->has_name()) { - constant->set_friendly_name(get_name()); - } - return constant; - } - - template ::value && !std::is_same::value && - !std::is_same::value && !std::is_same::value && - !std::is_same::value, - bool>::type = true> - std::shared_ptr make_ov_constant(const ov::element::Type& type) const { - std::shared_ptr constant{nullptr}; - auto data = get_data(); - auto element_count = data.size(); - if (ov::element::is_nibble_type(type)) { - element_count *= 2; // Each byte contains 2 data items - } - if (element_count == 
shape_size(m_shape)) { - constant = std::make_shared(type, m_shape, data.data()); - } else if (element_count == 0 && m_shape.size() == 0) { - constant = common::make_failsafe_constant(type); - } else { - FRONT_END_THROW("Tensor shape doesn't match data size"); - } - if (m_tensor_proto->has_name()) { - constant->set_friendly_name(get_name()); - } - return constant; - } - bool has_external_data() const { return m_tensor_proto->has_data_location() && m_tensor_proto->data_location() == TensorProto_DataLocation::TensorProto_DataLocation_EXTERNAL; @@ -317,6 +207,9 @@ class Tensor { } const void* get_data_ptr() const { + if (has_external_data()) { + FRONT_END_THROW("Unexpected usage of method for externally stored data"); + } if (m_tensor_proto->has_raw_data()) { return m_tensor_proto->raw_data().data(); } @@ -336,6 +229,10 @@ class Tensor { } size_t get_data_size() const { + if (has_external_data()) { + const auto ext_data = detail::TensorExternalData(*m_tensor_proto); + return ext_data.size() / get_onnx_data_size(m_tensor_proto->data_type()); + } if (m_tensor_proto->has_raw_data()) { return m_tensor_proto->raw_data().size() / get_onnx_data_size(m_tensor_proto->data_type()); } @@ -352,8 +249,23 @@ class Tensor { return m_tensor_proto->double_data_size(); case TensorProto_DataType::TensorProto_DataType_STRING: return m_tensor_proto->string_data_size(); + case TensorProto_DataType::TensorProto_DataType_INT4: + case TensorProto_DataType::TensorProto_DataType_INT8: + case TensorProto_DataType::TensorProto_DataType_INT16: + case TensorProto_DataType::TensorProto_DataType_UINT4: + case TensorProto_DataType::TensorProto_DataType_UINT8: + case TensorProto_DataType::TensorProto_DataType_UINT16: + case TensorProto_DataType::TensorProto_DataType_BOOL: + case TensorProto_DataType::TensorProto_DataType_BFLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT16: + case TensorProto_DataType::TensorProto_DataType_FLOAT8E4M3FN: + case TensorProto_DataType::TensorProto_DataType_FLOAT8E5M2: + return m_tensor_proto->int32_data_size(); } - ONNX_INVALID_DATA_TYPE(m_tensor_proto->data_type(), "FLOAT, INT32, INT64, UINT64, DOUBLE, STRING"); + ONNX_INVALID_DATA_TYPE( + m_tensor_proto->data_type(), + "BOOL, BFLOAT16, FLOAT8E4M3FN, FLOAT8E5M2, FLOAT, FLOAT16, DOUBLE, INT4, INT8, INT16, INT32, INT64, " + "UINT4, UINT8, UINT16, UINT32, UINT64, STRING"); } const TensorProto* m_tensor_proto; diff --git a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp index 983e53895c1148..e715a8e7e61cdc 100644 --- a/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp +++ b/src/frontends/onnx/frontend/src/utils/tensor_external_data.hpp @@ -46,6 +46,14 @@ class TensorExternalData { /// \return State of TensorExternalData as string representation std::string to_string() const; + /// \brief Object contains a data length after construction. Method allows read-only access to this + /// information. 
+ /// + /// \return Returns a stored data size in bytes + uint64_t size() const { + return m_data_length; + } + private: std::string m_data_location{}; uint64_t m_offset = 0; diff --git a/src/frontends/pytorch/src/transforms/dict_resolver.hpp b/src/frontends/pytorch/src/transforms/dict_resolver.hpp index 150b1361dab57d..b2830cecb51bdc 100644 --- a/src/frontends/pytorch/src/transforms/dict_resolver.hpp +++ b/src/frontends/pytorch/src/transforms/dict_resolver.hpp @@ -15,13 +15,13 @@ namespace pass { // This transformation replaces pattern Parameter(Dict)->aten::__getitem__ class DictParameterResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DictParameterResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DictParameterResolver"); bool run_on_model(const std::shared_ptr& model) override; }; // This transformation replaces pattern prim::DictConstruct->Result class DictResultResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DictResultResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DictResultResolver"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp index 31a025d6d90493..cbe8e1eb0b62c9 100644 --- a/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_list_tuple_construct_replacer.hpp @@ -14,7 +14,7 @@ namespace pass { class DecomposeListTupleResults : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeListTupleResults"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DecomposeListTupleResults"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp index 2616b66fb58ab6..5d339a307033ef 100644 --- a/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/prim_unpack_parameter_replacer.hpp @@ -27,7 +27,7 @@ namespace pass { // tuples only and the most nested objects in those tuples are tensors. 
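The header below, like the other frontend and core passes in this change, switches to OPENVINO_MODEL_PASS_RTTI. A minimal sketch of a model pass declared against the new macro from openvino/pass/pass.hpp earlier in this diff; the class name and body are hypothetical and not part of this change:

#include "openvino/core/model.hpp"
#include "openvino/pass/pass.hpp"

// Hypothetical example: OPENVINO_MODEL_PASS_RTTI wires ::ov::pass::ModelPass in as the RTTI parent
// and defaults the version to "0"; the two-argument form sets the version explicitly.
class MyNoopPass : public ov::pass::ModelPass {
public:
    OPENVINO_MODEL_PASS_RTTI("MyNoopPass");
    // OPENVINO_MODEL_PASS_RTTI("MyNoopPass", "1");  // explicit-version variant
    bool run_on_model(const std::shared_ptr<ov::Model>& model) override {
        (void)model;   // this sketch does not touch the model
        return false;  // report that nothing was modified
    }
};
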
class DecomposeUnpackParameters : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::DecomposeUnpackParameters"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::DecomposeUnpackParameters"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp index 625b986f3b64b7..2aec2824bbddd3 100644 --- a/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp +++ b/src/frontends/pytorch/src/transforms/tuple_unpack_replacer.hpp @@ -20,7 +20,7 @@ class PrimTupleUnpackReplacer : public ov::pass::MatcherPass { class TupleUnpackInBodyReplacer : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::pytorch::pass::TupleUnpackInBodyReplacer"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::pytorch::pass::TupleUnpackInBodyReplacer"); bool run_on_model(const std::shared_ptr& model) override; }; diff --git a/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp b/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp index 6038005e2eb4c4..d9eb34fb58d81b 100644 --- a/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp +++ b/src/frontends/tensorflow/src/transformations/switch_merge_resolve.hpp @@ -20,7 +20,7 @@ namespace pass { // Merge nodes can have the same eliminated markers that means the fused If will have several outputs. class SwitchMergeResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::SwitchMergeResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::SwitchMergeResolver"); SwitchMergeResolver() = default; bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp b/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp index fc4d1c36fac3af..016f7b994dacb3 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/const_to_result_remover.hpp @@ -17,7 +17,7 @@ namespace pass { // We need to remove them because separate sub-graphs can solidly affect performance class ConstToResultRemover : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::UnsupportedConstToResultRemover"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::pass::UnsupportedConstToResultRemover"); ConstToResultRemover() {} bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp b/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp index c7d457d8d6a361..fcc493458ccbba 100644 --- a/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp +++ b/src/frontends/tensorflow_common/include/helper_transforms/saved_model_unused_remover.hpp @@ -15,7 +15,7 @@ namespace pass { // Results marked as unused by Saved Model settings class SavedModelUnusedRemover : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow::pass::SavedModelUnusedRemover"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow::pass::SavedModelUnusedRemover"); SavedModelUnusedRemover() {} bool run_on_model(const std::shared_ptr& m) override; diff --git 
a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp index 584e8c55b6a9ea..84cdf44cbf5b02 100644 --- a/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp +++ b/src/frontends/tensorflow_lite/src/tflite_transformations/tflite_quantize_resolver.hpp @@ -32,7 +32,7 @@ class TFLQuantizeReplacer : public ov::pass::MatcherPass { // This transformation simplifies type manipulations in the graph class TFLQuantizeResolver : public ov::pass::ModelPass { public: - OPENVINO_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeResolver"); + OPENVINO_MODEL_PASS_RTTI("ov::frontend::tensorflow_lite::pass::TFLQuantizeResolver"); bool run_on_model(const std::shared_ptr& m) override; }; diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp index 7cb4abc2161f14..71851c529c6095 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.cpp +++ b/src/plugins/intel_cpu/src/cpu_memory.cpp @@ -45,7 +45,7 @@ void transferData(const IMemory& src, const IMemory& dst, bool ftz) { if (!ftz) { return; } - if (src.getDesc().getPrecision() != ov::element::f32 || dst.getDesc().getPrecision() == ov::element::bf16) { + if (src.getDesc().getPrecision() != ov::element::f32 || dst.getDesc().getPrecision() != ov::element::f32) { return; } size_t offset = 0; diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp index 9b86a1433acb06..be0c8a2a62d954 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp @@ -628,13 +628,71 @@ static MemoryPtr prepackDecompressionParams(const MemoryCPtr& paramsPtr, auto srcMem = std::make_shared(engine, srcMemoryDesc, paramsPtr->getData()); dstMem->load(*srcMem); - return dstMem; } +static dnnl::memory::dims getGroupDims(const VectorDims& weiDims, const VectorDims& scaleDims) { + if (scaleDims[0] == 1 && scaleDims[1] == 1) + return {}; + + int N = weiDims[weiDims.size() - 2]; + int K = weiDims[weiDims.size() - 1]; + dnnl::memory::dim groupN = N / scaleDims[0]; + dnnl::memory::dim groupK = K / scaleDims[1]; + + return {groupK, groupN}; +} + +static int getMask(const VectorDims& weiDims, const dnnl::memory::dims& groupDims) { + const int maskN = 1 << (weiDims.size() - 1); + const int maskK = 1 << (weiDims.size() - 2); + int N = weiDims[weiDims.size() - 2]; + int K = weiDims[weiDims.size() - 1]; + int mask = 0; + if (!groupDims.empty() && groupDims[1] != N) + mask += maskN; + if (!groupDims.empty() && groupDims[0] != K) + mask += maskK; + + return mask; +} + void DnnlPostOpsComposer::appendDecompressionScales(const MemoryCPtr& scales_ptr, bool needTranspose, - ov::element::Type dstPrecision) { + ov::element::Type dstPrecision, + const VectorDims& weiDims) { + if (scales_ptr == nullptr) + return; + + auto scaleMem = prepackDecompressionParams(scales_ptr, needTranspose, dstPrecision, engine); + auto groupDims = getGroupDims(weiDims, scaleMem->getStaticDims()); + auto mask = getMask(weiDims, groupDims); + + attr.set_scales(DNNL_ARG_WEIGHTS, mask, groupDims, DnnlExtensionUtils::ElementTypeToDataType(dstPrecision)); + cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = std::move(scaleMem); + dnnlArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS] = + cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS]->getPrimitive(); +} + +void DnnlPostOpsComposer::appendDecompressionZeroPoints(const MemoryCPtr& 
zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision, + const VectorDims& weiDims) { + if (zero_points_ptr == nullptr) + return; + + auto zeroPointsMem = prepackDecompressionParams(zero_points_ptr, needTranspose, dstPrecision, engine); + auto groupDims = getGroupDims(weiDims, zeroPointsMem->getStaticDims()); + auto mask = getMask(weiDims, groupDims); + + attr.set_zero_points(DNNL_ARG_WEIGHTS, mask, groupDims, DnnlExtensionUtils::ElementTypeToDataType(dstPrecision)); + cpuArgs[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = zeroPointsMem; + dnnlArgs[DNNL_ARG_ATTR_ZERO_POINTS | DNNL_ARG_WEIGHTS] = zeroPointsMem->getPrimitive(); +} + +void DnnlPostOpsComposer::appendDecompressionScalesLegacy(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision) { if (scales_ptr == nullptr) return; @@ -647,9 +705,9 @@ void DnnlPostOpsComposer::appendDecompressionScales(const MemoryCPtr& scales_ptr cpuArgs[DNNL_ARG_ATTR_SCALES | DNNL_ARG_WEIGHTS]->getPrimitive(); } -void DnnlPostOpsComposer::appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, - bool needTranspose, - ov::element::Type dstPrecision) { +void DnnlPostOpsComposer::appendDecompressionZeroPointsLegacy(const MemoryCPtr& zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision) { if (zero_points_ptr == nullptr) return; diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.h b/src/plugins/intel_cpu/src/dnnl_postops_composer.h index 7ae634658b005f..81fd1aaeed194d 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.h +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.h @@ -30,10 +30,21 @@ class DnnlPostOpsComposer { const MemoryArgs& memory, const dnnl::memory::data_type outDataType); DnnlPrimitiveAttrs compose(); - void appendDecompressionScales(const MemoryCPtr& scales_ptr, bool needTranspose, ov::element::Type dstPrecision); + + void appendDecompressionScales(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision, + const VectorDims& weiDims); void appendDecompressionZeroPoints(const MemoryCPtr& zero_points_ptr, bool needTranspose, - ov::element::Type dstPrecision); + ov::element::Type dstPrecision, + const VectorDims& weiDims); + void appendDecompressionScalesLegacy(const MemoryCPtr& scales_ptr, + bool needTranspose, + ov::element::Type dstPrecision); + void appendDecompressionZeroPointsLegacy(const MemoryCPtr& zero_points_ptr, + bool needTranspose, + ov::element::Type dstPrecision); void setDynamicQuantizationParams(uint64_t groupSize); private: diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp index 95698f8ac78bb0..0f6b2c24c13df7 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp @@ -138,10 +138,12 @@ CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::aarch64::cpu_isa_t host_isa) // unary jitters[ov::op::v0::Abs::get_type_info_static()] = CREATE_CPU_EMITTER(jit_abs_emitter); + jitters[ov::op::v0::Ceiling::get_type_info_static()] = CREATE_CPU_EMITTER(jit_ceiling_emitter); jitters[ov::op::v0::Clamp::get_type_info_static()] = CREATE_CPU_EMITTER(jit_clamp_emitter); jitters[ov::op::v0::Elu::get_type_info_static()] = CREATE_CPU_EMITTER(jit_elu_emitter); jitters[ov::op::v0::Exp::get_type_info_static()] = CREATE_CPU_EMITTER(jit_exp_emitter); jitters[ov::op::v0::Floor::get_type_info_static()] = 
CREATE_CPU_EMITTER(jit_floor_emitter); + jitters[ov::op::v1::FloorMod::get_type_info_static()] = CREATE_CPU_EMITTER(jit_floor_mod_emitter); jitters[ov::op::v0::Gelu::get_type_info_static()] = CREATE_CPU_EMITTER(jit_gelu_erf_emitter); jitters[ov::op::v7::Gelu::get_type_info_static()] = CREATE_GELU_V7_EMITTER(jit_gelu_erf_emitter, jit_gelu_tanh_emitter); diff --git a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp index a0590827006eb4..0c8cddd905dc2e 100644 --- a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp @@ -570,12 +570,13 @@ struct ConvertFromBinPrecision> { } }; -#define INTEL_CPU_CVT_FROM_4BIT_LIST \ - INTEL_CPU_CVT(u4, f32), INTEL_CPU_CVT(u4, bf16), INTEL_CPU_CVT(u4, f16), INTEL_CPU_CVT(u4, i8), \ - INTEL_CPU_CVT(u4, u8), INTEL_CPU_CVT(i4, f32), INTEL_CPU_CVT(i4, bf16), INTEL_CPU_CVT(i4, f16), \ - INTEL_CPU_CVT(i4, i8), INTEL_CPU_CVT(i4, u8), INTEL_CPU_CVT(nf4, f32), INTEL_CPU_CVT(nf4, bf16), \ - INTEL_CPU_CVT(nf4, f16), INTEL_CPU_CVT(nf4, i8), INTEL_CPU_CVT(nf4, u8), INTEL_CPU_CVT(f4e2m1, f32), \ - INTEL_CPU_CVT(f4e2m1, bf16), INTEL_CPU_CVT(f4e2m1, f16), INTEL_CPU_CVT(f4e2m1, i8), INTEL_CPU_CVT(f4e2m1, u8) +#define INTEL_CPU_CVT_FROM_4BIT_LIST \ + INTEL_CPU_CVT(u4, f32), INTEL_CPU_CVT(u4, i32), INTEL_CPU_CVT(u4, bf16), INTEL_CPU_CVT(u4, f16), \ + INTEL_CPU_CVT(u4, i8), INTEL_CPU_CVT(u4, u8), INTEL_CPU_CVT(i4, f32), INTEL_CPU_CVT(i4, i32), \ + INTEL_CPU_CVT(i4, bf16), INTEL_CPU_CVT(i4, f16), INTEL_CPU_CVT(i4, i8), INTEL_CPU_CVT(i4, u8), \ + INTEL_CPU_CVT(nf4, f32), INTEL_CPU_CVT(nf4, bf16), INTEL_CPU_CVT(nf4, f16), INTEL_CPU_CVT(nf4, i8), \ + INTEL_CPU_CVT(nf4, u8), INTEL_CPU_CVT(f4e2m1, f32), INTEL_CPU_CVT(f4e2m1, bf16), INTEL_CPU_CVT(f4e2m1, f16), \ + INTEL_CPU_CVT(f4e2m1, i8), INTEL_CPU_CVT(f4e2m1, u8) struct ConvertFrom4BitContext { ov::element::Type_t inType; diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index 52434a1eeb8461..8ae2d2784193af 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -228,14 +228,16 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, if (dstPrc != f8e8m0 || useDynamicQuantization) dstPrc = ov::element::f32; - dnnlpoc.appendDecompressionScales(memory.at(ARG_WEI | ARG_ATTR_SCALES), !attrs.weightsNonTransposed, dstPrc); + dnnlpoc.appendDecompressionScalesLegacy(memory.at(ARG_WEI | ARG_ATTR_SCALES), + !attrs.weightsNonTransposed, + dstPrc); } if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) { auto dstPrc = useDynamicQuantization ? ov::element::u8 : ov::element::f32; - dnnlpoc.appendDecompressionZeroPoints(memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS), - !attrs.weightsNonTransposed, - dstPrc); + dnnlpoc.appendDecompressionZeroPointsLegacy(memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS), + !attrs.weightsNonTransposed, + dstPrc); } if (useDynamicQuantization) { @@ -247,9 +249,9 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, uint8_t zp_value = (wei_precision == ov::element::i8) ? 
128 : 8; DnnlBlockedMemoryDesc zpMemoryDesc(ov::element::u8, Shape({1})); auto decompressionSubtractPtr = std::make_shared(context->getEngine(), zpMemoryDesc, &zp_value); - dnnlpoc.appendDecompressionZeroPoints(decompressionSubtractPtr, - !attrs.weightsNonTransposed, - ov::element::u8); + dnnlpoc.appendDecompressionZeroPointsLegacy(decompressionSubtractPtr, + !attrs.weightsNonTransposed, + ov::element::u8); } dnnlpoc.setDynamicQuantizationParams(attrs.dynamicQuantizationGroupSize); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp index 86b22607111833..9ffe4731689d43 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -76,6 +77,23 @@ bool DnnlMatMulPrimitive::Key::operator==(const Key& rhs) const { return result; } +template +static dimsType normalizeToRank(const dimsType& vec, size_t rank) { + if (vec.size() == rank || vec.empty()) + return vec; + + dimsType result; + result.reserve(rank); + + for (size_t i = vec.size(); i < rank; ++i) { + result.push_back(1); + } + + result.insert(result.end(), vec.begin(), vec.end()); + + return result; +} + std::shared_ptr DnnlMatMulPrimitive::create(const MemoryArgs& memory, const MatMulAttrs& attrs, const ExecutorContext::CPtr context, @@ -105,19 +123,22 @@ DnnlMemoryDescPtr DnnlMatMulPrimitive::makeTransposedWeightDescriptor(const Dnnl const auto& weiDesc = srcDesc->getDnnlDesc(); auto wDims = weiDesc.get_dims(); auto wDataType = weiDesc.get_data_type(); + std::swap(wDims[wDims.size() - 1], wDims[wDims.size() - 2]); dnnl::memory::dims wDims2D = reshapeDownToRank<2>(wDims); const auto format = weightsNonTransposed ? 
dnnl::memory::format_tag::ab : dnnl::memory::format_tag::ba; const auto transposedWeiDesc = dnnl::memory::desc{wDims2D, wDataType, format}; + const auto reshapedWeiDesc = transposedWeiDesc.reshape(dstDesc->getDnnlDesc().get_dims()); - return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc); + return DnnlExtensionUtils::makeDescriptor(reshapedWeiDesc); } static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, ExecutorContext::CPtr context, - bool useDynamicQuantization) { + bool useWeightsDecompression, + bool weightsNonTransposed) { const auto& srcDesc = memory.at(ARG_SRC)->getDescPtr(); const auto& weiDesc = memory.at(ARG_WEI)->getDescPtr(); const auto& dstDesc = memory.at(ARG_DST)->getDescPtr(); @@ -132,7 +153,30 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs, DnnlPostOpsComposer dnnlpoc(postOps, context->getEngine(), dims, dims.size() - 1, isINT8, 1 << 0, memory, outputDataType); - return dnnlpoc.compose(); + const auto maxRank = + std::max({srcDesc->getShape().getRank(), weiDesc->getShape().getRank(), dstDesc->getShape().getRank()}); + const auto normWeiDims = normalizeToRank(weiDesc->getShape().getStaticDims(), maxRank); + if (memory.count(ARG_WEI | ARG_ATTR_SCALES)) { + auto dstPrc = ov::element::f32; + dnnlpoc.appendDecompressionScales(memory.at(ARG_WEI | ARG_ATTR_SCALES), + !weightsNonTransposed, + dstPrc, + normWeiDims); + } + if (memory.count(ARG_WEI | ARG_ATTR_ZERO_POINTS)) { + // TODO: clarify oneDNN requirements on ZP precision + auto zp = memory.at(ARG_WEI | ARG_ATTR_ZERO_POINTS); + auto zpPrc = zp->getPrecision(); + auto dstPrc = one_of(zpPrc, i32, i8, u8, i4, u4) ? zpPrc : i32; + dnnlpoc.appendDecompressionZeroPoints(zp, !weightsNonTransposed, dstPrc, normWeiDims); + } + + auto primAttrs = dnnlpoc.compose(); + if (useWeightsDecompression) { + primAttrs.attr.set_fpmath_mode(fpmath_mode::any, true); + } + + return primAttrs; } static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory::desc& inputDesc, @@ -143,22 +187,6 @@ static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory: const dnnl::engine& engine, const bool useSparseWeights, const bool useWeightsDecompression) { - auto normalizeToRank = [](const dnnl::memory::dims& vec, size_t rank) -> dnnl::memory::dims { - if (vec.size() == rank || vec.empty()) - return vec; - - dnnl::memory::dims result; - result.reserve(rank); - - for (size_t i = vec.size(); i < rank; ++i) { - result.push_back(1); - } - - result.insert(result.end(), vec.begin(), vec.end()); - - return result; - }; - auto weiDims = weightDesc.get_dims(); std::swap(weiDims[weiDims.size() - 1], weiDims[weiDims.size() - 2]); @@ -175,7 +203,9 @@ static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory: auto idt = inputDesc.get_data_type(); auto wdt = idt; - if (idt == dnnl::memory::data_type::u8 || idt == dnnl::memory::data_type::s8) { + if (useWeightsDecompression) { + wdt = weightDesc.get_data_type(); + } else if (idt == dnnl::memory::data_type::u8 || idt == dnnl::memory::data_type::s8) { wdt = memory::data_type::s8; } @@ -245,6 +275,16 @@ static VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDim return outputShape; } +bool DnnlMatMulPrimitive::useWeightsDecompressionImpl(const ov::element::Type inputType, + const ov::element::Type weightsType) { +#if defined(OPENVINO_ARCH_X86_64) + if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2)) + return false; +#endif + + 
return (one_of(inputType, f32, bf16, f16) && one_of(weightsType, u8, i8, u4, i4)); +} + DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, @@ -257,7 +297,9 @@ DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAt auto dstDesc = memory.at(ARG_DST)->getDescPtr(); MatMulAttrs mmAttrs{false, false}; - const auto postOpData = createPrimitiveAttrs(mmAttrs, postOps, memory, context, false); + const auto useWeightsDecompression = useWeightsDecompressionImpl(srcDesc->getPrecision(), weiDesc->getPrecision()); + const auto postOpData = + createPrimitiveAttrs(mmAttrs, postOps, memory, context, useWeightsDecompression, attrs.weightsNonTransposed); if (!cacheWeights) return std::make_shared(postOpData); @@ -285,7 +327,7 @@ DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAt context->getEngine(), context->getImplPriorities(), false, - false); + useWeightsDecompression); const auto weightsDesc = DnnlExtensionUtils::makeDescriptor(primDesc.weights_desc()); auto originalWeightsDesc = MemoryDescUtils::convertToDnnlMemoryDesc(weiDesc); @@ -319,7 +361,7 @@ DnnlMatMulPrimitive::DnnlMatMulPrimitive(const Key& key, engine, implPriorities, false, - false)), + useWeightsDecompressionImpl(key.src->getPrecision(), key.wei->getPrecision()))), m_implType(implTypeFromPrimDesc(m_primDesc)), m_srcDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.src_desc())), m_weiDesc(DnnlExtensionUtils::makeDescriptor(m_primDesc.weights_desc())), @@ -328,8 +370,6 @@ DnnlMatMulPrimitive::DnnlMatMulPrimitive(const Key& key, m_prim(primitive(m_primDesc)) {} void DnnlMatMulPrimitive::execute(const dnnl_primitive_args& primArgs) const { - std::cout << "Executing MM primitive" - << "\n"; m_prim.execute(m_stream, primArgs); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp index 618d3abdf8b3de..5491b62a154687 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp @@ -53,6 +53,8 @@ class DnnlMatMulPrimitive { return m_implType; } + static bool useWeightsDecompressionImpl(const ov::element::Type inputType, const ov::element::Type weightsType); + static DnnlShapeAgnosticDataPtr createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index bc55af8cfbb0e2..f2cf5a7c9102b7 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -133,6 +133,8 @@ static const TypeMapping dnnlMatMulTypeMapping { // quantization configuration {{_u8 | _i8, _i8, _u8|_i8|_i32|_bf16|_f16|_f32|_undefined, _u8|_i8|_i32|_bf16|_f16|_f32}, pt(bypass(), bypass(), bypass(), bypass())}, {{_u8 | _i8, _i8, _any, _any}, pt(bypass(), bypass(), just(), just())}, + // compresses int weights + {{_f32 | _bf16 | _f16, _u8 | _i8, _any, _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, // @todo should we fallback to FPXX instead of _f32? 
{{_any, _any, _any, _any}, pt(just(), just(), just(), just())}, // @todo explicitly cover configuration limitations for oneDNN on ARM @@ -443,7 +445,7 @@ const std::vector>& getImplementations() { return std::make_shared(attrs, postOps, memory, context); } ) - OV_CPU_INSTANCE_X64( + OV_CPU_INSTANCE_DNNL( "matmul_dnnl", ExecutorType::Dnnl, OperationType::MatMul, @@ -454,7 +456,6 @@ const std::vector>& getImplementations() { CPU_DEBUG_CAP_ENABLE( if (getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC")) { VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); - VERIFY(noWeightsDecompression(config), UNSUPPORTED_WEIGHTS_DECOMPRESSION); return true; }) return false; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 2df6c0ae7522cc..4a2e3728887087 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -41,6 +41,42 @@ namespace ov { namespace intel_cpu { namespace node { +ov::element::TypeVector FullyConnected::getSupportedCompressedWeightsTypes() { + using ov::element::Type_t; + + bool useMatmulPrim = false; + CPU_DEBUG_CAP_ENABLE(useMatmulPrim = getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC");) + + if (useMatmulPrim) { + return {Type_t::u8, Type_t::i8}; + } else { +#if defined(OPENVINO_ARCH_X86_64) + return {Type_t::u8, Type_t::i8, Type_t::u4, Type_t::i4, Type_t::nf4, Type_t::f4e2m1}; +#else + return {}; +#endif + } +} + +ov::element::TypeVector FullyConnected::getSupportedCompressedActivationsTypes() { + using ov::element::Type_t; + + bool useMatmulPrim = false; + CPU_DEBUG_CAP_ENABLE(useMatmulPrim = getEnvBool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC");) + + if (useMatmulPrim) { + return {Type_t::f32, Type_t::f16}; + } else { +#if defined(OPENVINO_ARCH_X86_64) + // @todo enable for bf16 as well + // after EnforceInferencePrecision is replaced with ConvertPrecision + return {Type_t::f32}; +#else + return {}; +#endif + } +} + bool FullyConnected::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { @@ -113,7 +149,9 @@ bool FullyConnected::isSupportedCompressedOperation(const std::shared_ptr(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_avx512); + if (primDesc) + primDesc->setImplementationType(jit_avx512); } else if (mayiuse(cpu::x64::avx2)) { rdftKernel.reset(new jit_dft_kernel_f32(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_avx2); + if (primDesc) + primDesc->setImplementationType(jit_avx2); } else if (mayiuse(cpu::x64::sse41)) { rdftKernel.reset(new jit_dft_kernel_f32(isInverse, rdftType)); dftKernel.reset(new jit_dft_kernel_f32(isInverse, complex_to_complex)); vlen = cpu_isa_traits::vlen; - primDesc->setImplementationType(jit_sse42); + if (primDesc) + primDesc->setImplementationType(jit_sse42); } else { OPENVINO_THROW("Can't create RDFT kernel"); } @@ -1075,22 +1078,6 @@ struct RDFTRefExecutor : public RDFTExecutor { } }; -struct RDFTKey { - bool isInverse; - - size_t hash() const { - using namespace dnnl::impl::primitive_hashing; - - size_t seed = 0; - seed = hash_combine(seed, isInverse); - return seed; - } - - bool operator==(const RDFTKey& rhs) const { - return isInverse == rhs.isInverse; - } -}; - void RDFT::createPrimitive() { RDFTKey key{}; key.isInverse = inverse; @@ -1115,6 
+1102,22 @@ void RDFT::createPrimitive() { Node::createPrimitive(); } + +std::shared_ptr RDFTExecutor::build(bool inverse, NodeDesc* primDesc) { + std::shared_ptr executor; +#if defined(OPENVINO_ARCH_X86_64) + using namespace dnnl::impl; + using namespace dnnl::impl::cpu::x64; + if (mayiuse(cpu::x64::sse41)) { + executor = std::make_shared(inverse, primDesc); + return executor; + } +#endif + executor = std::make_shared(inverse); + primDesc->setImplementationType(ref_any); + return executor; +} + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/rdft.h b/src/plugins/intel_cpu/src/nodes/rdft.h index fccd6500a50f7c..0de2fa0095df81 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.h +++ b/src/plugins/intel_cpu/src/nodes/rdft.h @@ -4,6 +4,7 @@ #pragma once +#include "common/primitive_hashing_utils.hpp" #include "kernels/x64/rdft_kernel.hpp" #include "node.h" @@ -30,6 +31,8 @@ struct RDFTExecutor { const std::vector& outputShape, const std::vector& axes); + static std::shared_ptr build(bool inverse, NodeDesc* primDesc = nullptr); + protected: bool isInverse; @@ -125,6 +128,20 @@ class RDFT : public Node { bool isSignalSizesConstant = false; }; +struct RDFTKey { + bool isInverse; + + size_t hash() const { + size_t seed = 0; + seed = dnnl::impl::hash_combine(seed, isInverse); + return seed; + } + + bool operator==(const RDFTKey& rhs) const { + return isInverse == rhs.isInverse; + } +}; + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/stft.cpp b/src/plugins/intel_cpu/src/nodes/stft.cpp index 47855a7eff7399..31f3b673f38841 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.cpp +++ b/src/plugins/intel_cpu/src/nodes/stft.cpp @@ -4,6 +4,10 @@ #include "stft.h" +#include "cpu/x64/cpu_isa_traits.hpp" +#include "cpu/x64/jit_generator.hpp" +#include "nodes/common/cpu_memcpy.h" +#include "openvino/core/parallel.hpp" #include "openvino/core/type.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/stft.hpp" @@ -73,15 +77,95 @@ bool STFT::created() const { return getType() == Type::STFT; } +namespace { +static void transpose_out4d(const uint8_t* in, + uint8_t* out, + const VectorDims& in_shape, + const VectorDims& out_shape, + size_t elem_size) { + const std::vector axes_order{0, 2, 1, 3}; + parallel_for3d(out_shape[0], + out_shape[1], + out_shape[2], + [in, out, axes_order, &in_shape, &out_shape, elem_size](size_t i, size_t j, size_t k) { + size_t in_indexes[3]; + in_indexes[axes_order[0]] = i; + in_indexes[axes_order[1]] = j; + in_indexes[axes_order[2]] = k; + size_t in_off = + ((in_indexes[0] * in_shape[1] + in_indexes[1]) * in_shape[2] + in_indexes[2]) * in_shape[3]; + size_t out_off = ((i * out_shape[1] + j) * out_shape[2] + k) * out_shape[3]; + cpu_memcpy(out + out_off * elem_size, in + in_off * elem_size, out_shape[3] * elem_size); + }); +} +} // namespace + void STFT::execute(dnnl::stream strm) { - ov::reference::stft(getSrcDataAtPortAs(DATA_IDX), - getSrcDataAtPortAs(WINDOW_IDX), - getDstDataAtPortAs(0), - ov::Shape{getSrcMemoryAtPort(DATA_IDX)->getStaticDims()}, - ov::Shape{getSrcMemoryAtPort(WINDOW_IDX)->getStaticDims()}, - (getSrcDataAtPortAs(FRAME_SIZE_IDX))[0], - (getSrcDataAtPortAs(FRAME_STEP_IDX))[0], - m_transpose_frames); + const float* signal = getSrcDataAtPortAs(DATA_IDX); + const float* window = getSrcDataAtPortAs(WINDOW_IDX); + float* rdft_result = getDstDataAtPortAs(0); + const VectorDims& signal_shape = getSrcMemoryAtPort(DATA_IDX)->getStaticDims(); + const 
VectorDims& window_shape = getSrcMemoryAtPort(WINDOW_IDX)->getStaticDims(); + const int64_t frame_size = (getSrcDataAtPortAs(FRAME_SIZE_IDX))[0]; + const int64_t frame_step = (getSrcDataAtPortAs(FRAME_STEP_IDX))[0]; + + const auto is_signal_1D = signal_shape.size() == 1; + const size_t batch_size = is_signal_1D ? 1 : signal_shape[0]; + const size_t signal_axis = is_signal_1D ? 0 : 1; + const auto signal_length = signal_shape[signal_axis]; + const auto num_frames = static_cast((signal_length - frame_size) / frame_step) + 1; + const auto frame_size_dim = static_cast(frame_size); + const auto fft_out_shape = VectorDims{static_cast((frame_size_dim / 2) + 1), 2}; + const auto fft_out_shape_size = shape_size(fft_out_shape); + + const auto window_length = window_shape[0] < frame_size_dim ? window_shape[0] : frame_size_dim; + std::vector pad_window(frame_size, 0); + cpu_parallel_memcpy(pad_window.data() + (frame_size_dim - window_length) / 2, + window, + sizeof(float) * window_shape[0]); + + float* dst = rdft_result; + const auto stft_shape = VectorDims{batch_size, num_frames, fft_out_shape[0], fft_out_shape[1]}; + if (m_transpose_frames) { // Store intermediate results + MemoryPtr dst_mem = + getScratchPadMem(std::make_shared(ov::element::f32, Shape{stft_shape})); + dst = dst_mem->getDataAs(); + } + + parallel_for2d(batch_size, num_frames, [&](size_t batch, size_t frame_idx) { + size_t batch_in_start = batch * signal_length; + size_t batch_frames_out = batch * num_frames; + + const auto frame_start = batch_in_start + frame_idx * frame_step; + const auto frame_end = frame_start + frame_size; + std::vector signal_slice(signal + frame_start, signal + frame_end); + std::transform(signal_slice.begin(), + signal_slice.end(), + pad_window.begin(), + signal_slice.begin(), + std::multiplies()); + + const auto result_idx = (batch_frames_out + frame_idx) * fft_out_shape_size; + auto twiddles = rdft_executor->generateTwiddles({static_cast(signal_slice.size())}, fft_out_shape, {0}); + rdft_executor->execute(signal_slice.data(), + dst + result_idx, + twiddles, + 1, + {0}, + {static_cast(frame_size)}, + {frame_size_dim}, + fft_out_shape, + {1}, + {2, 1}); + }); + if (m_transpose_frames) { + const auto stft_transp_out_shape = VectorDims{batch_size, fft_out_shape[0], num_frames, fft_out_shape[1]}; + transpose_out4d(reinterpret_cast(dst), + reinterpret_cast(rdft_result), + stft_shape, + stft_transp_out_shape, + sizeof(float)); + } } void STFT::executeDynamicImpl(dnnl::stream strm) { @@ -92,6 +176,20 @@ bool STFT::needShapeInfer() const { return !(m_is_frame_size_const && m_is_frame_step_const) || Node::needShapeInfer(); } +void STFT::createPrimitive() { + RDFTKey key{}; + key.isInverse = false; + auto buildExecutor = [&](const RDFTKey& key) -> std::shared_ptr { + return RDFTExecutor::build(key.isInverse, getSelectedPrimitiveDescriptor()); + }; + + auto cache = context->getParamsCache(); + auto result = cache->getOrCreate(key, buildExecutor); + rdft_executor = result.first; + + Node::createPrimitive(); +} + } // namespace node } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/stft.h b/src/plugins/intel_cpu/src/nodes/stft.h index 7b1684cae4b674..608e14661910e2 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.h +++ b/src/plugins/intel_cpu/src/nodes/stft.h @@ -7,6 +7,7 @@ #include #include "node.h" +#include "rdft.h" namespace ov { namespace intel_cpu { @@ -21,6 +22,7 @@ class STFT : public Node { bool created() const override; static bool isSupportedOperation(const 
std::shared_ptr& op, std::string& errorMessage) noexcept; bool needPrepareParams() const override; + void createPrimitive() override; void execute(dnnl::stream strm) override; void executeDynamicImpl(dnnl::stream strm) override; @@ -35,6 +37,8 @@ class STFT : public Node { /// STFT params bool m_transpose_frames = false; + // RDFT executor + std::shared_ptr rdft_executor = nullptr; bool m_is_frame_size_const = false; bool m_is_frame_step_const = false; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp index 55c1ecde2aae10..3e9540f35f3273 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.hpp @@ -61,7 +61,7 @@ namespace intel_cpu { class ConvertGroupConvolution : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertGroupConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertGroupConvolution"); ConvertGroupConvolution(); }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp index 5674514eeb8e64..7898833529294b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv1d.hpp @@ -54,7 +54,7 @@ namespace ov { namespace intel_cpu { class ConvertConv1DBase : public ov::pass::MatcherPass { protected: - OPENVINO_RTTI("ConvertConv1DBase", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertConv1DBase"); template ov::matcher_pass_callback convert_conv1d_to_conv2d(); }; @@ -71,4 +71,4 @@ class ConvertGroupConv1D : public ConvertConv1DBase { ConvertGroupConv1D(); }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp index 8e5fd1e38b605a..947d7ee476bc81 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_multi_axis.hpp @@ -51,7 +51,7 @@ namespace intel_cpu { class ConvertReduceMultiAxisBase : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertReduceMultiAxisBase", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertReduceMultiAxisBase"); template ov::matcher_pass_callback convert_reduce(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp index ea4128ea265e42..6cc683154cc175 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_reduce_no_keep_dims.hpp @@ -45,7 +45,7 @@ namespace intel_cpu { class ConvertReduceNoKeepDimsBase : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertReduceNoKeepDims", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertReduceNoKeepDimsBase"); template ov::matcher_pass_callback convert_reduce(); }; diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp index 75b45dca468dc7..07384a8f1e24ca 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/mish_decomposition.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class MishDecomposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MishDecomposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("MishDecomposition"); MishDecomposition(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp index f9ab862b19f4dd..9b9762ee5e525b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.hpp @@ -19,7 +19,7 @@ namespace intel_cpu { class AlignMatMulInputRanks : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("AlignMatMulInputRanks", "0"); + OPENVINO_MATCHER_PASS_RTTI("AlignMatMulInputRanks"); AlignMatMulInputRanks(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp index 5801dbb8ae74a9..5f3058429a8497 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp @@ -24,7 +24,7 @@ using namespace ov::gen_pattern; class CausalMaskPreprocess : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("CausalMaskPreprocess", "0"); + OPENVINO_MATCHER_PASS_RTTI("CausalMaskPreprocess"); CausalMaskPreprocess(); private: diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp index b0b1e5632f908a..e1b3307a13bdc4 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_broadcast_to_tiles.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertBroadcastToTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBroadcastToTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBroadcastToTiles"); ConvertBroadcastToTiles(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp index ee3692f2ea4ca6..a51a357197ef26 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_fq_rnn_to_quantized_rnn.hpp @@ -76,7 +76,7 @@ namespace intel_cpu { class ConvertFqRnnToQuantizedRnn : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertFqRnnToQuantizedRnn", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertFqRnnToQuantizedRnn"); ConvertFqRnnToQuantizedRnn(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp index 03366161f8c904..cf628a3497f3a5 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertMatMulToFC : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMatMulToFC", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMatMulToFC"); ConvertMatMulToFC(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp index 5a4928477abec9..3760f2e000aee2 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_tile_to_seq_tiles.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertTileToSeqTiles : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertTileToSeqTiles", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertTileToSeqTiles"); ConvertTileToSeqTiles(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp index b09f14de5793c5..40d4ed2907dbba 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_leaky_relu.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToLeakyRelu : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToLeakyRelu", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToLeakyRelu"); ConvertToLeakyRelu(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp index d34a4c3667b5aa..3797eb780b3d13 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToPowerStatic : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToPowerStatic", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToPowerStatic"); ConvertToPowerStatic(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp index 3fe3569a13e745..3a640410db472b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_swish_cpu.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class ConvertToSwishCPU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToSwishCPU", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToSwishCPU"); ConvertToSwishCPU(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp index 329febfcd21e92..ac7e2c2e72f68a 100644 --- 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_integer_divide.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class DecomposeIntegerDivide : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DecomposeIntegerDivide", "0"); + OPENVINO_MATCHER_PASS_RTTI("DecomposeIntegerDivide"); DecomposeIntegerDivide(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp index f11a3afa23d20c..4cc58841a77bb7 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/decompose_rms_norm.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class DecomposeRMSNorm : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DecomposeRMSNorm", "0"); + OPENVINO_MATCHER_PASS_RTTI("DecomposeRMSNorm"); DecomposeRMSNorm(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp index d1e64969fbcf4e..5fadd183dfd694 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class FullyConnectedBiasFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FullyConnectedBiasFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FullyConnectedBiasFusion"); FullyConnectedBiasFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp index 49cfab68bc6911..7c52d3614407fd 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.hpp @@ -14,7 +14,7 @@ namespace pass { class InsertConvertAfterExtension : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("InsertConvertAfterExtension", "0"); + OPENVINO_MATCHER_PASS_RTTI("InsertConvertAfterExtension"); InsertConvertAfterExtension(bool convert_output_precision = true); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp index 0af0469ce0135e..d3869ff7509fae 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.hpp @@ -25,7 +25,7 @@ namespace intel_cpu { */ class MoveFCReshapeToWeights : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveFCReshapeToWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveFCReshapeToWeights"); MoveFCReshapeToWeights(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp index a90487391b32ac..7a3aa4b3432318 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp +++ 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_cpu { class NgramFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("NgramFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("NgramFusion"); NgramFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp index bbca2449a473af..369ef8ccab92e3 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.hpp @@ -14,7 +14,7 @@ namespace intel_cpu { class PermuteSliceAndInterpolation : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("PermuteSliceAndInterpolation", "0"); + OPENVINO_MATCHER_PASS_RTTI("PermuteSliceAndInterpolation"); PermuteSliceAndInterpolation(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp index d7bfd28f0db80d..a63a3dce8219c2 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/rnn_sequences_optimization.hpp @@ -11,19 +11,19 @@ namespace intel_cpu { class OptimizeGRUSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeGRUSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeGRUSequenceTransposes"); OptimizeGRUSequenceTransposes(); }; class OptimizeLSTMSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeLSTMSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeLSTMSequenceTransposes"); OptimizeLSTMSequenceTransposes(); }; class OptimizeRNNSequenceTransposes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeRNNSequenceTransposes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeRNNSequenceTransposes"); OptimizeRNNSequenceTransposes(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp index bc000eb0485cd2..96028402aa9f92 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.hpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { class StatefulSDPAFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("StatefulSDPAFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("StatefulSDPAFusion"); StatefulSDPAFusion(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp index 747ca3482eaf0c..7c53d73104fc1d 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/swap_convert_transpose.hpp @@ -11,6 +11,7 @@ namespace intel_cpu { class SwapConvertTranspose : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("SwapConvertTranspose"); SwapConvertTranspose(); }; diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp index 9793c63de821ec..614f7d690f8726 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp @@ -36,26 +36,11 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr& model, const C CPU_REGISTER_PASS_COMMON(manager, ConvertMatMulToFC); CPU_REGISTER_PASS_COMMON(manager, FullyConnectedBiasFusion); - std::vector supported_activation_types{ - // @todo enable for bf16 as well - // after EnforceInferencePrecision is replaced with ConvertPrecision - ov::element::f32, - }; - - std::vector supported_compressed_weights_types{ - ov::element::u8, - ov::element::i8, - ov::element::u4, - ov::element::i4, - ov::element::nf4, - ov::element::f4e2m1, - }; - - CPU_REGISTER_PASS_X64( + CPU_REGISTER_PASS_COMMON( manager, pass::ConvertFullyConnectedToFullyConnectedCompressed, - supported_activation_types, - supported_compressed_weights_types, + ov::intel_cpu::node::FullyConnected::getSupportedCompressedActivationsTypes(), + ov::intel_cpu::node::FullyConnected::getSupportedCompressedWeightsTypes(), [&config](const std::shared_ptr& fc, size_t IC, size_t OC, size_t G) { return ov::intel_cpu::node::FullyConnected::isSupportedCompressedOperation(fc, IC, @@ -65,8 +50,8 @@ inline void ConvertToCPUSpecificOpset(std::shared_ptr& model, const C }); CPU_REGISTER_PASS_X64(manager, pass::ConvertFCToFCQuantizedLegacy); - CPU_REGISTER_PASS_X64(manager, MoveFCReshapeToWeights); - CPU_REGISTER_PASS_X64(manager, ov::pass::Validate); + CPU_REGISTER_PASS_COMMON(manager, MoveFCReshapeToWeights); + CPU_REGISTER_PASS_COMMON(manager, ov::pass::Validate); CPU_REGISTER_PASS_COMMON(manager, AlignMatMulInputRanks); CPU_REGISTER_PASS_COMMON(manager, ConvertTileToSeqTiles); CPU_REGISTER_PASS_COMMON(manager, ConvertToPowerStatic); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp index 9fee162128f3ab..d9ed8e298d18fe 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/convert_to_interaction.hpp @@ -11,19 +11,19 @@ namespace intel_cpu { class ConvertToInteraction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertToInteraction", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertToInteraction"); ConvertToInteraction(); }; class FuseFQtoInteraction : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FuseFQtoInteraction", "0"); + OPENVINO_MATCHER_PASS_RTTI("FuseFQtoInteraction"); FuseFQtoInteraction(); }; class ConvertInteractionInt8 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertInteractionInt8", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertInteractionInt8"); ConvertInteractionInt8(); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp index 9273220c4a0a0c..fe4f4ccae04f1c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.hpp @@ -11,6 +11,9 @@ namespace ov { namespace intel_cpu { class MHAFusionBase : public ov::pass::MatcherPass 
{ +public: + OPENVINO_MATCHER_PASS_RTTI("MHAFusionBase"); + protected: bool valid_transpose_order(const std::shared_ptr& node, const std::vector& expected_order) { if (auto transpose_pattern = ov::as_type_ptr(node)) { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp index 5754d0fa9b622f..139aaaa488a1cb 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.hpp @@ -11,9 +11,9 @@ namespace intel_cpu { class MLPFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MLPFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("MLPFusion"); MLPFusion(); }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp index bfaf42f1f0acd3..e5b1743a68f7e6 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/qkv_proj_fusion.hpp @@ -11,15 +11,15 @@ namespace intel_cpu { class QKVProjFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("QKVProjFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("QKVProjFusion"); QKVProjFusion(); }; class QKVProjFusion2 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("QKVProjFusion2", "0"); + OPENVINO_MATCHER_PASS_RTTI("QKVProjFusion2"); QKVProjFusion2(); }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp index d2bb84893c0728..ebba0a3ec9e185 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/sdpa_fuse_transpose_reshape.hpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { class SDPAFuseTransposeReshape : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("SDPAFuseTransposeReshape", "0"); + OPENVINO_MATCHER_PASS_RTTI("SDPAFuseTransposeReshape"); SDPAFuseTransposeReshape(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp index 25b10d55ca8165..c567e7c38c2ef1 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp @@ -212,6 +212,13 @@ auto is_skipped_op(const std::shared_ptr& op) -> bool { return ov::is_type(op) || ov::is_type(op) || ov::is_type(op); } + +bool isSuitableMatMulWithConstantPath(const std::shared_ptr& node) { + return ov::is_type(node) && + !ov::is_type(node->get_input_node_shared_ptr(1)) && + ov::op::util::is_on_constant_path(node->input_value(1)); +} + } // namespace bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { @@ -220,6 +227,15 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { for (auto& node : m->get_ordered_ops()) { if (is_skipped_op(node)) continue; + // We perform this check separately because we mark here only 
weights path + // Matmul itself will be checked further + if (isSuitableMatMulWithConstantPath(node)) { + auto markup_func = [](Node* node) { + SetSnippetsNodeType(node->shared_from_this(), snippets::pass::SnippetsNodeType::SkippedByPlugin); + }; + std::unordered_set visited; + ov::op::util::visit_constant_path(node->get_input_node_ptr(1), visited, markup_func); + } if (isSuitableConvolutionParent(node)) { // Initiate fusing chain SetNodeFusingType(node, NodeFusingType::FusedWithConvolution); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp index 2b17039d198bf3..8ca0424ccda030 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.hpp @@ -16,7 +16,7 @@ namespace intel_cpu { */ class SnippetsMarkSkipped : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SnippetsMarkSkipped", "0"); + OPENVINO_MODEL_PASS_RTTI("SnippetsMarkSkipped"); SnippetsMarkSkipped() : ModelPass() {} bool run_on_model(const std::shared_ptr&) override; }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp b/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp index 57cf48283e4552..b171e861b23cba 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/common/pass/mul_add_to_fma.hpp @@ -17,6 +17,7 @@ namespace pass { */ class MulAddToFMA : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("MulAddToFMA"); MulAddToFMA(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp index 9475171b24f65d..245f83c13c3466 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp @@ -34,7 +34,7 @@ namespace pass { */ class BrgemmToBrgemmCPU : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BrgemmToBrgemmCPU", "0"); + OPENVINO_MATCHER_PASS_RTTI("BrgemmToBrgemmCPU"); BrgemmToBrgemmCPU(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp index 89815eb1d6ffbf..c330bc9c922381 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/eliminate_brgemm_copy_b.hpp @@ -20,7 +20,7 @@ namespace pass { */ class EliminateBrgemmCopyB : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EliminateBrgemmCopyB", "0"); + OPENVINO_MATCHER_PASS_RTTI("EliminateBrgemmCopyB"); EliminateBrgemmCopyB(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp index 05f8d1206715f0..24e848cf157e0e 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp @@ -15,7 +15,7 @@ namespace pass { class EnforcePrecision : public 
ov::pass::ModelPass { public: - OPENVINO_RTTI("EnforcePrecision", "0"); + OPENVINO_MODEL_PASS_RTTI("EnforcePrecision"); EnforcePrecision(const element::Type source, const element::Type target, diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp index 000321a8918ccd..2c965871840572 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/remove_converts.hpp @@ -17,7 +17,7 @@ namespace pass { */ class RemoveConverts : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("RemoveConverts", "0"); + OPENVINO_MATCHER_PASS_RTTI("RemoveConverts"); RemoveConverts(); }; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp index fc5250defac8cb..f71a376ee1f038 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.hpp @@ -16,7 +16,7 @@ namespace intel_cpu { */ class SnippetsMarkSkipped : public ov::pass::ModelPass { public: - OPENVINO_RTTI("SnippetsMarkSkipped", "0"); + OPENVINO_MODEL_PASS_RTTI("SnippetsMarkSkipped"); SnippetsMarkSkipped(bool enableBF16 = false) : ModelPass(), enableBF16(enableBF16) {} bool run_on_model(const std::shared_ptr&) override; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp index 0fb26f6df1d6b2..2b73104d1e1335 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class BrgemmToBrgemmTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BrgemmToBrgemmTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("BrgemmToBrgemmTPP"); BrgemmToBrgemmTPP(); static bool is_supported_brgemm_configuration(const std::vector>& layouts, diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp index 189d048e86ffd7..f0bdab120c3498 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class EltwiseToEltwiseTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("EltwiseToEltwiseTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("EltwiseToEltwiseTPP"); EltwiseToEltwiseTPP(); }; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp index 9a00f97d9c464d..a99330845d443d 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp @@ -18,7 +18,7 @@ namespace pass { */ class FuseTPPToEquations: public ov::pass::ModelPass { public: - OPENVINO_RTTI("FuseTPPToEquations", "0"); + OPENVINO_MODEL_PASS_RTTI("FuseTPPToEquations"); FuseTPPToEquations() = default; bool run_on_model(const std::shared_ptr& m) override; private: diff 
--git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp index 8bbfcf80c751ac..a56e23363067e2 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ScalarToScalarTPP: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ScalarToScalarTPP", "0"); + OPENVINO_MATCHER_PASS_RTTI("ScalarToScalarTPP"); ScalarToScalarTPP(); }; diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 469abbd99eb149..a63377312ecb95 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -337,19 +337,14 @@ void Transformations::PreLpt(const std::vector& defaultPrecis CPU_REGISTER_PASS_COMMON(decompression_handling_manager, ov::pass::MarkShapeOfSubgraphs); // We need to fuse Transpose to MatMul to have a simpler callback for the next transformation CPU_REGISTER_PASS_X64(decompression_handling_manager, ov::pass::TransposeMatMul); - ov::element::TypeVector decompression_precisions{ov::element::u8, - ov::element::i8, - ov::element::u4, - ov::element::i4, - ov::element::nf4, - ov::element::f4e2m1}; - - CPU_REGISTER_PASS_X64(decompression_handling_manager, - ov::pass::MarkDequantization, - decompression_precisions, - false, - true); - CPU_SET_CALLBACK_X64( + CPU_REGISTER_PASS_ARM(decompression_handling_manager, ov::pass::TransposeMatMul); + const auto& decompression_precisions = ov::intel_cpu::node::FullyConnected::getSupportedCompressedWeightsTypes(); + CPU_REGISTER_PASS_COMMON(decompression_handling_manager, + ov::pass::MarkDequantization, + decompression_precisions, + false, + true); + CPU_SET_CALLBACK_COMMON( decompression_handling_manager, [&](const_node_ptr& node) -> bool { return !is_decompression_multiply(node); @@ -1122,9 +1117,10 @@ void Transformations::MainSnippets(void) { auto is_supported_op = [](const std::shared_ptr& n) -> bool { #if defined(OPENVINO_ARCH_ARM64) return (ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp index 57f098e1f234d2..0f63a7517b5745 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.cpp @@ -267,6 +267,7 @@ const std::map>>& activat static const std::map>> activationTypes { {Abs, {{}}}, {Exp, {{}}}, + {Ceiling, {{}}}, {Clamp, {{-2.0f, 2.0f}}}, {Elu, {{0.1f}}}, {Floor, {{}}}, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp index 1696f35fc1bc4a..3f48b1f0b1e976 100644 --- 
a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/eltwise.cpp @@ -264,7 +264,8 @@ std::string EltwiseLayerCPUTest::getPrimitiveType(const utils::EltwiseTypes& elt return "jit"; } #endif - if (eltwise_type == utils::EltwiseTypes::MOD) { + if (eltwise_type == utils::EltwiseTypes::FLOOR_MOD || + eltwise_type == utils::EltwiseTypes::MOD) { return "ref"; } else { return "acl"; @@ -317,10 +318,8 @@ const std::vector& eltwiseOpTypesBinInp() { #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) utils::EltwiseTypes::SUBTRACT, // TODO: Fix CVS-105430 utils::EltwiseTypes::DIVIDE, // TODO: Fix CVS-105430 - utils::EltwiseTypes::FLOOR_MOD, // TODO: Fix CVS-111875 -#elif defined(OPENVINO_ARCH_ARM64) - utils::EltwiseTypes::FLOOR_MOD, #endif + utils::EltwiseTypes::FLOOR_MOD, utils::EltwiseTypes::SQUARED_DIFF, utils::EltwiseTypes::MOD, }; @@ -331,6 +330,7 @@ const std::vector& eltwiseOpTypesBinInpSnippets() { static const std::vector eltwiseOpTypesBinInp = { utils::EltwiseTypes::ADD, utils::EltwiseTypes::MULTIPLY, + utils::EltwiseTypes::FLOOR_MOD, utils::EltwiseTypes::MOD, }; return eltwiseOpTypesBinInp; diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp new file mode 100644 index 00000000000000..408dd40b4c658f --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp @@ -0,0 +1,86 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp" + +#include "openvino/util/env_util.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +namespace { + +std::vector filter_additional_config_basic() { + return {{}, {ov::hint::inference_precision(ov::element::f16)}}; +} + +const std::vector decompression_precisions = {ov::element::f32}; +const std::vector weights_precisions = {ov::element::u8, ov::element::i8}; + +bool should_use_decompression_impl() { +#ifdef CPU_DEBUG_CAPS + return ov::util::getenv_bool("OV_CPU_ENABLE_DNNL_MAMTUL_FOR_FC"); +#else + return false; +#endif +} + +const std::vector input_shapes = { + {{{-1, -1, -1}, {{1, 4, 16}, {10, 16, 16}}}, {16, 32}}, + {{{}, {{1, 8, 16}}}, {16, 32}, 4ul}, + {{{}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{}, {{5, 40, 96}}}, {1, 96, 240}}, + {{{}, {{1, 4, 48}}}, {48, 256}}, + {{{}, {{1, 11, 104}}}, {104, 77}, 104ul}, + {{{-1, -1, -1}, {{10, 40, 110}, {11, 40, 110}}}, {1, 110, 256}}, +}; +const std::vector fusing_params{emptyFusingSpec, fusingBias}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights, + MatmulWeightsDecompression, + ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions), + ::testing::Values(ov::element::undefined), + ::testing::Values(true), + ::testing::Values(DecompressionSubtractType::full), + ::testing::Values(false), + ::testing::ValuesIn(filter_additional_config_basic()), + ::testing::ValuesIn(fusing_params), + ::testing::Values(should_use_decompression_impl())), + MatmulWeightsDecompression::getTestCaseName); + +const std::vector input_shapes_corner_cases = { + {{{-1, -1, -1}, {{1, 4, 16}}}, {1, 16, 32}}, + {{{-1, -1, -1}, {{1, 4, 16}}}, {16, 32}}, + {{{-1, -1, -1}, {{1, 5, 
16}}}, {16, 32}, 4ul}, + {{{-1, -1, -1}, {{1, 1, 128}}}, {128, 128}, 16ul}, +}; + +const std::vector transpose_weights = {true, false}; +const std::vector decompression_subtract_type = {DecompressionSubtractType::full, + DecompressionSubtractType::scalar, + DecompressionSubtractType::empty}; +const std::vector reshape_on_decompression = {true, false}; +const std::vector decompression_precisions_corner_cases = {ov::element::f16, ov::element::f32}; + +INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases, + MatmulWeightsDecompression, + ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases), + ::testing::ValuesIn(weights_precisions), + ::testing::ValuesIn(decompression_precisions_corner_cases), + ::testing::Values(ov::element::undefined), + ::testing::ValuesIn(transpose_weights), + ::testing::ValuesIn(decompression_subtract_type), + ::testing::ValuesIn(reshape_on_decompression), + ::testing::ValuesIn(filter_additional_config_basic()), + ::testing::Values(emptyFusingSpec), + ::testing::Values(should_use_decompression_impl())), + MatmulWeightsDecompression::getTestCaseName); + +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp new file mode 100644 index 00000000000000..e14245f2906e16 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.cpp @@ -0,0 +1,167 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matmul_weights_decompression.hpp" +#include "openvino/runtime/intel_cpu/properties.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +std::string MatmulWeightsDecompression::getTestCaseName(testing::TestParamInfo obj) { + MatMulDecompressionShapeParams shape_params; + ov::test::ElementType weights_precision; + ov::test::ElementType decompression_precision; + ov::test::ElementType scale_precision; + bool transpose; + DecompressionSubtractType decompression_subtract_type; + bool reshape_on_decompression; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, + weights_precision, + decompression_precision, + scale_precision, + transpose, + decompression_subtract_type, + reshape_on_decompression, + additional_config, + fusing_params, + should_fuse) = obj.param; + + std::ostringstream result; + result << shape_params << "_"; + result << "weights_precision=" << weights_precision << "_"; + result << "decompression_precision=" << decompression_precision << "_"; + result << "scale_precision=" << scale_precision << "_"; + result << "transpose_weights=" << transpose << "_"; + result << "decompression_subtract=" << decompression_subtract_type << "_"; + result << "reshape_on_decompression=" << reshape_on_decompression << "_"; + + result << "config=("; + for (const auto& configEntry : additional_config) { + result << configEntry.first << ", " << configEntry.second.as() << "_"; + } + result << ")"; + result << CpuTestWithFusing::getTestCaseName(fusing_params); + + return result.str(); +} + +std::shared_ptr MatmulWeightsDecompression::initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const 
ov::element::Type decompression_precision, + const ov::element::Type scale_precision, + const bool transpose_weights, + const DecompressionSubtractType decompression_subtract_type, + const bool reshape_on_decompression) { + ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; + const auto weights_subgraph = initMatMulDecompressionSubgraph(weights_shape, + group_size, + data_precision, + weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression); + auto matMul = std::make_shared(params[0], weights_subgraph); + return makeNgraphFunction(data_precision, params, matMul, "MatmulWeightsDecompression"); +} + +void MatmulWeightsDecompression::SetUp() { + targetDevice = ov::test::utils::DEVICE_CPU; + + MatMulDecompressionShapeParams shape_params; + ov::test::ElementType weights_precision; + ov::test::ElementType decompression_precision; + ov::test::ElementType scale_precision; + bool transpose_weights; + DecompressionSubtractType decompression_subtract_type; + bool reshape_on_decompression; + ov::AnyMap additional_config; + fusingSpecificParams fusing_params; + bool should_fuse; + + std::tie(shape_params, + weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression, + additional_config, + fusing_params, + should_fuse) = GetParam(); + + configuration.insert(additional_config.begin(), additional_config.end()); + std::tie(postOpMgrPtr, fusedOps) = fusing_params; + init_input_shapes({shape_params.data_shape}); + + if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) { + abs_threshold = 5e-3; + } + + // if dynamic quantization is enabled + if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) && + configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) { + abs_threshold = 0.1; + } + + if (configuration.count(ov::hint::inference_precision.name()) && + configuration.at(ov::hint::inference_precision.name()) == ov::element::f16) { + abs_threshold = 0.2; + } + + ElementType netType = ov::element::f32; + inType = outType = netType; + + function = initSubgraph(inputDynamicShapes[0], + shape_params.weights_shape, + shape_params.decompression_group_size, + netType, + weights_precision, + decompression_precision, + scale_precision, + transpose_weights, + decompression_subtract_type, + reshape_on_decompression); +} + +void MatmulWeightsDecompression::check_results() { + const auto& test_param = GetParam(); + const ov::element::Type compressed_weights_precision = std::get<1>(test_param); + const bool use_matmul_decompression_impl = std::get<9>(test_param); + + const auto runtime_model = compiledModel.get_runtime_model(); + const auto result = runtime_model->get_result(); + auto fc = result->get_input_node_shared_ptr(0); + // Handle precision conversion before output + auto type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); + if (type == "Reorder" || type == "Convert" || type == "Subgraph") + fc = fc->get_input_node_shared_ptr(0); + + type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); + EXPECT_EQ(type, "FullyConnected"); + + const auto& expected_weights_precision = use_matmul_decompression_impl + ? 
compressed_weights_precision + : fc->get_input_element_type(0); + EXPECT_EQ(fc->get_input_element_type(1), expected_weights_precision); +} + +TEST_P(MatmulWeightsDecompression, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + run(); + check_results(); +} + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp new file mode 100644 index 00000000000000..266aab8e445928 --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp @@ -0,0 +1,79 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "shared_test_classes/subgraph/weights_decompression_builders.hpp" +#include "utils/cpu_test_utils.hpp" +#include "utils/fusing_test_utils.hpp" + +using namespace CPUTestUtils; + +namespace ov { +namespace test { + +/* + * WP - weights precision + * DP - decompression precision + * IP - input precision + * SP - scale precision + * Opt - optional + * Subtract_const(WP) + * / + * Weights(WP) Convert(DP) + * | / Multiply_const(SP) + * Convert(DP) Reshape (Opt) / + * \ / Convert(if SP != DP) + * Subtract(Opt) / + * \ Reshape (Opt) + * \ / + * Multiply + * | + * Reshape (in case of group decompression) + * | + * Convert (if IP != DP) + * | + * Data(IP) Transpose(Opt) + * \ / + * Matmul + * | + * Bias + */ +typedef std::tuple // should use decompression implementation + MatmulWeightsDecompressionParams; + +class MatmulWeightsDecompression : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CpuTestWithFusing { +public: + static std::string getTestCaseName(testing::TestParamInfo obj); + +protected: + std::shared_ptr initSubgraph(const ov::PartialShape& data_shape, + const ov::Shape& weights_shape, + const int group_size, + const ov::element::Type data_precision, + const ov::element::Type weights_precision, + const ov::element::Type decompression_precision, + const ov::element::Type scale_precision, + const bool transpose_weights, + const DecompressionSubtractType decompression_subtract_type, + const bool reshape_on_decompression); + + void SetUp() override; + + void check_results(); +}; + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp index 9a434943893eed..5a5a375566b955 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp @@ -1,200 +1,13 @@ -// Copyright (C) 2023-2024 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" -#include "utils/fusing_test_utils.hpp" -#include "openvino/runtime/intel_cpu/properties.hpp" -#include "shared_test_classes/subgraph/weights_decompression_builders.hpp" +#include "custom/subgraph_tests/src/classes/matmul_weights_decompression.hpp" using namespace CPUTestUtils; namespace ov { namespace test { -/* - * WP - weights precision - * DP - 
decompression precision - * IP - input precision - * SP - scale precision - * Opt - optional - * Subtract_const(WP) - * / - * Weights(WP) Convert(DP) - * | / Multiply_const(SP) - * Convert(DP) Reshape (Opt) / - * \ / Convert(if SP != DP) - * Subtract(Opt) / - * \ Reshape (Opt) - * \ / - * Multiply - * | - * Reshape (in case of group decompression) - * | - * Convert (if IP != DP) - * | - * Data(IP) Transpose(Opt) - * \ / - * Matmul - * | - * Bias - */ -using MatmulWeightsDecompressionParams = std::tuple; // should use decompression implementation - -class MatmulWeightsDecompression : public testing::WithParamInterface, - virtual public SubgraphBaseTest, - public CpuTestWithFusing { -public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - MatMulDecompressionShapeParams shape_params; - ov::test::ElementType weights_precision; - ov::test::ElementType decompression_precision; - ov::test::ElementType scale_precision; - bool transpose; - DecompressionSubtractType decompression_subtract_type; - bool reshape_on_decompression; - ov::AnyMap additional_config; - fusingSpecificParams fusing_params; - bool should_fuse; - - std::tie(shape_params, - weights_precision, - decompression_precision, - scale_precision, - transpose, - decompression_subtract_type, - reshape_on_decompression, - additional_config, - fusing_params, - should_fuse) = obj.param; - - std::ostringstream result; - result << shape_params << "_"; - result << "weights_precision=" << weights_precision << "_"; - result << "decompression_precision=" << decompression_precision << "_"; - result << "scale_precision=" << scale_precision << "_"; - result << "transpose_weights=" << transpose << "_"; - result << "decompression_subtract=" << decompression_subtract_type << "_"; - result << "reshape_on_decompression=" << reshape_on_decompression << "_"; - - result << "config=("; - for (const auto& configEntry : additional_config) { - result << configEntry.first << ", " << configEntry.second.as() << "_"; - } - result << ")"; - result << CpuTestWithFusing::getTestCaseName(fusing_params); - - return result.str(); - } - -protected: - std::shared_ptr initSubgraph(const ov::PartialShape& data_shape, - const ov::Shape& weights_shape, - const int group_size, - const ov::element::Type data_precision, - const ov::element::Type weights_precision, - const ov::element::Type decompression_precision, - const ov::element::Type scale_precision, - const bool transpose_weights, - const DecompressionSubtractType decompression_subtract_type, - const bool reshape_on_decompression) { - ov::ParameterVector params{std::make_shared(data_precision, data_shape)}; - const auto weights_subgraph = initMatMulDecompressionSubgraph(weights_shape, - group_size, - data_precision, - weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression); - auto matMul = std::make_shared(params[0], weights_subgraph); - return makeNgraphFunction(data_precision, params, matMul, "MatmulWeightsDecompression"); - } - - void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; - - MatMulDecompressionShapeParams shape_params; - ov::test::ElementType weights_precision; - ov::test::ElementType decompression_precision; - ov::test::ElementType scale_precision; - bool transpose_weights; - DecompressionSubtractType decompression_subtract_type; - bool reshape_on_decompression; - ov::AnyMap additional_config; - fusingSpecificParams fusing_params; - bool should_fuse; - - std::tie(shape_params, - 
weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression, - additional_config, - fusing_params, - should_fuse) = GetParam(); - - configuration.insert(additional_config.begin(), additional_config.end()); - std::tie(postOpMgrPtr, fusedOps) = fusing_params; - init_input_shapes({shape_params.data_shape}); - - // if dynamic quantization is enabled - if (configuration.count(ov::hint::dynamic_quantization_group_size.name()) && - configuration.at(ov::hint::dynamic_quantization_group_size.name()) != 0) { - abs_threshold = 0.1; - } else if (!configuration.count(ov::hint::dynamic_quantization_group_size.name())) { - abs_threshold = 5e-3; - } - - ElementType netType = ov::element::f32; - inType = outType = netType; - - function = initSubgraph(inputDynamicShapes[0], - shape_params.weights_shape, - shape_params.decompression_group_size, - netType, - weights_precision, - decompression_precision, - scale_precision, - transpose_weights, - decompression_subtract_type, - reshape_on_decompression); - } - - void check_results() { - const auto& test_param = GetParam(); - const ov::element::Type compressed_weights_precision = std::get<1>(test_param); - const bool use_matmul_decompression_impl = std::get<9>(test_param); - - const auto runtime_model = compiledModel.get_runtime_model(); - const auto result = runtime_model->get_result(); - const auto fc = result->get_input_node_shared_ptr(0); - const auto type = fc->get_rt_info().at(ov::exec_model_info::LAYER_TYPE).as(); - EXPECT_EQ(type, "FullyConnected"); - - const auto& expected_weights_precision = use_matmul_decompression_impl - ? compressed_weights_precision - : fc->get_input_element_type(0); - EXPECT_EQ(fc->get_input_element_type(1), expected_weights_precision); - } -}; - -TEST_P(MatmulWeightsDecompression, CompareWithRefs) { - SKIP_IF_CURRENT_TEST_IS_DISABLED() - run(); - check_results(); -} namespace { @@ -205,7 +18,8 @@ std::vector filter_additional_config_basic() { std::vector filter_additional_config_amx() { std::vector additional_config = {}; if (ov::with_cpu_x86_avx512_core_amx()) - additional_config.push_back({{ov::hint::dynamic_quantization_group_size(0), ov::hint::inference_precision(ov::element::bf16)}}); + additional_config.push_back( + {{ov::hint::dynamic_quantization_group_size(0), ov::hint::inference_precision(ov::element::bf16)}}); return additional_config; } @@ -310,8 +124,9 @@ const std::vector input_shapes_corner_cases_amx }; const std::vector transpose_weights = {true, false}; -const std::vector decompression_subtract_type = { - DecompressionSubtractType::full, DecompressionSubtractType::scalar, DecompressionSubtractType::empty}; +const std::vector decompression_subtract_type = {DecompressionSubtractType::full, + DecompressionSubtractType::scalar, + DecompressionSubtractType::empty}; const std::vector reshape_on_decompression = {true, false}; const std::vector decompression_precisions_corner_cases = {ov::element::f16, ov::element::f32}; @@ -387,12 +202,11 @@ const std::vector input_shapes_basic_dyn_quant = {{{}, {{1, 1, 1728}}}, {1728, 128}, 64lu}, }; -const std::vector weights_precisions_dyn_quant = {ov::element::u8, - ov::element::u4}; +const std::vector weights_precisions_dyn_quant = {ov::element::u8, ov::element::u4}; std::vector filter_additional_config_dyn_quant() { std::vector additional_config = { - {{ov::hint::dynamic_quantization_group_size(0)}}, // dynamic quantization is disabled + {{ov::hint::dynamic_quantization_group_size(0)}}, // dynamic 
quantization is disabled {{ov::hint::dynamic_quantization_group_size(16)}}, {{ov::hint::dynamic_quantization_group_size(128)}}, }; diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp index 91505d131d6aa5..f3cf3280288846 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp @@ -10,75 +10,10 @@ namespace ov { namespace test { -using ov::test::STFTLayerTest; -const std::vector data_type = {ov::element::f32, ov::element::bf16}; -const std::vector step_size_type = {ov::element::i32, ov::element::i64}; - -const std::vector> input_shapes = { - { // Static shapes - {{}, {{128}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Static shapes - {{}, {{1, 128}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Static shapes - {{}, {{2, 226}}}, // 1st input - {{}, {{16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first input shape - {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input - {{}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first and second input shape - {{-1}, {{128}}}, // 1st input - {{-1}, {{8}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims in the first and second input shape - {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input - {{-1}, {{8}, {16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - }, - { // Dynamic dims with range in the first and second input shape - {{{2, 4}, {1, 300}}, {{2, 226}, {3, 128}}}, // 1st input - {{{3, 16}}, {{4}, {16}}}, // 2nd input - {{}, {{}}}, // 3rd input - {{}, {{}}} // 4th input - } -}; - -const std::vector frame_size = {16, 24}; -const std::vector step_size = {2, 3, 4}; - -const std::vector transpose_frames = { - false, - true, -}; - -std::vector in_types = {utils::InputLayerType::CONSTANT, utils::InputLayerType::PARAMETER}; - -const auto testCaseStatic = ::testing::Combine(::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(frame_size), - ::testing::ValuesIn(step_size), - ::testing::ValuesIn(transpose_frames), - ::testing::ValuesIn(data_type), - ::testing::ValuesIn(step_size_type), - ::testing::ValuesIn(in_types), - ::testing::Values(ov::test::utils::DEVICE_CPU)); - -INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, STFTLayerTest, testCaseStatic, STFTLayerTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, + STFTLayerTest, + STFTLayerTest::GetTestDataForDevice(ov::test::utils::DEVICE_CPU), + STFTLayerTest::getTestCaseName); } // namespace test } // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 4c21c06c491179..7af707df602bfc 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -481,7 +481,6 @@ std::vector disabledTestPatterns() { // Issue 88764, 91647, 108802: accuracy issue retVector.emplace_back(R"(MultipleLSTMCellTest/MultipleLSTMCellTest.CompareWithRefs.*)"); // Compressed 
weights are not supported - retVector.emplace_back(R"(smoke_MatMulCompressedWeights.*)"); retVector.emplace_back(R"(smoke_MatMulSharedCompressedWeights.*)"); retVector.emplace_back(R"(smoke_MatmulAndGatherSharedWeightsDecompression.*)"); // smoke_Snippets test cases are not supported on arm32 platforms diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index e234bc68de0750..c7524f1880157d 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -273,6 +273,7 @@ REGISTER_FACTORY(v15, ROIAlignRotated); REGISTER_FACTORY(v15, BitwiseRightShift); REGISTER_FACTORY(v15, BitwiseLeftShift); REGISTER_FACTORY(v15, SearchSorted); +REGISTER_FACTORY(v15, STFT); // --------------------------- Supported internal ops --------------------------- // REGISTER_FACTORY(internal, NonMaxSuppressionIEInternal); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp new file mode 100644 index 00000000000000..8cb1a4028dfa26 --- /dev/null +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/stft.hpp @@ -0,0 +1,62 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "primitive.hpp" + +namespace cldnn { + +/// @brief Short Time Fourier Transform (STFT) operation. +/// @details Check the specification for details. +struct STFT : public primitive_base { + CLDNN_DECLARE_PRIMITIVE(STFT) + + STFT() : primitive_base("", {}) {} + + /// @brief Constructs STFT primitive. + /// @param id This primitive id. + /// @param signal Signal input. + /// @param window Window input. + /// @param frame_size Size of the frame. + /// @param frame_step Step between frames. + /// @param transpose_frames Enable/Disable transpose_frames (check the specification for details). + + STFT(const primitive_id& id, + const input_info& signal, + const input_info& window, + const input_info& frame_size, + const input_info& frame_step, + const bool transpose_frames) + : primitive_base(id, {signal, window, frame_size, frame_step}), + transpose_frames(transpose_frames) {} + + /// @brief Enable/Disable transpose_frames (check the specification for details).
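+ /// When true, the frame and frequency dimensions of the output are swapped: the result is laid out as (batch, frequency, frame, 2) instead of (batch, frame, frequency, 2), matching the output indexing in stft_ref.cl.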
+ bool transpose_frames = false; + + size_t hash() const override { + size_t seed = primitive::hash(); + seed = hash_combine(seed, transpose_frames); + return seed; + } + + bool operator==(const primitive& rhs) const override { + if (!compare_common_params(rhs)) + return false; + + auto rhs_casted = downcast(rhs); + + return transpose_frames == rhs_casted.transpose_frames; + } + + void save(BinaryOutputBuffer& ob) const override { + primitive_base::save(ob); + ob << transpose_frames; + } + + void load(BinaryInputBuffer& ib) override { + primitive_base::load(ib); + ib >> transpose_frames; + } +}; +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp index 2a38d20ac8c9bc..29c0e3371cad2e 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.cpp @@ -88,6 +88,7 @@ void register_implementations() { REGISTER_OCL(scaled_dot_product_attention); REGISTER_OCL(rope); REGISTER_OCL(search_sorted); + REGISTER_OCL(STFT); } } // namespace ocl diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp index c65a23822a6922..28e8956619b223 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/register.hpp @@ -162,6 +162,7 @@ REGISTER_OCL(unique_gather); REGISTER_OCL(scaled_dot_product_attention); REGISTER_OCL(rope); REGISTER_OCL(search_sorted); +REGISTER_OCL(STFT); #undef REGISTER_OCL diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp new file mode 100644 index 00000000000000..329b442e731373 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/stft.cpp @@ -0,0 +1,97 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "primitive_base.hpp" +#include "stft/stft_kernel_base.h" +#include "stft/stft_kernel_selector.h" +#include "stft_inst.h" + +namespace cldnn { +namespace ocl { + +struct STFT_impl : typed_primitive_impl_ocl { + using parent = typed_primitive_impl_ocl; + using parent::parent; + using kernel_selector_t = kernel_selector::STFT_kernel_selector; + using kernel_params_t = kernel_selector::STFT_params; + + DECLARE_OBJECT_TYPE_SERIALIZATION(cldnn::ocl::STFT_impl) + + std::unique_ptr clone() const override { + return make_unique(*this); + } + + void load(BinaryInputBuffer& ib) override { + parent::load(ib); + if (is_dynamic()) { + auto& kernel_selector = kernel_selector_t::Instance(); + auto kernel_impl = kernel_selector.GetImplementation(_kernel_data.kernelName); + kernel_impl->GetUpdateDispatchDataFunc(_kernel_data); + } + } + + void update_dispatch_data(const kernel_impl_params& impl_param) override { + // If model loaded from cache, params are not initialized, so we create a new object and reuse it in the future + if (_kernel_data.params == nullptr) { + _kernel_data.params = std::make_shared(get_kernel_params(impl_param, true)); + } + + update_shapes(*_kernel_data.params, impl_param); + (_kernel_data.update_dispatch_data_func)(*_kernel_data.params, _kernel_data); + } + + static kernel_params_t get_kernel_params(const kernel_impl_params& impl_param, bool shape_agnostic = false) { + const auto& primitive = impl_param.typed_desc(); + auto params = get_default_params(impl_param, shape_agnostic); + + // Manually add all inputs except first one, since get_default_params does not handle it. 
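+ // (Inputs 1..3 here are the window, frame_size and frame_step tensors; input 0, the signal, is already added by get_default_params.)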
+ for (size_t i = 1; i < impl_param.input_layouts.size(); ++i) { + params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(i))); + } + + params.transpose_frames = primitive->transpose_frames; + return params; + } + + // [NOTE]: Has to be added as a separete static function, since it is called via static dispatching in + // typed_primitive_impl_ocl::create().. + static kernel_impl_params static_canonicalize_shapes(const kernel_impl_params& impl_params) { + auto updated_impl_params = canonicalize_fused_shapes(impl_params); + + for (auto& input_layout : updated_impl_params.input_layouts) { + input_layout.set_partial_shape(extend_shape_to_rank_from_begin(input_layout.get_partial_shape())); + } + + for (auto& output_layout : updated_impl_params.output_layouts) { + output_layout.set_partial_shape(extend_shape_to_rank_from_begin(output_layout.get_partial_shape())); + } + + return updated_impl_params; + } + + kernel_impl_params canonicalize_shapes(const kernel_impl_params& impl_params) const override { + return static_canonicalize_shapes(impl_params); + } +}; + +namespace detail { + +attach_STFT_impl::attach_STFT_impl() { + auto types = {data_types::i32, data_types::i64, data_types::f16, data_types::f32}; + + auto formats = {format::bfyx}; + + implementation_map::add(impl_types::ocl, + shape_types::any, + typed_primitive_impl_ocl::create, + types, + formats); +} + +} // namespace detail +} // namespace ocl +} // namespace cldnn + +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::ocl::STFT_impl) +BIND_BINARY_BUFFER_WITH_TYPE(cldnn::STFT) diff --git a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp index b7dbbaef6e64f1..f45d0897f01363 100644 --- a/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp +++ b/src/plugins/intel_gpu/src/graph/impls/registry/registry.hpp @@ -216,3 +216,4 @@ REGISTER_DEFAULT_IMPLS(unique_gather, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(scaled_dot_product_attention, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(rope, OCL_S, OCL_D); REGISTER_DEFAULT_IMPLS(search_sorted, OCL_S, OCL_D); +REGISTER_DEFAULT_IMPLS(STFT, OCL_S, OCL_D); diff --git a/src/plugins/intel_gpu/src/graph/include/stft_inst.h b/src/plugins/intel_gpu/src/graph/include/stft_inst.h new file mode 100644 index 00000000000000..1c770e93253e93 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/include/stft_inst.h @@ -0,0 +1,45 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#pragma once + +#include + +#include "primitive_inst.h" + +namespace cldnn { + +template <> +struct typed_program_node : public typed_program_node_base { + using parent = typed_program_node_base; + typed_program_node(const std::shared_ptr prim, program& prog) : parent(prim, prog) {} + +public: + using parent::parent; + + program_node& input(size_t idx = 0) const { + return get_dependency(idx); + } + std::vector get_shape_infer_dependencies() const override { + return {2, 3}; + } +}; + +using STFT_node = typed_program_node; + +template <> +class typed_primitive_inst : public typed_primitive_inst_base { + using parent = typed_primitive_inst_base; + using parent::parent; + +public: + typed_primitive_inst(network& network, STFT_node const& desc); + template + static std::vector calc_output_layouts(STFT_node const& node, kernel_impl_params const& impl_param); + static layout calc_output_layout(STFT_node const& node, kernel_impl_params const& impl_param); + static std::string to_string(STFT_node const& node); +}; + +using STFT_inst = 
typed_primitive_inst; + +} // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/stft.cpp b/src/plugins/intel_gpu/src/graph/stft.cpp new file mode 100644 index 00000000000000..33de025249a660 --- /dev/null +++ b/src/plugins/intel_gpu/src/graph/stft.cpp @@ -0,0 +1,63 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include +#include + +#include + +#include "memory_accessor.hpp" +#include "openvino/core/enum_names.hpp" +#include "primitive_type_base.h" +#include "stft_shape_inference.hpp" + +namespace cldnn { +GPU_DEFINE_PRIMITIVE_TYPE_ID(STFT) + +STFT_inst::typed_primitive_inst(network& network, STFT_node const& node) : parent(network, node) {} + +layout STFT_inst::calc_output_layout(STFT_node const& node, kernel_impl_params const& impl_param) { + return calc_output_layouts(node, impl_param)[0]; +} + +template +std::vector STFT_inst::calc_output_layouts(STFT_node const& node, kernel_impl_params const& impl_param) { + auto primitive = impl_param.typed_desc(); + + const auto& signal_layout = impl_param.get_input_layout(0); + const auto& window_layout = impl_param.get_input_layout(1); + const auto& frame_size_layout = impl_param.get_input_layout(2); + const auto& frame_step_layout = impl_param.get_input_layout(3); + + std::vector input_shapes = { + signal_layout.get(), + window_layout.get(), + frame_size_layout.get(), + frame_step_layout.get(), + }; + + const auto ta = MemoryAccessor(&impl_param.memory_deps, impl_param.get_stream()); + + std::vector output_shapes; + ov::op::v15::STFT op; + op.set_transpose_frames(primitive->transpose_frames); + output_shapes = shape_infer(&op, input_shapes, ta); + + return {layout{output_shapes[0], signal_layout.data_type, signal_layout.format}}; +} + +std::string STFT_inst::to_string(STFT_node const& node) { + auto node_info = node.desc_to_json(); + json_composite STFT_info; + STFT_info.add("signal", node.input(0).id()); + STFT_info.add("window", node.input(1).id()); + STFT_info.add("framesize", node.input(2).id()); + STFT_info.add("framestep", node.input(3).id()); + STFT_info.add("transpose_frames", node.get_primitive()->transpose_frames); + node_info->add("STFT info", STFT_info); + std::stringstream primitive_description; + node_info->dump(primitive_description); + return primitive_description.str(); +} + +} // namespace cldnn \ No newline at end of file diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl new file mode 100644 index 00000000000000..2f43e1e25aaaa2 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/stft_ref.cl @@ -0,0 +1,68 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +// alternative: https://github.com/OpenCL/ComplexMath/blob/master/clcomplex.h +typedef float2 cfloat; +#define real(a) ((a).s0) +#define imag(a) ((a).s1) +#define crmult(a, b) ((cfloat)(real(a) * (b), imag(a) * (b))) +#define cadd(a, b) ((cfloat)(real(a) + real(b), imag(a) + imag(b))) +#define csub(a, b) ((cfloat)(real(a) - real(b), imag(a) - imag(b))) +#define expmi(x) ((cfloat)(cos(x), -sin(x))) +#define czero() ((cfloat)(0)) + +// Unoptimized, the most obvious stft impl from the definition. 
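+// Each work item produces one complex DFT bin: for batch b, frame f and frequency k it accumulates
+//   X[b, f, k] = sum_{i=0..window_size-1} signal[b, f*frame_step + start_offset + i] * window[i] * exp(-j*2*PI*k*(i + start_offset)/frame_size),
+// where start_offset centers a window that is shorter than frame_size; the summation uses Kahan compensation to reduce rounding error.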
+KERNEL(stft_ref)( + OPTIONAL_SHAPE_INFO_ARG + const __global INPUT0_TYPE* restrict signal, + const __global INPUT1_TYPE* restrict window, + const __global INPUT2_TYPE* restrict frame_size_buff, + const __global INPUT3_TYPE* restrict frame_step_buff, + __global OUTPUT_TYPE* restrict output) +{ + const int freq_id = get_global_id(0); + const int frame_id = get_global_id(1); + const int batch = get_global_id(2); + const int frame_size = (int)frame_size_buff[0]; + const int frame_step = (int)frame_step_buff[0]; + const int window_size = INPUT1_SIZE_X; + + // Handling case where window size is smaller than frame size. + const int start_offset = (frame_size - window_size) / 2; + + const INPUT0_TYPE* restrict signal_for_this_frame = signal + batch*INPUT0_SIZE_X + frame_id*frame_step + start_offset; + + // FT from def for single freq for given frame: + cfloat freq_val = czero(); + + // dft_power = 2*PI*(k/N) from dft def. + const float dft_power = 2.0f * M_PI_F * (float)freq_id / (float)frame_size; + + cfloat err = czero(); + for(int i = 0; i < window_size; ++i) { + const float signal_val = (float)signal_for_this_frame[i]; + const float window_val = (float)window[i]; + const float x_i = signal_val*window_val; + const cfloat e_i = expmi(dft_power*(float)(i+start_offset)); + const cfloat val_i = crmult(e_i, x_i); + + // Kahan sum algo: + const cfloat y = csub(val_i, err); + const cfloat newSum = cadd(freq_val, y); + err = csub(newSum, freq_val); + err = csub(err, y); + freq_val = newSum; + } + +#if TRANSPOSE_FRAMES + const int output_real_idx = OUTPUT_GET_INDEX(batch, freq_id, frame_id, 0); + const int output_imag_idx = OUTPUT_GET_INDEX(batch, freq_id, frame_id, 1); +#else + const int output_real_idx = OUTPUT_GET_INDEX(batch, frame_id, freq_id, 0); + const int output_imag_idx = OUTPUT_GET_INDEX(batch, frame_id, freq_id, 1); +#endif + + output[output_real_idx] = (OUTPUT_TYPE)real(freq_val); + output[output_imag_idx] = (OUTPUT_TYPE)imag(freq_val); +} \ No newline at end of file diff --git a/src/plugins/intel_gpu/src/kernel_selector/common_types.h b/src/plugins/intel_gpu/src/kernel_selector/common_types.h index 06b3e04d40e829..704c1151092c04 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/common_types.h +++ b/src/plugins/intel_gpu/src/kernel_selector/common_types.h @@ -102,7 +102,8 @@ enum class KernelType { SWIGLU, ROPE, DYNAMIC_QUANTIZE, - SEARCH_SORTED + SEARCH_SORTED, + STFT }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp new file mode 100644 index 00000000000000..8eb8ce36c14f2f --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_base.h" + +#include + +#include "kernel_selector_utils.h" + +namespace kernel_selector { +JitConstants STFTKernelBase::GetJitConstants(const STFT_params& params) const { + JitConstants jit = MakeBaseParamsJitConstants(params); + + jit.AddConstants({MakeJitConstant("TRANSPOSE_FRAMES", params.transpose_frames)}); + + return jit; +} + +void STFTKernelBase::GetUpdateDispatchDataFunc(KernelData& kd) const { + kd.update_dispatch_data_func = [](const Params& params, KernelData& kd) { + const auto& prim_params = static_cast(params); + auto dispatchData = 
SetDefault(prim_params); + OPENVINO_ASSERT(kd.kernels.size() == 1, "[GPU] Invalid kernels size for update dispatch data func"); + kd.kernels[0].params.workGroups.global = dispatchData.gws; + kd.kernels[0].params.workGroups.local = dispatchData.lws; + kd.kernels[0].skip_execution = KernelData::SkipKernelExecution(prim_params); + }; +} + +STFTKernelBase::DispatchData STFTKernelBase::SetDefault(const STFT_params& params) { + CommonDispatchData dispatchData; + const auto inLayout = params.inputs.front().GetLayout(); + const auto& output = params.outputs.front(); + const auto outLayout = output.GetLayout(); + + OPENVINO_ASSERT(output.Dimentions() == 4); + OPENVINO_ASSERT(output.X().v == 2); + + std::vector> dimsByGws; + + if (params.transpose_frames) { + dispatchData.gws = {output.Feature().v, output.Y().v, output.Batch().v}; + dimsByGws = {{Tensor::DataChannelName::FEATURE}, + {Tensor::DataChannelName::Y}, + {Tensor::DataChannelName::BATCH}}; + } else { + dispatchData.gws = {output.Y().v, output.Feature().v, output.Batch().v}; + dimsByGws = {{Tensor::DataChannelName::Y}, + {Tensor::DataChannelName::FEATURE}, + {Tensor::DataChannelName::BATCH}}; + } + dispatchData.lws = + GetOptimalLocalWorkGroupSizes(dispatchData.gws, params.engineInfo, inLayout, outLayout, dimsByGws); + + return dispatchData; +} + +KernelsData STFTKernelBase::GetCommonKernelsData(const Params& params) const { + assert(params.GetType() == KernelType::STFT); + + const auto& prim_params = static_cast(params); + + auto dispatchData = SetDefault(prim_params); + KernelData k_data = KernelData::Default(params); + + auto cldnn_jit = GetJitConstants(prim_params); + auto entry_point = GetEntryPoint(kernelName, prim_params.layerID, params); + auto jit = CreateJit(kernelName, cldnn_jit, entry_point); + + GetUpdateDispatchDataFunc(k_data); + + auto& kernel = k_data.kernels[0]; + FillCLKernelData(kernel, + dispatchData, + params.engineInfo, + kernelName, + jit, + entry_point, + "", + false, + false, + 4, + GetFusedPrimitiveInputsCount(params), + 1, + prim_params.is_shape_agnostic); + + return {k_data}; +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h new file mode 100644 index 00000000000000..75ad08280e6c74 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_base.h @@ -0,0 +1,34 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "kernel_base_opencl.h" +#include "kernel_selector_params.h" + +namespace kernel_selector { +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// STFT +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +struct STFT_params : public base_params { + STFT_params() : base_params(KernelType::STFT), transpose_frames(false) {} + bool transpose_frames; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// STFTKernelBase +//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +class STFTKernelBase : public KernelBaseOpenCL { +public: + using KernelBaseOpenCL::KernelBaseOpenCL; + + using DispatchData = CommonDispatchData; + +protected: + JitConstants GetJitConstants(const STFT_params& params) 
const; + static DispatchData SetDefault(const STFT_params& params); + KernelsData GetCommonKernelsData(const Params& params) const; + void GetUpdateDispatchDataFunc(KernelData& kd) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp new file mode 100644 index 00000000000000..dfc9a9596fe342 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_ref.h" + +namespace kernel_selector { +ParamsKey STFTKernelRef::GetSupportedKey() const { + ParamsKey k; + + k.EnableInputDataType(Datatype::INT32); + k.EnableInputDataType(Datatype::INT64); + k.EnableInputDataType(Datatype::F32); + k.EnableInputDataType(Datatype::F16); + + k.EnableOutputDataType(Datatype::F32); + k.EnableOutputDataType(Datatype::F16); + + k.EnableInputLayout(DataLayout::bfyx); + + k.EnableOutputLayout(DataLayout::bfyx); + + k.EnableBatching(); + k.EnableDifferentTypes(); + k.EnableDynamicShapesSupport(); + return k; +} + +KernelsData STFTKernelRef::GetKernelsData(const Params& params) const { + return GetCommonKernelsData(params); +} + +KernelsPriority STFTKernelRef::GetKernelsPriority(const Params& /*params*/) const { + return FORCE_PRIORITY_9; +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h new file mode 100644 index 00000000000000..3651539fd7dadb --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_ref.h @@ -0,0 +1,18 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "stft_kernel_base.h" + +namespace kernel_selector { +class STFTKernelRef : public STFTKernelBase { +public: + STFTKernelRef() : STFTKernelBase("stft_ref") {} + + KernelsData GetKernelsData(const Params& params) const override; + KernelsPriority GetKernelsPriority(const Params& params) const override; + ParamsKey GetSupportedKey() const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp new file mode 100644 index 00000000000000..02edc108c2e680 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "stft_kernel_selector.h" + +#include "stft_kernel_ref.h" + +namespace kernel_selector { +STFT_kernel_selector::STFT_kernel_selector() { + Attach(); +} + +KernelsData STFT_kernel_selector::GetBestKernels(const Params& params) const { + return GetNaiveBestKernel(params, KernelType::STFT); +} +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h new file mode 100644 index 00000000000000..7e1f9e714cc203 --- /dev/null +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/stft/stft_kernel_selector.h @@ -0,0 +1,21 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + 
+#include "kernel_selector.h" + +namespace kernel_selector { +class STFT_kernel_selector : public kernel_selector_base { +public: + static STFT_kernel_selector& Instance() { + static STFT_kernel_selector instance; + return instance; + } + + STFT_kernel_selector(); + + KernelsData GetBestKernels(const Params& params) const override; +}; +} // namespace kernel_selector diff --git a/src/plugins/intel_gpu/src/plugin/ops/stft.cpp b/src/plugins/intel_gpu/src/plugin/ops/stft.cpp new file mode 100644 index 00000000000000..9b082cf717683d --- /dev/null +++ b/src/plugins/intel_gpu/src/plugin/ops/stft.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/stft.hpp" + +#include "intel_gpu/plugin/common_utils.hpp" +#include "intel_gpu/plugin/program_builder.hpp" +#include "intel_gpu/primitives/stft.hpp" + +namespace ov { +namespace intel_gpu { + +static void CreateSTFTOp(ProgramBuilder& p, const std::shared_ptr& op) { + validate_inputs_count(op, {4}); + auto inputs = p.GetInputInfo(op); + auto prim = + cldnn::STFT(layer_type_name_ID(op), inputs[0], inputs[1], inputs[2], inputs[3], op->get_transpose_frames()); + p.add_primitive(*op, prim); +} + +REGISTER_FACTORY_IMPL(v15, STFT); + +} // namespace intel_gpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/plugin/program_builder.cpp b/src/plugins/intel_gpu/src/plugin/program_builder.cpp index a9bb813d0ce587..a87c5cbcbd87b4 100644 --- a/src/plugins/intel_gpu/src/plugin/program_builder.cpp +++ b/src/plugins/intel_gpu/src/plugin/program_builder.cpp @@ -10,6 +10,7 @@ #include "openvino/op/lstm_sequence.hpp" #include "openvino/op/loop.hpp" #include "openvino/op/search_sorted.hpp" +#include "openvino/op/stft.hpp" #include "ov_ops/dynamic_quantize.hpp" #include "intel_gpu/plugin/common_utils.hpp" @@ -360,7 +361,8 @@ bool ProgramBuilder::requires_new_shape_infer(const std::shared_ptr& o // HACK: SearchSorted has specific shape requirements. // E.g. static input shapes: sorted:[8], values:[2,3,4] are prefectly fine, // but sorted:[8,1,1,1], values:[2,3,4,1] is not valid. - if (ov::is_type(op)) + // Similar case for STFT. 
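+ // STFT likewise mixes scalar (frame_size/frame_step) and 1D/2D (signal/window) inputs, so the legacy 4D rank extension is not applicable here either.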
+ if (ov::is_type(op) || ov::is_type(op)) return true; if (ov::is_type(op)) diff --git a/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp b/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp index 1a869b8afbddf2..1fab1692d97cbe 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/bcast_and_pad_zp_buffers.hpp @@ -13,7 +13,7 @@ namespace intel_gpu { // and adds optional padding to align elements count to `alignment` value class BroadcastAndPadZeroPointBuffers : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("BroadcastAndPadZeroPointBuffers", "0"); + OPENVINO_MATCHER_PASS_RTTI("BroadcastAndPadZeroPointBuffers"); BroadcastAndPadZeroPointBuffers(size_t alignment = 1, bool supports_immad = false); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp b/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp index 90d62a8e7a1fa5..bf372ae19c1553 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/binary_conv_to_conv.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertBinaryConvolutionToConvolution: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertBinaryConvolutionToConvolution", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertBinaryConvolutionToConvolution"); ConvertBinaryConvolutionToConvolution(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp index ac93d446ee749d..bea51c2e768576 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/clamp_fp16_output.hpp @@ -18,7 +18,7 @@ namespace intel_gpu { */ class ClampFP16Output: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ov::intel_gpu::ClampFP16Output"); + OPENVINO_MATCHER_PASS_RTTI("ov::intel_gpu::ClampFP16Output"); ClampFP16Output(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp index 656b4c6fd99c20..0d23eb49829217 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_convolution.cpp @@ -102,13 +102,13 @@ ov::Tensor get_compensation(std::shared_ptr w, std::shared_ptr& m) override; }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.hpp index d2bc71a91f1285..ec8f2bc3a8f4c7 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_fc_to_compressed.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertFullyConnectedToFullyConnectedCompressed: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertFullyConnectedToFullyConnectedCompressed", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertFullyConnectedToFullyConnectedCompressed"); ConvertFullyConnectedToFullyConnectedCompressed(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.hpp index 4e7a9cb312113f..4520944a99faf3 100644 --- 
a/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_matmul_to_fc.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertMatMulToFullyConnected: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertMatMulToFullyConnected", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertMatMulToFullyConnected"); ConvertMatMulToFullyConnected(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.hpp index 656025154f120e..0b52197008f127 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_pooling_to_reduce.hpp @@ -12,7 +12,7 @@ namespace intel_gpu { class ConvertAvgPoolingToReduce : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertAvgPoolingToReduce", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertAvgPoolingToReduce"); ConvertAvgPoolingToReduce(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_shapeof.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_shapeof.hpp index 580ac7e64a2659..e08814489065e5 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_shapeof.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_shapeof.hpp @@ -12,7 +12,7 @@ namespace intel_gpu { class ConvertShapeOf1To3 : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertShapeOf1To3", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertShapeOf1To3"); ConvertShapeOf1To3(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp index 83162e36b4c0ae..987d3e8a69b589 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class ConvertStridedSlicesToVariadicSplit : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("ConvertStridedSlicesToVariadicSplit", "0"); + OPENVINO_MATCHER_PASS_RTTI("ConvertStridedSlicesToVariadicSplit"); ConvertStridedSlicesToVariadicSplit(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_for_false_keepdims.hpp b/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_for_false_keepdims.hpp index 8eae1a63b6174f..4405fe8c8d056a 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_for_false_keepdims.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_for_false_keepdims.hpp @@ -17,6 +17,8 @@ namespace intel_gpu { */ class DecomposeReduceForFalseKeepDims : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("DecomposeReduceForFalseKeepDims"); + // Decompose reduce if keep_dims is false and it reduces batch and spatial axes DecomposeReduceForFalseKeepDims(); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_scalar_output.hpp b/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_scalar_output.hpp index cb5db2b715c333..85dc9df868b0ea 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_scalar_output.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/decompose_reduce_scalar_output.hpp @@ -15,7 +15,7 @@ namespace intel_gpu { 
// detect this case and decompose Reduce by dimension to avoid poor performance. class DecomposeReduceForScalarOutput : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DecomposeReduceForScalarOutput", "0"); + OPENVINO_MATCHER_PASS_RTTI("DecomposeReduceForScalarOutput"); DecomposeReduceForScalarOutput(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.hpp b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.hpp index b5d956f7872b5c..8d340c35941d23 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class DynamicQuantizeFullyConnected: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("DynamicQuantizeFullyConnected", "0"); + OPENVINO_MATCHER_PASS_RTTI("DynamicQuantizeFullyConnected"); DynamicQuantizeFullyConnected(uint64_t group_size); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/einsum_decomposition.hpp b/src/plugins/intel_gpu/src/plugin/transformations/einsum_decomposition.hpp index 2a78c6927cd36e..785ec469cb16a1 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/einsum_decomposition.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/einsum_decomposition.hpp @@ -16,6 +16,7 @@ namespace intel_gpu { */ class EinsumDecomposition : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("EinsumDecomposition"); EinsumDecomposition(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp index 44db1882f8e87f..7088fd170b92f6 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_convert_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class FullyConnectedConvertFusion: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FullyConnectedConvertFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FullyConnectedConvertFusion"); FullyConnectedConvertFusion(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp index 67abaa3df54357..bedd5723d73d3d 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_horizontal_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class FullyConnectedHorizontalFusion: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FullyConnectedHorizontalFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("FullyConnectedHorizontalFusion"); FullyConnectedHorizontalFusion(bool fuse_mlp_swiglu = false); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/fc_per_layer_scaling.hpp b/src/plugins/intel_gpu/src/plugin/transformations/fc_per_layer_scaling.hpp index 5c0d7d07f5b411..72fa60d818f8d4 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/fc_per_layer_scaling.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/fc_per_layer_scaling.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class FullyConnectedPerLayerScaling: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("FullyConnectedPerLayerScaling", "0"); + OPENVINO_MATCHER_PASS_RTTI("FullyConnectedPerLayerScaling"); FullyConnectedPerLayerScaling(float scale_factor); }; diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations/group_norm_composition.hpp b/src/plugins/intel_gpu/src/plugin/transformations/group_norm_composition.hpp index 889d0e9ec57e56..ccd30b4fe2c66f 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/group_norm_composition.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/group_norm_composition.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class GroupNormComposition : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("GroupNormComposition", "0"); + OPENVINO_MATCHER_PASS_RTTI("GroupNormComposition"); GroupNormComposition(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/increase_position_ids_precision.hpp b/src/plugins/intel_gpu/src/plugin/transformations/increase_position_ids_precision.hpp index b6046958f8d177..f8bed4ddeee2a8 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/increase_position_ids_precision.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/increase_position_ids_precision.hpp @@ -17,7 +17,7 @@ namespace intel_gpu { */ class IncreasePositionIdsPrecision : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("IncreasePositionIdsPrecision", "0"); + OPENVINO_MATCHER_PASS_RTTI("IncreasePositionIdsPrecision"); IncreasePositionIdsPrecision(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp b/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp index 2a6c4a347f9217..0fc96b6215ba95 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/indirect_kv_cache.hpp @@ -44,13 +44,13 @@ class IndirectKVCache : public ov::pass::GraphRewrite { class IndirectGemmOpt : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("IndirectGemmOpt", "0"); + OPENVINO_MATCHER_PASS_RTTI("IndirectGemmOpt"); IndirectGemmOpt(); }; class IndirectSDPAOpt : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("IndirectSDPAOpt", "0"); + OPENVINO_MATCHER_PASS_RTTI("IndirectSDPAOpt"); IndirectSDPAOpt(); }; } // namespace intel_gpu diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp index 6903b52963a879..6b6150a8e32db4 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp @@ -126,7 +126,7 @@ std::shared_ptr class KVCacheCompressionMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("KVCacheCompressionMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("KVCacheCompressionMatcher"); KVCacheCompressionMatcher(ov::element::Type compression_dt); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp index 8be42a1311094b..f22b32b23ea407 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_fusion.cpp @@ -30,7 +30,7 @@ namespace intel_gpu { class KVCacheFusionMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("KVCacheFusionMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("KVCacheFusionMatcher"); KVCacheFusionMatcher(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp b/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp index f573abd4589e8f..7237e5c97ace70 100644 --- 
a/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/move_fc_reshape_to_weights.hpp @@ -30,7 +30,7 @@ namespace intel_gpu { */ class MoveFCReshapeToWeights: public ov::pass::MatcherPass { public: - OPENVINO_RTTI("MoveFCReshapeToWeights", "0"); + OPENVINO_MATCHER_PASS_RTTI("MoveFCReshapeToWeights"); MoveFCReshapeToWeights(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp b/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp index 3a38bb92ad5167..702b18e7fc5dc9 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/optimize_subsequent_reshapes.hpp @@ -15,7 +15,7 @@ namespace intel_gpu { */ class OptimizeSubsequentReshapes : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("OptimizeSubsequentReshapes", "0"); + OPENVINO_MATCHER_PASS_RTTI("OptimizeSubsequentReshapes"); OptimizeSubsequentReshapes(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp b/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp index 079028f0154848..77922903c287bd 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/print_model_statistics.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class PrintModelStatistics : public ov::pass::ModelPass { public: - OPENVINO_RTTI("PrintModelStatistics", "0"); + OPENVINO_MODEL_PASS_RTTI("PrintModelStatistics"); PrintModelStatistics() = default; bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp index 4869100054b819..a845c7a7aa86b0 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/transpose_fusion.hpp @@ -17,19 +17,19 @@ class TransposeFusion: public ov::pass::GraphRewrite { class TransposeMatMulMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeMatMulMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeMatMulMatcher"); TransposeMatMulMatcher(bool supports_immad); }; class TransposeMatMulTransposeMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeMatMulTransposeMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeMatMulTransposeMatcher"); TransposeMatMulTransposeMatcher(bool supports_immad); }; class TransposeSDPAMatcher : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("TransposeSDPAMatcher", "0"); + OPENVINO_MATCHER_PASS_RTTI("TransposeSDPAMatcher"); TransposeSDPAMatcher(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp index 35ed30cdc9726e..3e5926561820b8 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class UnsqueezeBroadcastReshapeMatmulFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UnsqueezeBroadcastReshapeMatmulFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("UnsqueezeBroadcastReshapeMatmulFusion"); 
UnsqueezeBroadcastReshapeMatmulFusion(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp index ede3ac16fb51b5..e740e846409ea9 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.hpp @@ -11,7 +11,7 @@ namespace intel_gpu { class UnsqueezeBroadcastReshapeSDPAFusion : public ov::pass::MatcherPass { public: - OPENVINO_RTTI("UnsqueezeBroadcastReshapeSDPAFusion", "0"); + OPENVINO_MATCHER_PASS_RTTI("UnsqueezeBroadcastReshapeSDPAFusion"); UnsqueezeBroadcastReshapeSDPAFusion(); }; diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 44d68740a0dfb7..53ab9aa188b7aa 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -996,6 +996,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { } // AZP does not support 8bit weight + // XXX: This is currently wrapped as GPU_DEBUG_IF as dynamic_quantize_asym is not exposed through public API. GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && (root->get_input_element_type(1) == ov::element::i8 || root->get_input_element_type(1) == ov::element::u8)) { GPU_DEBUG_TRACE << root->get_friendly_name() << " dyn_quan is turned off: asym quantization does not support 8bit weight" << std::endl; diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp new file mode 100644 index 00000000000000..755c0514ae436b --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/stft.cpp @@ -0,0 +1,17 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "single_op_tests/stft.hpp" + +#include "common_test_utils/test_constants.hpp" + +namespace ov { +namespace test { + +INSTANTIATE_TEST_SUITE_P(smoke_STFT_static, + STFTLayerTest, + STFTLayerTest::GetTestDataForDevice(ov::test::utils::DEVICE_GPU), + STFTLayerTest::getTestCaseName); +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp new file mode 100644 index 00000000000000..060ebfd4a071fd --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/test_cases/stft_gpu_test.cpp @@ -0,0 +1,208 @@ +// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include +#include + +#include "test_utils.h" + +using namespace cldnn; +using namespace ::tests; + +namespace { + +constexpr float REL_EPS = 2e-3f; +constexpr float ABS_EPS = 1e-5f; + +namespace helpers { +// TODO: Move to common place. + +// Converts float vector to another type vector. +template +std::vector ConverFloatVector(const std::vector& vec) { + std::vector ret; + ret.reserve(vec.size()); + for (const auto& val : vec) { + ret.push_back(T(val)); + } + return ret; +} + +// Allocates tensoer with given shape and data. 
+template +memory::ptr AllocateTensor(ov::PartialShape shape, const std::vector& data) { + const layout lo = {shape, ov::element::from(), cldnn::format::bfyx}; + EXPECT_EQ(lo.get_linear_size(), data.size()); + memory::ptr tensor = get_test_engine().allocate_memory(lo); + set_values(tensor, data); + return tensor; +} + +template +void CompareTypedBuffers(const memory::ptr& output, const memory::ptr& expectedOutput, cldnn::stream& stream) { + mem_lock output_ptr(output, stream); + mem_lock wanted_output_ptr(expectedOutput, stream); + + ASSERT_EQ(output->get_layout(), expectedOutput->get_layout()); + ASSERT_EQ(output_ptr.size(), wanted_output_ptr.size()); + for (size_t i = 0; i < output_ptr.size(); ++i) + ASSERT_TRUE(are_equal(wanted_output_ptr[i], output_ptr[i], REL_EPS, ABS_EPS)) << "at index " << i; +} + +void CompareBuffers(const memory::ptr& output, const memory::ptr& expectedOutput, cldnn::stream& stream) { + ASSERT_EQ(output->get_layout(), expectedOutput->get_layout()); + auto type = output->get_layout().data_type; + + switch (type) { + case data_types::f32: + helpers::CompareTypedBuffers(output, expectedOutput, stream); + break; + + default: + GTEST_FAIL() << "Unsupported data type: " << type; + break; + } +} + +} // namespace helpers + +struct STFTTestParams { + ov::PartialShape signalShape; + ov::PartialShape windowShape; + ov::PartialShape outputShape; + int64_t frameSize; + int64_t frameStep; + bool transposedFrames; + std::vector signalData; + std::vector windowData; + std::vector expectedOutput; + std::string testcaseName; +}; + +class stft_test : public ::testing::TestWithParam { +public: + static std::string getTestCaseName(const testing::TestParamInfo& obj) { + auto param = obj.param; + std::ostringstream result; + result << "signalShape=" << param.signalShape; + result << "_windowShape=" << param.windowShape; + result << "_outputShape=" << param.outputShape; + result << "_frameSize=" << param.frameSize; + result << "_frameStep=" << param.frameStep; + result << "_transposedFrames=" << param.transposedFrames; + result << "_" << param.testcaseName; + return result.str(); + } + + struct STFTInferenceParams { + bool transposedFrames; + memory::ptr signal; + memory::ptr window; + memory::ptr frameSize; + memory::ptr frameStep; + memory::ptr expectedOutput; + }; + + template + STFTInferenceParams PrepareInferenceParams(const STFTTestParams& testParam) { + using T = typename ov::element_type_traits::value_type; + STFTInferenceParams ret; + + ret.transposedFrames = testParam.transposedFrames; + + ret.signal = + helpers::AllocateTensor(testParam.signalShape, helpers::ConverFloatVector(testParam.signalData)); + ret.window = + helpers::AllocateTensor(testParam.windowShape, helpers::ConverFloatVector(testParam.windowData)); + ret.expectedOutput = + helpers::AllocateTensor(testParam.outputShape, helpers::ConverFloatVector(testParam.expectedOutput)); + + ret.frameStep = helpers::AllocateTensor({}, {testParam.frameStep}); + ret.frameSize = helpers::AllocateTensor({}, {testParam.frameSize}); + + return ret; + } + + void Execute(const STFTInferenceParams& params) { + // Prepare the network. 
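+ // frameSize and frameStep are wired up as runtime scalar inputs (partial shape {}), so the STFT output shape is inferred dynamically at execution time.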
+ auto stream = get_test_stream_ptr(get_test_default_config(engine_)); + + auto scalar_layout = params.frameSize->get_layout(); + scalar_layout.set_partial_shape({}); + + topology topology; + topology.add(input_layout("signal", params.signal->get_layout())); + topology.add(input_layout("window", params.window->get_layout())); + topology.add(input_layout("frameSize", scalar_layout)); + topology.add(input_layout("frameStep", scalar_layout)); + topology.add(STFT("stft", + input_info("signal"), + input_info("window"), + input_info("frameSize"), + input_info("frameStep"), + params.transposedFrames)); + + cldnn::network::ptr network = get_network(engine_, topology, get_test_default_config(engine_), stream, false); + + network->set_input_data("signal", params.signal); + network->set_input_data("window", params.window); + network->set_input_data("frameSize", params.frameSize); + network->set_input_data("frameStep", params.frameStep); + + // Run and check results. + auto outputs = network->execute(); + + auto output = outputs.at("stft").get_memory(); + + helpers::CompareBuffers(output, params.expectedOutput, get_test_stream()); + } + +private: + engine& engine_ = get_test_engine(); +}; + +std::vector generateTestParams() { + std::vector params; +#define TEST_DATA(signalShape, \ + windowShape, \ + outputShape, \ + frameSize, \ + frameStep, \ + transposedFrames, \ + signalData, \ + windowData, \ + expectedOutput, \ + testcaseName) \ + params.push_back(STFTTestParams{signalShape, \ + windowShape, \ + outputShape, \ + frameSize, \ + frameStep, \ + transposedFrames, \ + signalData, \ + windowData, \ + expectedOutput, \ + testcaseName}); + +#include "unit_test_utils/tests_data/stft_data.h" +#undef TEST_DATA + + return params; +} + +} // namespace + +#define STFT_TEST_P(precision) \ + TEST_P(stft_test, ref_comp_##precision) { \ + Execute(PrepareInferenceParams(GetParam())); \ + } + +STFT_TEST_P(f32); + +INSTANTIATE_TEST_SUITE_P(stft_test_suit, + stft_test, + testing::ValuesIn(generateTestParams()), + stft_test::getTestCaseName); diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp index e77a02a4439018..24d682c603a799 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/avoid.hpp @@ -23,6 +23,7 @@ namespace avoid { // Note: this pattern is only utilized by the online partitioner class RMSNorm : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::avoid::RMSNorm"); RMSNorm(const std::shared_ptr& snapshot, const std::string& avoid_device); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp index 77bc9fb3f90418..8a70290dfabe4e 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/compute.hpp @@ -23,41 +23,49 @@ namespace compute { class DQMatMulGQu4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulGQu4"); DQMatMulGQu4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulCWu4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulCWu4"); DQMatMulCWu4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulGQi4 : public 
ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulGQi4"); DQMatMulGQi4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulCWi4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulCWi4"); DQMatMulCWi4(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class DQMatMulConv : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::DQMatMulConv"); DQMatMulConv(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class VocabMatMul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::VocabMatMul"); VocabMatMul(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class RMSNorm : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::RMSNorm"); RMSNorm(const std::shared_ptr& snapshot, const std::string& isol_tag); }; class RMSNorm2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::compute::RMSNorm2"); RMSNorm2(const std::shared_ptr& snapshot, const std::string& isol_tag); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp index da06a5304c8bd7..4b4cbdd823dfb2 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.hpp @@ -54,6 +54,9 @@ void finalize_remap(Function& fbody, Subgraph& fsg, const ClosureRemap& m); namespace SymmNoZP { class DCOFFPassBase : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmNoZP::DCOFFPassBase"); + protected: DCOffMode m_dcoff_mode = DCOffMode::CAST_ONLY; ov::element::Type m_dcoff_type; @@ -94,6 +97,9 @@ class DCOFFPassGather final : public DCOFFPassBase { namespace SymmZP { // TODO: Not sure if it is actually Symm.. 
class DCOFFPassBase : public ov::pass::MatcherPass { +public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassBase"); + protected: DCOffMode m_dcoff_mode = DCOffMode::CAST_ONLY; ov::element::Type m_dcoff_type; @@ -129,21 +135,26 @@ class DCOFFPassConvert1 final : public DCOFFPassBase { class DCOFFPassReshape2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape2"); DCOFFPassReshape2(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class DCOFFPassReshape3 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape3"); DCOFFPassReshape3(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class DCOFFPassReshape4 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::DCOFFPassReshape4"); DCOFFPassReshape4(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; class CWAI1 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI1"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -152,6 +163,8 @@ class CWAI1 : public ov::pass::MatcherPass { class CWAI2 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI2"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -160,6 +173,8 @@ class CWAI2 : public ov::pass::MatcherPass { class CWAI3 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::SymmZP::CWAI3"); + using CPtr = std::shared_ptr; using Results = std::reference_wrapper>; @@ -171,6 +186,7 @@ class CWAI3 : public ov::pass::MatcherPass { namespace AsymmZP { class DCOFFPassReshape : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::AsymmZP::DCOFFPassReshape"); DCOFFPassReshape(DCOffMode dcoff_mode, ov::element::Type dcoff_type, DCOFFParamRef pref); }; diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp index 904ce88039d2eb..f2cdee0f436d57 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/opt.hpp @@ -64,31 +64,37 @@ struct Context { class DQMatMulCWi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulCWi"); explicit DQMatMulCWi(Context::Ref ctx); }; class DQMatMulGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQi"); explicit DQMatMulGQi(Context::Ref ctx); }; class DQMatMulGQ2i : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQ2i"); explicit DQMatMulGQ2i(Context::Ref ctx); }; class DQMatMulGQiP : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQiP"); explicit DQMatMulGQiP(Context::Ref ctx); }; class DQMatMulGQ2iP : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQMatMulGQ2iP"); explicit DQMatMulGQ2iP(Context::Ref ctx); }; class DQParMMGQ : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQParMMGQ"); explicit DQParMMGQ(Context::Ref ctx); }; @@ -98,16 +104,19 @@ void mergeParallelMatMuls(const std::shared_ptr& m, Context& ctx); class DQLiftGatherAsymCW : public 
ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherAsymCW"); DQLiftGatherAsymCW(); }; class DQLiftGatherSymCW : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherSymCW"); DQLiftGatherSymCW(); }; class DQLiftGatherSymGQ : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQLiftGatherSymGQ"); DQLiftGatherSymGQ(); }; @@ -115,21 +124,25 @@ class DQLiftGatherSymGQ : public ov::pass::MatcherPass { class DQUnpackDictGatheru : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictGatheru"); DQUnpackDictGatheru(Context::Ref ctx); }; class DQUnpackDictGatherGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictGatherGQi"); DQUnpackDictGatherGQi(Context::Ref ctx); }; class HostGather : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::HostGather"); HostGather(Context::Ref ctx); }; class HostGatherDQ : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::HostGatherDQ"); HostGatherDQ(Context::Ref ctx); }; @@ -137,43 +150,51 @@ class HostGatherDQ : public ov::pass::MatcherPass { class DQUnpackDictMatMulCWu : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictMatMulCWu"); DQUnpackDictMatMulCWu(Context::Ref ctx); }; class DQUnpackDictMatMulGQi : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::DQUnpackDictMatMulGQi"); DQUnpackDictMatMulGQi(Context::Ref ctx); }; class CompressDictMatMulf32 : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::CompressDictMatMulf32"); CompressDictMatMulf32(Context::Ref ctx); }; // Slice last Matmul class SliceLastMatmul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmul"); SliceLastMatmul(); }; class SliceLastMatmulAdd : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulAdd"); SliceLastMatmulAdd(); }; class SliceLastMatmulTranspose : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulTranspose"); SliceLastMatmulTranspose(); }; class SliceLastMatmulMultiply : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::SliceLastMatmulMultiply"); SliceLastMatmulMultiply(); }; // Convolution to MatMul class ConvToMatmul : public ov::pass::MatcherPass { public: + OPENVINO_MATCHER_PASS_RTTI("npuw::patterns::opt::ConvToMatmul"); ConvToMatmul(Context::Ref ctx); }; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index 19de8ce5eb6a79..362258598a1344 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -240,7 +240,7 @@ static std::map inputRanges = { {ov::op::v1::BatchToSpace::get_type_info_static(), Range({{0, 15}}, {{0, 8, 32}})}, {ov::op::v15::BitwiseLeftShift::get_type_info_static(), Range({{0, 5}, {0, 4}}, {})}, {ov::op::v15::BitwiseRightShift::get_type_info_static(), Range({{0, 5}, {0, 4}}, {})}, - {ov::op::v15::STFT::get_type_info_static(), Range({{16, 
24}, {1, 16}}, {{-100, 100}, {-100, 100}})}, + {ov::op::v15::STFT::get_type_info_static(), Range({{16, 24}, {1, 16}}, {{0, 1, 10000}, {0, 1, 10000}})}, }; class ModelRange { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp index 0e165ace364eae..22de0ef8c75b50 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/single_op/stft.hpp @@ -25,6 +25,17 @@ typedef std::tuple, class STFTLayerTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo& obj); + using TGenData = + testing::internal::CartesianProductHolder>, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ParamGenerator, + testing::internal::ValueArray>; + + static const TGenData GetTestDataForDevice(const char* deviceName); protected: void SetUp() override; diff --git a/src/tests/functional/shared_test_classes/src/single_op/stft.cpp b/src/tests/functional/shared_test_classes/src/single_op/stft.cpp index 2d25b6f386024b..141728b6d60fb7 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/stft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/stft.cpp @@ -84,5 +84,82 @@ void STFTLayerTest::SetUp() { function = std::make_shared(STFT->outputs(), ov::ParameterVector{in_signal, in_window}); } } + +const STFTLayerTest::TGenData STFTLayerTest::GetTestDataForDevice(const char* deviceName) { + const std::vector data_type = {ov::element::bf16, ov::element::f16}; + const std::vector step_size_type = {ov::element::i32, ov::element::i64}; + + const std::vector> input_shapes = { + { + // Static shapes + {{}, {{128}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Static shapes + {{}, {{1, 128}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Static shapes + {{}, {{2, 226}}}, // 1st input + {{}, {{16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first input shape + {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input + {{}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first and second input shape + {{-1}, {{128}}}, // 1st input + {{-1}, {{8}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims in the first and second input shape + {{-1, -1}, {{1, 128}, {2, 226}}}, // 1st input + {{-1}, {{8}, {16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }, + { + // Dynamic dims with range in the first and second input shape + {{{2, 4}, {1, 300}}, {{2, 226}, {3, 128}}}, // 1st input + {{{3, 16}}, {{4}, {16}}}, // 2nd input + {{}, {{}}}, // 3rd input + {{}, {{}}} // 4th input + }}; + + const std::vector frame_size = {16, 24}; + const std::vector step_size = {2, 3, 4}; + + const std::vector transpose_frames = { + false, + true, + }; + + std::vector in_types = {utils::InputLayerType::CONSTANT, utils::InputLayerType::PARAMETER}; + + auto data = ::testing::Combine(::testing::ValuesIn(input_shapes), + 
::testing::ValuesIn(frame_size), + ::testing::ValuesIn(step_size), + ::testing::ValuesIn(transpose_frames), + ::testing::ValuesIn(data_type), + ::testing::ValuesIn(step_size_type), + ::testing::ValuesIn(in_types), + ::testing::Values(deviceName)); + + return data; +} } // namespace test } // namespace ov diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp index dbd945bc80a45b..461136bc08aaa8 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/graph_comparator.hpp @@ -127,6 +127,7 @@ namespace ov { namespace pass { class InjectionPass : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("InjectionPass"); using injection_callback = std::function)>; explicit InjectionPass(injection_callback callback) : ModelPass(), m_callback(std::move(callback)) {} @@ -268,6 +269,7 @@ class InitUniqueNames : public ov::pass::ModelPass { UniqueNamesHolder::Ptr m_unh; public: + OPENVINO_MODEL_PASS_RTTI("InitUniqueNames"); InitUniqueNames(UniqueNamesHolder::Ptr unh) : m_unh(unh) {} bool run_on_model(const std::shared_ptr& f) override { m_unh->init_names(f); @@ -279,6 +281,7 @@ class CheckUniqueNames : public ov::pass::ModelPass { UniqueNamesHolder::Ptr m_unh; public: + OPENVINO_MODEL_PASS_RTTI("CheckUniqueNames"); CheckUniqueNames(UniqueNamesHolder::Ptr unh, bool soft_names_comparison = false, bool result_friendly_names_check = true) diff --git a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp index d781d92b57052a..91ef3fd6a7ebe1 100644 --- a/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp +++ b/src/tests/test_utils/common_test_utils/src/ov_test_utils.cpp @@ -17,6 +17,7 @@ namespace pass { class CopyTensorNamesToRefModel : public ov::pass::ModelPass { public: + OPENVINO_MODEL_PASS_RTTI("CopyTensorNamesToRefModel"); CopyTensorNamesToRefModel(const std::shared_ptr& ref_model) : m_ref_model(ref_model) {} bool run_on_model(const std::shared_ptr& f) override { const auto& orig_results = f->get_results(); diff --git a/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h b/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h new file mode 100644 index 00000000000000..4a6257743f560b --- /dev/null +++ b/src/tests/test_utils/unit_test_utils/tests_data/stft_data.h @@ -0,0 +1,1299 @@ +#pragma once + +#define LIST(...) \ + { __VA_ARGS__ } + +// TEST_DATA(signalShape, +// windowShape, +// outputShape, +// frameSize, +// frameStep, +// transposedFrames, +// signalData, +// windowData, +// expectedOutput, +// testcaseName) + +// NOTE: expected output were generated using pyTorch.searchsorted implementation. 
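+
+// Usage sketch (illustrative only; the helper name `run_stft_test_case` and the
+// include path are hypothetical, not part of this header): the file follows the
+// X-macro pattern -- it only invokes TEST_DATA, so a consuming test defines the
+// macro before including it and undefines it afterwards, e.g.:
+//
+//   #define TEST_DATA(signalShape, windowShape, outputShape, frameSize,       \
+//                     frameStep, transposedFrames, signalData, windowData,    \
+//                     expectedOutput, testcaseName)                           \
+//       run_stft_test_case(signalShape, windowShape, outputShape, frameSize,  \
+//                          frameStep, transposedFrames, signalData,           \
+//                          windowData, expectedOutput, testcaseName);
+//
+//   #include "tests_data/stft_data.h"
+//   #undef TEST_DATA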
+ +TEST_DATA(LIST(48), + LIST(16), + LIST(9, 3, 2), + 16, + 16, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + 2.061596, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + -1.064999, + 0.282346, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + -0.660887, + -0.935516, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + 0.566273, + 2.586032, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.90856, + -2.40086, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + 2.472012, + 0.479764, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + -1.628981, + 0.973055, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.790362, + -0.677371, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 3.969412, + 0.0, + 0.012426, + 0.0), + "test_case_0"); + +TEST_DATA(LIST(1, 48), + LIST(16), + LIST(1, 9, 3, 2), + 16, + 16, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + 2.061596, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + -1.064999, + 0.282346, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + -0.660887, + -0.935516, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + 0.566273, + 2.586032, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.90856, + -2.40086, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + 2.472012, + 0.479764, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + -1.628981, + 0.973055, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.790362, + -0.677371, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 3.969412, + 0.0, + 0.012426, + 0.0), + "test_case_1"); + +TEST_DATA(LIST(1, 48), + LIST(16), + LIST(1, 9, 2, 2), + 16, + 32, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 
0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 0.012426, + 0.0), + "test_case_2"); + +TEST_DATA(LIST(3, 48), + LIST(16), + LIST(3, 9, 2, 2), + 16, + 32, + true, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766, + -0.700153, + 0.785022, + 0.449904, + -0.437027, + 0.846442, + -2.708081, + -0.357738, + 1.859077, + -1.122008, + 0.797016, + -0.205318, + 0.620443, + -1.210487, + -0.100233, + -0.644188, + 1.252426, + -1.503346, + 1.685813, + -0.655548, + 0.148169, + 0.98681, + -1.806409, + -0.789457, + 0.934387, + 0.819341, + -0.359637, + -0.394646, + -0.040578, + 1.10817, + 1.745871, + -0.706232, + 2.154361, + -0.417549, + 0.724758, + -1.090765, + 0.9193, + 0.535271, + -0.979016, + 0.870831, + -0.405604, + -0.192899, + 0.242223, + -2.103053, + -0.234349, + -1.273937, + -0.334684, + -1.239732, + 1.185672, + 1.292743, + 0.741054, + -0.700485, + -0.252933, + -0.760226, + 0.68806, + 0.761746, + 0.065581, + -0.189028, + 0.253604, + 0.17645, + 0.993091, + 0.771911, + -0.45738, + -0.123291, + -0.150833, + -1.207279, + -1.033516, + -0.975503, + 0.626698, + 0.50241, + -1.377113, + -0.788385, + 0.043618, + -0.945737, + 0.093409, + -0.43187, + 0.748239, + 1.084854, + 1.147015, + 0.171417, + -0.231462, + -1.082049, + 0.213656, + -0.888956, + 0.489294, + -0.722142, + 0.920915, + -1.327819, + -1.384117, + -1.432893, + -0.535934, + 0.48227, + 0.220006, + -0.589304, + -1.305996, + -1.089244, + 1.762965), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -2.69633, + 0.0, + -0.832353, + 0.394573, + 1.075557, + -1.680968, + -1.071078, + -0.247753, + 0.872213, + 0.11946, + 0.658023, + -0.816808, + -0.348891, + 1.429143, + -0.030171, + 2.561505, + -0.474217, + -0.146423, + -0.226757, + -2.46842, + -0.450706, + -0.031404, + 0.078811, + 0.923822, + 1.696735, + -2.559718, + 0.915031, + -0.677954, + -1.028739, + 3.200351, + -1.483484, + 0.0, + 0.012426, + 0.0, + -0.789636, + 0.0, + -2.698145, + 0.0, + -0.35829, + 0.7881, + 1.297723, + -2.351696, + 2.059313, + -1.108987, + 0.58228, + 1.450589, + -1.325991, + -0.840667, + -1.163221, + -0.362671, + -0.79498, + 3.236621, + 0.830829, + 0.296243, + 2.930576, + -2.854989, + 0.544256, + 1.448767, + -3.939284, + 
-0.528339, + -0.229411, + -3.582516, + 3.241738, + 2.887635, + 0.092836, + 3.078159, + -2.836527, + 0.0, + -1.212438, + 0.0, + 1.769482, + 0.0, + -4.335095, + 0.0, + -1.254114, + 0.665334, + 3.115947, + 0.51333, + -0.628684, + 0.238478, + -2.409475, + -2.158762, + 2.008016, + -0.98037, + 2.824623, + 2.211092, + -0.863346, + 0.059172, + -1.077194, + -0.687371, + -0.326849, + 1.138742, + -0.569806, + 0.200439, + 0.238943, + -0.929741, + 0.855134, + 0.989918, + 0.329058, + -0.27983, + 0.360809, + -1.762999, + -0.775532, + 0.0, + -1.864981, + 0.0), + "test_case_3"); + +TEST_DATA(LIST(3, 48), + LIST(16), + LIST(3, 2, 9, 2), + 16, + 32, + false, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766, + -0.700153, + 0.785022, + 0.449904, + -0.437027, + 0.846442, + -2.708081, + -0.357738, + 1.859077, + -1.122008, + 0.797016, + -0.205318, + 0.620443, + -1.210487, + -0.100233, + -0.644188, + 1.252426, + -1.503346, + 1.685813, + -0.655548, + 0.148169, + 0.98681, + -1.806409, + -0.789457, + 0.934387, + 0.819341, + -0.359637, + -0.394646, + -0.040578, + 1.10817, + 1.745871, + -0.706232, + 2.154361, + -0.417549, + 0.724758, + -1.090765, + 0.9193, + 0.535271, + -0.979016, + 0.870831, + -0.405604, + -0.192899, + 0.242223, + -2.103053, + -0.234349, + -1.273937, + -0.334684, + -1.239732, + 1.185672, + 1.292743, + 0.741054, + -0.700485, + -0.252933, + -0.760226, + 0.68806, + 0.761746, + 0.065581, + -0.189028, + 0.253604, + 0.17645, + 0.993091, + 0.771911, + -0.45738, + -0.123291, + -0.150833, + -1.207279, + -1.033516, + -0.975503, + 0.626698, + 0.50241, + -1.377113, + -0.788385, + 0.043618, + -0.945737, + 0.093409, + -0.43187, + 0.748239, + 1.084854, + 1.147015, + 0.171417, + -0.231462, + -1.082049, + 0.213656, + -0.888956, + 0.489294, + -0.722142, + 0.920915, + -1.327819, + -1.384117, + -1.432893, + -0.535934, + 0.48227, + 0.220006, + -0.589304, + -1.305996, + -1.089244, + 1.762965), + LIST(0.0, + 0.03806, + 0.146447, + 0.308658, + 0.5, + 0.691342, + 0.853553, + 0.96194, + 1.0, + 0.96194, + 0.853553, + 0.691342, + 0.5, + 0.308658, + 0.146446, + 0.03806), + LIST(2.500471, + 0.0, + -0.832353, + 0.394573, + -1.071078, + -0.247753, + 0.658023, + -0.816808, + -0.030171, + 2.561505, + -0.226757, + -2.46842, + 0.078811, + 0.923822, + 0.915031, + -0.677954, + -1.483484, + 0.0, + -2.69633, + 0.0, + 1.075557, + -1.680968, + 0.872213, + 0.11946, + -0.348891, + 1.429143, + -0.474217, + -0.146423, + -0.450706, + -0.031404, + 1.696735, + -2.559718, + -1.028739, + 3.200351, + 0.012426, + 0.0, + -0.789636, + 0.0, + -0.35829, + 0.7881, + 2.059313, + -1.108987, + -1.325991, + -0.840667, + -0.79498, + 3.236621, + 2.930576, + -2.854989, + -3.939284, + -0.528339, + 3.241738, + 2.887635, + -2.836527, + 0.0, + -2.698145, + 0.0, + 1.297723, + -2.351696, + 0.58228, + 1.450589, + -1.163221, + -0.362671, + 0.830829, + 0.296243, + 0.544256, + 1.448767, + -0.229411, + -3.582516, + 0.092836, + 3.078159, + -1.212438, + 0.0, + 1.769482, + 0.0, + -1.254114, + 0.665334, + -0.628684, + 0.238478, + 2.008016, + 
-0.98037, + -0.863346, + 0.059172, + -0.326849, + 1.138742, + 0.238943, + -0.929741, + 0.329058, + -0.27983, + -0.775532, + 0.0, + -4.335095, + 0.0, + 3.115947, + 0.51333, + -2.409475, + -2.158762, + 2.824623, + 2.211092, + -1.077194, + -0.687371, + -0.569806, + 0.200439, + 0.855134, + 0.989918, + 0.360809, + -1.762999, + -1.864981, + 0.0), + "test_case_4"); + +TEST_DATA(LIST(1, 48), + LIST(33), + LIST(1, 2, 17, 2), + 33, + 11, + false, + LIST(0.421005, + 0.023465, + -0.394774, + 0.974785, + 0.512418, + 0.219203, + -0.338684, + 1.122448, + -0.128486, + -0.484502, + 0.56865, + 1.332561, + 0.222876, + 0.093504, + 0.89355, + -0.659941, + 1.187668, + -2.086986, + -0.42915, + -0.198203, + 1.385841, + 0.052231, + 0.587976, + -0.684267, + -0.045731, + 0.629634, + 1.78197, + -1.381788, + 0.812565, + 0.522027, + 0.013688, + -0.070836, + 1.131118, + 0.147256, + -0.452005, + 0.263287, + 0.489187, + -0.710666, + 0.531863, + -0.996062, + 0.188195, + -0.022838, + -0.961315, + 0.174546, + -2.681746, + -0.311997, + -0.007598, + 0.152766), + LIST(0.0, + 0.009036, + 0.035816, + 0.079373, + 0.138133, + 0.209972, + 0.292293, + 0.382121, + 0.476209, + 0.571157, + 0.663534, + 0.75, + 0.82743, + 0.893027, + 0.944418, + 0.979746, + 0.997736, + 0.997736, + 0.979746, + 0.944418, + 0.893027, + 0.82743, + 0.75, + 0.663534, + 0.571157, + 0.476209, + 0.382121, + 0.292292, + 0.209971, + 0.138133, + 0.079373, + 0.035816, + 0.009036), + LIST(2.579306, + 0.0, + -0.152245, + -0.738268, + -2.416611, + 1.261868, + 2.867594, + -1.060565, + -3.284244, + 0.170454, + 2.868702, + -0.841069, + -1.104891, + 3.300689, + -1.140553, + -4.425755, + 0.251539, + 2.542669, + 1.792233, + -0.387007, + 1.135473, + 1.006229, + -3.639657, + -1.375989, + 1.341596, + 1.403905, + -0.208277, + -2.707592, + 0.588243, + 3.234172, + 1.73603, + -4.193725, + -1.924587, + 6.196737, + 3.055838, + 0.0, + -2.899791, + 0.050701, + 1.185766, + 0.087218, + 0.653565, + 0.058316, + 0.340882, + -0.196132, + -0.708827, + -1.955343, + -1.790521, + 2.863453, + 2.38143, + -0.021196, + -1.090831, + -2.4223, + 2.221659, + 2.73066, + -4.076646, + -3.259353, + 3.247396, + 2.531169, + -1.041836, + -0.880713, + 0.399545, + 2.26406, + -0.611558, + -1.956384, + 1.462047, + 1.335275, + -1.2002, + -5.702536), + "test_case_5"); + +TEST_DATA(LIST(48), + LIST(7), + LIST(6, 13, 2), + 11, + 3, + true, + LIST(-0.41676, + -0.05627, + -2.1362, + 1.64027, + -1.79344, + -0.84175, + 0.50288, + -1.24529, + -1.05795, + -0.90901, + 0.55145, + 2.29221, + 0.04154, + -1.11793, + 0.53906, + -0.59616, + -0.01913, + 1.175, + -0.74787, + 0.00903, + -0.87811, + -0.15643, + 0.25657, + -0.98878, + -0.33882, + -0.23618, + -0.63766, + -1.18761, + -1.42122, + -0.1535, + -0.26906, + 2.23137, + -2.43477, + 0.11273, + 0.37044, + 1.35963, + 0.50186, + -0.84421, + 0.00001, + 0.54235, + -0.31351, + 0.77101, + -1.86809, + 1.73118, + 1.46768, + -0.33568, + 0.61134, + 0.04797), + LIST(0.0, 0.25, 0.75, 1.0, 0.75, 0.25, 0.0), + LIST(-1.71092, + 0., + -2.41009, + 0., + 2.23022, + 0., + -0.7409, + 0., + 0.45297, + 0., + -1.11149, + 0., + -1.14862, + 0., + -2.14551, + 0., + -1.16026, + 0., + -0.65135, + 0., + 1.83099, + 0., + -0.1793, + 0., + -0.2968, + 0., + 1.47212, + 0.71877, + 2.17268, + 0.79158, + -2.28473, + -0.93586, + 0.4625, + 0.34192, + -0.56009, + -0.32899, + 0.93528, + 0.44276, + 1.11077, + 0.05564, + 1.82719, + -0.1221, + 0.71587, + 1.50743, + 1.10802, + -0.41842, + -1.71345, + -0.67438, + 0.05781, + 0.40969, + 0.4558, + -0.24137, + -0.54856, + -1.56669, + -1.47087, + -1.22889, + 2.1535, + 1.84441, + 
0.18738, + -0.28908, + 0.66134, + 0.88008, + -0.66811, + -0.52077, + -1.02705, + -0.15929, + -1.12869, + 0.2893, + 0.0583, + -1.66476, + -2.16394, + 0.18383, + 1.42389, + 1.02343, + 0.32308, + -0.7337, + -0.68826, + 0.55139, + -0.91886, + 1.85309, + 0.52177, + 0.97814, + -1.50306, + -2.29021, + -0.76526, + -0.28515, + -0.47423, + -1.4385, + 0.63386, + 0.43591, + 0.90989, + 0.38369, + 0.51776, + -0.36462, + -0.31809, + 0.57129, + 2.99689, + 0.98808, + -1.06897, + -0.98176, + -0.81284, + 0.72147, + 0.63521, + -1.1571, + 1.74128, + -1.03922, + 0.14692, + -0.1082, + 0.64531, + 1.98433, + 0.856, + 1.12631, + 0.14133, + 1.66429, + -0.63884, + -0.57479, + -0.6772, + -0.71798, + -0.19529, + 0.22579, + 0.09013, + 0.66192, + -2.7275, + -2.70068, + 0.6808, + 0.74142, + 0.95724, + -0.28153, + -0.33733, + 2.09067, + -0.89051, + -0.04374, + -0.16546, + -0.69762, + -0.12612, + -1.43585, + -0.37017, + -1.74231, + 0.00518, + -1.6207, + 0.29356, + 0.84215, + 0.2579, + 0.98549, + 0.05179, + -0.0244, + 0.03393, + -1.30044, + 1.1122, + 3.98255, + -0.23778, + -0.54982, + -0.43563, + -0.19685, + 0.08299, + -2.86001), + "test_case_6"); \ No newline at end of file diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py index 902adebc2226fc..69f08acd9754d2 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOps.py @@ -1,11 +1,10 @@ # Copyright (C) 2018-2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import platform -import sys - import numpy as np +import platform import pytest +import sys from common.tf_layer_test_class import CommonTFLayerTest @@ -14,7 +13,7 @@ class TestUnaryOps(CommonTFLayerTest): def _prepare_input(self, inputs_dict): non_negative = ['Sqrt', 'Log'] - narrow_borders = ["Sinh", "Cosh", "Tanh", "Exp", "Selu"] + narrow_borders = ["Tanh"] within_one = ['Asin', 'Acos', 'Atanh'] from_one = ['Acosh'] @@ -76,25 +75,14 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte 'Asin': tf.math.asin, 'Asinh': tf.math.asinh, 'Atan': tf.math.atan, - 'Atanh': tf.math.atanh, 'BitwiseNot': tf.bitwise.invert, 'Ceiling': tf.math.ceil, - 'Cos': tf.math.cos, - 'Cosh': tf.math.cosh, - 'Elu': tf.nn.elu, - 'Erf': tf.math.erf, - 'Exp': tf.math.exp, 'Floor': tf.math.floor, 'Log': tf.math.log, 'LogicalNot': tf.math.logical_not, # 'Mish': tfa.activations.mish, # temporarily moved to `create_net_with_mish()` 'Negative': tf.math.negative, - 'Selu': tf.nn.selu, - 'Sigmoid': tf.nn.sigmoid, 'Sign': tf.math.sign, - 'Sin': tf.math.sin, - 'Sinh': tf.math.sinh, - 'SoftPlus': tf.nn.softplus, 'Square': tf.math.square, 'Tan': tf.math.tan, 'Tanh': tf.math.tanh, @@ -126,15 +114,8 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])] @pytest.mark.parametrize("params", test_data_precommit) - @pytest.mark.parametrize("op_type", ['Elu', - 'Sigmoid', - 'Sin', - 'Sinh', - 'Cos', - 'Cosh', - 'Abs', + @pytest.mark.parametrize("op_type", ['Abs', 'Negative', - 'Exp', 'Tan', 'Tanh', 'Floor', @@ -145,15 +126,11 @@ def create_net_with_unary_op(self, shape, ir_version, op_type, use_legacy_fronte 'Atan', 'Log', 'Sign', - 'SoftPlus', - 'Atanh', 'Acosh', 'Asinh', 'LogicalNot', 'Square', - 'Erf', - 'BitwiseNot' - ]) + 'BitwiseNot']) @pytest.mark.nightly def test_unary_op_precommit(self, params, ie_device, precision, ir_version, temp_dir, op_type, use_legacy_frontend): @@ -188,15 +165,8 @@ def 
test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, dict(shape=[4, 6, 8, 10, 12])] @pytest.mark.parametrize("params", test_data) - @pytest.mark.parametrize("op_type", ['Elu', - 'Sigmoid', - 'Sin', - 'Sinh', - 'Cos', - 'Cosh', - 'Abs', + @pytest.mark.parametrize("op_type", ['Abs', 'Negative', - 'Exp', 'Tan', 'Tanh', 'Floor', @@ -206,17 +176,12 @@ def test_unary_op_mish_precommit(self, params, ie_device, precision, ir_version, 'Acos', 'Atan', 'Log', - 'LogicalNot', 'Sign', - 'SoftPlus', - 'Atanh', 'Acosh', 'Asinh', + 'LogicalNot', 'Square', - 'Erf', - 'Selu', - 'BitwiseNot' - ]) + 'BitwiseNot']) @pytest.mark.nightly @pytest.mark.skipif(sys.platform == 'darwin', reason="Ticket - 122182") @pytest.mark.xfail(platform.machine() in ["aarch64", "arm64", "ARM64"], reason='Ticket - 122716') diff --git a/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py new file mode 100644 index 00000000000000..4ff4d589cbae32 --- /dev/null +++ b/tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py @@ -0,0 +1,70 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import platform +import pytest +import tensorflow as tf +from common.tf_layer_test_class import CommonTFLayerTest + +rng = np.random.default_rng(253512) + + +class TestUnaryOpsAllRealDomain(CommonTFLayerTest): + def _prepare_input(self, inputs_info): + assert 'x:0' in inputs_info, "Test error: inputs_info must contain `x`" + x_shape = inputs_info['x:0'] + inputs_data = {} + inputs_data['x:0'] = rng.uniform(-5.0, 5.0, x_shape).astype(self.input_type) + return inputs_data + + def create_unary_net(self, input_shape, input_type, op_type): + op_type_map = { + 'Elu': lambda x: tf.raw_ops.Elu(features=x), + 'Sigmoid': tf.raw_ops.Sigmoid, + 'Sin': tf.raw_ops.Sin, + 'Sinh': tf.raw_ops.Sinh, + 'Cos': tf.raw_ops.Cos, + 'Cosh': tf.raw_ops.Cosh, + 'Exp': tf.raw_ops.Exp, + 'Atan': tf.raw_ops.Atan, + 'Softplus': lambda x: tf.raw_ops.Softplus(features=x), + 'Erf': tf.raw_ops.Erf, + 'Selu': lambda x: tf.raw_ops.Selu(features=x) + } + + self.input_type = input_type + tf.compat.v1.reset_default_graph() + # Create the graph and model + with tf.compat.v1.Session() as sess: + x = tf.compat.v1.placeholder(input_type, input_shape, 'x') + op_type_map[op_type](x=x) + tf.compat.v1.global_variables_initializer() + + tf_net = sess.graph_def + + return tf_net, None + + @pytest.mark.parametrize("input_shape", [[], [2], [3, 4], [3, 2, 4]]) + @pytest.mark.parametrize("input_type", [np.float16, np.float32, np.float64]) + @pytest.mark.parametrize("op_type", ['Elu', + 'Sigmoid', + 'Sin', + 'Sinh', + 'Cos', + 'Cosh', + 'Exp', + 'Atan', + 'Softplus', + 'Erf', + 'Selu']) + @pytest.mark.precommit + @pytest.mark.nightly + def test_unary_ops(self, input_shape, input_type, op_type, + ie_device, precision, ir_version, temp_dir, + use_legacy_frontend): + if platform.machine() in ["aarch64", "arm64", "ARM64"] and op_type in ['Cos', 'Cosh', 'Sinh', 'Exp']: + pytest.skip("159585: accuracy error on ARM") + self._test(*self.create_unary_net(input_shape, input_type, op_type), + ie_device, precision, ir_version, temp_dir=temp_dir, + use_legacy_frontend=use_legacy_frontend, custom_eps=1e-3)
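+
+# Local run sketch (hypothetical invocation; assumes the usual OpenVINO
+# layer-test environment with an installed `openvino` package and TensorFlow,
+# run from the repository root):
+#   python -m pytest tests/layer_tests/tensorflow_tests/test_tf_UnaryOpsAllRealDomain.py -m precommit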