From 5fba4415e2db090284b907c8ca8888f80f0a419c Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Mon, 20 Jan 2025 06:05:05 +0100
Subject: [PATCH 01/35] [PT FE] Support different aliases of existing
operations (#28531)
### Details:
- *Support: `aten::equal`, `aten::index_put`, `aten::logsumexp`,
`prim::abs`*
### Tickets:
- *ticket-id*
Signed-off-by: Maxim Vafin
---
src/frontends/pytorch/src/op/index_put_.cpp | 2 +-
src/frontends/pytorch/src/op/log.cpp | 8 +++--
src/frontends/pytorch/src/op_table.cpp | 7 ++--
.../pytorch_tests/test_logsumexp.py | 34 +++++++++++++++++++
.../pytorch_tests/test_unary_ops.py | 27 +++++++++++++--
5 files changed, 70 insertions(+), 8 deletions(-)
create mode 100644 tests/layer_tests/pytorch_tests/test_logsumexp.py
diff --git a/src/frontends/pytorch/src/op/index_put_.cpp b/src/frontends/pytorch/src/op/index_put_.cpp
index 1b5725a8a95bb3..4591862d8f04c1 100644
--- a/src/frontends/pytorch/src/op/index_put_.cpp
+++ b/src/frontends/pytorch/src/op/index_put_.cpp
@@ -10,7 +10,7 @@ namespace frontend {
namespace pytorch {
namespace op {
-OutputVector translate_index_put_(const NodeContext& context) {
+OutputVector translate_index_put(const NodeContext& context) {
// Pass as PtFrameworkNode to register as `inplace_op`. Conversion to OV operators is done as transformation.
auto node = std::make_shared(context.get_decoder(), context.inputs());
return {context.mark_node(node)};
diff --git a/src/frontends/pytorch/src/op/log.cpp b/src/frontends/pytorch/src/op/log.cpp
index e932538c86520e..dbda6329deeb4f 100644
--- a/src/frontends/pytorch/src/op/log.cpp
+++ b/src/frontends/pytorch/src/op/log.cpp
@@ -77,7 +77,7 @@ OutputVector translate_log10(const NodeContext& context) {
};
OutputVector translate_logsumexp(const NodeContext& context) {
- num_inputs_check(context, 1, 2);
+ num_inputs_check(context, 1, 3);
auto input = context.get_input(0);
ov::Output dim;
if (!context.input_is_none(1)) {
@@ -85,8 +85,12 @@ OutputVector translate_logsumexp(const NodeContext& context) {
} else {
dim = context.mark_node(get_axes_range(context, 0));
}
+ bool keepdim = false;
+ if (!context.input_is_none(2)) {
+ keepdim = context.const_input(2);
+ }
auto exp = context.mark_node(std::make_shared(input));
- auto sum = context.mark_node(std::make_shared(exp, dim, false));
+ auto sum = context.mark_node(std::make_shared(exp, dim, keepdim));
auto log = context.mark_node(std::make_shared(sum));
return {log};
};
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index f00391e08e2a32..27dd55f77955e0 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -116,7 +116,7 @@ OP_CONVERTER(translate_index);
OP_CONVERTER(translate_index_add);
OP_CONVERTER(translate_index_copy_);
OP_CONVERTER(translate_index_fill_);
-OP_CONVERTER(translate_index_put_);
+OP_CONVERTER(translate_index_put);
OP_CONVERTER(translate_index_select);
OP_CONVERTER(translate_instance_norm);
OP_CONVERTER(translate_int);
@@ -464,6 +464,7 @@ const std::unordered_map get_supported_ops_ts() {
{"aten::empty", op::translate_empty},
{"aten::empty_like", op::translate_empty_like},
{"aten::eq", op::translate_1to1_match_2_inputs_align_types},
+ {"aten::equal", op::translate_1to1_match_2_inputs_align_types},
{"aten::erf", op::translate_erf},
{"aten::erfc", op::translate_erfc},
{"aten::exp", op::optional_out, 1>},
@@ -507,7 +508,7 @@ const std::unordered_map get_supported_ops_ts() {
// aten::index - Supported in limited set of patterns
{"aten::index_copy_", op::inplace_op},
{"aten::index_fill_", op::inplace_op},
- {"aten::index_put_", op::inplace_op},
+ {"aten::index_put", op::translate_index_put},
{"aten::index_add", op::translate_index_add},
{"aten::index_select", op::translate_index_select},
{"aten::instance_norm", op::translate_instance_norm},
@@ -550,6 +551,7 @@ const std::unordered_map get_supported_ops_ts() {
{"aten::log2_", op::inplace_op},
{"aten::log10", op::optional_out},
{"aten::log10_", op::inplace_op},
+ {"aten::logsumexp", op::translate_logsumexp},
{"aten::lstm", op::translate_lstm},
{"aten::lt", op::translate_1to1_match_2_inputs_align_types},
{"aten::masked_fill", op::translate_masked_fill},
@@ -714,6 +716,7 @@ const std::unordered_map get_supported_ops_ts() {
{"ov_ext::embedding", op::translate_embedding_ext},
{"ov_ext::conv1d", op::translate_conv1d_ext},
{"ov_ext::linear", op::translate_linear},
+ {"prim::abs", op::translate_1to1_match_1_inputs},
{"prim::Constant", op::translate_constant},
{"prim::device", op::translate_constant},
// prim::DictConstruct - Supported in limited set of patterns
diff --git a/tests/layer_tests/pytorch_tests/test_logsumexp.py b/tests/layer_tests/pytorch_tests/test_logsumexp.py
new file mode 100644
index 00000000000000..806e3b80540d5a
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_logsumexp.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2018-2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import numpy as np
+import pytest
+import torch
+
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class aten_logsumexp(torch.nn.Module):
+ def __init__(self, dim, keepdim) -> None:
+ super().__init__()
+ self.dim = dim
+ self.keepdim = keepdim
+
+ def forward(self, input_tensor):
+ return torch.logsumexp(input_tensor, dim=self.dim, keepdim=self.keepdim)
+
+
+class TestLogsumexp(PytorchLayerTest):
+ def _prepare_input(self):
+ return (np.random.randn(2, 5, 9, 7),)
+
+ @pytest.mark.parametrize("dim", [
+ 0, 1, 2, 3, -1, -2, -3, -4
+ ])
+ @pytest.mark.parametrize("keepdim", [True, False])
+ @pytest.mark.nightly
+ @pytest.mark.precommit
+ @pytest.mark.precommit_fx_backend
+ def test_logsumexp(self, dim, keepdim, ie_device, precision, ir_version):
+ self._test(aten_logsumexp(dim, keepdim), None, "aten::logsumexp",
+ ie_device, precision, ir_version)
diff --git a/tests/layer_tests/pytorch_tests/test_unary_ops.py b/tests/layer_tests/pytorch_tests/test_unary_ops.py
index 9807343080043c..584a80fe4ce254 100644
--- a/tests/layer_tests/pytorch_tests/test_unary_ops.py
+++ b/tests/layer_tests/pytorch_tests/test_unary_ops.py
@@ -75,7 +75,7 @@
class unary_op_net(torch.nn.Module):
def __init__(self, op, dtype):
- super(unary_op_net, self).__init__()
+ super().__init__()
self.dtype = dtype
self.op = op
@@ -87,7 +87,7 @@ def forward(self, x):
class unary_op_out_net(torch.nn.Module):
def __init__(self, op, dtype):
- super(unary_op_out_net, self).__init__()
+ super().__init__()
self.dtype = dtype
self.op = op
@@ -101,7 +101,7 @@ def forward(self, x):
class unary_func_op_inplace_net(torch.nn.Module):
def __init__(self, op, dtype):
- super(unary_func_op_inplace_net, self).__init__()
+ super().__init__()
self.dtype = dtype
self.op = op
@@ -111,6 +111,17 @@ def forward(self, x):
return y, x1
+class prim_abs_net(torch.nn.Module):
+ def __init__(self, dtype):
+ super().__init__()
+ self.dtype = dtype
+
+ def forward(self, x):
+ x1 = x.to(self.dtype)
+ y = abs(x1)
+ return y, x1
+
+
class TestUnaryOp(PytorchLayerTest):
def _prepare_input(self):
# random number in range [1, 11)
@@ -265,3 +276,13 @@ def test_unary_func_op_inplace(self, op_type, dtype, ie_device, precision, ir_ve
self.dtype = dtype
self._test(unary_func_op_inplace_net(OPS[op_type], dtype), None, op_type + "_",
ie_device, precision, ir_version)
+
+ @pytest.mark.nightly
+ @pytest.mark.precommit
+ @pytest.mark.precommit_torch_export
+ @pytest.mark.precommit_fx_backend
+ @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.int8, torch.uint8, torch.int32, torch.int64])
+ def test_prim_abs(self, dtype, ie_device, precision, ir_version):
+ self.dtype = dtype
+ self._test(prim_abs_net(dtype), None, "prim::abs",
+ ie_device, precision, ir_version)
From d757efd7fb3415a3dbda10941b3dae0ace0ac16e Mon Sep 17 00:00:00 2001
From: Maxim Vafin
Date: Mon, 20 Jan 2025 07:32:00 +0100
Subject: [PATCH 02/35] [PT FE] Support aten::concatenate (#28518)
### Details:
- *Support `aten::concatenate`*
### Tickets:
- *CVS-160777*
Signed-off-by: Maxim Vafin
---
src/frontends/pytorch/src/op_table.cpp | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 27dd55f77955e0..00e3a55b0bc327 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -432,6 +432,7 @@ const std::unordered_map get_supported_ops_ts() {
{"aten::col2im", op::translate_col2im},
{"aten::complex", op::translate_complex},
{"aten::concat", op::translate_cat},
+ {"aten::concatenate", op::translate_cat},
{"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail,
// we assume all tensors are contiguous
{"aten::conv_transpose1d", op::translate_conv_transposend},
From 78a1d1b907cc336e93df0c599202af76f09cb20c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 15:36:13 +0400
Subject: [PATCH 03/35] Bump paddlepaddle from 2.6.1 to 2.6.2 in /tests
(#28547)
Bumps [paddlepaddle](https://github.com/paddlepaddle/paddle) from 2.6.1
to 2.6.2.
Commits
[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=paddlepaddle&package-manager=pip&previous-version=2.6.1&new-version=2.6.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
tests/constraints.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/constraints.txt b/tests/constraints.txt
index 30ba701095ecf4..a806b7dfb47c18 100644
--- a/tests/constraints.txt
+++ b/tests/constraints.txt
@@ -13,7 +13,7 @@ defusedxml>=0.7.1
tensorflow>=2.5,<2.19.0
requests>=2.25.1
opencv-python>=4.5
-paddlepaddle==2.6.1
+paddlepaddle==2.6.2
protobuf>=3.18.1,<6.0.0
py>=1.9.0
pytest>=5.0,<8.4
From ace5379eb62846d6167bca15e9ff17cceaf6a4e1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 12:02:38 +0000
Subject: [PATCH 04/35] Bump pytest-xdist from 2.1.0 to 3.6.1 in /tests
(#28548)
Bumps [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) from
2.1.0 to 3.6.1.
Changelog
Sourced from pytest-xdist's
changelog.
pytest-xdist 3.6.1 (2024-04-28)
Bug Fixes
[#1071](https://github.com/pytest-dev/pytest-xdist/issues/1071)
<https://github.com/pytest-dev/pytest-xdist/issues/1071>
_:
Add backward compatibility for deadlock issue with the
execnet
new main_thread_only
"execmodel" triggered when pytest-cov accesses rinfo.
pytest-xdist 3.6.0 (2024-04-19)
This release was YANKED due to a regression fixed in 3.6.1.
Features
[#1027](https://github.com/pytest-dev/pytest-xdist/issues/1027)
<https://github.com/pytest-dev/pytest-xdist/pull/1027>
_:pytest-xdist
workers now always execute the tests in the main thread.
Previously some tests might end up executing in a separate thread other
than main
in the workers, due to some internal
execnet`` details. This can cause problems specially with async
frameworks where the event loop is running in the ``main`` thread (for
example
#620
pytest-dev/pytest-xdist#620`__).
Bug Fixes
-
[#1024](https://github.com/pytest-dev/pytest-xdist/issues/1024)
<https://github.com/pytest-dev/pytest-xdist/issues/1024>
_:
Added proper handling of shouldstop
(such as set by
--max-fail
) and shouldfail
conditions in
workers.
Previously, a worker might have continued executing further tests before
the controller could terminate the session.
-
[#1028](https://github.com/pytest-dev/pytest-xdist/issues/1028)
<https://github.com/pytest-dev/pytest-xdist/issues/1028>
_:
Fixed compatibility issue between looponfail
and editable
installs.
-
[#620](https://github.com/pytest-dev/pytest-xdist/issues/620)
<https://github.com/pytest-dev/pytest-xdist/issues/620>
_:
Use the new main_thread_only
execnet
"execmodel" so that code which expects to only run in the main
thread will now work as expected.
-
[#937](https://github.com/pytest-dev/pytest-xdist/issues/937)
<https://github.com/pytest-dev/pytest-xdist/issues/937>
_:
Fixed a bug where plugin would raise an incompatibility error with
--pdb
despite using -n0
.
Removals
-
[#1053](https://github.com/pytest-dev/pytest-xdist/issues/1053)
<https://github.com/pytest-dev/pytest-xdist/issues/1053>
_:
Dropped support for Python 3.7.
-
[#1057](https://github.com/pytest-dev/pytest-xdist/issues/1057)
<https://github.com/pytest-dev/pytest-xdist/issues/1057>
_:
pytest>=7.0.0 is now required.
execnet>=2.1.0 is now required.
Trivial Changes
-
[#1020](https://github.com/pytest-dev/pytest-xdist/issues/1020)
<https://github.com/pytest-dev/pytest-xdist/issues/1020>
_:
pytest-xdist's setup.py
file is removed.
If you relied on this file, e.g. to install pytest using
setup.py install
,
please see Why you shouldn't invoke setup.py directly
<https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html#summary>
_
for alternatives.
... (truncated)
Commits
4dd2978
Release 3.6.1
b397288
Merge pull request #1072
from zmedico/gateway-cache-rinfo
12b3cce
Cache execnet gateway rinfo during WorkerController setup
c93a106
build(deps): bump hynek/build-and-inspect-python-package (#1066)
52e2022
[pre-commit.ci] pre-commit autoupdate (#1073)
699f939
Merge pull request #1070
from pytest-dev/release-3.6.0
80bc0b8
Release 3.6.0
20e3ac7
Use execnet main_thread_only execmodel (#1027)
0a4238f
Merge pull request #1067
from pytest-dev/pre-commit-ci-update-config
0686279
[pre-commit.ci] pre-commit autoupdate
- Additional commits viewable in compare
view
[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-xdist&package-manager=pip&previous-version=2.1.0&new-version=3.6.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
tests/e2e_tests/requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/e2e_tests/requirements.txt b/tests/e2e_tests/requirements.txt
index 934a5bcbc90888..a9d7bb0861ddd2 100644
--- a/tests/e2e_tests/requirements.txt
+++ b/tests/e2e_tests/requirements.txt
@@ -26,7 +26,7 @@ pytest-cov==2.11.1
pytest-html
pytest-json-report==1.5.0
# pytest-metadata==1.7.0
-pytest-xdist==2.1.0
+pytest-xdist==3.6.1
pytest-timeout==2.3.1
# for common utils, e2e_tests
From 3e8bc27b226049f5d0d5395e1edea2af704e02e0 Mon Sep 17 00:00:00 2001
From: Arseniy Obolenskiy
Date: Mon, 20 Jan 2025 13:05:27 +0100
Subject: [PATCH 05/35] [CPU] Replace custom THROW_ERROR macros usage with
THROW_CPU_NODE_ERR (#28510)
### Details:
Replace custom THROW_ERROR macros usage for error reporting in nodes
implementation with THROW_CPU_NODE_ERR to unify error handling
infrastructure in CPU plugin
### Tickets:
- 160275
---
.../intel_cpu/src/nodes/depth_to_space.cpp | 24 +++---
src/plugins/intel_cpu/src/nodes/eye.cpp | 2 -
src/plugins/intel_cpu/src/nodes/gather.cpp | 20 ++---
.../intel_cpu/src/nodes/gather_elements.cpp | 10 +--
src/plugins/intel_cpu/src/nodes/gather_nd.cpp | 22 +++--
.../intel_cpu/src/nodes/grid_sample.cpp | 2 -
.../intel_cpu/src/nodes/interaction.cpp | 4 +-
src/plugins/intel_cpu/src/nodes/mha.cpp | 20 ++---
src/plugins/intel_cpu/src/nodes/normalize.cpp | 19 ++---
src/plugins/intel_cpu/src/nodes/priorbox.cpp | 8 +-
.../intel_cpu/src/nodes/space_to_depth.cpp | 24 +++---
src/plugins/intel_cpu/src/nodes/split.cpp | 18 ++--
.../intel_cpu/src/nodes/tensoriterator.cpp | 82 +++++++++----------
src/plugins/intel_cpu/src/nodes/unique.cpp | 14 ++--
14 files changed, 123 insertions(+), 146 deletions(-)
diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp
index bf0823885ebc71..ed8f1776d6c974 100644
--- a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp
+++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp
@@ -14,8 +14,6 @@
#include "openvino/opsets/opset1.hpp"
#include "utils/general_utils.h"
-#define THROW_ERROR(...) OPENVINO_THROW("DepthToSpace layer with name '", getName(), "' ", __VA_ARGS__)
-
using namespace dnnl::impl;
namespace ov {
@@ -73,11 +71,11 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
if (inputShapes.size() != 1 || outputShapes.size() != 1)
- THROW_ERROR("has incorrect number of input/output edges!");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output edges!");
auto depthToSpace = ov::as_type_ptr(op);
if (!depthToSpace)
- THROW_ERROR("supports only opset1");
+ THROW_CPU_NODE_ERR("supports only opset1");
const auto modeNgraph = depthToSpace->get_mode();
if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST) {
@@ -85,22 +83,22 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte
} else if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) {
attrs.mode = Mode::DEPTH_FIRST;
} else {
- THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph));
+ THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph));
}
attrs.blockSize = depthToSpace->get_block_size();
if (attrs.blockSize == 0)
- THROW_ERROR("has incorrect block_size parameter is zero!");
+ THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!");
const size_t srcRank = getInputShapeAtPort(0).getRank();
const size_t dstRank = getOutputShapeAtPort(0).getRank();
if (srcRank < 3)
- THROW_ERROR("has incorrect number of input dimensions");
+ THROW_CPU_NODE_ERR("has incorrect number of input dimensions");
if (srcRank > 5)
- THROW_ERROR("doesn't support dimensions with rank greater than 5");
+ THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5");
if (srcRank != dstRank)
- THROW_ERROR("has incorrect number of input/output dimensions");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions");
const size_t nSpatialDims = srcRank - 2;
attrs.blockStep = static_cast(std::pow(attrs.blockSize, nSpatialDims));
@@ -164,11 +162,11 @@ void DepthToSpace::createPrimitive() {
auto dstMemPtr = getDstMemoryAtPort(0);
auto srcMemPtr = getSrcMemoryAtPort(0);
if (!dstMemPtr)
- THROW_ERROR("has null destination memory");
+ THROW_CPU_NODE_ERR("has null destination memory");
if (!srcMemPtr)
- THROW_ERROR("has null input memory");
+ THROW_CPU_NODE_ERR("has null input memory");
if (getSelectedPrimitiveDescriptor() == nullptr)
- THROW_ERROR("has unidentified preferable primitive descriptor");
+ THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor");
const auto& memoryDesc = srcMemPtr->getDesc();
attrs.dataSize = memoryDesc.getPrecision().size();
@@ -305,7 +303,7 @@ void DepthToSpace::DepthToSpaceExecutor::exec(const MemoryPtr& srcMemPtr, const
void DepthToSpace::execute(const dnnl::stream& strm) {
if (!execPtr) {
- THROW_ERROR("doesn't have a compiled executor.");
+ THROW_CPU_NODE_ERR("doesn't have a compiled executor.");
}
int MB = getSrcMemoryAtPort(0)->getStaticDims()[0];
diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp
index 873d07673c8990..ef4995a87fd492 100644
--- a/src/plugins/intel_cpu/src/nodes/eye.cpp
+++ b/src/plugins/intel_cpu/src/nodes/eye.cpp
@@ -12,8 +12,6 @@
#include "shape_inference/shape_inference.hpp"
#include "utils/bfloat16.hpp"
-#define THROW_ERROR(...) OPENVINO_THROW(NameFromType(getType()), " node with name '", getName(), "' ", __VA_ARGS__)
-
namespace ov {
namespace intel_cpu {
namespace node {
diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp
index e72901d7d43e62..f349990f56f620 100644
--- a/src/plugins/intel_cpu/src/nodes/gather.cpp
+++ b/src/plugins/intel_cpu/src/nodes/gather.cpp
@@ -24,8 +24,6 @@
using namespace dnnl::impl::cpu;
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)
-
namespace ov {
namespace intel_cpu {
namespace node {
@@ -69,7 +67,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co
if (one_of(op->get_input_size(), 4u, 5u) && op->get_output_size() == 1u) {
compressed = true;
} else if (op->get_input_size() != 3 || op->get_output_size() != 1) {
- THROW_ERROR("has incorrect number of input/output edges!");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output edges!");
}
const auto& dataShape = getInputShapeAtPort(GATHER_DATA);
@@ -80,7 +78,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co
isIdxShapeStat = idxShape.isStatic();
const auto indicesRank = idxShape.getRank();
if (dataSrcRank == 0lu || indicesRank == 0lu)
- THROW_ERROR("has incorrect input parameters ranks.");
+ THROW_CPU_NODE_ERR("has incorrect input parameters ranks.");
if (ov::is_type(op)) {
batchDims = static_cast(ov::as_type_ptr(op)->get_batch_dims());
@@ -104,7 +102,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co
if (batchDims < 0)
batchDims += indicesRank;
if (batchDims < 0 || batchDims > std::min(static_cast(dataSrcRank), static_cast(indicesRank)))
- THROW_ERROR("has incorrect batch_dims ", batchDims, "!");
+ THROW_CPU_NODE_ERR("has incorrect batch_dims ", batchDims, "!");
if (ov::is_type(op->get_input_node_ptr(GATHER_AXIS))) {
isAxisInputConst = true;
@@ -112,7 +110,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co
if (axis < 0)
axis += dataSrcRank;
if (axis < 0 || axis >= dataSrcRank || batchDims > axis)
- THROW_ERROR("has incorrect input parameter axis value: ", axis);
+ THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis);
}
if (auto indices = ov::as_type(op->get_input_node_ptr(GATHER_INDICES))) {
@@ -339,12 +337,12 @@ bool Gather::needPrepareParams() const {
void Gather::prepareParams() {
auto dataMemPtr = getSrcMemoryAtPort(GATHER_DATA);
if (!dataMemPtr || !dataMemPtr->isDefined())
- THROW_ERROR(" has undefined input data memory.");
+ THROW_CPU_NODE_ERR("has undefined input data memory.");
auto idxMemPtr = getSrcMemoryAtPort(GATHER_INDICES);
if (!idxMemPtr || !idxMemPtr->isDefined())
- THROW_ERROR(" has undefined input indices memory.");
+ THROW_CPU_NODE_ERR("has undefined input indices memory.");
if (getSelectedPrimitiveDescriptor() == nullptr)
- THROW_ERROR(" has unidentified preferable primitive descriptor.");
+ THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor.");
// short 1D vector fast execution impl (typical in shape infer subgraph)
canOptimize1DCase = false;
@@ -363,7 +361,7 @@ void Gather::prepareParams() {
if (axis < 0)
axis += dataSrcRank;
if (axis < 0 || axis >= dataSrcRank || batchDims > axis)
- THROW_ERROR("has incorrect input parameter axis value: ", axis);
+ THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis);
}
if (!isDataShapeStat || !isAxisInputConst) {
@@ -553,7 +551,7 @@ void Gather::executeDynamicImpl(const dnnl::stream& strm) {
void Gather::initShortParams(threadExecParams& p, const uint64_t start) {
if (!jitKernel)
- THROW_ERROR("has uninitialized kernel in function initShortParams.");
+ THROW_CPU_NODE_ERR("has uninitialized kernel in function initShortParams.");
const uint64_t idxElPerVec = jitKernel->getIdxElPerVec();
if (afterAxisSize == 1) { // Elementwise gather.
diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp
index 7a494d184ce9c1..29bc32370d03de 100644
--- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp
+++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp
@@ -38,19 +38,19 @@ GatherElements::GatherElements(const std::shared_ptr& op, const GraphC
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
if (inputShapes.size() != 2 || outputShapes.size() != 1)
- THROW_CPU_NODE_ERR(" has invalid number of input/output edges.");
+ THROW_CPU_NODE_ERR("has invalid number of input/output edges.");
const auto dataRank = getInputShapeAtPort(dataIndex_).getRank();
const auto indicesRank = getInputShapeAtPort(indicesIndex_).getRank();
if (dataRank != indicesRank)
- THROW_CPU_NODE_ERR(" has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks.");
+ THROW_CPU_NODE_ERR("has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks.");
auto gatherElementsOp = ov::as_type_ptr(op);
auto axis = gatherElementsOp->get_axis();
if (axis < 0)
axis += dataRank;
if (axis < 0 || axis >= static_cast(dataRank))
- THROW_CPU_NODE_ERR(" has invalid axis attribute: ", axis);
+ THROW_CPU_NODE_ERR("has invalid axis attribute: ", axis);
axis_ = axis;
}
@@ -78,12 +78,12 @@ void GatherElements::initSupportedPrimitiveDescriptors() {
sizeof(element_type_traits::value_type),
sizeof(element_type_traits::value_type),
sizeof(element_type_traits::value_type))) {
- THROW_CPU_NODE_ERR(" has unsupported 'inputData' input precision: ", inDataPrecision);
+ THROW_CPU_NODE_ERR("has unsupported 'inputData' input precision: ", inDataPrecision);
}
ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_);
if (!one_of(indicesPrecision, ov::element::i32, ov::element::i64)) {
- THROW_CPU_NODE_ERR(" has unsupported 'indices' input precision: ", indicesPrecision);
+ THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision);
}
dataTypeSize_ = inDataPrecision.size();
diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp
index 1124bec41632b8..8df99882adc9cf 100644
--- a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp
+++ b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp
@@ -14,8 +14,6 @@
#include "openvino/core/parallel.hpp"
#include "utils/general_utils.h"
-#define THROW_ERROR(...) OPENVINO_THROW("GatherND layer with name '", getName(), "' ", __VA_ARGS__)
-
namespace ov {
namespace intel_cpu {
namespace node {
@@ -43,7 +41,7 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr
}
if (inputShapes.size() != 2 && outputShapes.size() != 1)
- THROW_ERROR("has invalid number of input/output edges.");
+ THROW_CPU_NODE_ERR("has invalid number of input/output edges.");
const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank();
const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank();
@@ -53,10 +51,10 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr
} else if (auto gatherNdOp = ov::as_type_ptr(op)) {
attrs.batchDims = gatherNdOp->get_batch_dims();
} else {
- THROW_ERROR("has support only opset5.");
+ THROW_CPU_NODE_ERR("has support only opset5.");
}
if (attrs.batchDims >= std::min(dataInputRank, indicesInputRank))
- THROW_ERROR("has invalid batch_dims attribute: ", attrs.batchDims);
+ THROW_CPU_NODE_ERR("has invalid batch_dims attribute: ", attrs.batchDims);
}
void GatherND::initSupportedPrimitiveDescriptors() {
@@ -68,7 +66,7 @@ void GatherND::initSupportedPrimitiveDescriptors() {
sizeof(element_type_traits::value_type),
sizeof(element_type_traits::value_type),
sizeof(element_type_traits::value_type))) {
- THROW_ERROR("has unsupported 'data' input precision: ", inDataPrecision);
+ THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision);
}
attrs.dataSize = inDataPrecision.size();
@@ -80,7 +78,7 @@ void GatherND::initSupportedPrimitiveDescriptors() {
ov::element::u16,
ov::element::i8,
ov::element::u8)) {
- THROW_ERROR("has unsupported 'indices' input precision: ", indicesPrecision);
+ THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision);
}
addSupportedPrimDesc({{LayoutType::ncsp, inDataPrecision}, {LayoutType::ncsp, ov::element::i32}},
@@ -93,13 +91,13 @@ void GatherND::prepareParams() {
auto idxMemPtr = getSrcMemoryAtPort(GATHERND_INDEXES);
auto dstMemPtr = getDstMemoryAtPort(0);
if (!srcMemPtr || !srcMemPtr->isDefined())
- THROW_ERROR(" has undefined input memory of 'data'.");
+ THROW_CPU_NODE_ERR("has undefined input memory of 'data'.");
if (!idxMemPtr || !idxMemPtr->isDefined())
- THROW_ERROR(" has undefined input memory of 'indices'.");
+ THROW_CPU_NODE_ERR("has undefined input memory of 'indices'.");
if (!dstMemPtr || !dstMemPtr->isDefined())
- THROW_ERROR(" has undefined output memory.");
+ THROW_CPU_NODE_ERR("has undefined output memory.");
if (getSelectedPrimitiveDescriptor() == nullptr)
- THROW_ERROR(" has unidentified preferable primitive descriptor.");
+ THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor.");
attrs.srcDims = srcMemPtr->getStaticDims();
attrs.srcStrides = srcMemPtr->getDescWithType()->getStrides();
@@ -141,7 +139,7 @@ GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs)
void GatherND::execute(const dnnl::stream& strm) {
if (!execPtr)
- THROW_ERROR("has not compiled executor.");
+ THROW_CPU_NODE_ERR("has not compiled executor.");
execPtr->exec(getSrcMemoryAtPort(GATHERND_DATA), getSrcMemoryAtPort(GATHERND_INDEXES), getDstMemoryAtPort(0));
}
diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp
index 0e25c64acfe534..7a8eb1088453c7 100644
--- a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp
+++ b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp
@@ -14,8 +14,6 @@ using namespace ov::intel_cpu::node;
using namespace dnnl::impl::cpu;
#endif // OPENVINO_ARCH_X86_64
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)
-
bool GridSample::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept {
try {
if (!ov::is_type(op)) {
diff --git a/src/plugins/intel_cpu/src/nodes/interaction.cpp b/src/plugins/intel_cpu/src/nodes/interaction.cpp
index 13c846da6e2bea..d1ffcb3546754a 100644
--- a/src/plugins/intel_cpu/src/nodes/interaction.cpp
+++ b/src/plugins/intel_cpu/src/nodes/interaction.cpp
@@ -28,8 +28,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)
-
#if defined(OPENVINO_ARCH_X86_64)
template
@@ -346,7 +344,7 @@ void Interaction::prepareParams() {
moveFeatureKernel->create_ker();
moveInteractKernel->create_ker();
} else {
- THROW_ERROR("cannot create jit eltwise kernel");
+ THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
#ifdef CPU_DEBUG_CAPS
if (prim) {
diff --git a/src/plugins/intel_cpu/src/nodes/mha.cpp b/src/plugins/intel_cpu/src/nodes/mha.cpp
index e1f4a774011dc9..43867cd99b2b01 100644
--- a/src/plugins/intel_cpu/src/nodes/mha.cpp
+++ b/src/plugins/intel_cpu/src/nodes/mha.cpp
@@ -25,8 +25,6 @@ using namespace dnnl::impl::cpu::x64;
using namespace dnnl::impl::cpu::x64::matmul;
using namespace Xbyak;
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)
-
namespace ov {
namespace intel_cpu {
namespace node {
@@ -879,7 +877,7 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne
ctx.K,
&strides);
if (status != dnnl_success) {
- THROW_ERROR("cannot be executed due to invalid brgconv params");
+ THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params");
}
ctx.is_with_amx = use_amx;
@@ -893,11 +891,11 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne
brgemm_kernel_t* brgKernel_ = nullptr;
status = brgemm_kernel_create(&brgKernel_, brgDesc);
if (status != dnnl_success) {
- THROW_ERROR("cannot be executed due to invalid brgconv params");
+ THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params");
}
brgKernel.reset(brgKernel_);
#else
- THROW_ERROR("is not supported on non-x86_64");
+ THROW_CPU_NODE_ERR("is not supported on non-x86_64");
#endif // OPENVINO_ARCH_X86_64
}
@@ -972,7 +970,7 @@ void MHA::init_brgemm_copy_b(std::unique_ptr& brgCop
#if defined(OPENVINO_ARCH_X86_64)
auto ret = create_brgemm_matmul_copy_b(brgCopyKernel, &brgCopyKernelConf);
if (ret != dnnl::impl::status_t::dnnl_success)
- THROW_ERROR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret);
+ THROW_CPU_NODE_ERR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret);
#endif // OPENVINO_ARCH_X86_64
}
@@ -1204,7 +1202,7 @@ void MHA::prepareParams() {
}
#endif // OPENVINO_ARCH_X86_64
if (!mulAddSoftmaxKernel) {
- THROW_ERROR("cannot create jit eltwise kernel");
+ THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}
@@ -1228,7 +1226,7 @@ void MHA::prepareParams() {
}
#endif // OPENVINO_ARCH_X86_64
if (!convertReorderKernel) {
- THROW_ERROR("cannot create jit eltwise kernel");
+ THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}
@@ -1255,7 +1253,7 @@ void MHA::prepareParams() {
#endif // OPENVINO_ARCH_X86_64
if (!convertTransposeKernel) {
- THROW_ERROR("cannot create jit eltwise kernel");
+ THROW_CPU_NODE_ERR("cannot create jit eltwise kernel");
}
}
@@ -1312,7 +1310,7 @@ void MHA::callBrgemm(brgemmCtx& ctx,
brgemm_kernel_execute(brgKernel.get(), 1, pin0, pin1, nullptr, pout, wsp);
}
#else
- THROW_ERROR("is not supported on non-x64 platforms");
+ THROW_CPU_NODE_ERR("is not supported on non-x64 platforms");
#endif // OPENVINO_ARCH_X86_64
}
@@ -1547,7 +1545,7 @@ void MHA::execute(const dnnl::stream& strm) {
} else if (inputPrecisions[1] == ov::element::i8) {
mhaImpl();
} else {
- THROW_ERROR("doesn't support provided input precisions");
+ THROW_CPU_NODE_ERR("doesn't support provided input precisions");
}
}
diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp
index e416781cdf69a2..13322254ab4ee1 100644
--- a/src/plugins/intel_cpu/src/nodes/normalize.cpp
+++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp
@@ -35,7 +35,6 @@ using namespace Xbyak;
#if defined(OPENVINO_ARCH_X86_64)
# define GET_OFF(field) offsetof(jit_normalize_call_args, field)
#endif
-#define THROW_ERROR(...) OPENVINO_THROW("NormalizeL2 layer with name '", getName(), "' ", __VA_ARGS__)
namespace ov {
namespace intel_cpu {
@@ -782,10 +781,10 @@ NormalizeL2::NormalizeL2(const std::shared_ptr& op, const GraphContext
}
if (inputShapes.size() != 2 || outputShapes.size() != 1)
- THROW_ERROR(" has incorrect number of input/output edges");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output edges");
if (getInputShapeAtPort(DATA).getRank() > 4 || getInputShapeAtPort(DATA).getRank() < 2) {
- THROW_ERROR("has invalid input shape. Normalize supports from 2D to 4D blobs.");
+ THROW_CPU_NODE_ERR("has invalid input shape. Normalize supports from 2D to 4D blobs.");
}
auto norm = ov::as_type_ptr(op);
@@ -825,7 +824,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() {
ov::element::f16,
ov::element::i8,
ov::element::u8)) {
- THROW_ERROR("has unsupported input precision: ", inputPrecision);
+ THROW_CPU_NODE_ERR("has unsupported input precision: ", inputPrecision);
}
if (!one_of(outputPrecision,
ov::element::f32,
@@ -833,7 +832,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() {
ov::element::f16,
ov::element::i8,
ov::element::u8)) {
- THROW_ERROR("has unsupported output precision: ", outputPrecision);
+ THROW_CPU_NODE_ERR("has unsupported output precision: ", outputPrecision);
}
attrs.input_prec = inputPrecision;
@@ -914,11 +913,11 @@ void NormalizeL2::createPrimitive() {
auto dstMemPtr = getDstMemoryAtPort(DATA);
auto srcMemPtr = getSrcMemoryAtPort(DATA);
if (!dstMemPtr)
- THROW_ERROR("can't get destination memory");
+ THROW_CPU_NODE_ERR("can't get destination memory");
if (!srcMemPtr)
- THROW_ERROR("can't get input memory");
+ THROW_CPU_NODE_ERR("can't get input memory");
if (getSelectedPrimitiveDescriptor() == nullptr)
- THROW_ERROR("has nullable preferable primitive descriptor");
+ THROW_CPU_NODE_ERR("has nullable preferable primitive descriptor");
if (!attrs.cornerCase) {
if (srcMemPtr->getDesc().hasLayoutType(LayoutType::ncsp)) {
@@ -930,7 +929,7 @@ void NormalizeL2::createPrimitive() {
} else if (srcMemPtr->getDesc().hasLayoutType(LayoutType::nspc)) {
attrs.layout = LayoutType::nspc;
} else {
- THROW_ERROR("has selected layout which is not supported");
+ THROW_CPU_NODE_ERR("has selected layout which is not supported");
}
}
@@ -972,7 +971,7 @@ void NormalizeL2::executeDynamicImpl(const dnnl::stream& strm) {
void NormalizeL2::execute(const dnnl::stream& strm) {
if (!execPtr)
- THROW_ERROR("doesn't have a compiled executor.");
+ THROW_CPU_NODE_ERR("doesn't have a compiled executor.");
const uint8_t* src_ptr = getSrcDataAtPortAs(DATA);
uint8_t* dst_ptr = getDstDataAtPortAs(DATA);
diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp
index d1a2acd05d1a7a..3bf6a47797e044 100644
--- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp
+++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp
@@ -14,8 +14,6 @@
#include "openvino/opsets/opset1.hpp"
#include "shape_inference/custom/priorbox.hpp"
-#define THROW_ERROR(...) OPENVINO_THROW("PriorBox layer with name '", getName(), "': ", __VA_ARGS__)
-
namespace ov {
namespace intel_cpu {
namespace node {
@@ -69,7 +67,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr
exist = false;
if (std::fabs(aspect_ratio_item) < std::numeric_limits::epsilon()) {
- THROW_ERROR("Aspect_ratio param can't be equal to zero");
+ THROW_CPU_NODE_ERR("has aspect_ratio param can't be equal to zero");
}
for (float _aspect_ratio : aspect_ratio) {
@@ -94,7 +92,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr
if (attrs.variance.size() == 1 || attrs.variance.size() == 4) {
for (float i : attrs.variance) {
if (i < 0) {
- THROW_ERROR("Variance must be > 0.");
+ THROW_CPU_NODE_ERR("variance must be > 0.");
}
variance.push_back(i);
@@ -102,7 +100,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr
} else if (attrs.variance.empty()) {
variance.push_back(0.1f);
} else {
- THROW_ERROR("Wrong number of variance values. Not less than 1 and more than 4 variance values.");
+ THROW_CPU_NODE_ERR("has wrong number of variance values. Not less than 1 and more than 4 variance values.");
}
}
diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp
index 859944161d48b9..0384dabc63d73c 100644
--- a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp
+++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp
@@ -15,8 +15,6 @@
#include "openvino/util/pp.hpp"
#include "utils/general_utils.h"
-#define THROW_ERROR(...) OPENVINO_THROW("SpaceToDepth layer with name '", getName(), "' ", __VA_ARGS__)
-
using namespace dnnl;
using namespace dnnl::impl;
@@ -76,11 +74,11 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
if (inputShapes.size() != 1 || outputShapes.size() != 1)
- THROW_ERROR("has incorrect number of input/output edges!");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output edges!");
auto spaceToDepth = ov::as_type_ptr(op);
if (!spaceToDepth)
- THROW_ERROR("supports only opset1");
+ THROW_CPU_NODE_ERR("supports only opset1");
const auto modeNgraph = spaceToDepth->get_mode();
if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST) {
@@ -88,21 +86,21 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte
} else if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST) {
attrs.mode = Mode::DEPTH_FIRST;
} else {
- THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph));
+ THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph));
}
attrs.blockSize = spaceToDepth->get_block_size();
if (attrs.blockSize == 0)
- THROW_ERROR("has incorrect block_size parameter is zero!");
+ THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!");
const size_t srcRank = getInputShapeAtPort(0).getRank();
const size_t dstRank = getOutputShapeAtPort(0).getRank();
if (srcRank < 3)
- THROW_ERROR("has incorrect number of input dimensions");
+ THROW_CPU_NODE_ERR("has incorrect number of input dimensions");
if (srcRank > 5)
- THROW_ERROR("doesn't support dimensions with rank greater than 5");
+ THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5");
if (srcRank != dstRank)
- THROW_ERROR("has incorrect number of input/output dimensions");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions");
attrs.nSpatialDims = srcRank - 2;
attrs.blockStep = static_cast(std::pow(attrs.blockSize, attrs.nSpatialDims));
}
@@ -164,11 +162,11 @@ void SpaceToDepth::createPrimitive() {
auto dstMemPtr = getDstMemoryAtPort(0);
auto srcMemPtr = getSrcMemoryAtPort(0);
if (!dstMemPtr)
- THROW_ERROR("has null destination memory");
+ THROW_CPU_NODE_ERR("has null destination memory");
if (!srcMemPtr)
- THROW_ERROR("has null input memory");
+ THROW_CPU_NODE_ERR("has null input memory");
if (getSelectedPrimitiveDescriptor() == nullptr)
- THROW_ERROR("has unidentified preferable primitive descriptor");
+ THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor");
const auto& memoryDesc = srcMemPtr->getDesc();
attrs.dataSize = memoryDesc.getPrecision().size();
@@ -301,7 +299,7 @@ void SpaceToDepth::SpaceToDepthExecutor::exec(const uint8_t* srcData, uint8_t* d
void SpaceToDepth::execute(const dnnl::stream& strm) {
if (!execPtr) {
- THROW_ERROR("doesn't have a compiled executor.");
+ THROW_CPU_NODE_ERR("doesn't have a compiled executor.");
}
const uint8_t* srcData = getSrcDataAtPortAs(0);
uint8_t* dstData = getDstDataAtPortAs(0);
diff --git a/src/plugins/intel_cpu/src/nodes/split.cpp b/src/plugins/intel_cpu/src/nodes/split.cpp
index 59ab2776ba884b..af8295cbe98a9e 100644
--- a/src/plugins/intel_cpu/src/nodes/split.cpp
+++ b/src/plugins/intel_cpu/src/nodes/split.cpp
@@ -19,8 +19,6 @@
#include "utils/general_utils.h"
#include "utils/ngraph_utils.hpp"
-#define THROW_ERROR(...) OPENVINO_THROW("Split layer with name '", getName(), "' ", __VA_ARGS__)
-
using namespace dnnl;
namespace ov {
@@ -74,7 +72,7 @@ Split::Split(const std::shared_ptr& op, const GraphContext::CPtr& cont
axis += inRank;
}
if (axis >= static_cast(inRank)) {
- THROW_ERROR("Split node with name '", op->get_friendly_name(), "' has invalid value of axis parameter: ", axis);
+ THROW_CPU_NODE_ERR("has invalid value of axis parameter: ", axis);
}
this->axis = axis;
}
@@ -92,14 +90,14 @@ void Split::initSupportedPrimitiveDescriptors() {
for (size_t i = 0; i < outputShapes.size(); i++) {
const auto& o_Dims = outputShapes[i].getDims();
if (dstFirstDims.size() != o_Dims.size()) {
- THROW_ERROR("only supports output blobs with equal number of dimensions");
+ THROW_CPU_NODE_ERR("only supports output blobs with equal number of dimensions");
}
for (size_t j = 0; j < dstFirstDims.size(); j++) {
if (j == axis)
continue;
if (!dimsEqualWeak(o_Dims[j], dstFirstDims[j]))
- THROW_ERROR("has incorrect output dimensions");
+ THROW_CPU_NODE_ERR("has incorrect output dimensions");
}
}
@@ -256,7 +254,7 @@ void Split::createPrimitive() {
void Split::prepareParams() {
const auto& srcMemPtr = getSrcMemoryAtPort(0);
if (!srcMemPtr || !srcMemPtr->isDefined()) {
- THROW_ERROR("has undefined input memory");
+ THROW_CPU_NODE_ERR("has undefined input memory");
}
if (!constSplitLengths) {
@@ -271,7 +269,7 @@ void Split::prepareParams() {
for (size_t port = 0; port < outputShapes.size(); ++port) {
const auto& outMemPtr = this->getDstMemoryAtPort(port);
if (!outMemPtr || !outMemPtr->isDefined()) {
- THROW_ERROR("has undefined destination memory");
+ THROW_CPU_NODE_ERR("has undefined destination memory");
}
if (outMemPtr->getShape().hasZeroDims()) {
@@ -301,7 +299,7 @@ void Split::execute(const dnnl::stream& strm) {
}
if (dstMemPtrs.empty())
- THROW_ERROR("Output data pointers have not been initialized.");
+ THROW_CPU_NODE_ERR("Output data pointers have not been initialized.");
const auto& srcMem = getParentEdgeAt(0)->getMemory();
@@ -323,7 +321,7 @@ void Split::initOptimalPrimitiveDescriptor() {
Node::initOptimalPrimitiveDescriptor();
auto selected_pd = getSelectedPrimitiveDescriptor();
if (selected_pd == nullptr)
- THROW_ERROR("Preferable primitive descriptor is not set.");
+ THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set.");
auto config = selected_pd->getConfig();
canUseOptimizedNspc2Ncsp = false;
@@ -487,7 +485,7 @@ std::vector Split::getRawDstMemPtrs() const {
for (size_t i = 0; i < dstMemPtrs.size(); ++i) {
result[i] = dstMemPtrs[i].second->getDataAs();
if (!result[i]) {
- THROW_ERROR("can't get child edge indx ", dstMemPtrs[i].first, " data.");
+ THROW_CPU_NODE_ERR("can't get child edge indx ", dstMemPtrs[i].first, " data.");
}
}
return result;
diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp
index fbd6361eca53fc..cffde3a81d23dd 100644
--- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp
+++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp
@@ -25,8 +25,6 @@ namespace ov {
namespace intel_cpu {
namespace node {
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " layer with name '", getName(), "' ", __VA_ARGS__)
-
static NodeConfig make_plain_config(const std::shared_ptr& op) {
NodeConfig config;
@@ -435,7 +433,7 @@ TensorIterator::TensorIterator(const std::shared_ptr& op, const GraphC
void TensorIterator::getSupportedDescriptors() {
auto tiOp = ov::as_type_ptr(ngraphOp);
if (!tiOp) {
- THROW_ERROR("cannot be cast to ov::op::util::SubGraphOp");
+ THROW_CPU_NODE_ERR("cannot be cast to ov::op::util::SubGraphOp");
}
const std::shared_ptr body = tiOp->get_function();
sub_graph.CreateGraph(body, context);
@@ -519,7 +517,7 @@ void TensorIterator::getSupportedDescriptors() {
-1,
1});
} else {
- THROW_ERROR("has incorrect type of the input description.");
+ THROW_CPU_NODE_ERR("has incorrect type of the input description.");
}
}
@@ -537,7 +535,7 @@ void TensorIterator::getSupportedDescriptors() {
} else if (auto ti = ov::as_type_ptr(ngraphOp)) {
algorithm = Algorithm::TensorIteratorCommon;
} else {
- THROW_ERROR("isn't supported!");
+ THROW_CPU_NODE_ERR("isn't supported!");
}
}
@@ -894,11 +892,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
const auto getNumIterations = [this](const PortMap& rule, const std::vector& dimensions) -> int {
const auto axis = rule.axis;
if (axis < 0 || static_cast(axis) >= dimensions.size()) {
- THROW_ERROR(": Invalid \"axis\" value in an iteration component: ",
- rule.axis,
- ", dimensions number = ",
- dimensions.size(),
- " (out of range)");
+ THROW_CPU_NODE_ERR(": Invalid \"axis\" value in an iteration component: ",
+ rule.axis,
+ ", dimensions number = ",
+ dimensions.size(),
+ " (out of range)");
}
const auto space = dimensions[axis];
const int start = static_cast((rule.start < 0 ? (space + 1) : 0) + rule.start);
@@ -906,7 +904,9 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
const auto stride = rule.stride;
if (stride == 0) {
- THROW_ERROR(": Invalid \"stride\" value in an iteration component: ", rule.stride, " (infinite loop)");
+ THROW_CPU_NODE_ERR(": Invalid \"stride\" value in an iteration component: ",
+ rule.stride,
+ " (infinite loop)");
}
const auto step = std::abs(stride);
@@ -914,21 +914,21 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
const auto dst = stride < 0 ? start : end;
const auto length = dst - src;
if (src < 0 || src >= dst || dst > static_cast(space) || length < step) {
- THROW_ERROR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component",
- ": \"start\" = ",
- rule.start,
- ", \"stride\" = ",
- rule.stride,
- ", \"end\" = ",
- rule.end);
+ THROW_CPU_NODE_ERR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component",
+ ": \"start\" = ",
+ rule.start,
+ ", \"stride\" = ",
+ rule.stride,
+ ", \"end\" = ",
+ rule.end);
}
if (length % step != 0) {
- THROW_ERROR(": Each iteration must be the same size: length (",
- length,
- ") is not divisible by step (",
- step,
- ")");
+ THROW_CPU_NODE_ERR(": Each iteration must be the same size: length (",
+ length,
+ ") is not divisible by step (",
+ step,
+ ")");
}
return static_cast(length / step);
@@ -943,11 +943,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
}
if (rule.from < 0 || rule.from >= static_cast(inputShapes.size())) {
- THROW_ERROR(": Invalid \"from\" value: \"from\" = ",
- rule.from,
- " inputs number = ",
- inputShapes.size(),
- " (out of range)");
+ THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ",
+ rule.from,
+ " inputs number = ",
+ inputShapes.size(),
+ " (out of range)");
}
const auto currentNumIterations = getNumIterations(rule, dims);
@@ -955,10 +955,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
isDefault = false;
numIterations = currentNumIterations;
} else if (numIterations != currentNumIterations) {
- THROW_ERROR(": There are at least two different iterations numbers: ",
- numIterations,
- " and ",
- currentNumIterations);
+ THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ",
+ numIterations,
+ " and ",
+ currentNumIterations);
}
}
@@ -972,11 +972,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
continue;
if (rule.from < 0 || rule.from >= static_cast(outputShapes.size())) {
- THROW_ERROR(": Invalid \"from\" value: \"from\" = ",
- rule.from,
- " inputs number = ",
- outputShapes.size(),
- " (out of range)");
+ THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ",
+ rule.from,
+ " inputs number = ",
+ outputShapes.size(),
+ " (out of range)");
}
const auto currentNumIterations = getNumIterations(rule, dims);
@@ -984,10 +984,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap,
isDefault = false;
numIterations = currentNumIterations;
} else if (numIterations != currentNumIterations) {
- THROW_ERROR(": There are at least two different iterations numbers: ",
- numIterations,
- " and ",
- currentNumIterations);
+ THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ",
+ numIterations,
+ " and ",
+ currentNumIterations);
}
}
diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp
index 391e1967a8c682..5a5888090ef6ee 100644
--- a/src/plugins/intel_cpu/src/nodes/unique.cpp
+++ b/src/plugins/intel_cpu/src/nodes/unique.cpp
@@ -14,8 +14,6 @@
using namespace ov::intel_cpu;
using namespace ov::intel_cpu::node;
-#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__)
-
bool Unique::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept {
try {
if (!ov::is_type(op)) {
@@ -41,7 +39,7 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co
}
if (!one_of(op->get_input_size(), 1u, 2u) || op->get_output_size() != 4)
- THROW_ERROR("has incorrect number of input/output edges.");
+ THROW_CPU_NODE_ERR("has incorrect number of input/output edges.");
for (int i = 0; i < 4; i++) {
definedOutputs[i] = !op->get_output_target_inputs(i).empty();
@@ -55,8 +53,8 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co
axis += op->get_input_partial_shape(IN_DATA).rank().get_length();
}
if (axis < 0 || axis >= op->get_input_partial_shape(IN_DATA).rank().get_length()) {
- THROW_ERROR("has invalid axis value: ",
- ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]);
+ THROW_CPU_NODE_ERR("has invalid axis value: ",
+ ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]);
}
} else {
flattened = true;
@@ -93,18 +91,18 @@ void Unique::createPrimitive() {
void Unique::prepareParams() {
auto dataMemPtr = getSrcMemoryAtPort(IN_DATA);
if (!dataMemPtr) {
- THROW_ERROR(" has null input data memory.");
+ THROW_CPU_NODE_ERR("has null input data memory.");
}
for (int i = 0; i < 4; i++) {
if (definedOutputs[i]) {
auto dstMemPtr = getDstMemoryAtPort(i);
if (!dstMemPtr) {
- THROW_ERROR(" has null output memory at port ", i);
+ THROW_CPU_NODE_ERR("has null output memory at port ", i);
}
}
}
if (getSelectedPrimitiveDescriptor() == nullptr) {
- THROW_ERROR(" has unidentified preferable primitive descriptor.");
+ THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor.");
}
size_t srcLen = 1;
From f46e3e9d143a18316e14f6d632fde318e329607f Mon Sep 17 00:00:00 2001
From: Xuejun Zhai
Date: Mon, 20 Jan 2025 20:15:57 +0800
Subject: [PATCH 06/35] [Hetero][Func Test] only the nightly tests can use hw
plugin (#28545)
### Details:
- *item1*
- *...*
### Tickets:
- *ticket-id*
Signed-off-by: Zhai, Xuejun
---
.../behavior/ov_plugin/core_threading_tests.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp
index 39dc277f25a11e..b0152a06b8ab0f 100644
--- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp
+++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp
@@ -7,7 +7,7 @@
namespace {
const Params params[] = {
std::tuple{ov::test::utils::DEVICE_HETERO,
- {{ov::device::priorities.name(), ov::test::utils::DEVICE_CPU}}},
+ {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}}},
};
} // namespace
@@ -19,4 +19,4 @@ INSTANTIATE_TEST_SUITE_P(nightly_HETERO,
INSTANTIATE_TEST_SUITE_P(HETERO_Streams,
CoreThreadingTestsWithIter,
testing::Combine(testing::ValuesIn(params), testing::Values(4), testing::Values(50)),
- CoreThreadingTestsWithIter::getTestCaseName);
\ No newline at end of file
+ CoreThreadingTestsWithIter::getTestCaseName);
From 96c22330d5aa953752c22942a00e3032e4b1c9f0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 12:24:33 +0000
Subject: [PATCH 07/35] Bump pytest-dependency from 0.5.1 to 0.6.0 in /tests
(#28549)
Bumps [pytest-dependency](https://github.com/RKrahl/pytest-dependency)
from 0.5.1 to 0.6.0.
Changelog
Sourced from pytest-dependency's
changelog.
0.6.0 (2023-12-31)
Documentation
-------------
[#39](https://github.com/RKrahl/pytest-dependency/issues/39)
,
[#41](https://github.com/RKrahl/pytest-dependency/issues/41)
,
[#59](https://github.com/RKrahl/pytest-dependency/issues/59)
_:
Review documentation
Incompatible changes
- Drop support for Python 2.
Bug fixes and minor changes
[#40](https://github.com/RKrahl/pytest-dependency/issues/40)
_:
add logging.
[#50](https://github.com/RKrahl/pytest-dependency/issues/50)
,
[#51](https://github.com/RKrahl/pytest-dependency/issues/51)
:
test suite incompatibility with pytest 6.2.0.
[#58](https://github.com/RKrahl/pytest-dependency/issues/58)
_:
declare the type of automark_dependency ini-option correctly
as bool.
Internal
[#75](https://github.com/RKrahl/pytest-dependency/issues/75)
_:
review build tool chain.
.. _#39:
RKrahl/pytest-dependency#39
.. _#40:
RKrahl/pytest-dependency#40
.. _#41:
RKrahl/pytest-dependency#41
.. _#50:
RKrahl/pytest-dependency#50
.. _#51:
RKrahl/pytest-dependency#51
.. _#58:
RKrahl/pytest-dependency#58
.. _#59:
RKrahl/pytest-dependency#59
.. _#75:
RKrahl/pytest-dependency#75
Commits
2cae589
Merge branch 'develop'
def647e
Prepare release 0.6.0
2baac9b
Merge branch 'doc' into develop
38baf8c
Update changelog
e2edf54
Explicitely set language to 'en'
f11cf56
Rewrite introduction to the debugging guide
346a344
Move the changelog to the end, after the API reference
463227e
Review README and bump copyright year
eb48f32
Fixup 695ea27: trailing whitespace
695ea27
Update install instructions
- Additional commits viewable in compare
view
[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-dependency&package-manager=pip&previous-version=0.5.1&new-version=0.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
tests/constraints.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/constraints.txt b/tests/constraints.txt
index a806b7dfb47c18..45aac9051f2fd2 100644
--- a/tests/constraints.txt
+++ b/tests/constraints.txt
@@ -17,7 +17,7 @@ paddlepaddle==2.6.2
protobuf>=3.18.1,<6.0.0
py>=1.9.0
pytest>=5.0,<8.4
-pytest-dependency==0.5.1
+pytest-dependency==0.6.0
pytest-html==4.1.1
pytest-timeout==2.3.1
kornia==0.8.0
From 0fce5f3a17fc0d782e6468d5e048d6c449caa453 Mon Sep 17 00:00:00 2001
From: Pawel Raasz
Date: Mon, 20 Jan 2025 13:34:55 +0100
Subject: [PATCH 08/35] [cpu] Remove custom shape inference factories (#27924)
### Details:
- Remove custom shape inference factories from CPU nodes.
### Related PR
- #27770
### Tickets:
- CVS-118704
---------
Signed-off-by: Raasz, Pawel
Co-authored-by: Michal Lukaszewski
Co-authored-by: Maksim Kutakov
---
src/frontends/tensorflow/src/frontend.cpp | 11 +++--
src/plugins/intel_cpu/src/nodes/deconv.cpp | 37 ++++++++++++++---
src/plugins/intel_cpu/src/nodes/eye.cpp | 16 +-------
src/plugins/intel_cpu/src/nodes/reference.cpp | 30 +++++++-------
src/plugins/intel_cpu/src/nodes/reference.h | 1 +
.../src/shape_inference/shape_inference.cpp | 41 ++++---------------
.../src/shape_inference/shape_inference.hpp | 1 -
7 files changed, 61 insertions(+), 76 deletions(-)
diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp
index 006a4e22e06304..e4e35c42b08b35 100644
--- a/src/frontends/tensorflow/src/frontend.cpp
+++ b/src/frontends/tensorflow/src/frontend.cpp
@@ -466,12 +466,11 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr
// recommend to use openvino-tokenizers if some unconverted operations from tokenizers are met
if (unsupported_ops_from_tokenizers.size() > 0) {
- exception_message
- << "\nEncountered unconverted operation(s) for which openvino-tokenizers package "
- "provides conversion extension(s): "
- << unsupported_ops_from_tokenizers
- << ". Install OpenVINO Tokenizers, refer to the documentation: "
- "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n";
+ exception_message << "\nEncountered unconverted operation(s) for which openvino-tokenizers package "
+ "provides conversion extension(s): "
+ << unsupported_ops_from_tokenizers
+ << ". Install OpenVINO Tokenizers, refer to the documentation: "
+ "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n";
}
}
diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp
index 886497bd57cc29..4090244a17ec32 100644
--- a/src/plugins/intel_cpu/src/nodes/deconv.cpp
+++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp
@@ -125,16 +125,43 @@ bool DeconvKey::operator==(const DeconvKey& rhs) const {
* input. Since in case it exists, plugin should pass the input data to the shape inference function.
*
*/
-class DeconfolutionShapeInferFactory : public ShapeInferFactory {
+class DeconvolutionShapeInferFactory : public ShapeInferFactory {
public:
- DeconfolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {}
+ DeconvolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {}
ShapeInferPtr makeShapeInfer() const override {
- const auto port_mask = (m_op->get_input_size() > 2) ? PortMask(2) : EMPTY_PORT_MASK;
- return make_shape_inference(m_op, port_mask);
+ return std::make_shared(m_op);
}
private:
+ class DeconvolutionShapeInfer : public IShapeInfer {
+ public:
+ DeconvolutionShapeInfer(const std::shared_ptr& op)
+ : m_shape_infer(make_shape_inference(op)),
+ m_port_mask((op->get_input_size() > 2) ? PortMask(2) : EMPTY_PORT_MASK) {}
+
+ Result infer(const std::vector>& input_shapes,
+ const std::unordered_map& data_dependency) override {
+ return m_shape_infer->infer(input_shapes, data_dependency);
+ }
+
+ const ov::CoordinateDiff& get_pads_begin() override {
+ return m_shape_infer->get_pads_begin();
+ }
+
+ const ov::CoordinateDiff& get_pads_end() override {
+ return m_shape_infer->get_pads_end();
+ }
+
+ port_mask_t get_port_mask() const override {
+ return m_port_mask;
+ };
+
+ private:
+ ShapeInferPtr m_shape_infer;
+ const port_mask_t m_port_mask;
+ };
+
std::shared_ptr m_op;
};
} // namespace
@@ -165,7 +192,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr&
}
Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr& context)
- : Node(op, context, DeconfolutionShapeInferFactory(op)) {
+ : Node(op, context, DeconvolutionShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage))
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp
index ef4995a87fd492..411a77260aa7d6 100644
--- a/src/plugins/intel_cpu/src/nodes/eye.cpp
+++ b/src/plugins/intel_cpu/src/nodes/eye.cpp
@@ -29,22 +29,8 @@ bool Eye::isSupportedOperation(const std::shared_ptr& op, std::s
return true;
}
-namespace {
-class EyeShapeInferFactory : public ShapeInferFactory {
-public:
- EyeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {}
- ShapeInferPtr makeShapeInfer() const override {
- return (m_op->get_input_size() == 4) ? make_shape_inference(m_op)
- : make_shape_inference(m_op, PortMask(Eye::ROWS_NUM, Eye::COLS_NUM));
- }
-
-private:
- std::shared_ptr m_op;
-};
-} // namespace
-
Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr& context)
- : Node(op, context, EyeShapeInferFactory(op)) {
+ : Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp
index c7f1bbe30ff574..3283f7a43253ab 100644
--- a/src/plugins/intel_cpu/src/nodes/reference.cpp
+++ b/src/plugins/intel_cpu/src/nodes/reference.cpp
@@ -12,22 +12,10 @@
namespace ov {
namespace intel_cpu {
-class ReferenceShapeInferFactory : public ShapeInferFactory {
-public:
- ReferenceShapeInferFactory(std::shared_ptr op) : m_op{std::move(op)} {}
-
- ShapeInferPtr makeShapeInfer() const override {
- return make_shape_inference(m_op, FULL_PORT_MASK);
- }
-
-private:
- std::shared_ptr m_op;
-};
-
namespace node {
Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, std::string errorMessage)
- : Node(op, context, ReferenceShapeInferFactory(op)),
+ : Node(op, context, NgraphShapeInferFactory(op)),
ovCoreNode(op),
additionalErrorMessage(std::move(errorMessage)) {
if (!op->has_evaluate()) {
@@ -61,7 +49,9 @@ void Reference::initSupportedPrimitiveDescriptors() {
addSupportedPrimDesc(inputConfigurators, outputConfigurators, impl_desc_type::ref);
}
-void Reference::createPrimitive() {}
+void Reference::createPrimitive() {
+ hasOutputShapeDataDependency = isDynamicNode() && outputShapeDataDependency();
+}
void Reference::execute(const dnnl::stream& strm) {
auto inputs = prepareInputs();
@@ -72,6 +62,14 @@ void Reference::execute(const dnnl::stream& strm) {
}
void Reference::executeDynamicImpl(const dnnl::stream& strm) {
+ if (!hasOutputShapeDataDependency) {
+ // if there is no data dependency for the output shape, we can execute the operation as is, similar to the
+ // static case, since the shapes are already calculated
+ execute(strm);
+ return;
+ }
+
+ // if there is data dependency, we need to perform shape inference first
auto inputs = prepareInputs();
ov::TensorVector outputs;
auto result = Node::shapeInfer();
@@ -125,7 +123,9 @@ bool Reference::created() const {
}
bool Reference::needShapeInfer() const {
- return false;
+ // If there is data dependency for the output shape, let's assume the node has internal dynamism (in general case),
+ // so we postpone the shape inference until the actual execution
+ return !hasOutputShapeDataDependency && Node::needShapeInfer();
}
ov::TensorVector Reference::prepareInputs() const {
diff --git a/src/plugins/intel_cpu/src/nodes/reference.h b/src/plugins/intel_cpu/src/nodes/reference.h
index 782c55716506a8..f0a37ae6529f5f 100644
--- a/src/plugins/intel_cpu/src/nodes/reference.h
+++ b/src/plugins/intel_cpu/src/nodes/reference.h
@@ -36,6 +36,7 @@ class Reference : public Node {
private:
const std::shared_ptr ovCoreNode;
const std::string additionalErrorMessage;
+ bool hasOutputShapeDataDependency = false; // flag to cache the output shape data dependency check result
};
} // namespace node
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
index 5ba7e7173792fd..ba7832aef71fab 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp
@@ -234,8 +234,7 @@ class ShapeInferFallback : public ShapeInferBase {
ov::optional> infer(const std::vector& input_shapes,
const ov::ITensorAccessor& tensor_accessor) override {
- auto op = m_node.get();
- std::vector output_shapes;
+ const auto op = m_node.get();
std::shared_ptr local_op;
ov::OutputVector new_inputs;
@@ -252,7 +251,7 @@ class ShapeInferFallback : public ShapeInferBase {
local_op = op->clone_with_new_inputs(new_inputs);
local_op->validate_and_infer_types();
- output_shapes.resize(local_op->get_output_size());
+ std::vector output_shapes(local_op->get_output_size());
for (size_t i = 0; i < output_shapes.size(); ++i) {
const auto& partial_shape = local_op->get_output_partial_shape(i);
@@ -265,6 +264,11 @@ class ShapeInferFallback : public ShapeInferBase {
return {std::move(output_shapes)};
}
+
+ port_mask_t get_port_mask() const override {
+ // For fallback return full port mask to try get data for all node's inputs
+ return FULL_PORT_MASK;
+ }
};
template
@@ -610,34 +614,6 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{
#undef _OV_OP_SHAPE_INFER_MASK_REG
#undef _OV_OP_SHAPE_INFER_VA_REG
-class ShapeInferCustomMask : public IShapeInfer {
-public:
- ShapeInferCustomMask(ShapeInferPtr shape_infer, port_mask_t port_mask)
- : m_shape_infer{std::move(shape_infer)},
- m_port_mask{port_mask} {}
-
- Result infer(const std::vector>& input_shapes,
- const std::unordered_map& data_dependency) override {
- return m_shape_infer->infer(input_shapes, data_dependency);
- }
-
- const ov::CoordinateDiff& get_pads_begin() override {
- return m_shape_infer->get_pads_begin();
- }
-
- const ov::CoordinateDiff& get_pads_end() override {
- return m_shape_infer->get_pads_end();
- }
-
- port_mask_t get_port_mask() const override {
- return m_port_mask;
- }
-
-private:
- const ShapeInferPtr m_shape_infer;
- const port_mask_t m_port_mask;
-};
-
std::shared_ptr make_shape_inference(std::shared_ptr op) {
if (auto shape_infer = IStaticShapeInferFactory::make(op->get_type_info(), op)) {
return shape_infer;
@@ -652,8 +628,5 @@ std::shared_ptr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask) {
- return std::make_shared(make_shape_inference(std::move(op)), port_mask);
-}
} // namespace intel_cpu
} // namespace ov
diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
index 21b36e76ddd9a7..cb937127b219f0 100644
--- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
+++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp
@@ -32,6 +32,5 @@ class IStaticShapeInfer : public IShapeInfer {
};
std::shared_ptr make_shape_inference(std::shared_ptr op);
-ShapeInferPtr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask);
} // namespace intel_cpu
} // namespace ov
From 1025c76d098c435972bb42ff43d3262a0d82c7cf Mon Sep 17 00:00:00 2001
From: Michal Miotk
Date: Mon, 20 Jan 2025 15:02:31 +0100
Subject: [PATCH 09/35] [GPU] added missing info about conv autopad (#28552)
### Details:
- fix yolov3 dynamic inference
### Tickets:
- CVS-157866
---
.../src/graph/graph_optimizer/prepare_primitive_fusing.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
index 2120a1308ea290..ce5333f95a1b59 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
@@ -399,7 +399,8 @@ void prepare_primitive_fusing::fuse_bias(program &p) {
desc->padding_begin,
desc->padding_end,
desc->grouped_weights_shape,
- conv.get_output_layout().data_type);
+ conv.get_output_layout().data_type,
+ desc->auto_pad);
// Copy transposed flag to new prim as convolution node might be produced by deconv -> conv replacement before this pass
conv_with_bias_prim->transposed = desc->transposed;
From 2999477ad77cad3de4aadb5f56996bf2f7f5dd43 Mon Sep 17 00:00:00 2001
From: Mikhail Ryzhov
Date: Mon, 20 Jan 2025 20:52:25 +0100
Subject: [PATCH 10/35] [GHA] Save JS artifacts (#28521)
### Details:
- JS package is needed to build extensions using provider action
-
### Tickets:
- *ticket-id*
---
.github/workflows/job_build_linux.yml | 9 ++++++++-
.github/workflows/job_build_windows.yml | 15 ++++++++++++++-
.github/workflows/job_openvino_js.yml | 9 +++++++--
.github/workflows/windows_vs2019_release.yml | 11 ++++++++---
4 files changed, 37 insertions(+), 7 deletions(-)
diff --git a/.github/workflows/job_build_linux.yml b/.github/workflows/job_build_linux.yml
index c56de5872cc2df..d1dfd0504ae194 100644
--- a/.github/workflows/job_build_linux.yml
+++ b/.github/workflows/job_build_linux.yml
@@ -234,6 +234,11 @@ jobs:
-DENABLE_WHEEL=OFF
cmake --build ${BUILD_DIR} --parallel $(nproc)
cmake --install ${BUILD_DIR} --prefix ${INSTALL_DIR_JS}
+
+ - name: Pack openvino_js_package
+ if: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }}
+ run: tar -cvf - * | pigz > ${BUILD_DIR}/openvino_js_package.tar.gz
+ working-directory: ${{ env.INSTALL_DIR_JS }}
- name: Build RPM packages
if: ${{ inputs.build-rpm-packages }}
@@ -279,7 +284,7 @@ jobs:
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
with:
name: openvino_js_package
- path: ${{ env.INSTALL_DIR_JS }}
+ path: ${{ env.BUILD_DIR }}/openvino_js_package.tar.gz
if-no-files-found: 'error'
- name: Upload openvino developer package
@@ -333,8 +338,10 @@ jobs:
${{ env.BUILD_DIR }}/openvino_tests.tar.gz
${{ env.BUILD_DIR }}/deb
${{ env.MANIFEST_PATH }}
+ ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.tar.gz', env.BUILD_DIR) || '' }}
${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }}
storage_dir: ${{ env.PRODUCT_TYPE }}
storage_root: ${{ env.ARTIFACTS_SHARE }}
env:
STORE_WHEELS: ${{ inputs.os != 'debian_10' && inputs.arch != 'arm' }}
+ STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }}
diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml
index d5d42ffcfea8d2..f0c150c4ac4db4 100644
--- a/.github/workflows/job_build_windows.yml
+++ b/.github/workflows/job_build_windows.yml
@@ -265,6 +265,17 @@ jobs:
-DENABLE_WHEEL=OFF
cmake --build ${{ env.BUILD_DIR }} --parallel $ENV:NUMBER_OF_PROCESSORS
cmake --install ${{ env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_DIR_JS }}
+
+ - name: Pack JS Artifacts
+ if: ${{ fromJSON(inputs.affected-components).JS_API }}
+ run: |
+ $file = Get-ChildItem -Path "${{ env.INSTALL_DIR_JS }}"
+ $compress = @{
+ Path = $file
+ CompressionLevel = "Optimal"
+ DestinationPath = "${{ env.BUILD_DIR }}/openvino_js_package.zip"
+ }
+ Compress-Archive @compress
#
# Upload build artifacts and logs
@@ -297,7 +308,7 @@ jobs:
uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
with:
name: openvino_js_package
- path: ${{ env.INSTALL_DIR_JS }}
+ path: ${{ env.BUILD_DIR }}/openvino_js_package.zip
if-no-files-found: 'error'
- name: Store artifacts to a shared drive
@@ -309,8 +320,10 @@ jobs:
${{ env.BUILD_DIR }}/openvino_package.zip
${{ env.BUILD_DIR }}/openvino_tests.zip
${{ env.MANIFEST_PATH }}
+ ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.zip', env.BUILD_DIR) || '' }}
${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }}
storage_dir: ${{ env.PRODUCT_TYPE }}
storage_root: ${{ env.ARTIFACTS_SHARE }}
env:
STORE_WHEELS: ${{ inputs.build-type != 'Debug' }}
+ STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }}
diff --git a/.github/workflows/job_openvino_js.yml b/.github/workflows/job_openvino_js.yml
index fd04d8842daae7..dbee8511c4187b 100644
--- a/.github/workflows/job_openvino_js.yml
+++ b/.github/workflows/job_openvino_js.yml
@@ -45,11 +45,16 @@ jobs:
echo "OPENVINO_JS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js" >> "$GITHUB_ENV"
echo "OPENVINO_JS_LIBS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js/node/bin" >> "$GITHUB_ENV"
- - name: Download OpenVINO JS package
+ - name: Download OpenVINO artifacts (JS)
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
- name: openvino_js_package
+ pattern: openvino_[js]*
path: ${{ env.OPENVINO_JS_LIBS_DIR }}
+ merge-multiple: true
+
+ - name: Extract OpenVINO packages
+ run: pigz -dc openvino_js_package.tar.gz | tar -xf - -C ${OPENVINO_JS_LIBS_DIR}
+ working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }}
- name: Setup Node ${{ env.NODE_VERSION }}
if: runner.os != 'Linux' # Node is already installed in the Docker image
diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml
index 5708b529f25acc..92d826de1d8394 100644
--- a/.github/workflows/windows_vs2019_release.yml
+++ b/.github/workflows/windows_vs2019_release.yml
@@ -192,12 +192,17 @@ jobs:
sparse-checkout: |
src/bindings/js
path: 'openvino'
-
- - name: Download OpenVINO js package
+
+ - name: Download OpenVINO artifacts (JS)
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
- name: openvino_js_package
+ pattern: openvino_[js]*
path: ${{ env.OPENVINO_JS_LIBS_DIR }}
+ merge-multiple: true
+
+ - name: Extract OpenVINO packages
+ run: Expand-Archive openvino_js_package.zip -DestinationPath .
+ working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }}
- name: Setup Node ${{ env.NODE_VERSION }}
uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
From bb78f44476bb1701c4982423588f4472382dc140 Mon Sep 17 00:00:00 2001
From: Vladislav Golubev
Date: Mon, 20 Jan 2025 20:55:35 +0100
Subject: [PATCH 11/35] [LPT] Fix medium static code analyzer issues (#28483)
### Tickets:
- *CVS-1521493*
- *CVS-130703*
- *CVS-121616*
- *CVS-121618*
---
.../common/fake_quantize_dequantization.hpp | 3 ++-
.../quantization_granularity_attribute.hpp | 2 +-
.../src/assign_and_read_value.cpp | 17 ++++-------------
.../low_precision_transformations/src/clamp.cpp | 3 ++-
.../src/eliminate_fake_quantize.cpp | 2 +-
.../src/fake_quantize_dequantization.cpp | 3 ---
.../src/markup_quantization_granularity.cpp | 6 +++---
.../src/network_helper.cpp | 1 +
.../src/pull_reshape_through_dequantization.cpp | 6 +++---
.../pull_transpose_through_dequantization.cpp | 2 +-
10 files changed, 18 insertions(+), 27 deletions(-)
diff --git a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp
index 1035e88ed1d0f0..0d16dbba891b61 100644
--- a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp
@@ -50,7 +50,8 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDequantization {
const std::shared_ptr& elementwise,
std::shared_ptr& constant);
- size_t channelDimIndex;
+ // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1
+ size_t channelDimIndex = 1ul;
Output data;
std::shared_ptr convert;
std::shared_ptr subtract;
diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp
index e74f601f4bd4de..c43d061fb455b3 100644
--- a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp
@@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API QuantizationGranularityAttribute : public ov::Runti
};
QuantizationGranularityAttribute() : granularity(Granularity::PerChannel) {}
- QuantizationGranularityAttribute(const Granularity granularity) : granularity(granularity) {}
+ QuantizationGranularityAttribute(const Granularity& granularity) : granularity(granularity) {}
bool operator==(const QuantizationGranularityAttribute& attribute) const {
return this->granularity == attribute.granularity;
diff --git a/src/common/low_precision_transformations/src/assign_and_read_value.cpp b/src/common/low_precision_transformations/src/assign_and_read_value.cpp
index 27b79e4d347102..e65e35890c0600 100644
--- a/src/common/low_precision_transformations/src/assign_and_read_value.cpp
+++ b/src/common/low_precision_transformations/src/assign_and_read_value.cpp
@@ -20,31 +20,22 @@ namespace low_precision {
AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params) :
LayerTransformation(params), model(model) {
MATCHER_SCOPE(AssignAndReadValueTransformation);
- auto assign3 = pattern::wrap_type({ pattern::wrap_type() });
- auto assign6 = pattern::wrap_type({ pattern::wrap_type() });
+ auto assign_m = pattern::wrap_type({ pattern::wrap_type() });
ov::graph_rewrite_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) {
- const auto& opsMap = m.get_pattern_value_map();
- auto op = m.get_match_root();
- auto assignIt = opsMap.find(assign3);
- if (assignIt == opsMap.end()) {
- assignIt = opsMap.find(assign6);
- }
- const auto assign = assignIt->second.get_node_shared_ptr();
+ const auto assign = m.get_match_root();
// check that we have ReadValue as the first dependency
if (assign->get_control_dependencies().empty()) {
return false;
}
- if (transformation_callback(op)) {
+ if (transformation_callback(assign)) {
return false;
}
return transform(*context, m);
};
- auto m = std::make_shared(
- std::make_shared(OutputVector{ assign3, assign6 }),
- matcher_name);
+ auto m = std::make_shared(assign_m, matcher_name);
this->register_matcher(m, callback);
}
diff --git a/src/common/low_precision_transformations/src/clamp.cpp b/src/common/low_precision_transformations/src/clamp.cpp
index 80748f549bf1ba..89150e81470bce 100644
--- a/src/common/low_precision_transformations/src/clamp.cpp
+++ b/src/common/low_precision_transformations/src/clamp.cpp
@@ -72,7 +72,8 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa
replace_node_update_name(newClamp, replacement);
- element::Type outputClampType = dequantization.multiply ?
+ OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration");
+ const auto outputClampType = dequantization.multiply ?
dequantization.multiply->get_output_element_type(0) :
dequantization.subtract->get_output_element_type(0);
ov::pass::low_precision::NetworkHelper::setOutDataPrecision(replacement, outputClampType);
diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp
index cb5d9270a43768..1a09d9914de3bf 100644
--- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp
+++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp
@@ -51,7 +51,7 @@ bool check_interval(const std::shared_ptr& fq,
const std::shared_ptr& constant,
const float value,
const float max_diff,
- const bool exact_comparison) noexcept {
+ const bool exact_comparison) {
bool need_to_check_intervals = false;
const auto& constant_values = constant->cast_vector();
for (const auto constant_value : constant_values) {
diff --git a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp
index a96a5032b5fef9..7246c9869ce7d8 100644
--- a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp
+++ b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp
@@ -32,9 +32,6 @@ FakeQuantizeDequantization::FakeQuantizeDequantization(
subtractConstant(subtractConstant),
multiply(multiply),
multiplyConstant(multiplyConstant) {
- // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1
- channelDimIndex = 1ul;
-
const auto rank = data.get_partial_shape().rank();
if (rank.is_static()) {
std::string data_src_type = data.get_node()->get_type_name();
diff --git a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp
index b9d5ac2ec4dead..f59aca3498c9f0 100644
--- a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp
+++ b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp
@@ -30,7 +30,7 @@ ov::pass::low_precision::MarkupQuantizationGranularity::MarkupQuantizationGranul
bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const std::shared_ptr& f) {
RUN_ON_FUNCTION_SCOPE(MarkupPerTensorQuantization);
auto setRestriction = [](const std::shared_ptr& node, const std::vector& restrictedPorts) {
- auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity granularity){
+ auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity& granularity){
auto &rt = input.get_rt_info();
rt.emplace(QuantizationGranularityAttribute::get_type_info_static(), QuantizationGranularityAttribute(granularity));
};
@@ -43,14 +43,14 @@ bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const
}
} else {
// markup specific ports
- for (const auto item : restrictedPorts) {
+ for (const auto& item : restrictedPorts) {
Input input = node->input(item.port);
createAttribute(input, item.granularity);
}
}
};
- for (const std::shared_ptr& node : f->get_ordered_ops()) {
+ for (const auto& node : f->get_ordered_ops()) {
if (node->get_input_size() == 0) {
continue;
}
diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp
index e57fdcfb1b8e81..afb7e19c13e7ad 100644
--- a/src/common/low_precision_transformations/src/network_helper.cpp
+++ b/src/common/low_precision_transformations/src/network_helper.cpp
@@ -622,6 +622,7 @@ std::shared_ptr NetworkHelper::separateInStandaloneBranch(std::shared_
parent = multiply->output(0);
}
+ OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration");
const auto originalParent = dequantization.multiply ?
dequantization.multiply->shared_from_this() :
dequantization.subtract->shared_from_this();
diff --git a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
index 157a204af3a089..6e33afc09461f2 100644
--- a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
+++ b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
@@ -101,7 +101,7 @@ std::shared_ptr moveThroughConvert(const std::shared_ptr& reshape, c
void fuseConstant(const std::shared_ptr& reshape, const std::shared_ptr& constant) {
ov::OutputVector result(1);
- reshape->constant_fold(result, { constant, reshape->input_value(1) });
+ OPENVINO_ASSERT(reshape->constant_fold(result, { constant, reshape->input_value(1) }), "Reshape constant folding failed");
const auto newConstant = result[0].get_node_shared_ptr();
replace_node(reshape, newConstant);
copy_runtime_info({ constant, reshape }, newConstant);
@@ -139,7 +139,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq
return false;
}
- while (reshape != nullptr) {
+ do {
const auto parent = reshape->get_input_node_shared_ptr(0);
if (ov::is_type(parent) || ov::is_type(parent)) {
reshape = pull_reshape_through_dequantization::moveThroughElementwise(reshape, parent);
@@ -151,7 +151,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq
} else {
THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type";
}
- }
+ } while (reshape != nullptr);
return true;
};
diff --git a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp
index a4557288c74f23..3f3533f12a7da7 100644
--- a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp
+++ b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp
@@ -110,7 +110,7 @@ ov::pass::low_precision::PullTransposeThroughDequantization::PullTransposeThroug
ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher & m) -> bool {
const auto& opsMap = m.get_pattern_value_map();
- auto transpose = opsMap.find(matcherTranspose)->second.get_node()->shared_from_this();
+ auto transpose = opsMap.at(matcherTranspose).get_node_shared_ptr();
while (transpose != nullptr) {
const auto parent = transpose->get_input_node_shared_ptr(0);
From 155f6968b00e5931506e079b17c2820d164be6f8 Mon Sep 17 00:00:00 2001
From: Ekaterina Shiryaeva
Date: Mon, 20 Jan 2025 21:07:34 +0100
Subject: [PATCH 12/35] [NPUW] Fix scales processing in CWAI for nf4 (#28523)
### Tickets:
- *E-149709*
---
.../intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
index 93a43c9b82570a..a4a03dea982438 100644
--- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
+++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp
@@ -890,7 +890,8 @@ CWAI3::CWAI3(CWAI3::Results scales) {
auto matched_valueA = std::static_pointer_cast(matched_nodeA);
auto matched_valueC = std::static_pointer_cast(matched_nodeC);
- if (ov::element::i4 == matched_valueA->get_element_type() &&
+ if ((ov::element::i4 == matched_valueA->get_element_type() ||
+ ov::element::nf4 == matched_valueA->get_element_type()) &&
(ov::element::f16 == matched_valueC->get_element_type() ||
ov::element::f32 == matched_valueC->get_element_type())) {
LOG_DEBUG("Matched: " << matched_valueC);
From 1ad48635dc3bd31407c0a6aff93fcf9aedfa266a Mon Sep 17 00:00:00 2001
From: Tomasz Jankowski
Date: Mon, 20 Jan 2025 21:40:45 +0100
Subject: [PATCH 13/35] [RTTI] Use OV dynamic cast on Android only (#28519)
### Details:
OV dynamic casting causes issues in external software with badly formed
OV RTTI definitions, so it's replaced with standard dynamic casting,
except for Android.
### Tickets:
- CVS-160749
---------
Signed-off-by: Tomasz Jankowski
Co-authored-by: Ilya Lavrenov
---
src/core/include/openvino/core/type.hpp | 12 ++++++
src/core/tests/rtti.cpp | 56 ++++++++++++++++++++++++-
2 files changed, 67 insertions(+), 1 deletion(-)
diff --git a/src/core/include/openvino/core/type.hpp b/src/core/include/openvino/core/type.hpp
index 4877b9ce02b251..812208855fa7f3 100644
--- a/src/core/include/openvino/core/type.hpp
+++ b/src/core/include/openvino/core/type.hpp
@@ -77,6 +77,10 @@ struct OPENVINO_API DiscreteTypeInfo {
OPENVINO_API
std::ostream& operator<<(std::ostream& s, const DiscreteTypeInfo& info);
+#if defined(__ANDROID__) || defined(ANDROID)
+# define OPENVINO_DYNAMIC_CAST
+#endif
+
/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a
/// Type*/shared_ptr
template
@@ -93,7 +97,11 @@ template
typename std::enable_if(std::declval())), Type*>::value,
Type*>::type
as_type(Value value) {
+#ifdef OPENVINO_DYNAMIC_CAST
return ov::is_type(value) ? static_cast(value) : nullptr;
+#else
+ return dynamic_cast(value);
+#endif
}
namespace util {
@@ -114,7 +122,11 @@ struct AsTypePtr> {
/// Type, nullptr otherwise
template
auto as_type_ptr(const U& value) -> decltype(::ov::util::AsTypePtr::template call(value)) {
+#ifdef OPENVINO_DYNAMIC_CAST
return ::ov::util::AsTypePtr::template call(value);
+#else
+ return std::dynamic_pointer_cast(value);
+#endif
}
} // namespace ov
diff --git a/src/core/tests/rtti.cpp b/src/core/tests/rtti.cpp
index 1fd8787ee60f38..9cfa225f4a3010 100644
--- a/src/core/tests/rtti.cpp
+++ b/src/core/tests/rtti.cpp
@@ -5,10 +5,12 @@
#include "common_test_utils/test_tools.hpp"
#include "gtest/gtest.h"
#include "openvino/op/op.hpp"
+#include "openvino/pass/matcher_pass.hpp"
-using namespace ov;
using namespace std;
+namespace ov::test {
+
class OpType : public ov::op::Op {
public:
OPENVINO_OP("OpType");
@@ -88,3 +90,55 @@ TEST(rtti, op_with_type_version_parent_old) {
ASSERT_NE(type_info.parent, nullptr);
ASSERT_EQ(*type_info.parent, OpType::get_type_info_static());
}
+
+#if !defined(__ANDROID__) && !defined(ANDROID)
+
+class IncompleteRtti : public pass::MatcherPass {
+public:
+ OPENVINO_RTTI("IncompleteRtti", "rtti_test");
+};
+
+class DerivedIncompleteRtti : public IncompleteRtti {
+public:
+ OPENVINO_RTTI("DerivedIncompleteRtti", "rtti_test", IncompleteRtti);
+};
+
+// Assert backward compatibility of RTTI definition without parent but casted with as_type or as_type_ptr pointer work.
+TEST(rtti, assert_casting_without_parent) {
+ {
+ IncompleteRtti incomplete;
+ DerivedIncompleteRtti derived;
+
+ auto pass_A = as_type(&incomplete);
+ auto pass_B = as_type(&derived);
+ auto pass_C = as_type(&derived);
+
+ EXPECT_NE(nullptr, pass_A);
+ EXPECT_NE(nullptr, pass_B);
+ EXPECT_NE(nullptr, pass_C);
+
+ EXPECT_NE(nullptr, as_type(pass_A));
+ EXPECT_NE(nullptr, as_type(pass_B));
+ EXPECT_NE(nullptr, as_type(pass_B));
+ EXPECT_NE(nullptr, as_type(pass_C));
+ }
+ {
+ auto incomplete = std::make_shared();
+ auto derived = std::make_shared();
+
+ auto pass_A = as_type_ptr(incomplete);
+ auto pass_B = as_type_ptr(derived);
+ auto pass_C = as_type_ptr(derived);
+
+ EXPECT_NE(nullptr, pass_A);
+ EXPECT_NE(nullptr, pass_B);
+ EXPECT_NE(nullptr, pass_C);
+
+ EXPECT_NE(nullptr, as_type_ptr(pass_A));
+ EXPECT_NE(nullptr, as_type_ptr(pass_B));
+ EXPECT_NE(nullptr, as_type_ptr(pass_B));
+ EXPECT_NE(nullptr, as_type_ptr(pass_C));
+ }
+}
+#endif // ANDROID
+} // namespace ov::test
From 08be7ae090cb1871490cd7ec521a8e80422152e9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 21:24:34 +0000
Subject: [PATCH 14/35] Bump reviewdog/action-shellcheck from 1.27.0 to 1.29.0
(#28571)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Bumps
[reviewdog/action-shellcheck](https://github.com/reviewdog/action-shellcheck)
from 1.27.0 to 1.29.0.
Release notes
Sourced from reviewdog/action-shellcheck's
releases.
Release v1.29.0
What's Changed
New Contributors
Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.28.0...v1.29.0
Release v1.28.0
What's Changed
New Contributors
Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.27.0...v1.28.0
Commits
6e0e63d
Merge pull request #70
from reviewdog/depup/reviewdog/reviewdog
958d9e1
Merge pull request #71
from abitrolly/patch-1
44addb0
Show shellcheck version after install
fff8e91
chore(deps): update reviewdog/reviewdog to 0.20.3
22f96e3
Merge pull request #69
from reviewdog/add_fail_level
e48fb59
Add line break
d394b4f
Add fail_level and deduplicate fail_on_error
- See full diff in compare
view
[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=reviewdog/action-shellcheck&package-manager=github_actions&previous-version=1.27.0&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.
[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)
---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
.github/workflows/code_style.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml
index 97b399b1abf48d..89fb4e64670d8d 100644
--- a/.github/workflows/code_style.yml
+++ b/.github/workflows/code_style.yml
@@ -98,7 +98,7 @@ jobs:
# always provide suggestions even for skipped scripts in ov_shellcheck tagret
- name: ShellCheck action
if: always()
- uses: reviewdog/action-shellcheck@ccaafec556ffa154f112bfcb7b9c9574190b7091 # v1.27.0
+ uses: reviewdog/action-shellcheck@6e0e63d1750d02d761b3df0f2c5ba9f9ac4a9ed7 # v1.29.0
with:
level: style
reporter: github-pr-review
From 0efe897a15ce6470b3eb78ef119b3b620966ab2f Mon Sep 17 00:00:00 2001
From: Vladislav Golubev
Date: Mon, 20 Jan 2025 22:55:07 +0100
Subject: [PATCH 15/35] [LPT] Cleanup base LayerTransformation class from
legacy TransformationContext (#28327)
### Details:
- *`TransformationContext` is not used anywhere and
`LayerTransformation::context` always equal to `nullptr`*
- *This PR completely removes `TransformationContext`*
- *Also, `LayerTransformation` class is cleaned up from legacy methods
which are not used anywhere*
### Tickets:
- *N\A*
---
.../include/low_precision/add.hpp | 4 +-
.../low_precision/assign_and_read_value.hpp | 4 +-
.../include/low_precision/avg_pool.hpp | 4 +-
.../include/low_precision/batch_to_space.hpp | 4 +-
.../include/low_precision/broadcast.hpp | 2 +-
.../include/low_precision/clamp.hpp | 4 +-
.../low_precision/cleanup_transformation.hpp | 2 +-
.../include/low_precision/concat.hpp | 4 +-
.../include/low_precision/convert.hpp | 2 +-
.../include/low_precision/convolution.hpp | 2 +-
.../convolution_backprop_data.hpp | 4 +-
.../include/low_precision/depth_to_space.hpp | 2 +-
.../low_precision/eliminate_fake_quantize.hpp | 4 +-
.../eltwise_base_transformation.hpp | 2 +-
.../include/low_precision/fake_quantize.hpp | 9 +-
.../fake_quantize_decomposition.hpp | 2 +-
.../include/low_precision/fold_convert.hpp | 4 +-
.../low_precision/fold_fake_quantize.hpp | 4 +-
.../include/low_precision/fuse_convert.hpp | 4 +-
.../fuse_elementwise_to_fake_quantize.hpp | 2 +-
.../fuse_multiply_to_fake_quantize.hpp | 2 +-
.../fuse_subtract_to_fake_quantize.hpp | 2 +-
.../include/low_precision/gather.hpp | 4 +-
.../low_precision/group_convolution.hpp | 2 +-
.../include/low_precision/interpolate.hpp | 4 +-
.../low_precision/layer_transformation.hpp | 67 +++-----------
.../include/low_precision/mat_mul.hpp | 4 +-
.../include/low_precision/max_pool.hpp | 4 +-
.../low_precision/move_fake_quantize.hpp | 4 +-
.../include/low_precision/multiply.hpp | 2 +-
.../low_precision/multiply_partial.hpp | 4 +-
.../multiply_to_group_convolution.hpp | 4 +-
.../include/low_precision/mvn.hpp | 4 +-
.../include/low_precision/network_helper.hpp | 1 -
.../include/low_precision/normalize_l2.hpp | 4 +-
.../include/low_precision/pad.hpp | 4 +-
.../include/low_precision/prelu.hpp | 4 +-
.../include/low_precision/recurrent_cell.hpp | 6 +-
.../reduce_base_transformation.hpp | 4 +-
.../include/low_precision/reduce_max.hpp | 2 +-
.../include/low_precision/reduce_mean.hpp | 2 +-
.../include/low_precision/reduce_min.hpp | 2 +-
.../include/low_precision/reduce_sum.hpp | 2 +-
.../include/low_precision/relu.hpp | 4 +-
.../include/low_precision/reshape.hpp | 4 +-
.../low_precision/shuffle_channels.hpp | 4 +-
.../include/low_precision/slice.hpp | 4 +-
.../include/low_precision/space_to_batch.hpp | 4 +-
.../include/low_precision/split.hpp | 9 +-
.../include/low_precision/squeeze.hpp | 4 +-
.../include/low_precision/strided_slice.hpp | 4 +-
.../include/low_precision/subtract.hpp | 2 +-
.../low_precision/transformation_context.hpp | 39 --------
.../transparent_base_transformation.hpp | 4 +-
.../include/low_precision/transpose.hpp | 4 +-
.../include/low_precision/unsqueeze.hpp | 4 +-
.../weightable_layer_transformation.hpp | 13 +--
.../low_precision_transformations/src/add.cpp | 12 +--
.../src/assign_and_read_value.cpp | 12 +--
.../src/avg_pool.cpp | 12 +--
.../src/batch_to_space.cpp | 12 +--
.../src/broadcast.cpp | 6 +-
.../src/clamp.cpp | 12 +--
.../src/cleanup_transformation.cpp | 2 +-
.../src/concat.cpp | 10 +--
.../src/convert.cpp | 6 +-
.../src/convolution.cpp | 10 +--
.../src/convolution_backprop_data.cpp | 14 +--
.../src/depth_to_space.cpp | 6 +-
.../src/eliminate_fake_quantize.cpp | 10 +--
.../src/eltwise_base_transformation.cpp | 4 +-
.../src/fake_quantize.cpp | 7 +-
.../src/fake_quantize_decomposition.cpp | 6 +-
.../src/fold_convert.cpp | 12 +--
.../src/fold_fake_quantize.cpp | 8 +-
.../src/fuse_convert.cpp | 10 +--
.../src/fuse_elementwise_to_fake_quantize.cpp | 4 +-
.../src/fuse_multiply_to_fake_quantize.cpp | 8 +-
.../src/fuse_subtract_to_fake_quantize.cpp | 8 +-
.../src/gather.cpp | 12 +--
.../src/group_convolution.cpp | 11 +--
.../src/interpolate.cpp | 12 +--
.../src/layer_transformation.cpp | 88 ++-----------------
.../src/mat_mul.cpp | 12 +--
.../src/max_pool.cpp | 12 +--
.../src/move_fake_quantize.cpp | 12 +--
.../src/multiply.cpp | 10 +--
.../src/multiply_partial.cpp | 12 +--
.../src/multiply_to_group_convolution.cpp | 10 +--
.../low_precision_transformations/src/mvn.cpp | 12 +--
.../src/normalize_l2.cpp | 12 +--
.../low_precision_transformations/src/pad.cpp | 12 +--
.../src/prelu.cpp | 12 +--
.../src/recurrent_cell.cpp | 18 ++--
.../src/reduce_base_transformation.cpp | 8 +-
.../src/reduce_max.cpp | 6 +-
.../src/reduce_mean.cpp | 6 +-
.../src/reduce_min.cpp | 6 +-
.../src/reduce_sum.cpp | 6 +-
.../src/relu.cpp | 12 +--
.../src/reshape.cpp | 12 +--
.../src/shuffle_channels.cpp | 12 +--
.../src/slice.cpp | 12 +--
.../src/space_to_batch.cpp | 12 +--
.../src/split.cpp | 14 ++-
.../src/squeeze.cpp | 12 +--
.../src/strided_slice.cpp | 10 +--
.../src/subtract.cpp | 6 +-
.../src/transformation_context.cpp | 18 ----
.../src/transparent_base_transformation.cpp | 8 +-
.../src/transpose.cpp | 12 +--
.../src/unsqueeze.cpp | 12 +--
.../src/variadic_split.cpp | 2 +-
.../src/weightable_layer_transformation.cpp | 11 +--
.../tests/layer_transformation.hpp | 1 -
.../simple_low_precision_transformer.cpp | 1 -
116 files changed, 382 insertions(+), 575 deletions(-)
delete mode 100644 src/common/low_precision_transformations/include/low_precision/transformation_context.hpp
delete mode 100644 src/common/low_precision_transformations/src/transformation_context.cpp
diff --git a/src/common/low_precision_transformations/include/low_precision/add.hpp b/src/common/low_precision_transformations/include/low_precision/add.hpp
index 2c97087696d2f7..55efbf940e94b7 100644
--- a/src/common/low_precision_transformations/include/low_precision/add.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/add.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API AddTransformation : public EltwiseBaseTransformatio
public:
OPENVINO_RTTI("AddTransformation", "0", EltwiseBaseTransformation);
AddTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp
index edef4d63aa134a..9134293d5512dd 100644
--- a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp
@@ -15,8 +15,8 @@ class LP_TRANSFORMATIONS_API AssignAndReadValueTransformation : public LayerTran
public:
OPENVINO_RTTI("AssignAndReadValueTransformation", "0", LayerTransformation);
AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
+ bool transform(ov::pass::pattern::Matcher& m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
private:
std::shared_ptr model;
diff --git a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp
index ac8b91aeb57504..7dfac41beffb06 100644
--- a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp
@@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API AvgPoolTransformation : public LayerTransformation
public:
OPENVINO_RTTI("AvgPoolTransformation", "0", LayerTransformation);
AvgPoolTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp
index 7859a29ec3a046..b729eb1fc956d3 100644
--- a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API BatchToSpaceTransformation : public LayerTransforma
public:
OPENVINO_RTTI("BatchToSpaceTransformation", "0", LayerTransformation);
BatchToSpaceTransformation(const Params& params = Params());
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp
index 05f7cadb88e888..75096e322a6571 100644
--- a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp
@@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API BroadcastTransformation : public TransparentBaseTra
public:
OPENVINO_RTTI("BroadcastTransformation", "0", TransparentBaseTransformation);
BroadcastTransformation(const Params& params = Params());
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/clamp.hpp b/src/common/low_precision_transformations/include/low_precision/clamp.hpp
index d79a6ad159e21b..c41d80939bca8f 100644
--- a/src/common/low_precision_transformations/include/low_precision/clamp.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/clamp.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API ClampTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("ClampTransformation", "0", LayerTransformation);
ClampTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
+ bool transform(ov::pass::pattern::Matcher& m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp
index 503c519ea60f22..52de352c0bb5d9 100644
--- a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp
@@ -19,7 +19,7 @@ class LP_TRANSFORMATIONS_API CleanupTransformation : public LayerTransformation
CleanupTransformation(const Params& params);
virtual ~CleanupTransformation() = default;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
static bool canBeTransformedStatic(
const std::shared_ptr& layer,
const std::vector& defaultPrecisions = precision_set::get_int8_support());
diff --git a/src/common/low_precision_transformations/include/low_precision/concat.hpp b/src/common/low_precision_transformations/include/low_precision/concat.hpp
index c082e30dfa1ecd..a4511ef0f7c099 100644
--- a/src/common/low_precision_transformations/include/low_precision/concat.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/concat.hpp
@@ -31,9 +31,9 @@ class LP_TRANSFORMATIONS_API ConcatTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("ConcatTransformation", "0", LayerTransformation);
ConcatTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
static bool isQuantizedStatic(const std::shared_ptr& layer);
};
diff --git a/src/common/low_precision_transformations/include/low_precision/convert.hpp b/src/common/low_precision_transformations/include/low_precision/convert.hpp
index 7cbd79be03bb2b..edfb58076c9d20 100644
--- a/src/common/low_precision_transformations/include/low_precision/convert.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/convert.hpp
@@ -15,7 +15,7 @@ class LP_TRANSFORMATIONS_API ConvertTransformation : public LayerTransformation
public:
OPENVINO_RTTI("ConvertTransformation", "0", LayerTransformation);
ConvertTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/convolution.hpp b/src/common/low_precision_transformations/include/low_precision/convolution.hpp
index 428a8adf00ca17..74a61817c15b18 100644
--- a/src/common/low_precision_transformations/include/low_precision/convolution.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/convolution.hpp
@@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API ConvolutionTransformation : public WeightableLayerT
public:
OPENVINO_RTTI("ConvolutionTransformation", "0", WeightableLayerTransformation);
ConvolutionTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isQuantized(const std::shared_ptr& layer,
const std::vector&defaultPrecisions) const override;
static bool isQuantizedStatic(const std::shared_ptr& layer,
diff --git a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp
index 6221a75aca5fb2..9b1e2580e59193 100644
--- a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp
@@ -21,8 +21,8 @@ namespace low_precision {
class LP_TRANSFORMATIONS_API ConvolutionBackpropDataTransformation : public WeightableLayerTransformation {
public:
ConvolutionBackpropDataTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
bool isQuantized(const std::shared_ptr& layer,
const std::vector&defaultPrecisions) const override;
static bool isQuantizedStatic(const std::shared_ptr& layer,
diff --git a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp
index e86a2de2941b3c..1ace395ac8331d 100644
--- a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp
@@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API DepthToSpaceTransformation : public TransparentBase
public:
OPENVINO_RTTI("DepthToSpaceTransformation", "0", TransparentBaseTransformation);
DepthToSpaceTransformation(const Params& params = Params());
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp
index bfaa0c3b3a2b1b..190d146a741151 100644
--- a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public Cleanu
public:
OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0", CleanupTransformation);
EliminateFakeQuantizeTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp
index 5d3361e7283eb9..9c3c5d1c3b2a5d 100644
--- a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp
@@ -19,7 +19,7 @@ namespace low_precision {
class LP_TRANSFORMATIONS_API EltwiseBaseTransformation : public LayerTransformation {
public:
EltwiseBaseTransformation(const Params& params) : LayerTransformation(params) {}
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
static bool isBroadcasted(const PartialShape& shape);
diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp
index 640814dc15cabb..8f5c67dbc0bcc4 100644
--- a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp
@@ -23,16 +23,15 @@ class LP_TRANSFORMATIONS_API FakeQuantizeTransformation : public LayerTransforma
public:
OPENVINO_RTTI("FakeQuantizeTransformation", "0", LayerTransformation);
FakeQuantizeTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
static bool checkElementwise(const std::shared_ptr& eltwise);
static std::shared_ptr fuseElementwise(
- TransformationContext& context,
- MatcherPass* matcherPass,
- const std::shared_ptr& fakeQuantize,
- const bool updatePrecisions);
+ MatcherPass* matcherPass,
+ const std::shared_ptr& fakeQuantize,
+ const bool updatePrecisions);
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp
index 4d2ee8d88fadaf..8289a9ea5493f7 100644
--- a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp
@@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDecompositionTransformation : public La
public:
OPENVINO_RTTI("FakeQuantizeDecompositionTransformation", "0", LayerTransformation);
FakeQuantizeDecompositionTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp
index bc5342b5cca4f1..d0d864835c8f98 100644
--- a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp
@@ -25,8 +25,8 @@ class LP_TRANSFORMATIONS_API FoldConvertTransformation : public CleanupTransform
public:
OPENVINO_RTTI("FoldConvertTransformation", "0", CleanupTransformation);
FoldConvertTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp
index c47c39a78ef081..b345ce5edbd80a 100644
--- a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp
@@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API FoldFakeQuantizeTransformation : public LayerTransf
public:
OPENVINO_RTTI("FoldFakeQuantizeTransformation", "0", LayerTransformation);
FoldFakeQuantizeTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
bool isConstantOutput(std::shared_ptr op) const;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp
index 0ff0dc60821486..06d252961e2c26 100644
--- a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp
@@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API FuseConvertTransformation : public CleanupTransform
public:
OPENVINO_RTTI("FuseConvertTransformation", "0", CleanupTransformation);
FuseConvertTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp
index ab1a589845aa10..13b73a1112f4c5 100644
--- a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp
@@ -21,7 +21,7 @@ class LP_TRANSFORMATIONS_API FuseElementwiseToFakeQuantizeTransformation : publi
FuseElementwiseToFakeQuantizeTransformation(const Params& params);
virtual ~FuseElementwiseToFakeQuantizeTransformation() = default;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp
index 67471a56a4a6b8..1933a07bbb881b 100644
--- a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp
@@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public F
public:
OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation);
FuseMultiplyToFakeQuantizeTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp
index c5dd8994e2a512..644aafb740d8ff 100644
--- a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp
@@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public F
public:
OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation);
FuseSubtractToFakeQuantizeTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp
index 6aebd3fb094e0a..980ec8f1e9b992 100644
--- a/src/common/low_precision_transformations/include/low_precision/gather.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp
@@ -15,9 +15,9 @@ class LP_TRANSFORMATIONS_API GatherTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("GatherTransformation", "0", LayerTransformation);
GatherTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp
index 6551a929339830..f1e0bb44bddad8 100644
--- a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp
@@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API GroupConvolutionTransformation : public Convolution
public:
OPENVINO_RTTI("GroupConvolutionTransformation", "0", ConvolutionTransformation);
GroupConvolutionTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isQuantized(const std::shared_ptr& layer,
const std::vector& defaultPrecisions) const override;
static bool isQuantizedStatic(const std::shared_ptr& layer,
diff --git a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp
index 634d422dc2b09b..d715a24cc73e5d 100644
--- a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp
@@ -22,9 +22,9 @@ class LP_TRANSFORMATIONS_API InterpolateTransformation : public LayerTransformat
public:
OPENVINO_RTTI("InterpolateTransformation", "0", LayerTransformation);
InterpolateTransformation(const Params& params = Params());
- bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp
index c675ade19b516b..b3c7aaa16ea33a 100644
--- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp
@@ -12,27 +12,15 @@
#include
#include "openvino/pass/matcher_pass.hpp"
-#include "transformation_context.hpp"
#include "quantization_details.hpp"
#include "low_precision/common/ie_lpt_exception.hpp"
#include "common/fake_quantize_dequantization.hpp"
/*****************************************************
* Debug capability
- * - ORIGINAL_MODEL_PATH : Specify with existing folder name
- * to serialize original model into it (XML & BIN extensions were added)
- * - TRANSFORMED_MODEL_PATH : Specify with existing folder name
- * to serialize original model into it (XML & BIN extensions were added)
- * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable
- * dequantization layers printing
- * - LPT_DISPLAY_PRECISION : Define it to to display precision info
- * during low precision transformations
- *
+ * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable dequantization info printing: scales, shifts, etc.
*****************************************************/
-// #define LPT_ORIGINAL_MODEL_PATH "/localdisk/orig.model"
-// #define LPT_TRANSFORMED_MODEL_PATH "/localdisk/transformed.model"
// #define LPT_PRINT_DEQUANTIZATION_INFO
-// #define LPT_DISPLAY_PRECISION
namespace ov {
namespace pass {
@@ -301,15 +289,9 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass
LayerTransformation(const Params& params);
virtual ~LayerTransformation() = default;
- virtual bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) = 0;
+ virtual bool transform(ov::pass::pattern::Matcher &m) = 0;
- void setContext(TransformationContext* context) noexcept;
-
- void setUpdatePrecisions(const bool updatePrecisions);
-
- void setDefaultPrecisions(const std::vector& defaultPrecisions);
-
- virtual bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const;
+ virtual bool canBeTransformed(const std::shared_ptr& layer) const;
static bool canBeTransformedStatic(const std::shared_ptr& layer,
const std::vector& defaultPrecisions = precision_set::get_int8_support());
@@ -352,59 +334,32 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass
const std::vector& dequantizationShifts);
#endif
- bool updatePrecisions;
- element::Type deqPrecision;
- std::vector defaultPrecisions;
- bool reshapeIgnorePerTensorQuantizationCheck;
- bool scalingMode;
+ const bool updatePrecisions;
+ const element::Type deqPrecision;
+ const std::vector defaultPrecisions;
+ const bool reshapeIgnorePerTensorQuantizationCheck;
+ const bool scalingMode;
static constexpr char originalLayerPostfix[] = "_original";
- TransformationContext* context;
protected:
std::shared_ptr moveDequantizationAfter(
- TransformationContext &context,
const std::shared_ptr& operation,
const FakeQuantizeDequantization& dequantization,
const bool updateOutputPrecision = true,
const bool moveSubtract = true) const;
std::shared_ptr moveDequantizationBefore(
- TransformationContext& context,
const std::shared_ptr& operation,
const FakeQuantizeDequantization& dequantization,
const bool moveSubtract = true) const;
- bool updateOutput(
- TransformationContext &context,
- std::shared_ptr lastNode,
- std::shared_ptr originalNode) const;
-
- void updateOutput(
- TransformationContext& context,
- std::shared_ptr lastNode,
- std::string originalName) const;
-
- void addPattern(ov::pass::GraphRewrite& pass, TransformationContext& context, std::shared_ptr patternRoot);
-
- //TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations
- bool canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const;
+ bool updateOutput(const std::shared_ptr& lastNode, const std::shared_ptr& originalNode) const;
- template
- void addSingleNodePattern(ov::pass::GraphRewrite& pass, TransformationContext& context) const {
- using namespace ov;
-
- auto is_op_type = [](std::shared_ptr n) {
- return !!as_type_ptr(n);
- };
- auto p_node = std::make_shared(element::f32, Shape{}, is_op_type);
-
- addPattern(pass, context, p_node);
- }
+ // TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations
+ bool canBeTransformedSpatialDimension(const std::shared_ptr& layer) const;
};
-typedef std::shared_ptr LayerTransformationPtr;
-
} // namespace low_precision
} // namespace pass
} // namespace ov
diff --git a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp
index 0b6115e9345b0e..910154fe0e16e0 100644
--- a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp
@@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API MatMulTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("MatMulTransformation", "0", LayerTransformation);
MatMulTransformation(const Params& params = Params());
- bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp
index 7d499c9ec254f3..f6307ed69cbfbe 100644
--- a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API MaxPoolTransformation : public LayerTransformation
public:
OPENVINO_RTTI("MaxPoolTransformation", "0", LayerTransformation);
MaxPoolTransformation(const Params& params = Params());
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp
index 628c88b38992e4..96a344cc4620fe 100644
--- a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp
@@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API MoveFakeQuantize : public LayerTransformation {
public:
OPENVINO_RTTI("MoveFakeQuantize", "0", LayerTransformation);
MoveFakeQuantize(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/multiply.hpp b/src/common/low_precision_transformations/include/low_precision/multiply.hpp
index fd51b8cac07f35..5658a5bf71bedf 100644
--- a/src/common/low_precision_transformations/include/low_precision/multiply.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/multiply.hpp
@@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API MultiplyTransformation : public WeightableLayerTran
public:
OPENVINO_RTTI("MultiplyTransformation", "0", WeightableLayerTransformation);
MultiplyTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
protected:
size_t getInputChannels(const std::shared_ptr op) const override;
diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp
index 3bee03cfb1a265..7f05baeaf3b12e 100644
--- a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp
@@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseT
public:
OPENVINO_RTTI("MultiplyPartialTransformation", "0", EltwiseBaseTransformation);
MultiplyPartialTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp
index 45252777252fc6..3d6fc228331b13 100644
--- a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp
@@ -27,8 +27,8 @@ class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public C
const Params& params = Params(),
const PrecisionsRestriction::PrecisionsByPorts& restrictions = {});
~MultiplyToGroupConvolutionTransformation() override {}
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
bool isQuantized(const std::shared_ptr& layer,
const std::vector& defaultPrecisions) const override;
diff --git a/src/common/low_precision_transformations/include/low_precision/mvn.hpp b/src/common/low_precision_transformations/include/low_precision/mvn.hpp
index cd73075ad5740b..061cca9917c43f 100644
--- a/src/common/low_precision_transformations/include/low_precision/mvn.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/mvn.hpp
@@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API MVNTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("MVNTransformation", "0", LayerTransformation);
MVNTransformation(const Params& params = Params());
- bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp
index 40f2973b0701df..d4a3ba6d429044 100644
--- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp
@@ -16,7 +16,6 @@
#include "rt_info/precisions_attribute.hpp"
#include "rt_info/quantization_granularity_attribute.hpp"
#include "rt_info/intervals_alignment_attribute.hpp"
-#include "transformation_context.hpp"
#include "quantization_details.hpp"
#include "transformations/utils/utils.hpp"
#include "common/fake_quantize_dequantization.hpp"
diff --git a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp
index c2777ca0652a07..8d16867982e5fe 100644
--- a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp
@@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API NormalizeL2Transformation : public LayerTransformat
public:
OPENVINO_RTTI("NormalizeL2Transformation", "0", LayerTransformation);
NormalizeL2Transformation(const Params& params = Params());
- bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/pad.hpp b/src/common/low_precision_transformations/include/low_precision/pad.hpp
index 49012e19a604e8..595d7b02dbd77e 100644
--- a/src/common/low_precision_transformations/include/low_precision/pad.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/pad.hpp
@@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API PadTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("PadTransformation", "0", LayerTransformation);
PadTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, pattern::Matcher& m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
+ bool transform(pattern::Matcher& m) override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
};
diff --git a/src/common/low_precision_transformations/include/low_precision/prelu.hpp b/src/common/low_precision_transformations/include/low_precision/prelu.hpp
index df64677b861dbb..12af2f536b28f2 100644
--- a/src/common/low_precision_transformations/include/low_precision/prelu.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/prelu.hpp
@@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API PReluTransformation : public LayerTransformation {
public:
OPENVINO_RTTI("PReluTransformation", "0", LayerTransformation);
PReluTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override;
+ bool canBeTransformed(const std::shared_ptr& op) const override;
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp
index fc0401b08dd74e..9cb8ed91c4b70b 100644
--- a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp
@@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform
public:
OPENVINO_RTTI("RecurrentCellTransformation", "0", LayerTransformation);
RecurrentCellTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override;
+ bool transform(ov::pass::pattern::Matcher &m) override;
+ bool canBeTransformed(const std::shared_ptr& layer) const override;
bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override;
void propagateSkipCleanupAttribute(std::shared_ptr dequantization_multiply);
static std::shared_ptr wrap_fake_quantize(const std::shared_ptr parameter);
@@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform
static std::shared_ptr wrap_dequantization(const std::shared_ptr parameter, const bool with_subtract);
private:
- void propagate(TransformationContext& context, const std::shared_ptr node);
+ void propagate(const std::shared_ptr node);
};
} // namespace low_precision
diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp
index 4a42edd60d80c8..c91a8364f71c08 100644
--- a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp
@@ -22,8 +22,8 @@ namespace low_precision {
class LP_TRANSFORMATIONS_API ReduceBaseTransformation : public LayerTransformation {
public:
ReduceBaseTransformation(const Params& params = Params());
- bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override;
+ bool transform(ov::pass::pattern::Matcher& m) override;
+ bool canBeTransformed(const std::shared_ptr& reduce) const override;
protected:
virtual void changeDequantizationValues(
diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp
index 33f685ba8ca74c..f4e824a43fdec7 100644
--- a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp
+++ b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp
@@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMaxTransformation : public ReduceBaseTransfor
OPENVINO_RTTI("ReduceMaxTransformation", "0", ReduceBaseTransformation);
ReduceMaxTransformation(const Params& params = Params());
bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override;
- bool canBeTransformed(const TransformationContext& context, std::shared_ptr