From 5fba4415e2db090284b907c8ca8888f80f0a419c Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Mon, 20 Jan 2025 06:05:05 +0100 Subject: [PATCH 01/35] [PT FE] Support different aliases of existing operations (#28531) ### Details: - *Support: `aten::equal`, `aten::index_put`, `aten::logsumexp`, `prim::abs`* ### Tickets: - *ticket-id* Signed-off-by: Maxim Vafin --- src/frontends/pytorch/src/op/index_put_.cpp | 2 +- src/frontends/pytorch/src/op/log.cpp | 8 +++-- src/frontends/pytorch/src/op_table.cpp | 7 ++-- .../pytorch_tests/test_logsumexp.py | 34 +++++++++++++++++++ .../pytorch_tests/test_unary_ops.py | 27 +++++++++++++-- 5 files changed, 70 insertions(+), 8 deletions(-) create mode 100644 tests/layer_tests/pytorch_tests/test_logsumexp.py diff --git a/src/frontends/pytorch/src/op/index_put_.cpp b/src/frontends/pytorch/src/op/index_put_.cpp index 1b5725a8a95bb3..4591862d8f04c1 100644 --- a/src/frontends/pytorch/src/op/index_put_.cpp +++ b/src/frontends/pytorch/src/op/index_put_.cpp @@ -10,7 +10,7 @@ namespace frontend { namespace pytorch { namespace op { -OutputVector translate_index_put_(const NodeContext& context) { +OutputVector translate_index_put(const NodeContext& context) { // Pass as PtFrameworkNode to register as `inplace_op`. Conversion to OV operators is done as transformation. 
auto node = std::make_shared(context.get_decoder(), context.inputs()); return {context.mark_node(node)}; diff --git a/src/frontends/pytorch/src/op/log.cpp b/src/frontends/pytorch/src/op/log.cpp index e932538c86520e..dbda6329deeb4f 100644 --- a/src/frontends/pytorch/src/op/log.cpp +++ b/src/frontends/pytorch/src/op/log.cpp @@ -77,7 +77,7 @@ OutputVector translate_log10(const NodeContext& context) { }; OutputVector translate_logsumexp(const NodeContext& context) { - num_inputs_check(context, 1, 2); + num_inputs_check(context, 1, 3); auto input = context.get_input(0); ov::Output dim; if (!context.input_is_none(1)) { @@ -85,8 +85,12 @@ OutputVector translate_logsumexp(const NodeContext& context) { } else { dim = context.mark_node(get_axes_range(context, 0)); } + bool keepdim = false; + if (!context.input_is_none(2)) { + keepdim = context.const_input(2); + } auto exp = context.mark_node(std::make_shared(input)); - auto sum = context.mark_node(std::make_shared(exp, dim, false)); + auto sum = context.mark_node(std::make_shared(exp, dim, keepdim)); auto log = context.mark_node(std::make_shared(sum)); return {log}; }; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index f00391e08e2a32..27dd55f77955e0 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -116,7 +116,7 @@ OP_CONVERTER(translate_index); OP_CONVERTER(translate_index_add); OP_CONVERTER(translate_index_copy_); OP_CONVERTER(translate_index_fill_); -OP_CONVERTER(translate_index_put_); +OP_CONVERTER(translate_index_put); OP_CONVERTER(translate_index_select); OP_CONVERTER(translate_instance_norm); OP_CONVERTER(translate_int); @@ -464,6 +464,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::empty", op::translate_empty}, {"aten::empty_like", op::translate_empty_like}, {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, + {"aten::equal", op::translate_1to1_match_2_inputs_align_types}, {"aten::erf", 
op::translate_erf}, {"aten::erfc", op::translate_erfc}, {"aten::exp", op::optional_out, 1>}, @@ -507,7 +508,7 @@ const std::unordered_map get_supported_ops_ts() { // aten::index - Supported in limited set of patterns {"aten::index_copy_", op::inplace_op}, {"aten::index_fill_", op::inplace_op}, - {"aten::index_put_", op::inplace_op}, + {"aten::index_put", op::translate_index_put}, {"aten::index_add", op::translate_index_add}, {"aten::index_select", op::translate_index_select}, {"aten::instance_norm", op::translate_instance_norm}, @@ -550,6 +551,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::log2_", op::inplace_op}, {"aten::log10", op::optional_out}, {"aten::log10_", op::inplace_op}, + {"aten::logsumexp", op::translate_logsumexp}, {"aten::lstm", op::translate_lstm}, {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, {"aten::masked_fill", op::translate_masked_fill}, @@ -714,6 +716,7 @@ const std::unordered_map get_supported_ops_ts() { {"ov_ext::embedding", op::translate_embedding_ext}, {"ov_ext::conv1d", op::translate_conv1d_ext}, {"ov_ext::linear", op::translate_linear}, + {"prim::abs", op::translate_1to1_match_1_inputs}, {"prim::Constant", op::translate_constant}, {"prim::device", op::translate_constant}, // prim::DictConstruct - Supported in limited set of patterns diff --git a/tests/layer_tests/pytorch_tests/test_logsumexp.py b/tests/layer_tests/pytorch_tests/test_logsumexp.py new file mode 100644 index 00000000000000..806e3b80540d5a --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_logsumexp.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class aten_logsumexp(torch.nn.Module): + def __init__(self, dim, keepdim) -> None: + super().__init__() + self.dim = dim + self.keepdim = keepdim + + def forward(self, input_tensor): + return torch.logsumexp(input_tensor, 
dim=self.dim, keepdim=self.keepdim) + + +class TestLogsumexp(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(2, 5, 9, 7),) + + @pytest.mark.parametrize("dim", [ + 0, 1, 2, 3, -1, -2, -3, -4 + ]) + @pytest.mark.parametrize("keepdim", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_fx_backend + def test_logsumexp(self, dim, keepdim, ie_device, precision, ir_version): + self._test(aten_logsumexp(dim, keepdim), None, "aten::logsumexp", + ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_unary_ops.py b/tests/layer_tests/pytorch_tests/test_unary_ops.py index 9807343080043c..584a80fe4ce254 100644 --- a/tests/layer_tests/pytorch_tests/test_unary_ops.py +++ b/tests/layer_tests/pytorch_tests/test_unary_ops.py @@ -75,7 +75,7 @@ class unary_op_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_op_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -87,7 +87,7 @@ def forward(self, x): class unary_op_out_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_op_out_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -101,7 +101,7 @@ def forward(self, x): class unary_func_op_inplace_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_func_op_inplace_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -111,6 +111,17 @@ def forward(self, x): return y, x1 +class prim_abs_net(torch.nn.Module): + def __init__(self, dtype): + super().__init__() + self.dtype = dtype + + def forward(self, x): + x1 = x.to(self.dtype) + y = abs(x1) + return y, x1 + + class TestUnaryOp(PytorchLayerTest): def _prepare_input(self): # random number in range [1, 11) @@ -265,3 +276,13 @@ def test_unary_func_op_inplace(self, op_type, dtype, ie_device, precision, ir_ve self.dtype = dtype self._test(unary_func_op_inplace_net(OPS[op_type], dtype), None, op_type + "_", ie_device, precision, 
ir_version) + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_torch_export + @pytest.mark.precommit_fx_backend + @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.int8, torch.uint8, torch.int32, torch.int64]) + def test_prim_abs(self, dtype, ie_device, precision, ir_version): + self.dtype = dtype + self._test(prim_abs_net(dtype), None, "prim::abs", + ie_device, precision, ir_version) From d757efd7fb3415a3dbda10941b3dae0ace0ac16e Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Mon, 20 Jan 2025 07:32:00 +0100 Subject: [PATCH 02/35] [PT FE] Support aten::concatenate (#28518) ### Details: - *Support `aten::concatenate`* ### Tickets: - *CVS-160777* Signed-off-by: Maxim Vafin --- src/frontends/pytorch/src/op_table.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 27dd55f77955e0..00e3a55b0bc327 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -432,6 +432,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::col2im", op::translate_col2im}, {"aten::complex", op::translate_complex}, {"aten::concat", op::translate_cat}, + {"aten::concatenate", op::translate_cat}, {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, // we assume all tensors are contiguous {"aten::conv_transpose1d", op::translate_conv_transposend}, From 78a1d1b907cc336e93df0c599202af76f09cb20c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 15:36:13 +0400 Subject: [PATCH 03/35] Bump paddlepaddle from 2.6.1 to 2.6.2 in /tests (#28547) Bumps [paddlepaddle](https://github.com/paddlepaddle/paddle) from 2.6.1 to 2.6.2.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=paddlepaddle&package-manager=pip&previous-version=2.6.1&new-version=2.6.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index 30ba701095ecf4..a806b7dfb47c18 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -13,7 +13,7 @@ defusedxml>=0.7.1 tensorflow>=2.5,<2.19.0 requests>=2.25.1 opencv-python>=4.5 -paddlepaddle==2.6.1 +paddlepaddle==2.6.2 protobuf>=3.18.1,<6.0.0 py>=1.9.0 pytest>=5.0,<8.4 From ace5379eb62846d6167bca15e9ff17cceaf6a4e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:02:38 +0000 Subject: [PATCH 04/35] Bump pytest-xdist from 2.1.0 to 3.6.1 in /tests (#28548) Bumps [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) from 2.1.0 to 3.6.1.
Changelog

Sourced from pytest-xdist's changelog.

pytest-xdist 3.6.1 (2024-04-28)

Bug Fixes

  • [#1071](https://github.com/pytest-dev/pytest-xdist/issues/1071) <https://github.com/pytest-dev/pytest-xdist/issues/1071>_: Add backward compatibility for deadlock issue with the execnet new main_thread_only "execmodel" triggered when pytest-cov accesses rinfo.

pytest-xdist 3.6.0 (2024-04-19)

This release was YANKED due to a regression fixed in 3.6.1.

Features

  • [#1027](https://github.com/pytest-dev/pytest-xdist/issues/1027) <https://github.com/pytest-dev/pytest-xdist/pull/1027>_:pytest-xdist workers now always execute the tests in the main thread. Previously some tests might end up executing in a separate thread other than main in the workers, due to some internal execnet`` details. This can cause problems specially with async frameworks where the event loop is running in the ``main`` thread (for example #620 pytest-dev/pytest-xdist#620`__).

Bug Fixes

  • [#1024](https://github.com/pytest-dev/pytest-xdist/issues/1024) <https://github.com/pytest-dev/pytest-xdist/issues/1024>_: Added proper handling of shouldstop (such as set by --max-fail) and shouldfail conditions in workers. Previously, a worker might have continued executing further tests before the controller could terminate the session.

  • [#1028](https://github.com/pytest-dev/pytest-xdist/issues/1028) <https://github.com/pytest-dev/pytest-xdist/issues/1028>_: Fixed compatibility issue between looponfail and editable installs.

  • [#620](https://github.com/pytest-dev/pytest-xdist/issues/620) <https://github.com/pytest-dev/pytest-xdist/issues/620>_: Use the new main_thread_only execnet "execmodel" so that code which expects to only run in the main thread will now work as expected.

  • [#937](https://github.com/pytest-dev/pytest-xdist/issues/937) <https://github.com/pytest-dev/pytest-xdist/issues/937>_: Fixed a bug where plugin would raise an incompatibility error with --pdb despite using -n0.

Removals

  • [#1053](https://github.com/pytest-dev/pytest-xdist/issues/1053) <https://github.com/pytest-dev/pytest-xdist/issues/1053>_: Dropped support for Python 3.7.

  • [#1057](https://github.com/pytest-dev/pytest-xdist/issues/1057) <https://github.com/pytest-dev/pytest-xdist/issues/1057>_: pytest>=7.0.0 is now required.

    execnet>=2.1.0 is now required.

Trivial Changes

  • [#1020](https://github.com/pytest-dev/pytest-xdist/issues/1020) <https://github.com/pytest-dev/pytest-xdist/issues/1020>_: pytest-xdist's setup.py file is removed.

    If you relied on this file, e.g. to install pytest using setup.py install, please see Why you shouldn't invoke setup.py directly <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html#summary>_ for alternatives.

... (truncated)

Commits
  • 4dd2978 Release 3.6.1
  • b397288 Merge pull request #1072 from zmedico/gateway-cache-rinfo
  • 12b3cce Cache execnet gateway rinfo during WorkerController setup
  • c93a106 build(deps): bump hynek/build-and-inspect-python-package (#1066)
  • 52e2022 [pre-commit.ci] pre-commit autoupdate (#1073)
  • 699f939 Merge pull request #1070 from pytest-dev/release-3.6.0
  • 80bc0b8 Release 3.6.0
  • 20e3ac7 Use execnet main_thread_only execmodel (#1027)
  • 0a4238f Merge pull request #1067 from pytest-dev/pre-commit-ci-update-config
  • 0686279 [pre-commit.ci] pre-commit autoupdate
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-xdist&package-manager=pip&previous-version=2.1.0&new-version=3.6.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/e2e_tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e_tests/requirements.txt b/tests/e2e_tests/requirements.txt index 934a5bcbc90888..a9d7bb0861ddd2 100644 --- a/tests/e2e_tests/requirements.txt +++ b/tests/e2e_tests/requirements.txt @@ -26,7 +26,7 @@ pytest-cov==2.11.1 pytest-html pytest-json-report==1.5.0 # pytest-metadata==1.7.0 -pytest-xdist==2.1.0 +pytest-xdist==3.6.1 pytest-timeout==2.3.1 # for common utils, e2e_tests From 3e8bc27b226049f5d0d5395e1edea2af704e02e0 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 20 Jan 2025 13:05:27 +0100 Subject: [PATCH 05/35] [CPU] Replace custom THROW_ERROR macros usage with THROW_CPU_NODE_ERR (#28510) ### Details: Replace custom THROW_ERROR macros usage for error reporting in nodes implementation with THROW_CPU_NODE_ERR to unify error handling infrastructure in CPU plugin ### Tickets: - 160275 --- .../intel_cpu/src/nodes/depth_to_space.cpp | 24 +++--- src/plugins/intel_cpu/src/nodes/eye.cpp | 2 - src/plugins/intel_cpu/src/nodes/gather.cpp | 20 ++--- .../intel_cpu/src/nodes/gather_elements.cpp | 10 +-- src/plugins/intel_cpu/src/nodes/gather_nd.cpp | 22 +++-- .../intel_cpu/src/nodes/grid_sample.cpp | 2 - .../intel_cpu/src/nodes/interaction.cpp | 4 +- src/plugins/intel_cpu/src/nodes/mha.cpp | 20 ++--- src/plugins/intel_cpu/src/nodes/normalize.cpp | 19 ++--- src/plugins/intel_cpu/src/nodes/priorbox.cpp | 8 +- .../intel_cpu/src/nodes/space_to_depth.cpp | 24 +++--- src/plugins/intel_cpu/src/nodes/split.cpp | 18 ++-- .../intel_cpu/src/nodes/tensoriterator.cpp | 82 +++++++++---------- src/plugins/intel_cpu/src/nodes/unique.cpp | 14 ++-- 14 files changed, 123 insertions(+), 146 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp index bf0823885ebc71..ed8f1776d6c974 100644 
--- a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp @@ -14,8 +14,6 @@ #include "openvino/opsets/opset1.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) OPENVINO_THROW("DepthToSpace layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl::impl; namespace ov { @@ -73,11 +71,11 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 1 || outputShapes.size() != 1) - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto depthToSpace = ov::as_type_ptr(op); if (!depthToSpace) - THROW_ERROR("supports only opset1"); + THROW_CPU_NODE_ERR("supports only opset1"); const auto modeNgraph = depthToSpace->get_mode(); if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST) { @@ -85,22 +83,22 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte } else if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) { attrs.mode = Mode::DEPTH_FIRST; } else { - THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph)); + THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph)); } attrs.blockSize = depthToSpace->get_block_size(); if (attrs.blockSize == 0) - THROW_ERROR("has incorrect block_size parameter is zero!"); + THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!"); const size_t srcRank = getInputShapeAtPort(0).getRank(); const size_t dstRank = getOutputShapeAtPort(0).getRank(); if (srcRank < 3) - THROW_ERROR("has incorrect number of input dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input dimensions"); if (srcRank > 5) - THROW_ERROR("doesn't support dimensions with rank greater than 5"); + THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5"); if (srcRank != dstRank) - THROW_ERROR("has incorrect 
number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); const size_t nSpatialDims = srcRank - 2; attrs.blockStep = static_cast(std::pow(attrs.blockSize, nSpatialDims)); @@ -164,11 +162,11 @@ void DepthToSpace::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); if (!dstMemPtr) - THROW_ERROR("has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (!srcMemPtr) - THROW_ERROR("has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const auto& memoryDesc = srcMemPtr->getDesc(); attrs.dataSize = memoryDesc.getPrecision().size(); @@ -305,7 +303,7 @@ void DepthToSpace::DepthToSpaceExecutor::exec(const MemoryPtr& srcMemPtr, const void DepthToSpace::execute(const dnnl::stream& strm) { if (!execPtr) { - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); } int MB = getSrcMemoryAtPort(0)->getStaticDims()[0]; diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index 873d07673c8990..ef4995a87fd492 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -12,8 +12,6 @@ #include "shape_inference/shape_inference.hpp" #include "utils/bfloat16.hpp" -#define THROW_ERROR(...) 
OPENVINO_THROW(NameFromType(getType()), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index e72901d7d43e62..f349990f56f620 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -24,8 +24,6 @@ using namespace dnnl::impl::cpu; -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -69,7 +67,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co if (one_of(op->get_input_size(), 4u, 5u) && op->get_output_size() == 1u) { compressed = true; } else if (op->get_input_size() != 3 || op->get_output_size() != 1) { - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); } const auto& dataShape = getInputShapeAtPort(GATHER_DATA); @@ -80,7 +78,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co isIdxShapeStat = idxShape.isStatic(); const auto indicesRank = idxShape.getRank(); if (dataSrcRank == 0lu || indicesRank == 0lu) - THROW_ERROR("has incorrect input parameters ranks."); + THROW_CPU_NODE_ERR("has incorrect input parameters ranks."); if (ov::is_type(op)) { batchDims = static_cast(ov::as_type_ptr(op)->get_batch_dims()); @@ -104,7 +102,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co if (batchDims < 0) batchDims += indicesRank; if (batchDims < 0 || batchDims > std::min(static_cast(dataSrcRank), static_cast(indicesRank))) - THROW_ERROR("has incorrect batch_dims ", batchDims, "!"); + THROW_CPU_NODE_ERR("has incorrect batch_dims ", batchDims, "!"); if (ov::is_type(op->get_input_node_ptr(GATHER_AXIS))) { isAxisInputConst = true; @@ -112,7 +110,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& 
co if (axis < 0) axis += dataSrcRank; if (axis < 0 || axis >= dataSrcRank || batchDims > axis) - THROW_ERROR("has incorrect input parameter axis value: ", axis); + THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis); } if (auto indices = ov::as_type(op->get_input_node_ptr(GATHER_INDICES))) { @@ -339,12 +337,12 @@ bool Gather::needPrepareParams() const { void Gather::prepareParams() { auto dataMemPtr = getSrcMemoryAtPort(GATHER_DATA); if (!dataMemPtr || !dataMemPtr->isDefined()) - THROW_ERROR(" has undefined input data memory."); + THROW_CPU_NODE_ERR("has undefined input data memory."); auto idxMemPtr = getSrcMemoryAtPort(GATHER_INDICES); if (!idxMemPtr || !idxMemPtr->isDefined()) - THROW_ERROR(" has undefined input indices memory."); + THROW_CPU_NODE_ERR("has undefined input indices memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); // short 1D vector fast execution impl (typical in shape infer subgraph) canOptimize1DCase = false; @@ -363,7 +361,7 @@ void Gather::prepareParams() { if (axis < 0) axis += dataSrcRank; if (axis < 0 || axis >= dataSrcRank || batchDims > axis) - THROW_ERROR("has incorrect input parameter axis value: ", axis); + THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis); } if (!isDataShapeStat || !isAxisInputConst) { @@ -553,7 +551,7 @@ void Gather::executeDynamicImpl(const dnnl::stream& strm) { void Gather::initShortParams(threadExecParams& p, const uint64_t start) { if (!jitKernel) - THROW_ERROR("has uninitialized kernel in function initShortParams."); + THROW_CPU_NODE_ERR("has uninitialized kernel in function initShortParams."); const uint64_t idxElPerVec = jitKernel->getIdxElPerVec(); if (afterAxisSize == 1) { // Elementwise gather. 
diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp index 7a494d184ce9c1..29bc32370d03de 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp @@ -38,19 +38,19 @@ GatherElements::GatherElements(const std::shared_ptr& op, const GraphC OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 2 || outputShapes.size() != 1) - THROW_CPU_NODE_ERR(" has invalid number of input/output edges."); + THROW_CPU_NODE_ERR("has invalid number of input/output edges."); const auto dataRank = getInputShapeAtPort(dataIndex_).getRank(); const auto indicesRank = getInputShapeAtPort(indicesIndex_).getRank(); if (dataRank != indicesRank) - THROW_CPU_NODE_ERR(" has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks."); + THROW_CPU_NODE_ERR("has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks."); auto gatherElementsOp = ov::as_type_ptr(op); auto axis = gatherElementsOp->get_axis(); if (axis < 0) axis += dataRank; if (axis < 0 || axis >= static_cast(dataRank)) - THROW_CPU_NODE_ERR(" has invalid axis attribute: ", axis); + THROW_CPU_NODE_ERR("has invalid axis attribute: ", axis); axis_ = axis; } @@ -78,12 +78,12 @@ void GatherElements::initSupportedPrimitiveDescriptors() { sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { - THROW_CPU_NODE_ERR(" has unsupported 'inputData' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'inputData' input precision: ", inDataPrecision); } ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_); if (!one_of(indicesPrecision, ov::element::i32, ov::element::i64)) { - THROW_CPU_NODE_ERR(" has unsupported 'indices' input precision: ", indicesPrecision); + THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision); } 
dataTypeSize_ = inDataPrecision.size(); diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp index 1124bec41632b8..8df99882adc9cf 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp @@ -14,8 +14,6 @@ #include "openvino/core/parallel.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) OPENVINO_THROW("GatherND layer with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -43,7 +41,7 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr } if (inputShapes.size() != 2 && outputShapes.size() != 1) - THROW_ERROR("has invalid number of input/output edges."); + THROW_CPU_NODE_ERR("has invalid number of input/output edges."); const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank(); const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank(); @@ -53,10 +51,10 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr } else if (auto gatherNdOp = ov::as_type_ptr(op)) { attrs.batchDims = gatherNdOp->get_batch_dims(); } else { - THROW_ERROR("has support only opset5."); + THROW_CPU_NODE_ERR("has support only opset5."); } if (attrs.batchDims >= std::min(dataInputRank, indicesInputRank)) - THROW_ERROR("has invalid batch_dims attribute: ", attrs.batchDims); + THROW_CPU_NODE_ERR("has invalid batch_dims attribute: ", attrs.batchDims); } void GatherND::initSupportedPrimitiveDescriptors() { @@ -68,7 +66,7 @@ void GatherND::initSupportedPrimitiveDescriptors() { sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { - THROW_ERROR("has unsupported 'data' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision); } attrs.dataSize = inDataPrecision.size(); @@ -80,7 +78,7 @@ void 
GatherND::initSupportedPrimitiveDescriptors() { ov::element::u16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported 'indices' input precision: ", indicesPrecision); + THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision); } addSupportedPrimDesc({{LayoutType::ncsp, inDataPrecision}, {LayoutType::ncsp, ov::element::i32}}, @@ -93,13 +91,13 @@ void GatherND::prepareParams() { auto idxMemPtr = getSrcMemoryAtPort(GATHERND_INDEXES); auto dstMemPtr = getDstMemoryAtPort(0); if (!srcMemPtr || !srcMemPtr->isDefined()) - THROW_ERROR(" has undefined input memory of 'data'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'data'."); if (!idxMemPtr || !idxMemPtr->isDefined()) - THROW_ERROR(" has undefined input memory of 'indices'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'indices'."); if (!dstMemPtr || !dstMemPtr->isDefined()) - THROW_ERROR(" has undefined output memory."); + THROW_CPU_NODE_ERR("has undefined output memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); attrs.srcDims = srcMemPtr->getStaticDims(); attrs.srcStrides = srcMemPtr->getDescWithType()->getStrides(); @@ -141,7 +139,7 @@ GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs) void GatherND::execute(const dnnl::stream& strm) { if (!execPtr) - THROW_ERROR("has not compiled executor."); + THROW_CPU_NODE_ERR("has not compiled executor."); execPtr->exec(getSrcMemoryAtPort(GATHERND_DATA), getSrcMemoryAtPort(GATHERND_INDEXES), getDstMemoryAtPort(0)); } diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp index 0e25c64acfe534..7a8eb1088453c7 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp @@ -14,8 +14,6 @@ using namespace ov::intel_cpu::node; using 
namespace dnnl::impl::cpu; #endif // OPENVINO_ARCH_X86_64 -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - bool GridSample::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (!ov::is_type(op)) { diff --git a/src/plugins/intel_cpu/src/nodes/interaction.cpp b/src/plugins/intel_cpu/src/nodes/interaction.cpp index 13c846da6e2bea..d1ffcb3546754a 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.cpp +++ b/src/plugins/intel_cpu/src/nodes/interaction.cpp @@ -28,8 +28,6 @@ namespace ov { namespace intel_cpu { namespace node { -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - #if defined(OPENVINO_ARCH_X86_64) template @@ -346,7 +344,7 @@ void Interaction::prepareParams() { moveFeatureKernel->create_ker(); moveInteractKernel->create_ker(); } else { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } #ifdef CPU_DEBUG_CAPS if (prim) { diff --git a/src/plugins/intel_cpu/src/nodes/mha.cpp b/src/plugins/intel_cpu/src/nodes/mha.cpp index e1f4a774011dc9..43867cd99b2b01 100644 --- a/src/plugins/intel_cpu/src/nodes/mha.cpp +++ b/src/plugins/intel_cpu/src/nodes/mha.cpp @@ -25,8 +25,6 @@ using namespace dnnl::impl::cpu::x64; using namespace dnnl::impl::cpu::x64::matmul; using namespace Xbyak; -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -879,7 +877,7 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne ctx.K, &strides); if (status != dnnl_success) { - THROW_ERROR("cannot be executed due to invalid brgconv params"); + THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params"); } ctx.is_with_amx = use_amx; @@ -893,11 +891,11 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne brgemm_kernel_t* brgKernel_ = nullptr; status = brgemm_kernel_create(&brgKernel_, brgDesc); if (status != dnnl_success) { - THROW_ERROR("cannot be executed due to invalid brgconv params"); + THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params"); } brgKernel.reset(brgKernel_); #else - THROW_ERROR("is not supported on non-x86_64"); + THROW_CPU_NODE_ERR("is not supported on non-x86_64"); #endif // OPENVINO_ARCH_X86_64 } @@ -972,7 +970,7 @@ void MHA::init_brgemm_copy_b(std::unique_ptr& brgCop #if defined(OPENVINO_ARCH_X86_64) auto ret = create_brgemm_matmul_copy_b(brgCopyKernel, &brgCopyKernelConf); if (ret != dnnl::impl::status_t::dnnl_success) - THROW_ERROR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret); + THROW_CPU_NODE_ERR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret); #endif // OPENVINO_ARCH_X86_64 } @@ -1204,7 +1202,7 @@ void MHA::prepareParams() { } #endif // OPENVINO_ARCH_X86_64 if (!mulAddSoftmaxKernel) { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1228,7 +1226,7 @@ void MHA::prepareParams() { } #endif // OPENVINO_ARCH_X86_64 if (!convertReorderKernel) { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1255,7 +1253,7 @@ void MHA::prepareParams() { #endif // OPENVINO_ARCH_X86_64 if (!convertTransposeKernel) { - THROW_ERROR("cannot create jit eltwise 
kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1312,7 +1310,7 @@ void MHA::callBrgemm(brgemmCtx& ctx, brgemm_kernel_execute(brgKernel.get(), 1, pin0, pin1, nullptr, pout, wsp); } #else - THROW_ERROR("is not supported on non-x64 platforms"); + THROW_CPU_NODE_ERR("is not supported on non-x64 platforms"); #endif // OPENVINO_ARCH_X86_64 } @@ -1547,7 +1545,7 @@ void MHA::execute(const dnnl::stream& strm) { } else if (inputPrecisions[1] == ov::element::i8) { mhaImpl(); } else { - THROW_ERROR("doesn't support provided input precisions"); + THROW_CPU_NODE_ERR("doesn't support provided input precisions"); } } diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index e416781cdf69a2..13322254ab4ee1 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -35,7 +35,6 @@ using namespace Xbyak; #if defined(OPENVINO_ARCH_X86_64) # define GET_OFF(field) offsetof(jit_normalize_call_args, field) #endif -#define THROW_ERROR(...) OPENVINO_THROW("NormalizeL2 layer with name '", getName(), "' ", __VA_ARGS__) namespace ov { namespace intel_cpu { @@ -782,10 +781,10 @@ NormalizeL2::NormalizeL2(const std::shared_ptr& op, const GraphContext } if (inputShapes.size() != 2 || outputShapes.size() != 1) - THROW_ERROR(" has incorrect number of input/output edges"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges"); if (getInputShapeAtPort(DATA).getRank() > 4 || getInputShapeAtPort(DATA).getRank() < 2) { - THROW_ERROR("has invalid input shape. Normalize supports from 2D to 4D blobs."); + THROW_CPU_NODE_ERR("has invalid input shape. 
Normalize supports from 2D to 4D blobs."); } auto norm = ov::as_type_ptr(op); @@ -825,7 +824,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { ov::element::f16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported input precision: ", inputPrecision); + THROW_CPU_NODE_ERR("has unsupported input precision: ", inputPrecision); } if (!one_of(outputPrecision, ov::element::f32, @@ -833,7 +832,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { ov::element::f16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported output precision: ", outputPrecision); + THROW_CPU_NODE_ERR("has unsupported output precision: ", outputPrecision); } attrs.input_prec = inputPrecision; @@ -914,11 +913,11 @@ void NormalizeL2::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(DATA); auto srcMemPtr = getSrcMemoryAtPort(DATA); if (!dstMemPtr) - THROW_ERROR("can't get destination memory"); + THROW_CPU_NODE_ERR("can't get destination memory"); if (!srcMemPtr) - THROW_ERROR("can't get input memory"); + THROW_CPU_NODE_ERR("can't get input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has nullable preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has nullable preferable primitive descriptor"); if (!attrs.cornerCase) { if (srcMemPtr->getDesc().hasLayoutType(LayoutType::ncsp)) { @@ -930,7 +929,7 @@ void NormalizeL2::createPrimitive() { } else if (srcMemPtr->getDesc().hasLayoutType(LayoutType::nspc)) { attrs.layout = LayoutType::nspc; } else { - THROW_ERROR("has selected layout which is not supported"); + THROW_CPU_NODE_ERR("has selected layout which is not supported"); } } @@ -972,7 +971,7 @@ void NormalizeL2::executeDynamicImpl(const dnnl::stream& strm) { void NormalizeL2::execute(const dnnl::stream& strm) { if (!execPtr) - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); const uint8_t* src_ptr = getSrcDataAtPortAs(DATA); uint8_t* dst_ptr = 
getDstDataAtPortAs(DATA); diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp index d1a2acd05d1a7a..3bf6a47797e044 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp @@ -14,8 +14,6 @@ #include "openvino/opsets/opset1.hpp" #include "shape_inference/custom/priorbox.hpp" -#define THROW_ERROR(...) OPENVINO_THROW("PriorBox layer with name '", getName(), "': ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -69,7 +67,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr exist = false; if (std::fabs(aspect_ratio_item) < std::numeric_limits::epsilon()) { - THROW_ERROR("Aspect_ratio param can't be equal to zero"); + THROW_CPU_NODE_ERR("has aspect_ratio param can't be equal to zero"); } for (float _aspect_ratio : aspect_ratio) { @@ -94,7 +92,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr if (attrs.variance.size() == 1 || attrs.variance.size() == 4) { for (float i : attrs.variance) { if (i < 0) { - THROW_ERROR("Variance must be > 0."); + THROW_CPU_NODE_ERR("variance must be > 0."); } variance.push_back(i); @@ -102,7 +100,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr } else if (attrs.variance.empty()) { variance.push_back(0.1f); } else { - THROW_ERROR("Wrong number of variance values. Not less than 1 and more than 4 variance values."); + THROW_CPU_NODE_ERR("has wrong number of variance values. Not less than 1 and more than 4 variance values."); } } diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp index 859944161d48b9..0384dabc63d73c 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp @@ -15,8 +15,6 @@ #include "openvino/util/pp.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) 
OPENVINO_THROW("SpaceToDepth layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl; using namespace dnnl::impl; @@ -76,11 +74,11 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 1 || outputShapes.size() != 1) - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto spaceToDepth = ov::as_type_ptr(op); if (!spaceToDepth) - THROW_ERROR("supports only opset1"); + THROW_CPU_NODE_ERR("supports only opset1"); const auto modeNgraph = spaceToDepth->get_mode(); if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST) { @@ -88,21 +86,21 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte } else if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST) { attrs.mode = Mode::DEPTH_FIRST; } else { - THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph)); + THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph)); } attrs.blockSize = spaceToDepth->get_block_size(); if (attrs.blockSize == 0) - THROW_ERROR("has incorrect block_size parameter is zero!"); + THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!"); const size_t srcRank = getInputShapeAtPort(0).getRank(); const size_t dstRank = getOutputShapeAtPort(0).getRank(); if (srcRank < 3) - THROW_ERROR("has incorrect number of input dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input dimensions"); if (srcRank > 5) - THROW_ERROR("doesn't support dimensions with rank greater than 5"); + THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5"); if (srcRank != dstRank) - THROW_ERROR("has incorrect number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); attrs.nSpatialDims = srcRank - 2; attrs.blockStep = static_cast(std::pow(attrs.blockSize, attrs.nSpatialDims)); } @@ 
-164,11 +162,11 @@ void SpaceToDepth::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); if (!dstMemPtr) - THROW_ERROR("has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (!srcMemPtr) - THROW_ERROR("has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const auto& memoryDesc = srcMemPtr->getDesc(); attrs.dataSize = memoryDesc.getPrecision().size(); @@ -301,7 +299,7 @@ void SpaceToDepth::SpaceToDepthExecutor::exec(const uint8_t* srcData, uint8_t* d void SpaceToDepth::execute(const dnnl::stream& strm) { if (!execPtr) { - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); } const uint8_t* srcData = getSrcDataAtPortAs(0); uint8_t* dstData = getDstDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/split.cpp b/src/plugins/intel_cpu/src/nodes/split.cpp index 59ab2776ba884b..af8295cbe98a9e 100644 --- a/src/plugins/intel_cpu/src/nodes/split.cpp +++ b/src/plugins/intel_cpu/src/nodes/split.cpp @@ -19,8 +19,6 @@ #include "utils/general_utils.h" #include "utils/ngraph_utils.hpp" -#define THROW_ERROR(...) 
OPENVINO_THROW("Split layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl; namespace ov { @@ -74,7 +72,7 @@ Split::Split(const std::shared_ptr& op, const GraphContext::CPtr& cont axis += inRank; } if (axis >= static_cast(inRank)) { - THROW_ERROR("Split node with name '", op->get_friendly_name(), "' has invalid value of axis parameter: ", axis); + THROW_CPU_NODE_ERR("has invalid value of axis parameter: ", axis); } this->axis = axis; } @@ -92,14 +90,14 @@ void Split::initSupportedPrimitiveDescriptors() { for (size_t i = 0; i < outputShapes.size(); i++) { const auto& o_Dims = outputShapes[i].getDims(); if (dstFirstDims.size() != o_Dims.size()) { - THROW_ERROR("only supports output blobs with equal number of dimensions"); + THROW_CPU_NODE_ERR("only supports output blobs with equal number of dimensions"); } for (size_t j = 0; j < dstFirstDims.size(); j++) { if (j == axis) continue; if (!dimsEqualWeak(o_Dims[j], dstFirstDims[j])) - THROW_ERROR("has incorrect output dimensions"); + THROW_CPU_NODE_ERR("has incorrect output dimensions"); } } @@ -256,7 +254,7 @@ void Split::createPrimitive() { void Split::prepareParams() { const auto& srcMemPtr = getSrcMemoryAtPort(0); if (!srcMemPtr || !srcMemPtr->isDefined()) { - THROW_ERROR("has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); } if (!constSplitLengths) { @@ -271,7 +269,7 @@ void Split::prepareParams() { for (size_t port = 0; port < outputShapes.size(); ++port) { const auto& outMemPtr = this->getDstMemoryAtPort(port); if (!outMemPtr || !outMemPtr->isDefined()) { - THROW_ERROR("has undefined destination memory"); + THROW_CPU_NODE_ERR("has undefined destination memory"); } if (outMemPtr->getShape().hasZeroDims()) { @@ -301,7 +299,7 @@ void Split::execute(const dnnl::stream& strm) { } if (dstMemPtrs.empty()) - THROW_ERROR("Output data pointers have not been initialized."); + THROW_CPU_NODE_ERR("Output data pointers have not been initialized."); const auto& srcMem = 
getParentEdgeAt(0)->getMemory(); @@ -323,7 +321,7 @@ void Split::initOptimalPrimitiveDescriptor() { Node::initOptimalPrimitiveDescriptor(); auto selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) - THROW_ERROR("Preferable primitive descriptor is not set."); + THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set."); auto config = selected_pd->getConfig(); canUseOptimizedNspc2Ncsp = false; @@ -487,7 +485,7 @@ std::vector Split::getRawDstMemPtrs() const { for (size_t i = 0; i < dstMemPtrs.size(); ++i) { result[i] = dstMemPtrs[i].second->getDataAs(); if (!result[i]) { - THROW_ERROR("can't get child edge indx ", dstMemPtrs[i].first, " data."); + THROW_CPU_NODE_ERR("can't get child edge indx ", dstMemPtrs[i].first, " data."); } } return result; diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp index fbd6361eca53fc..cffde3a81d23dd 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp @@ -25,8 +25,6 @@ namespace ov { namespace intel_cpu { namespace node { -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " layer with name '", getName(), "' ", __VA_ARGS__) - static NodeConfig make_plain_config(const std::shared_ptr& op) { NodeConfig config; @@ -435,7 +433,7 @@ TensorIterator::TensorIterator(const std::shared_ptr& op, const GraphC void TensorIterator::getSupportedDescriptors() { auto tiOp = ov::as_type_ptr(ngraphOp); if (!tiOp) { - THROW_ERROR("cannot be cast to ov::op::util::SubGraphOp"); + THROW_CPU_NODE_ERR("cannot be cast to ov::op::util::SubGraphOp"); } const std::shared_ptr body = tiOp->get_function(); sub_graph.CreateGraph(body, context); @@ -519,7 +517,7 @@ void TensorIterator::getSupportedDescriptors() { -1, 1}); } else { - THROW_ERROR("has incorrect type of the input description."); + THROW_CPU_NODE_ERR("has incorrect type of the input description."); } } @@ -537,7 +535,7 @@ void TensorIterator::getSupportedDescriptors() { } else if (auto ti = ov::as_type_ptr(ngraphOp)) { algorithm = Algorithm::TensorIteratorCommon; } else { - THROW_ERROR("isn't supported!"); + THROW_CPU_NODE_ERR("isn't supported!"); } } @@ -894,11 +892,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto getNumIterations = [this](const PortMap& rule, const std::vector& dimensions) -> int { const auto axis = rule.axis; if (axis < 0 || static_cast(axis) >= dimensions.size()) { - THROW_ERROR(": Invalid \"axis\" value in an iteration component: ", - rule.axis, - ", dimensions number = ", - dimensions.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"axis\" value in an iteration component: ", + rule.axis, + ", dimensions number = ", + dimensions.size(), + " (out of range)"); } const auto space = dimensions[axis]; const int start = static_cast((rule.start < 0 ? 
(space + 1) : 0) + rule.start); @@ -906,7 +904,9 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto stride = rule.stride; if (stride == 0) { - THROW_ERROR(": Invalid \"stride\" value in an iteration component: ", rule.stride, " (infinite loop)"); + THROW_CPU_NODE_ERR(": Invalid \"stride\" value in an iteration component: ", + rule.stride, + " (infinite loop)"); } const auto step = std::abs(stride); @@ -914,21 +914,21 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto dst = stride < 0 ? start : end; const auto length = dst - src; if (src < 0 || src >= dst || dst > static_cast(space) || length < step) { - THROW_ERROR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component", - ": \"start\" = ", - rule.start, - ", \"stride\" = ", - rule.stride, - ", \"end\" = ", - rule.end); + THROW_CPU_NODE_ERR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component", + ": \"start\" = ", + rule.start, + ", \"stride\" = ", + rule.stride, + ", \"end\" = ", + rule.end); } if (length % step != 0) { - THROW_ERROR(": Each iteration must be the same size: length (", - length, - ") is not divisible by step (", - step, - ")"); + THROW_CPU_NODE_ERR(": Each iteration must be the same size: length (", + length, + ") is not divisible by step (", + step, + ")"); } return static_cast(length / step); @@ -943,11 +943,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, } if (rule.from < 0 || rule.from >= static_cast(inputShapes.size())) { - THROW_ERROR(": Invalid \"from\" value: \"from\" = ", - rule.from, - " inputs number = ", - inputShapes.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ", + rule.from, + " inputs number = ", + inputShapes.size(), + " (out of range)"); } const auto currentNumIterations = getNumIterations(rule, dims); @@ -955,10 +955,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, isDefault = false; 
numIterations = currentNumIterations; } else if (numIterations != currentNumIterations) { - THROW_ERROR(": There are at least two different iterations numbers: ", - numIterations, - " and ", - currentNumIterations); + THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ", + numIterations, + " and ", + currentNumIterations); } } @@ -972,11 +972,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, continue; if (rule.from < 0 || rule.from >= static_cast(outputShapes.size())) { - THROW_ERROR(": Invalid \"from\" value: \"from\" = ", - rule.from, - " inputs number = ", - outputShapes.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ", + rule.from, + " inputs number = ", + outputShapes.size(), + " (out of range)"); } const auto currentNumIterations = getNumIterations(rule, dims); @@ -984,10 +984,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, isDefault = false; numIterations = currentNumIterations; } else if (numIterations != currentNumIterations) { - THROW_ERROR(": There are at least two different iterations numbers: ", - numIterations, - " and ", - currentNumIterations); + THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ", + numIterations, + " and ", + currentNumIterations); } } diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp index 391e1967a8c682..5a5888090ef6ee 100644 --- a/src/plugins/intel_cpu/src/nodes/unique.cpp +++ b/src/plugins/intel_cpu/src/nodes/unique.cpp @@ -14,8 +14,6 @@ using namespace ov::intel_cpu; using namespace ov::intel_cpu::node; -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - bool Unique::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (!ov::is_type(op)) { @@ -41,7 +39,7 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co } if (!one_of(op->get_input_size(), 1u, 2u) || op->get_output_size() != 4) - THROW_ERROR("has incorrect number of input/output edges."); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges."); for (int i = 0; i < 4; i++) { definedOutputs[i] = !op->get_output_target_inputs(i).empty(); @@ -55,8 +53,8 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co axis += op->get_input_partial_shape(IN_DATA).rank().get_length(); } if (axis < 0 || axis >= op->get_input_partial_shape(IN_DATA).rank().get_length()) { - THROW_ERROR("has invalid axis value: ", - ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]); + THROW_CPU_NODE_ERR("has invalid axis value: ", + ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]); } } else { flattened = true; @@ -93,18 +91,18 @@ void Unique::createPrimitive() { void Unique::prepareParams() { auto dataMemPtr = getSrcMemoryAtPort(IN_DATA); if (!dataMemPtr) { - THROW_ERROR(" has null input data memory."); + THROW_CPU_NODE_ERR("has null input data memory."); } for (int i = 0; i < 4; i++) { if (definedOutputs[i]) { auto dstMemPtr = getDstMemoryAtPort(i); if (!dstMemPtr) { - THROW_ERROR(" has null output memory at port ", i); + THROW_CPU_NODE_ERR("has null output memory at port ", i); } } } if (getSelectedPrimitiveDescriptor() == nullptr) { - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); } size_t srcLen = 1; From f46e3e9d143a18316e14f6d632fde318e329607f Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Mon, 20 Jan 2025 20:15:57 +0800 Subject: [PATCH 06/35] [Hetro][Func Test] only the nightly tests can use hw plugin 
(#28545) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* Signed-off-by: Zhai, Xuejun --- .../behavior/ov_plugin/core_threading_tests.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp index 39dc277f25a11e..b0152a06b8ab0f 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp @@ -7,7 +7,7 @@ namespace { const Params params[] = { std::tuple{ov::test::utils::DEVICE_HETERO, - {{ov::device::priorities.name(), ov::test::utils::DEVICE_CPU}}}, + {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}}}, }; } // namespace @@ -19,4 +19,4 @@ INSTANTIATE_TEST_SUITE_P(nightly_HETERO, INSTANTIATE_TEST_SUITE_P(HETERO_Streams, CoreThreadingTestsWithIter, testing::Combine(testing::ValuesIn(params), testing::Values(4), testing::Values(50)), - CoreThreadingTestsWithIter::getTestCaseName); \ No newline at end of file + CoreThreadingTestsWithIter::getTestCaseName); From 96c22330d5aa953752c22942a00e3032e4b1c9f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:24:33 +0000 Subject: [PATCH 07/35] Bump pytest-dependency from 0.5.1 to 0.6.0 in /tests (#28549) Bumps [pytest-dependency](https://github.com/RKrahl/pytest-dependency) from 0.5.1 to 0.6.0.
Changelog

Sourced from pytest-dependency's changelog.

0.6.0 (2023-12-31)


Documentation
-------------
  • [#39](https://github.com/RKrahl/pytest-dependency/issues/39), [#41](https://github.com/RKrahl/pytest-dependency/issues/41), [#59](https://github.com/RKrahl/pytest-dependency/issues/59)_: Review documentation

Incompatible changes

  • Drop support for Python 2.

Bug fixes and minor changes

  • [#40](https://github.com/RKrahl/pytest-dependency/issues/40)_: add logging.
  • [#50](https://github.com/RKrahl/pytest-dependency/issues/50), [#51](https://github.com/RKrahl/pytest-dependency/issues/51): test suite incompatibility with pytest 6.2.0.
  • [#58](https://github.com/RKrahl/pytest-dependency/issues/58)_: declare the type of automark_dependency ini-option correctly as bool.

Internal

  • [#75](https://github.com/RKrahl/pytest-dependency/issues/75)_: review build tool chain.

.. _#39: RKrahl/pytest-dependency#39 .. _#40: RKrahl/pytest-dependency#40 .. _#41: RKrahl/pytest-dependency#41 .. _#50: RKrahl/pytest-dependency#50 .. _#51: RKrahl/pytest-dependency#51 .. _#58: RKrahl/pytest-dependency#58 .. _#59: RKrahl/pytest-dependency#59 .. _#75: RKrahl/pytest-dependency#75

Commits
  • 2cae589 Merge branch 'develop'
  • def647e Prepare release 0.6.0
  • 2baac9b Merge branch 'doc' into develop
  • 38baf8c Update changelog
  • e2edf54 Explicitely set language to 'en'
  • f11cf56 Rewrite introduction to the debugging guide
  • 346a344 Move the changelog to the end, after the API reference
  • 463227e Review README and bump copyright year
  • eb48f32 Fixup 695ea27: trailing whitespace
  • 695ea27 Update install instructions
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-dependency&package-manager=pip&previous-version=0.5.1&new-version=0.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index a806b7dfb47c18..45aac9051f2fd2 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -17,7 +17,7 @@ paddlepaddle==2.6.2 protobuf>=3.18.1,<6.0.0 py>=1.9.0 pytest>=5.0,<8.4 -pytest-dependency==0.5.1 +pytest-dependency==0.6.0 pytest-html==4.1.1 pytest-timeout==2.3.1 kornia==0.8.0 From 0fce5f3a17fc0d782e6468d5e048d6c449caa453 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 20 Jan 2025 13:34:55 +0100 Subject: [PATCH 08/35] [cpu] Remove custom shape inference factories (#27924) ### Details: - Remove custom shape inference factories CPU nodes. ### Related PR - #27770 ### Tickets: - CVS-118704 --------- Signed-off-by: Raasz, Pawel Co-authored-by: Michal Lukaszewski Co-authored-by: Maksim Kutakov --- src/frontends/tensorflow/src/frontend.cpp | 11 +++-- src/plugins/intel_cpu/src/nodes/deconv.cpp | 37 ++++++++++++++--- src/plugins/intel_cpu/src/nodes/eye.cpp | 16 +------- src/plugins/intel_cpu/src/nodes/reference.cpp | 30 +++++++------- src/plugins/intel_cpu/src/nodes/reference.h | 1 + .../src/shape_inference/shape_inference.cpp | 41 ++++--------------- .../src/shape_inference/shape_inference.hpp | 1 - 7 files changed, 61 insertions(+), 76 deletions(-) diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp index 006a4e22e06304..e4e35c42b08b35 100644 --- a/src/frontends/tensorflow/src/frontend.cpp +++ b/src/frontends/tensorflow/src/frontend.cpp @@ -466,12 +466,11 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr // recommend to use openvino-tokenizers if some unconverted operations from tokenizers are met if (unsupported_ops_from_tokenizers.size() > 0) { - exception_message - << "\nEncountered unconverted operation(s) for which 
openvino-tokenizers package " - "provides conversion extension(s): " - << unsupported_ops_from_tokenizers - << ". Install OpenVINO Tokenizers, refer to the documentation: " - "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; + exception_message << "\nEncountered unconverted operation(s) for which openvino-tokenizers package " + "provides conversion extension(s): " + << unsupported_ops_from_tokenizers + << ". Install OpenVINO Tokenizers, refer to the documentation: " + "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; } } diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index 886497bd57cc29..4090244a17ec32 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -125,16 +125,43 @@ bool DeconvKey::operator==(const DeconvKey& rhs) const { * input. Since in case it exists, plugin should pass the input data to the shape inference function. * */ -class DeconfolutionShapeInferFactory : public ShapeInferFactory { +class DeconvolutionShapeInferFactory : public ShapeInferFactory { public: - DeconfolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} + DeconvolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override { - const auto port_mask = (m_op->get_input_size() > 2) ? PortMask(2) : EMPTY_PORT_MASK; - return make_shape_inference(m_op, port_mask); + return std::make_shared(m_op); } private: + class DeconvolutionShapeInfer : public IShapeInfer { + public: + DeconvolutionShapeInfer(const std::shared_ptr& op) + : m_shape_infer(make_shape_inference(op)), + m_port_mask((op->get_input_size() > 2) ? 
PortMask(2) : EMPTY_PORT_MASK) {} + + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + return m_shape_infer->infer(input_shapes, data_dependency); + } + + const ov::CoordinateDiff& get_pads_begin() override { + return m_shape_infer->get_pads_begin(); + } + + const ov::CoordinateDiff& get_pads_end() override { + return m_shape_infer->get_pads_end(); + } + + port_mask_t get_port_mask() const override { + return m_port_mask; + }; + + private: + ShapeInferPtr m_shape_infer; + const port_mask_t m_port_mask; + }; + std::shared_ptr m_op; }; } // namespace @@ -165,7 +192,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr& } Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr& context) - : Node(op, context, DeconfolutionShapeInferFactory(op)) { + : Node(op, context, DeconvolutionShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index ef4995a87fd492..411a77260aa7d6 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -29,22 +29,8 @@ bool Eye::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -namespace { -class EyeShapeInferFactory : public ShapeInferFactory { -public: - EyeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} - ShapeInferPtr makeShapeInfer() const override { - return (m_op->get_input_size() == 4) ? 
make_shape_inference(m_op) - : make_shape_inference(m_op, PortMask(Eye::ROWS_NUM, Eye::COLS_NUM)); - } - -private: - std::shared_ptr m_op; -}; -} // namespace - Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr& context) - : Node(op, context, EyeShapeInferFactory(op)) { + : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index c7f1bbe30ff574..3283f7a43253ab 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -12,22 +12,10 @@ namespace ov { namespace intel_cpu { -class ReferenceShapeInferFactory : public ShapeInferFactory { -public: - ReferenceShapeInferFactory(std::shared_ptr op) : m_op{std::move(op)} {} - - ShapeInferPtr makeShapeInfer() const override { - return make_shape_inference(m_op, FULL_PORT_MASK); - } - -private: - std::shared_ptr m_op; -}; - namespace node { Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, std::string errorMessage) - : Node(op, context, ReferenceShapeInferFactory(op)), + : Node(op, context, NgraphShapeInferFactory(op)), ovCoreNode(op), additionalErrorMessage(std::move(errorMessage)) { if (!op->has_evaluate()) { @@ -61,7 +49,9 @@ void Reference::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inputConfigurators, outputConfigurators, impl_desc_type::ref); } -void Reference::createPrimitive() {} +void Reference::createPrimitive() { + hasOutputShapeDataDependency = isDynamicNode() && outputShapeDataDependency(); +} void Reference::execute(const dnnl::stream& strm) { auto inputs = prepareInputs(); @@ -72,6 +62,14 @@ void Reference::execute(const dnnl::stream& strm) { } void Reference::executeDynamicImpl(const dnnl::stream& strm) { + if (!hasOutputShapeDataDependency) { + // if there is no data dependency 
for the output shape, we can execute the operation as is, similar to the + // static case, since the shapes are already calculated + execute(strm); + return; + } + + // if there is data dependency, we need to perform shape inference first auto inputs = prepareInputs(); ov::TensorVector outputs; auto result = Node::shapeInfer(); @@ -125,7 +123,9 @@ bool Reference::created() const { } bool Reference::needShapeInfer() const { - return false; + // If there is data dependency for the output shape, let's assume the node has internal dynamism (in general case), + // so we postpone the shape inference until the actual execution + return !hasOutputShapeDataDependency && Node::needShapeInfer(); } ov::TensorVector Reference::prepareInputs() const { diff --git a/src/plugins/intel_cpu/src/nodes/reference.h b/src/plugins/intel_cpu/src/nodes/reference.h index 782c55716506a8..f0a37ae6529f5f 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.h +++ b/src/plugins/intel_cpu/src/nodes/reference.h @@ -36,6 +36,7 @@ class Reference : public Node { private: const std::shared_ptr ovCoreNode; const std::string additionalErrorMessage; + bool hasOutputShapeDataDependency = false; // flag to cache the output shape data dependency check result }; } // namespace node diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index 5ba7e7173792fd..ba7832aef71fab 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -234,8 +234,7 @@ class ShapeInferFallback : public ShapeInferBase { ov::optional> infer(const std::vector& input_shapes, const ov::ITensorAccessor& tensor_accessor) override { - auto op = m_node.get(); - std::vector output_shapes; + const auto op = m_node.get(); std::shared_ptr local_op; ov::OutputVector new_inputs; @@ -252,7 +251,7 @@ class ShapeInferFallback : public ShapeInferBase { local_op = 
op->clone_with_new_inputs(new_inputs); local_op->validate_and_infer_types(); - output_shapes.resize(local_op->get_output_size()); + std::vector output_shapes(local_op->get_output_size()); for (size_t i = 0; i < output_shapes.size(); ++i) { const auto& partial_shape = local_op->get_output_partial_shape(i); @@ -265,6 +264,11 @@ class ShapeInferFallback : public ShapeInferBase { return {std::move(output_shapes)}; } + + port_mask_t get_port_mask() const override { + // For fallback return full port mask to try get data for all node's inputs + return FULL_PORT_MASK; + } }; template @@ -610,34 +614,6 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{ #undef _OV_OP_SHAPE_INFER_MASK_REG #undef _OV_OP_SHAPE_INFER_VA_REG -class ShapeInferCustomMask : public IShapeInfer { -public: - ShapeInferCustomMask(ShapeInferPtr shape_infer, port_mask_t port_mask) - : m_shape_infer{std::move(shape_infer)}, - m_port_mask{port_mask} {} - - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - return m_shape_infer->infer(input_shapes, data_dependency); - } - - const ov::CoordinateDiff& get_pads_begin() override { - return m_shape_infer->get_pads_begin(); - } - - const ov::CoordinateDiff& get_pads_end() override { - return m_shape_infer->get_pads_end(); - } - - port_mask_t get_port_mask() const override { - return m_port_mask; - } - -private: - const ShapeInferPtr m_shape_infer; - const port_mask_t m_port_mask; -}; - std::shared_ptr make_shape_inference(std::shared_ptr op) { if (auto shape_infer = IStaticShapeInferFactory::make(op->get_type_info(), op)) { return shape_infer; @@ -652,8 +628,5 @@ std::shared_ptr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask) { - return std::make_shared(make_shape_inference(std::move(op)), port_mask); -} } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp 
b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp index 21b36e76ddd9a7..cb937127b219f0 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp @@ -32,6 +32,5 @@ class IStaticShapeInfer : public IShapeInfer { }; std::shared_ptr make_shape_inference(std::shared_ptr op); -ShapeInferPtr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask); } // namespace intel_cpu } // namespace ov From 1025c76d098c435972bb42ff43d3262a0d82c7cf Mon Sep 17 00:00:00 2001 From: Michal Miotk Date: Mon, 20 Jan 2025 15:02:31 +0100 Subject: [PATCH 09/35] [GPU] added missing info about conv autopad (#28552) ### Details: - fix yolov3 dynamic inference ### Tickets: - CVS-157866 --- .../src/graph/graph_optimizer/prepare_primitive_fusing.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index 2120a1308ea290..ce5333f95a1b59 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -399,7 +399,8 @@ void prepare_primitive_fusing::fuse_bias(program &p) { desc->padding_begin, desc->padding_end, desc->grouped_weights_shape, - conv.get_output_layout().data_type); + conv.get_output_layout().data_type, + desc->auto_pad); // Copy transposed flag to new prim as convolution node might be produced by deconv -> conv replacement before this pass conv_with_bias_prim->transposed = desc->transposed; From 2999477ad77cad3de4aadb5f56996bf2f7f5dd43 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Mon, 20 Jan 2025 20:52:25 +0100 Subject: [PATCH 10/35] [GHA] Save JS artifacts (#28521) ### Details: - JS package is needed to build extensions using provider action - ### Tickets: - *ticket-id* --- 
.github/workflows/job_build_linux.yml | 9 ++++++++- .github/workflows/job_build_windows.yml | 15 ++++++++++++++- .github/workflows/job_openvino_js.yml | 9 +++++++-- .github/workflows/windows_vs2019_release.yml | 11 ++++++++--- 4 files changed, 37 insertions(+), 7 deletions(-) diff --git a/.github/workflows/job_build_linux.yml b/.github/workflows/job_build_linux.yml index c56de5872cc2df..d1dfd0504ae194 100644 --- a/.github/workflows/job_build_linux.yml +++ b/.github/workflows/job_build_linux.yml @@ -234,6 +234,11 @@ jobs: -DENABLE_WHEEL=OFF cmake --build ${BUILD_DIR} --parallel $(nproc) cmake --install ${BUILD_DIR} --prefix ${INSTALL_DIR_JS} + + - name: Pack openvino_js_package + if: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} + run: tar -cvf - * | pigz > ${BUILD_DIR}/openvino_js_package.tar.gz + working-directory: ${{ env.INSTALL_DIR_JS }} - name: Build RPM packages if: ${{ inputs.build-rpm-packages }} @@ -279,7 +284,7 @@ jobs: uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_js_package - path: ${{ env.INSTALL_DIR_JS }} + path: ${{ env.BUILD_DIR }}/openvino_js_package.tar.gz if-no-files-found: 'error' - name: Upload openvino developer package @@ -333,8 +338,10 @@ jobs: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz ${{ env.BUILD_DIR }}/deb ${{ env.MANIFEST_PATH }} + ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.tar.gz', env.BUILD_DIR) || '' }} ${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }} storage_dir: ${{ env.PRODUCT_TYPE }} storage_root: ${{ env.ARTIFACTS_SHARE }} env: STORE_WHEELS: ${{ inputs.os != 'debian_10' && inputs.arch != 'arm' }} + STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index d5d42ffcfea8d2..f0c150c4ac4db4 100644 --- a/.github/workflows/job_build_windows.yml +++ 
b/.github/workflows/job_build_windows.yml @@ -265,6 +265,17 @@ jobs: -DENABLE_WHEEL=OFF cmake --build ${{ env.BUILD_DIR }} --parallel $ENV:NUMBER_OF_PROCESSORS cmake --install ${{ env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_DIR_JS }} + + - name: Pack JS Artifacts + if: ${{ fromJSON(inputs.affected-components).JS_API }} + run: | + $file = Get-ChildItem -Path "${{ env.INSTALL_DIR_JS }}" + $compress = @{ + Path = $file + CompressionLevel = "Optimal" + DestinationPath = "${{ env.BUILD_DIR }}/openvino_js_package.zip" + } + Compress-Archive @compress # # Upload build artifacts and logs @@ -297,7 +308,7 @@ jobs: uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_js_package - path: ${{ env.INSTALL_DIR_JS }} + path: ${{ env.BUILD_DIR }}/openvino_js_package.zip if-no-files-found: 'error' - name: Store artifacts to a shared drive @@ -309,8 +320,10 @@ jobs: ${{ env.BUILD_DIR }}/openvino_package.zip ${{ env.BUILD_DIR }}/openvino_tests.zip ${{ env.MANIFEST_PATH }} + ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.zip', env.BUILD_DIR) || '' }} ${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }} storage_dir: ${{ env.PRODUCT_TYPE }} storage_root: ${{ env.ARTIFACTS_SHARE }} env: STORE_WHEELS: ${{ inputs.build-type != 'Debug' }} + STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} diff --git a/.github/workflows/job_openvino_js.yml b/.github/workflows/job_openvino_js.yml index fd04d8842daae7..dbee8511c4187b 100644 --- a/.github/workflows/job_openvino_js.yml +++ b/.github/workflows/job_openvino_js.yml @@ -45,11 +45,16 @@ jobs: echo "OPENVINO_JS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js" >> "$GITHUB_ENV" echo "OPENVINO_JS_LIBS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js/node/bin" >> "$GITHUB_ENV" - - name: Download OpenVINO JS package + - name: Download OpenVINO artifacts (JS) uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: openvino_js_package + pattern: openvino_[js]* path: ${{ env.OPENVINO_JS_LIBS_DIR }} + merge-multiple: true + + - name: Extract OpenVINO packages + run: pigz -dc openvino_js_package.tar.gz | tar -xf - -C ${OPENVINO_JS_LIBS_DIR} + working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }} - name: Setup Node ${{ env.NODE_VERSION }} if: runner.os != 'Linux' # Node is already installed in the Docker image diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 5708b529f25acc..92d826de1d8394 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -192,12 +192,17 @@ jobs: sparse-checkout: | src/bindings/js path: 'openvino' - - - name: Download OpenVINO js package + + - name: Download OpenVINO artifacts (JS) uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: openvino_js_package + pattern: openvino_[js]* path: ${{ env.OPENVINO_JS_LIBS_DIR }} + merge-multiple: true + + - name: Extract OpenVINO packages + run: Expand-Archive openvino_js_package.zip -DestinationPath . 
+ working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }} - name: Setup Node ${{ env.NODE_VERSION }} uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 From bb78f44476bb1701c4982423588f4472382dc140 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 20 Jan 2025 20:55:35 +0100 Subject: [PATCH 11/35] [LPT] Fix medium static code analyzer issues (#28483) ### Tickets: - *CVS-1521493* - *CVS-130703* - *CVS-121616* - *CVS-121618* --- .../common/fake_quantize_dequantization.hpp | 3 ++- .../quantization_granularity_attribute.hpp | 2 +- .../src/assign_and_read_value.cpp | 17 ++++------------- .../low_precision_transformations/src/clamp.cpp | 3 ++- .../src/eliminate_fake_quantize.cpp | 2 +- .../src/fake_quantize_dequantization.cpp | 3 --- .../src/markup_quantization_granularity.cpp | 6 +++--- .../src/network_helper.cpp | 1 + .../src/pull_reshape_through_dequantization.cpp | 6 +++--- .../pull_transpose_through_dequantization.cpp | 2 +- 10 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp index 1035e88ed1d0f0..0d16dbba891b61 100644 --- a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp +++ b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp @@ -50,7 +50,8 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDequantization { const std::shared_ptr& elementwise, std::shared_ptr& constant); - size_t channelDimIndex; + // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1 + size_t channelDimIndex = 1ul; Output data; std::shared_ptr convert; std::shared_ptr subtract; diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp 
b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp index e74f601f4bd4de..c43d061fb455b3 100644 --- a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp +++ b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API QuantizationGranularityAttribute : public ov::Runti }; QuantizationGranularityAttribute() : granularity(Granularity::PerChannel) {} - QuantizationGranularityAttribute(const Granularity granularity) : granularity(granularity) {} + QuantizationGranularityAttribute(const Granularity& granularity) : granularity(granularity) {} bool operator==(const QuantizationGranularityAttribute& attribute) const { return this->granularity == attribute.granularity; diff --git a/src/common/low_precision_transformations/src/assign_and_read_value.cpp b/src/common/low_precision_transformations/src/assign_and_read_value.cpp index 27b79e4d347102..e65e35890c0600 100644 --- a/src/common/low_precision_transformations/src/assign_and_read_value.cpp +++ b/src/common/low_precision_transformations/src/assign_and_read_value.cpp @@ -20,31 +20,22 @@ namespace low_precision { AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params) : LayerTransformation(params), model(model) { MATCHER_SCOPE(AssignAndReadValueTransformation); - auto assign3 = pattern::wrap_type({ pattern::wrap_type() }); - auto assign6 = pattern::wrap_type({ pattern::wrap_type() }); + auto assign_m = pattern::wrap_type({ pattern::wrap_type() }); ov::graph_rewrite_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) { - const auto& opsMap = m.get_pattern_value_map(); - auto op = m.get_match_root(); - auto assignIt = opsMap.find(assign3); - if (assignIt == opsMap.end()) { - assignIt = opsMap.find(assign6); - } - const auto assign = 
assignIt->second.get_node_shared_ptr(); + const auto assign = m.get_match_root(); // check that we have ReadValue as the first dependency if (assign->get_control_dependencies().empty()) { return false; } - if (transformation_callback(op)) { + if (transformation_callback(assign)) { return false; } return transform(*context, m); }; - auto m = std::make_shared( - std::make_shared(OutputVector{ assign3, assign6 }), - matcher_name); + auto m = std::make_shared(assign_m, matcher_name); this->register_matcher(m, callback); } diff --git a/src/common/low_precision_transformations/src/clamp.cpp b/src/common/low_precision_transformations/src/clamp.cpp index 80748f549bf1ba..89150e81470bce 100644 --- a/src/common/low_precision_transformations/src/clamp.cpp +++ b/src/common/low_precision_transformations/src/clamp.cpp @@ -72,7 +72,8 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa replace_node_update_name(newClamp, replacement); - element::Type outputClampType = dequantization.multiply ? + OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration"); + const auto outputClampType = dequantization.multiply ? 
dequantization.multiply->get_output_element_type(0) : dequantization.subtract->get_output_element_type(0); ov::pass::low_precision::NetworkHelper::setOutDataPrecision(replacement, outputClampType); diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp index cb5d9270a43768..1a09d9914de3bf 100644 --- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp @@ -51,7 +51,7 @@ bool check_interval(const std::shared_ptr& fq, const std::shared_ptr& constant, const float value, const float max_diff, - const bool exact_comparison) noexcept { + const bool exact_comparison) { bool need_to_check_intervals = false; const auto& constant_values = constant->cast_vector(); for (const auto constant_value : constant_values) { diff --git a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp index a96a5032b5fef9..7246c9869ce7d8 100644 --- a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp @@ -32,9 +32,6 @@ FakeQuantizeDequantization::FakeQuantizeDequantization( subtractConstant(subtractConstant), multiply(multiply), multiplyConstant(multiplyConstant) { - // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1 - channelDimIndex = 1ul; - const auto rank = data.get_partial_shape().rank(); if (rank.is_static()) { std::string data_src_type = data.get_node()->get_type_name(); diff --git a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp index b9d5ac2ec4dead..f59aca3498c9f0 100644 --- 
a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp +++ b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp @@ -30,7 +30,7 @@ ov::pass::low_precision::MarkupQuantizationGranularity::MarkupQuantizationGranul bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(MarkupPerTensorQuantization); auto setRestriction = [](const std::shared_ptr& node, const std::vector& restrictedPorts) { - auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity granularity){ + auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity& granularity){ auto &rt = input.get_rt_info(); rt.emplace(QuantizationGranularityAttribute::get_type_info_static(), QuantizationGranularityAttribute(granularity)); }; @@ -43,14 +43,14 @@ bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const } } else { // markup specific ports - for (const auto item : restrictedPorts) { + for (const auto& item : restrictedPorts) { Input input = node->input(item.port); createAttribute(input, item.granularity); } } }; - for (const std::shared_ptr& node : f->get_ordered_ops()) { + for (const auto& node : f->get_ordered_ops()) { if (node->get_input_size() == 0) { continue; } diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index e57fdcfb1b8e81..afb7e19c13e7ad 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp +++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -622,6 +622,7 @@ std::shared_ptr NetworkHelper::separateInStandaloneBranch(std::shared_ parent = multiply->output(0); } + OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration"); const auto originalParent = dequantization.multiply ? 
dequantization.multiply->shared_from_this() : dequantization.subtract->shared_from_this(); diff --git a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp index 157a204af3a089..6e33afc09461f2 100644 --- a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp +++ b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp @@ -101,7 +101,7 @@ std::shared_ptr moveThroughConvert(const std::shared_ptr& reshape, c void fuseConstant(const std::shared_ptr& reshape, const std::shared_ptr& constant) { ov::OutputVector result(1); - reshape->constant_fold(result, { constant, reshape->input_value(1) }); + OPENVINO_ASSERT(reshape->constant_fold(result, { constant, reshape->input_value(1) }), "Reshape constant folding failed"); const auto newConstant = result[0].get_node_shared_ptr(); replace_node(reshape, newConstant); copy_runtime_info({ constant, reshape }, newConstant); @@ -139,7 +139,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq return false; } - while (reshape != nullptr) { + do { const auto parent = reshape->get_input_node_shared_ptr(0); if (ov::is_type(parent) || ov::is_type(parent)) { reshape = pull_reshape_through_dequantization::moveThroughElementwise(reshape, parent); @@ -151,7 +151,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq } else { THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type"; } - } + } while (reshape != nullptr); return true; }; diff --git a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp index a4557288c74f23..3f3533f12a7da7 100644 --- a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp +++ 
b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp @@ -110,7 +110,7 @@ ov::pass::low_precision::PullTransposeThroughDequantization::PullTransposeThroug ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher & m) -> bool { const auto& opsMap = m.get_pattern_value_map(); - auto transpose = opsMap.find(matcherTranspose)->second.get_node()->shared_from_this(); + auto transpose = opsMap.at(matcherTranspose).get_node_shared_ptr(); while (transpose != nullptr) { const auto parent = transpose->get_input_node_shared_ptr(0); From 155f6968b00e5931506e079b17c2820d164be6f8 Mon Sep 17 00:00:00 2001 From: Ekaterina Shiryaeva Date: Mon, 20 Jan 2025 21:07:34 +0100 Subject: [PATCH 12/35] [NPUW] Fix scales processing in CWAI for nf4 (#28523) ### Tickets: - *E-149709* --- .../intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp index 93a43c9b82570a..a4a03dea982438 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp @@ -890,7 +890,8 @@ CWAI3::CWAI3(CWAI3::Results scales) { auto matched_valueA = std::static_pointer_cast(matched_nodeA); auto matched_valueC = std::static_pointer_cast(matched_nodeC); - if (ov::element::i4 == matched_valueA->get_element_type() && + if ((ov::element::i4 == matched_valueA->get_element_type() || + ov::element::nf4 == matched_valueA->get_element_type()) && (ov::element::f16 == matched_valueC->get_element_type() || ov::element::f32 == matched_valueC->get_element_type())) { LOG_DEBUG("Matched: " << matched_valueC); From 1ad48635dc3bd31407c0a6aff93fcf9aedfa266a Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Mon, 20 Jan 2025 21:40:45 +0100 Subject: [PATCH 13/35] [RTTI] Use OV dynamic cast on 
Android only (#28519) ### Details: OV dynamic casting causes issue in external software with badly formed OV RTTI definitions, so it's replaced with standard dynamic casting, except for Android. ### Tickets: - CVS-160749 --------- Signed-off-by: Tomasz Jankowski Co-authored-by: Ilya Lavrenov --- src/core/include/openvino/core/type.hpp | 12 ++++++ src/core/tests/rtti.cpp | 56 ++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/src/core/include/openvino/core/type.hpp b/src/core/include/openvino/core/type.hpp index 4877b9ce02b251..812208855fa7f3 100644 --- a/src/core/include/openvino/core/type.hpp +++ b/src/core/include/openvino/core/type.hpp @@ -77,6 +77,10 @@ struct OPENVINO_API DiscreteTypeInfo { OPENVINO_API std::ostream& operator<<(std::ostream& s, const DiscreteTypeInfo& info); +#if defined(__ANDROID__) || defined(ANDROID) +# define OPENVINO_DYNAMIC_CAST +#endif + /// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a /// Type*/shared_ptr template @@ -93,7 +97,11 @@ template typename std::enable_if(std::declval())), Type*>::value, Type*>::type as_type(Value value) { +#ifdef OPENVINO_DYNAMIC_CAST return ov::is_type(value) ? 
static_cast(value) : nullptr; +#else + return dynamic_cast(value); +#endif } namespace util { @@ -114,7 +122,11 @@ struct AsTypePtr> { /// Type, nullptr otherwise template auto as_type_ptr(const U& value) -> decltype(::ov::util::AsTypePtr::template call(value)) { +#ifdef OPENVINO_DYNAMIC_CAST return ::ov::util::AsTypePtr::template call(value); +#else + return std::dynamic_pointer_cast(value); +#endif } } // namespace ov diff --git a/src/core/tests/rtti.cpp b/src/core/tests/rtti.cpp index 1fd8787ee60f38..9cfa225f4a3010 100644 --- a/src/core/tests/rtti.cpp +++ b/src/core/tests/rtti.cpp @@ -5,10 +5,12 @@ #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" #include "openvino/op/op.hpp" +#include "openvino/pass/matcher_pass.hpp" -using namespace ov; using namespace std; +namespace ov::test { + class OpType : public ov::op::Op { public: OPENVINO_OP("OpType"); @@ -88,3 +90,55 @@ TEST(rtti, op_with_type_version_parent_old) { ASSERT_NE(type_info.parent, nullptr); ASSERT_EQ(*type_info.parent, OpType::get_type_info_static()); } + +#if !defined(__ANDROID__) && !defined(ANDROID) + +class IncompleteRtti : public pass::MatcherPass { +public: + OPENVINO_RTTI("IncompleteRtti", "rtti_test"); +}; + +class DerivedIncompleteRtti : public IncompleteRtti { +public: + OPENVINO_RTTI("DerivedIncompleteRtti", "rtti_test", IncompleteRtti); +}; + +// Assert backward compatibility of RTTI definition without parent but casted with as_type or as_type_ptr pointer work. 
+TEST(rtti, assert_casting_without_parent) { + { + IncompleteRtti incomplete; + DerivedIncompleteRtti derived; + + auto pass_A = as_type(&incomplete); + auto pass_B = as_type(&derived); + auto pass_C = as_type(&derived); + + EXPECT_NE(nullptr, pass_A); + EXPECT_NE(nullptr, pass_B); + EXPECT_NE(nullptr, pass_C); + + EXPECT_NE(nullptr, as_type(pass_A)); + EXPECT_NE(nullptr, as_type(pass_B)); + EXPECT_NE(nullptr, as_type(pass_B)); + EXPECT_NE(nullptr, as_type(pass_C)); + } + { + auto incomplete = std::make_shared(); + auto derived = std::make_shared(); + + auto pass_A = as_type_ptr(incomplete); + auto pass_B = as_type_ptr(derived); + auto pass_C = as_type_ptr(derived); + + EXPECT_NE(nullptr, pass_A); + EXPECT_NE(nullptr, pass_B); + EXPECT_NE(nullptr, pass_C); + + EXPECT_NE(nullptr, as_type_ptr(pass_A)); + EXPECT_NE(nullptr, as_type_ptr(pass_B)); + EXPECT_NE(nullptr, as_type_ptr(pass_B)); + EXPECT_NE(nullptr, as_type_ptr(pass_C)); + } +} +#endif // ANDROID +} // namespace ov::test From 08be7ae090cb1871490cd7ec521a8e80422152e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:24:34 +0000 Subject: [PATCH 14/35] Bump reviewdog/action-shellcheck from 1.27.0 to 1.29.0 (#28571) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [reviewdog/action-shellcheck](https://github.com/reviewdog/action-shellcheck) from 1.27.0 to 1.29.0.
Release notes

Sourced from reviewdog/action-shellcheck's releases.

Release v1.29.0

What's Changed

New Contributors

Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.28.0...v1.29.0

Release v1.28.0

What's Changed

New Contributors

Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.27.0...v1.28.0

Commits
  • 6e0e63d Merge pull request #70 from reviewdog/depup/reviewdog/reviewdog
  • 958d9e1 Merge pull request #71 from abitrolly/patch-1
  • 44addb0 Show shellcheck version after install
  • fff8e91 chore(deps): update reviewdog/reviewdog to 0.20.3
  • 22f96e3 Merge pull request #69 from reviewdog/add_fail_level
  • e48fb59 Add line break
  • d394b4f Add fail_level and deduplicate fail_on_error
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=reviewdog/action-shellcheck&package-manager=github_actions&previous-version=1.27.0&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/code_style.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 97b399b1abf48d..89fb4e64670d8d 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -98,7 +98,7 @@ jobs: # always provide suggestions even for skipped scripts in ov_shellcheck tagret - name: ShellCheck action if: always() - uses: reviewdog/action-shellcheck@ccaafec556ffa154f112bfcb7b9c9574190b7091 # v1.27.0 + uses: reviewdog/action-shellcheck@6e0e63d1750d02d761b3df0f2c5ba9f9ac4a9ed7 # v1.29.0 with: level: style reporter: github-pr-review From 0efe897a15ce6470b3eb78ef119b3b620966ab2f Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 20 Jan 2025 22:55:07 +0100 Subject: [PATCH 15/35] [LPT] Cleanup base LayerTransformation class from legacy TransformationContext (#28327) ### Details: - *`TransformationContext` is not used anywhere and `LayerTransformation::context` always equal to `nullptr`* - *This PR completely removes `TransformationContext`* - *Also, `LayerTransformation` class is cleaned up from legacy methods which are not used anywhere* ### Tickets: - *N\A* --- .../include/low_precision/add.hpp | 4 +- .../low_precision/assign_and_read_value.hpp | 4 +- .../include/low_precision/avg_pool.hpp | 4 +- .../include/low_precision/batch_to_space.hpp | 4 +- .../include/low_precision/broadcast.hpp | 2 +- .../include/low_precision/clamp.hpp | 4 +- .../low_precision/cleanup_transformation.hpp | 2 +- .../include/low_precision/concat.hpp | 4 +- .../include/low_precision/convert.hpp | 2 +- .../include/low_precision/convolution.hpp | 2 +- .../convolution_backprop_data.hpp | 4 +- .../include/low_precision/depth_to_space.hpp | 2 +- .../low_precision/eliminate_fake_quantize.hpp | 4 +- .../eltwise_base_transformation.hpp | 2 +- 
.../include/low_precision/fake_quantize.hpp | 9 +- .../fake_quantize_decomposition.hpp | 2 +- .../include/low_precision/fold_convert.hpp | 4 +- .../low_precision/fold_fake_quantize.hpp | 4 +- .../include/low_precision/fuse_convert.hpp | 4 +- .../fuse_elementwise_to_fake_quantize.hpp | 2 +- .../fuse_multiply_to_fake_quantize.hpp | 2 +- .../fuse_subtract_to_fake_quantize.hpp | 2 +- .../include/low_precision/gather.hpp | 4 +- .../low_precision/group_convolution.hpp | 2 +- .../include/low_precision/interpolate.hpp | 4 +- .../low_precision/layer_transformation.hpp | 67 +++----------- .../include/low_precision/mat_mul.hpp | 4 +- .../include/low_precision/max_pool.hpp | 4 +- .../low_precision/move_fake_quantize.hpp | 4 +- .../include/low_precision/multiply.hpp | 2 +- .../low_precision/multiply_partial.hpp | 4 +- .../multiply_to_group_convolution.hpp | 4 +- .../include/low_precision/mvn.hpp | 4 +- .../include/low_precision/network_helper.hpp | 1 - .../include/low_precision/normalize_l2.hpp | 4 +- .../include/low_precision/pad.hpp | 4 +- .../include/low_precision/prelu.hpp | 4 +- .../include/low_precision/recurrent_cell.hpp | 6 +- .../reduce_base_transformation.hpp | 4 +- .../include/low_precision/reduce_max.hpp | 2 +- .../include/low_precision/reduce_mean.hpp | 2 +- .../include/low_precision/reduce_min.hpp | 2 +- .../include/low_precision/reduce_sum.hpp | 2 +- .../include/low_precision/relu.hpp | 4 +- .../include/low_precision/reshape.hpp | 4 +- .../low_precision/shuffle_channels.hpp | 4 +- .../include/low_precision/slice.hpp | 4 +- .../include/low_precision/space_to_batch.hpp | 4 +- .../include/low_precision/split.hpp | 9 +- .../include/low_precision/squeeze.hpp | 4 +- .../include/low_precision/strided_slice.hpp | 4 +- .../include/low_precision/subtract.hpp | 2 +- .../low_precision/transformation_context.hpp | 39 -------- .../transparent_base_transformation.hpp | 4 +- .../include/low_precision/transpose.hpp | 4 +- .../include/low_precision/unsqueeze.hpp | 4 +- 
.../weightable_layer_transformation.hpp | 13 +-- .../low_precision_transformations/src/add.cpp | 12 +-- .../src/assign_and_read_value.cpp | 12 +-- .../src/avg_pool.cpp | 12 +-- .../src/batch_to_space.cpp | 12 +-- .../src/broadcast.cpp | 6 +- .../src/clamp.cpp | 12 +-- .../src/cleanup_transformation.cpp | 2 +- .../src/concat.cpp | 10 +-- .../src/convert.cpp | 6 +- .../src/convolution.cpp | 10 +-- .../src/convolution_backprop_data.cpp | 14 +-- .../src/depth_to_space.cpp | 6 +- .../src/eliminate_fake_quantize.cpp | 10 +-- .../src/eltwise_base_transformation.cpp | 4 +- .../src/fake_quantize.cpp | 7 +- .../src/fake_quantize_decomposition.cpp | 6 +- .../src/fold_convert.cpp | 12 +-- .../src/fold_fake_quantize.cpp | 8 +- .../src/fuse_convert.cpp | 10 +-- .../src/fuse_elementwise_to_fake_quantize.cpp | 4 +- .../src/fuse_multiply_to_fake_quantize.cpp | 8 +- .../src/fuse_subtract_to_fake_quantize.cpp | 8 +- .../src/gather.cpp | 12 +-- .../src/group_convolution.cpp | 11 +-- .../src/interpolate.cpp | 12 +-- .../src/layer_transformation.cpp | 88 ++----------------- .../src/mat_mul.cpp | 12 +-- .../src/max_pool.cpp | 12 +-- .../src/move_fake_quantize.cpp | 12 +-- .../src/multiply.cpp | 10 +-- .../src/multiply_partial.cpp | 12 +-- .../src/multiply_to_group_convolution.cpp | 10 +-- .../low_precision_transformations/src/mvn.cpp | 12 +-- .../src/normalize_l2.cpp | 12 +-- .../low_precision_transformations/src/pad.cpp | 12 +-- .../src/prelu.cpp | 12 +-- .../src/recurrent_cell.cpp | 18 ++-- .../src/reduce_base_transformation.cpp | 8 +- .../src/reduce_max.cpp | 6 +- .../src/reduce_mean.cpp | 6 +- .../src/reduce_min.cpp | 6 +- .../src/reduce_sum.cpp | 6 +- .../src/relu.cpp | 12 +-- .../src/reshape.cpp | 12 +-- .../src/shuffle_channels.cpp | 12 +-- .../src/slice.cpp | 12 +-- .../src/space_to_batch.cpp | 12 +-- .../src/split.cpp | 14 ++- .../src/squeeze.cpp | 12 +-- .../src/strided_slice.cpp | 10 +-- .../src/subtract.cpp | 6 +- .../src/transformation_context.cpp | 18 ---- 
.../src/transparent_base_transformation.cpp | 8 +- .../src/transpose.cpp | 12 +-- .../src/unsqueeze.cpp | 12 +-- .../src/variadic_split.cpp | 2 +- .../src/weightable_layer_transformation.cpp | 11 +-- .../tests/layer_transformation.hpp | 1 - .../simple_low_precision_transformer.cpp | 1 - 116 files changed, 382 insertions(+), 575 deletions(-) delete mode 100644 src/common/low_precision_transformations/include/low_precision/transformation_context.hpp delete mode 100644 src/common/low_precision_transformations/src/transformation_context.cpp diff --git a/src/common/low_precision_transformations/include/low_precision/add.hpp b/src/common/low_precision_transformations/include/low_precision/add.hpp index 2c97087696d2f7..55efbf940e94b7 100644 --- a/src/common/low_precision_transformations/include/low_precision/add.hpp +++ b/src/common/low_precision_transformations/include/low_precision/add.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API AddTransformation : public EltwiseBaseTransformatio public: OPENVINO_RTTI("AddTransformation", "0", EltwiseBaseTransformation); AddTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp index edef4d63aa134a..9134293d5512dd 100644 --- a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp @@ -15,8 +15,8 @@ class LP_TRANSFORMATIONS_API AssignAndReadValueTransformation : public 
LayerTran public: OPENVINO_RTTI("AssignAndReadValueTransformation", "0", LayerTransformation); AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; private: std::shared_ptr model; diff --git a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp index ac8b91aeb57504..7dfac41beffb06 100644 --- a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API AvgPoolTransformation : public LayerTransformation public: OPENVINO_RTTI("AvgPoolTransformation", "0", LayerTransformation); AvgPoolTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp index 7859a29ec3a046..b729eb1fc956d3 100644 --- a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API BatchToSpaceTransformation : public LayerTransforma public: OPENVINO_RTTI("BatchToSpaceTransformation", "0", LayerTransformation); BatchToSpaceTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp index 05f7cadb88e888..75096e322a6571 100644 --- a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp +++ b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API BroadcastTransformation : public TransparentBaseTra public: OPENVINO_RTTI("BroadcastTransformation", "0", TransparentBaseTransformation); BroadcastTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/clamp.hpp b/src/common/low_precision_transformations/include/low_precision/clamp.hpp index d79a6ad159e21b..c41d80939bca8f 100644 --- a/src/common/low_precision_transformations/include/low_precision/clamp.hpp +++ b/src/common/low_precision_transformations/include/low_precision/clamp.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API ClampTransformation : public LayerTransformation { 
public: OPENVINO_RTTI("ClampTransformation", "0", LayerTransformation); ClampTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp index 503c519ea60f22..52de352c0bb5d9 100644 --- a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp @@ -19,7 +19,7 @@ class LP_TRANSFORMATIONS_API CleanupTransformation : public LayerTransformation CleanupTransformation(const Params& params); virtual ~CleanupTransformation() = default; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; static bool canBeTransformedStatic( const std::shared_ptr& layer, const std::vector& defaultPrecisions = precision_set::get_int8_support()); diff --git a/src/common/low_precision_transformations/include/low_precision/concat.hpp b/src/common/low_precision_transformations/include/low_precision/concat.hpp index c082e30dfa1ecd..a4511ef0f7c099 100644 --- a/src/common/low_precision_transformations/include/low_precision/concat.hpp +++ b/src/common/low_precision_transformations/include/low_precision/concat.hpp @@ -31,9 +31,9 @@ class LP_TRANSFORMATIONS_API ConcatTransformation : public LayerTransformation { public: OPENVINO_RTTI("ConcatTransformation", "0", LayerTransformation); 
ConcatTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; static bool isQuantizedStatic(const std::shared_ptr& layer); }; diff --git a/src/common/low_precision_transformations/include/low_precision/convert.hpp b/src/common/low_precision_transformations/include/low_precision/convert.hpp index 7cbd79be03bb2b..edfb58076c9d20 100644 --- a/src/common/low_precision_transformations/include/low_precision/convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convert.hpp @@ -15,7 +15,7 @@ class LP_TRANSFORMATIONS_API ConvertTransformation : public LayerTransformation public: OPENVINO_RTTI("ConvertTransformation", "0", LayerTransformation); ConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/convolution.hpp b/src/common/low_precision_transformations/include/low_precision/convolution.hpp index 428a8adf00ca17..74a61817c15b18 100644 --- a/src/common/low_precision_transformations/include/low_precision/convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convolution.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API ConvolutionTransformation : public WeightableLayerT public: OPENVINO_RTTI("ConvolutionTransformation", "0", WeightableLayerTransformation); ConvolutionTransformation(const Params& params = Params()); - bool 
transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, const std::vector&defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp index 6221a75aca5fb2..9b1e2580e59193 100644 --- a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp @@ -21,8 +21,8 @@ namespace low_precision { class LP_TRANSFORMATIONS_API ConvolutionBackpropDataTransformation : public WeightableLayerTransformation { public: ConvolutionBackpropDataTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isQuantized(const std::shared_ptr& layer, const std::vector&defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp index e86a2de2941b3c..1ace395ac8331d 100644 --- a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp +++ b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API DepthToSpaceTransformation : public TransparentBase public: OPENVINO_RTTI("DepthToSpaceTransformation", 
"0", TransparentBaseTransformation); DepthToSpaceTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp index bfaa0c3b3a2b1b..190d146a741151 100644 --- a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public Cleanu public: OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0", CleanupTransformation); EliminateFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp index 5d3361e7283eb9..9c3c5d1c3b2a5d 100644 --- a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp @@ -19,7 +19,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API EltwiseBaseTransformation : public LayerTransformation { public: 
EltwiseBaseTransformation(const Params& params) : LayerTransformation(params) {} - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; static bool isBroadcasted(const PartialShape& shape); diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp index 640814dc15cabb..8f5c67dbc0bcc4 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp @@ -23,16 +23,15 @@ class LP_TRANSFORMATIONS_API FakeQuantizeTransformation : public LayerTransforma public: OPENVINO_RTTI("FakeQuantizeTransformation", "0", LayerTransformation); FakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; static bool checkElementwise(const std::shared_ptr& eltwise); static std::shared_ptr fuseElementwise( - TransformationContext& context, - MatcherPass* matcherPass, - const std::shared_ptr& fakeQuantize, - const bool updatePrecisions); + MatcherPass* matcherPass, + const std::shared_ptr& fakeQuantize, + const bool updatePrecisions); }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp index 4d2ee8d88fadaf..8289a9ea5493f7 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDecompositionTransformation : public La public: OPENVINO_RTTI("FakeQuantizeDecompositionTransformation", "0", LayerTransformation); FakeQuantizeDecompositionTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp index bc5342b5cca4f1..d0d864835c8f98 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp @@ -25,8 +25,8 @@ class LP_TRANSFORMATIONS_API FoldConvertTransformation : public CleanupTransform public: OPENVINO_RTTI("FoldConvertTransformation", "0", CleanupTransformation); FoldConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp index c47c39a78ef081..b345ce5edbd80a 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp @@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API FoldFakeQuantizeTransformation : public LayerTransf public: OPENVINO_RTTI("FoldFakeQuantizeTransformation", "0", LayerTransformation); FoldFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; bool isConstantOutput(std::shared_ptr op) const; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp index 0ff0dc60821486..06d252961e2c26 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp @@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API FuseConvertTransformation : public CleanupTransform public: OPENVINO_RTTI("FuseConvertTransformation", "0", CleanupTransformation); FuseConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp 
b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp index ab1a589845aa10..13b73a1112f4c5 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp @@ -21,7 +21,7 @@ class LP_TRANSFORMATIONS_API FuseElementwiseToFakeQuantizeTransformation : publi FuseElementwiseToFakeQuantizeTransformation(const Params& params); virtual ~FuseElementwiseToFakeQuantizeTransformation() = default; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp index 67471a56a4a6b8..1933a07bbb881b 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp @@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public F public: OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseMultiplyToFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp index 
c5dd8994e2a512..644aafb740d8ff 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp @@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public F public: OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseSubtractToFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp index 6aebd3fb094e0a..980ec8f1e9b992 100644 --- a/src/common/low_precision_transformations/include/low_precision/gather.hpp +++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp @@ -15,9 +15,9 @@ class LP_TRANSFORMATIONS_API GatherTransformation : public LayerTransformation { public: OPENVINO_RTTI("GatherTransformation", "0", LayerTransformation); GatherTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp index 6551a929339830..f1e0bb44bddad8 
100644 --- a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API GroupConvolutionTransformation : public Convolution public: OPENVINO_RTTI("GroupConvolutionTransformation", "0", ConvolutionTransformation); GroupConvolutionTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, const std::vector& defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp index 634d422dc2b09b..d715a24cc73e5d 100644 --- a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp +++ b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp @@ -22,9 +22,9 @@ class LP_TRANSFORMATIONS_API InterpolateTransformation : public LayerTransformat public: OPENVINO_RTTI("InterpolateTransformation", "0", LayerTransformation); InterpolateTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index 
c675ade19b516b..b3c7aaa16ea33a 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -12,27 +12,15 @@ #include #include "openvino/pass/matcher_pass.hpp" -#include "transformation_context.hpp" #include "quantization_details.hpp" #include "low_precision/common/ie_lpt_exception.hpp" #include "common/fake_quantize_dequantization.hpp" /***************************************************** * Debug capability - * - ORIGINAL_MODEL_PATH : Specify with existing folder name - * to serialize original model into it (XML & BIN extensions were added) - * - TRANSFORMED_MODEL_PATH : Specify with existing folder name - * to serialize original model into it (XML & BIN extensions were added) - * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable - * dequantization layers printing - * - LPT_DISPLAY_PRECISION : Define it to to display precision info - * during low precision transformations - * + * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable dequantization info printing: scales, shifts, etc. 
*****************************************************/ -// #define LPT_ORIGINAL_MODEL_PATH "/localdisk/orig.model" -// #define LPT_TRANSFORMED_MODEL_PATH "/localdisk/transformed.model" // #define LPT_PRINT_DEQUANTIZATION_INFO -// #define LPT_DISPLAY_PRECISION namespace ov { namespace pass { @@ -301,15 +289,9 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass LayerTransformation(const Params& params); virtual ~LayerTransformation() = default; - virtual bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) = 0; + virtual bool transform(ov::pass::pattern::Matcher &m) = 0; - void setContext(TransformationContext* context) noexcept; - - void setUpdatePrecisions(const bool updatePrecisions); - - void setDefaultPrecisions(const std::vector& defaultPrecisions); - - virtual bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const; + virtual bool canBeTransformed(const std::shared_ptr& layer) const; static bool canBeTransformedStatic(const std::shared_ptr& layer, const std::vector& defaultPrecisions = precision_set::get_int8_support()); @@ -352,59 +334,32 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass const std::vector& dequantizationShifts); #endif - bool updatePrecisions; - element::Type deqPrecision; - std::vector defaultPrecisions; - bool reshapeIgnorePerTensorQuantizationCheck; - bool scalingMode; + const bool updatePrecisions; + const element::Type deqPrecision; + const std::vector defaultPrecisions; + const bool reshapeIgnorePerTensorQuantizationCheck; + const bool scalingMode; static constexpr char originalLayerPostfix[] = "_original"; - TransformationContext* context; protected: std::shared_ptr moveDequantizationAfter( - TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool updateOutputPrecision = true, const bool moveSubtract = true) const; std::shared_ptr 
moveDequantizationBefore( - TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool moveSubtract = true) const; - bool updateOutput( - TransformationContext &context, - std::shared_ptr lastNode, - std::shared_ptr originalNode) const; - - void updateOutput( - TransformationContext& context, - std::shared_ptr lastNode, - std::string originalName) const; - - void addPattern(ov::pass::GraphRewrite& pass, TransformationContext& context, std::shared_ptr patternRoot); - - //TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations - bool canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const; + bool updateOutput(const std::shared_ptr& lastNode, const std::shared_ptr& originalNode) const; - template - void addSingleNodePattern(ov::pass::GraphRewrite& pass, TransformationContext& context) const { - using namespace ov; - - auto is_op_type = [](std::shared_ptr n) { - return !!as_type_ptr(n); - }; - auto p_node = std::make_shared(element::f32, Shape{}, is_op_type); - - addPattern(pass, context, p_node); - } + // TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations + bool canBeTransformedSpatialDimension(const std::shared_ptr& layer) const; }; -typedef std::shared_ptr LayerTransformationPtr; - } // namespace low_precision } // namespace pass } // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp index 0b6115e9345b0e..910154fe0e16e0 100644 --- a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API MatMulTransformation : public LayerTransformation { public: 
OPENVINO_RTTI("MatMulTransformation", "0", LayerTransformation); MatMulTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp index 7d499c9ec254f3..f6307ed69cbfbe 100644 --- a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API MaxPoolTransformation : public LayerTransformation public: OPENVINO_RTTI("MaxPoolTransformation", "0", LayerTransformation); MaxPoolTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp index 628c88b38992e4..96a344cc4620fe 100644 --- a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp @@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API 
MoveFakeQuantize : public LayerTransformation { public: OPENVINO_RTTI("MoveFakeQuantize", "0", LayerTransformation); MoveFakeQuantize(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply.hpp b/src/common/low_precision_transformations/include/low_precision/multiply.hpp index fd51b8cac07f35..5658a5bf71bedf 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API MultiplyTransformation : public WeightableLayerTran public: OPENVINO_RTTI("MultiplyTransformation", "0", WeightableLayerTransformation); MultiplyTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; protected: size_t getInputChannels(const std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp index 3bee03cfb1a265..7f05baeaf3b12e 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp @@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseT public: OPENVINO_RTTI("MultiplyPartialTransformation", "0", EltwiseBaseTransformation); 
MultiplyPartialTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp index 45252777252fc6..3d6fc228331b13 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp @@ -27,8 +27,8 @@ class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public C const Params& params = Params(), const PrecisionsRestriction::PrecisionsByPorts& restrictions = {}); ~MultiplyToGroupConvolutionTransformation() override {} - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; bool isQuantized(const std::shared_ptr& layer, const std::vector& defaultPrecisions) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/mvn.hpp b/src/common/low_precision_transformations/include/low_precision/mvn.hpp index cd73075ad5740b..061cca9917c43f 100644 --- a/src/common/low_precision_transformations/include/low_precision/mvn.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mvn.hpp @@ -22,8 +22,8 @@ 
class LP_TRANSFORMATIONS_API MVNTransformation : public LayerTransformation { public: OPENVINO_RTTI("MVNTransformation", "0", LayerTransformation); MVNTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index 40f2973b0701df..d4a3ba6d429044 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -16,7 +16,6 @@ #include "rt_info/precisions_attribute.hpp" #include "rt_info/quantization_granularity_attribute.hpp" #include "rt_info/intervals_alignment_attribute.hpp" -#include "transformation_context.hpp" #include "quantization_details.hpp" #include "transformations/utils/utils.hpp" #include "common/fake_quantize_dequantization.hpp" diff --git a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp index c2777ca0652a07..8d16867982e5fe 100644 --- a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp +++ b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp @@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API NormalizeL2Transformation : public LayerTransformat public: OPENVINO_RTTI("NormalizeL2Transformation", "0", LayerTransformation); NormalizeL2Transformation(const Params& params = Params()); - bool 
transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/pad.hpp b/src/common/low_precision_transformations/include/low_precision/pad.hpp index 49012e19a604e8..595d7b02dbd77e 100644 --- a/src/common/low_precision_transformations/include/low_precision/pad.hpp +++ b/src/common/low_precision_transformations/include/low_precision/pad.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API PadTransformation : public LayerTransformation { public: OPENVINO_RTTI("PadTransformation", "0", LayerTransformation); PadTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/prelu.hpp b/src/common/low_precision_transformations/include/low_precision/prelu.hpp index df64677b861dbb..12af2f536b28f2 100644 --- a/src/common/low_precision_transformations/include/low_precision/prelu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/prelu.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API PReluTransformation : public LayerTransformation { public: OPENVINO_RTTI("PReluTransformation", "0", LayerTransformation); PReluTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) 
override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp index fc0401b08dd74e..9cb8ed91c4b70b 100644 --- a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp +++ b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp @@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform public: OPENVINO_RTTI("RecurrentCellTransformation", "0", LayerTransformation); RecurrentCellTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; void propagateSkipCleanupAttribute(std::shared_ptr dequantization_multiply); static std::shared_ptr wrap_fake_quantize(const std::shared_ptr parameter); @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform static std::shared_ptr wrap_dequantization(const std::shared_ptr parameter, const bool with_subtract); private: - void propagate(TransformationContext& context, const std::shared_ptr node); + void propagate(const std::shared_ptr node); }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp 
b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp index 4a42edd60d80c8..c91a8364f71c08 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp @@ -22,8 +22,8 @@ namespace low_precision { class LP_TRANSFORMATIONS_API ReduceBaseTransformation : public LayerTransformation { public: ReduceBaseTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: virtual void changeDequantizationValues( diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp index 33f685ba8ca74c..f4e824a43fdec7 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMaxTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceMaxTransformation", "0", ReduceBaseTransformation); ReduceMaxTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp 
b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp index c5f9d7d0a5e239..4a689cc4007317 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMeanTransformation : public ReduceBaseTransfo OPENVINO_RTTI("ReduceMeanTransformation", "0", ReduceBaseTransformation); ReduceMeanTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp index a229b441b8b6da..f41630989de361 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMinTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceMinTransformation", "0", ReduceBaseTransformation); ReduceMinTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp index 
aba35e0f793c83..0efd79b2472624 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceSumTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceSumTransformation", "0", ReduceBaseTransformation); ReduceSumTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: void changeDequantizationValues( diff --git a/src/common/low_precision_transformations/include/low_precision/relu.hpp b/src/common/low_precision_transformations/include/low_precision/relu.hpp index 936d4f3f8fc9a8..cc92ea72c40f49 100644 --- a/src/common/low_precision_transformations/include/low_precision/relu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/relu.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API ReluTransformation : public LayerTransformation { public: OPENVINO_RTTI("ReluTransformation", "0", LayerTransformation); ReluTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/reshape.hpp b/src/common/low_precision_transformations/include/low_precision/reshape.hpp index 43858e67cce21a..b9857f6928a6aa 100644 --- 
a/src/common/low_precision_transformations/include/low_precision/reshape.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reshape.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API ReshapeTransformation : public LayerTransformation public: OPENVINO_RTTI("ReshapeTransformation", "0", LayerTransformation); ReshapeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; static bool canBeTransformed( const ov::Shape& subtractShape, diff --git a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp index 12a60b128d707d..999c052a3108ad 100644 --- a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp +++ b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API ShuffleChannelsTransformation : public LayerTransfo public: OPENVINO_RTTI("ShuffleChannelsTransformation", "0", LayerTransformation); ShuffleChannelsTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; + bool transform(ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/slice.hpp 
b/src/common/low_precision_transformations/include/low_precision/slice.hpp index c00028f0d71169..6b73536cf06c98 100644 --- a/src/common/low_precision_transformations/include/low_precision/slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/slice.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API SliceTransformation : public LayerTransformation { public: OPENVINO_RTTI("SliceTransformation", "0", LayerTransformation); SliceTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp index e05353aaf24d1c..48cda7b4c305fb 100644 --- a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp +++ b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API SpaceToBatchTransformation : public LayerTransforma public: OPENVINO_RTTI("SpaceToBatchTransformation", "0", LayerTransformation); SpaceToBatchTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git 
a/src/common/low_precision_transformations/include/low_precision/split.hpp b/src/common/low_precision_transformations/include/low_precision/split.hpp index bd3294fba7e691..bb48628a832372 100644 --- a/src/common/low_precision_transformations/include/low_precision/split.hpp +++ b/src/common/low_precision_transformations/include/low_precision/split.hpp @@ -25,13 +25,10 @@ class LP_TRANSFORMATIONS_API SplitTransformation : public LayerTransformation { public: OPENVINO_RTTI("SplitTransformation", "0", LayerTransformation); SplitTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; + bool transform(ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; - void updateOutputs( - TransformationContext& context, - std::vector> lastNodes, - std::shared_ptr originalNode) const; + bool canBeTransformed(const std::shared_ptr& layer) const override; + void updateOutputs(std::vector> lastNodes, std::shared_ptr originalNode) const; }; } // namespace low_precision } // namespace pass diff --git a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp index b19a676c6de1fd..599b9e2f2eadb3 100644 --- a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API SqueezeTransformation : public LayerTransformation public: OPENVINO_RTTI("SqueezeTransformation", "0", LayerTransformation); SqueezeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool 
isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp index 5081903c751dfb..a5bbaf983e0b07 100644 --- a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API StridedSliceTransformation : public LayerTransforma public: OPENVINO_RTTI("StridedSliceTransformation", "0", LayerTransformation); StridedSliceTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/subtract.hpp b/src/common/low_precision_transformations/include/low_precision/subtract.hpp index a7c6bfbe888ca7..c020480c1e314d 100644 --- a/src/common/low_precision_transformations/include/low_precision/subtract.hpp +++ b/src/common/low_precision_transformations/include/low_precision/subtract.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API SubtractTransformation : public LayerTransformation public: OPENVINO_RTTI("SubtractTransformation", "0", LayerTransformation); SubtractTransformation(const Params& params); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool 
transform(ov::pass::pattern::Matcher &m) override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp b/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp deleted file mode 100644 index ec46224f8d88ae..00000000000000 --- a/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include "low_precision/quantization_details.hpp" - -namespace ov { -namespace pass { -namespace low_precision { - -/** - * @ingroup ov_transformation_common_api - * @brief TransformationContext instance is used to pass model transformation context data between transformations. - */ -class LP_TRANSFORMATIONS_API TransformationContext { -public: - TransformationContext(); - explicit TransformationContext(std::shared_ptr model); - std::shared_ptr model; - - // Used to store handled FakeQuantize operations. - // ConcatTransformation and FakeQuantizeTransformation handle FakeQuantize operations. ConcatTransformation handles FakeQuantize operation first. - // If updatePrecision transformation option is set to False then there are no FakeQuantize operation attributes to identify that the operation - // have been handled by ConcatTransformation already: - // - output precision is original (FP32), - // - intervals are changed but not equal to precision boundaries, - // - quantization level can be or can be not changed. - // To avoid FakeQuantize operation double handling by FakeQuantizeTransformation after ConcatTransformation, FakeQuantizeTransformation - // has to use this member. 
- std::unordered_set quantizedFakeQuantizeNames; -}; - -} // namespace low_precision -} // namespace pass -} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp index 792c749f525b53..c0350fe186942a 100644 --- a/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp @@ -20,8 +20,8 @@ class LP_TRANSFORMATIONS_API TransparentBaseTransformation : public LayerTransfo public: TransparentBaseTransformation(const Params& params) : LayerTransformation(params) {} ~TransparentBaseTransformation() override {}; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/transpose.hpp b/src/common/low_precision_transformations/include/low_precision/transpose.hpp index f2cedb31e2e5a7..a2fc1d4fa58598 100644 --- a/src/common/low_precision_transformations/include/low_precision/transpose.hpp +++ b/src/common/low_precision_transformations/include/low_precision/transpose.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API TransposeTransformation : public LayerTransformatio public: OPENVINO_RTTI("TransposeTransformation", "0", LayerTransformation); TransposeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool 
isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp index 98152eaf919524..41728d1acf289e 100644 --- a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API UnsqueezeTransformation : public LayerTransformatio public: OPENVINO_RTTI("UnsqueezeTransformation", "0", LayerTransformation); UnsqueezeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp index dfb75067ff426b..7b5c5b782d9a65 100644 --- a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp @@ -5,7 +5,6 @@ #pragma once #include -#include "transformation_context.hpp" #include "layer_transformation.hpp" #include "openvino/opsets/opset1.hpp" @@ -42,17 +41,11 @@ class LP_TRANSFORMATIONS_API WeightableLayerTransformation 
: public LayerTransfo WeightableLayerTransformation(const Params& params, const CanBeTransformedParams& canBeTransformedParams = {}); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; - bool canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, - const std::vector& defaultPrecisions) const; + bool canBeTransformed(const std::shared_ptr& layer) const override; + bool canConvolutionBeTransformed(const std::shared_ptr& layer, + const ov::element::TypeVector& defaultPrecisions) const; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - static bool checkPrecisionOnActivation( - const std::shared_ptr& node, - const std::vector& supportedPrecisionsOnActivations) { - return true; - } - static bool isQuantizedStatic(const std::shared_ptr& layer, const bool reshapeIsRequired, const std::vector& defaultPrecisions = precision_set::get_int8_support()); diff --git a/src/common/low_precision_transformations/src/add.cpp b/src/common/low_precision_transformations/src/add.cpp index b895d3325377de..e55577fda4ce3a 100644 --- a/src/common/low_precision_transformations/src/add.cpp +++ b/src/common/low_precision_transformations/src/add.cpp @@ -95,16 +95,16 @@ AddTransformation::AddTransformation(const Params& params) : EltwiseBaseTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool AddTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool AddTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr op = ov::as_type_ptr(m.get_match_root()); - if ((op == nullptr) || (!canBeTransformed(context, op))) { + if ((op == nullptr) || (!canBeTransformed(op))) { return false; } @@ -229,7 +229,7 @@ bool AddTransformation::transform(TransformationContext& context, 
ov::pass::patt ov::copy_runtime_info({ add, newMultiply }, newMultiply); } - updateOutput(context, newMultiply, newAddOrSubtract); + updateOutput(newMultiply, newAddOrSubtract); if (fullPathIndex != -1) { std::shared_ptr node = add; @@ -240,7 +240,7 @@ bool AddTransformation::transform(TransformationContext& context, ov::pass::patt return true; } -bool AddTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool AddTransformation::canBeTransformed(const std::shared_ptr& layer) const { const FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); if (dequantization1.multiplyHasZeroOrDenormal()) { return false; @@ -251,7 +251,7 @@ bool AddTransformation::canBeTransformed(const TransformationContext& context, s return false; } - return EltwiseBaseTransformation::canBeTransformed(context, layer); + return EltwiseBaseTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/assign_and_read_value.cpp b/src/common/low_precision_transformations/src/assign_and_read_value.cpp index e65e35890c0600..0b3f775c57ad22 100644 --- a/src/common/low_precision_transformations/src/assign_and_read_value.cpp +++ b/src/common/low_precision_transformations/src/assign_and_read_value.cpp @@ -32,15 +32,15 @@ AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::sh if (transformation_callback(assign)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(assign_m, matcher_name); this->register_matcher(m, callback); } -bool AssignAndReadValueTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool AssignAndReadValueTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } 
@@ -90,13 +90,13 @@ bool AssignAndReadValueTransformation::transform(TransformationContext& context, return true; } - FakeQuantizeTransformation::fuseElementwise(context, this, fakeQuantize, updatePrecisions); + FakeQuantizeTransformation::fuseElementwise(this, fakeQuantize, updatePrecisions); return true; } -bool AssignAndReadValueTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool AssignAndReadValueTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/avg_pool.cpp b/src/common/low_precision_transformations/src/avg_pool.cpp index c9bfa67cfc1cfb..7a38834efbdb0d 100644 --- a/src/common/low_precision_transformations/src/avg_pool.cpp +++ b/src/common/low_precision_transformations/src/avg_pool.cpp @@ -27,28 +27,28 @@ AvgPoolTransformation::AvgPoolTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool AvgPoolTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool AvgPoolTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr pooling = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); const bool updatePrecision = isPrecisionPreserved(pooling); - const auto newOperation = moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions), updatePrecision); + const auto newOperation = moveDequantizationAfter(pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions), 
updatePrecision); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool AvgPoolTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool AvgPoolTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/batch_to_space.cpp b/src/common/low_precision_transformations/src/batch_to_space.cpp index 6745227e848f7b..b231c7110d3d29 100644 --- a/src/common/low_precision_transformations/src/batch_to_space.cpp +++ b/src/common/low_precision_transformations/src/batch_to_space.cpp @@ -26,15 +26,15 @@ BatchToSpaceTransformation::BatchToSpaceTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool BatchToSpaceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool BatchToSpaceTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -46,13 +46,13 @@ bool BatchToSpaceTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool BatchToSpaceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool BatchToSpaceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = 
moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/broadcast.cpp b/src/common/low_precision_transformations/src/broadcast.cpp index 5e78ca0ef50996..e59d9de3c3e5d9 100644 --- a/src/common/low_precision_transformations/src/broadcast.cpp +++ b/src/common/low_precision_transformations/src/broadcast.cpp @@ -35,15 +35,15 @@ BroadcastTransformation::BroadcastTransformation(const Params& params) : Transpa if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool BroadcastTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool BroadcastTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/clamp.cpp b/src/common/low_precision_transformations/src/clamp.cpp index 89150e81470bce..440cee10adc3a3 100644 --- a/src/common/low_precision_transformations/src/clamp.cpp +++ b/src/common/low_precision_transformations/src/clamp.cpp @@ -24,15 +24,15 @@ ClampTransformation::ClampTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ClampTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool 
ClampTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -45,7 +45,7 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa return false; } - const auto newClamp = ov::as_type_ptr(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract)); + const auto newClamp = ov::as_type_ptr(moveDequantizationAfter(clamp, dequantization, false, moveSubtract)); std::shared_ptr replacement; { @@ -82,8 +82,8 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa return true; } -bool ClampTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ClampTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/cleanup_transformation.cpp b/src/common/low_precision_transformations/src/cleanup_transformation.cpp index 3a7cb0da5d5c36..e3c363818013b5 100644 --- a/src/common/low_precision_transformations/src/cleanup_transformation.cpp +++ b/src/common/low_precision_transformations/src/cleanup_transformation.cpp @@ -13,7 +13,7 @@ namespace low_precision { CleanupTransformation::CleanupTransformation(const Params& params) : LayerTransformation(params) { } -bool CleanupTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool CleanupTransformation::canBeTransformed(const std::shared_ptr& layer) const { return canBeTransformedStatic(layer); } diff --git a/src/common/low_precision_transformations/src/concat.cpp b/src/common/low_precision_transformations/src/concat.cpp index 05b1aa940c9191..fe39ed8d4f65b2 100644 --- a/src/common/low_precision_transformations/src/concat.cpp +++ b/src/common/low_precision_transformations/src/concat.cpp @@ -32,15 +32,15 @@ 
ConcatTransformation::ConcatTransformation(const Params& params) : LayerTransfor return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ConcatTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -193,7 +193,7 @@ bool ConcatTransformation::transform(TransformationContext& context, ov::pass::p NetworkHelper::insertDequantizationAfter(concat, lastDequantization, newConcat); NetworkHelper::copyInfo(concat, newConcat); - updateOutput(context, lastDequantization, newConcat); + updateOutput(lastDequantization, newConcat); OPENVINO_DEBUG("LPT: done: ", newConcat); return true; @@ -203,7 +203,7 @@ bool ConcatTransformation::isPrecisionPreserved(std::shared_ptr) const noe return true; } -bool ConcatTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) const { std::shared_ptr concat = ov::as_type_ptr(layer); if (concat == nullptr) { return false; diff --git a/src/common/low_precision_transformations/src/convert.cpp b/src/common/low_precision_transformations/src/convert.cpp index 4b773fc67c52c1..f1a7ae83e1dd73 100644 --- a/src/common/low_precision_transformations/src/convert.cpp +++ b/src/common/low_precision_transformations/src/convert.cpp @@ -31,20 +31,20 @@ ConvertTransformation::ConvertTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ConvertTransformation::transform(TransformationContext& context, 
ov::pass::pattern::Matcher &m) { +bool ConvertTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr convert = ov::as_type_ptr(m.get_match_root()); if (!convert) { return false; } - if (!canBeTransformed(context, convert)) { + if (!canBeTransformed(convert)) { return false; } diff --git a/src/common/low_precision_transformations/src/convolution.cpp b/src/common/low_precision_transformations/src/convolution.cpp index ebf75e450f6384..ab9ed1e133d4b3 100644 --- a/src/common/low_precision_transformations/src/convolution.cpp +++ b/src/common/low_precision_transformations/src/convolution.cpp @@ -39,7 +39,7 @@ ConvolutionTransformation::ConvolutionTransformation(const Params& params) : Wei if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -62,10 +62,10 @@ size_t ConvolutionTransformation::getInputChannels(const std::shared_ptrget_input_node_shared_ptr(1); const auto reshapeFromWeights = ov::as_type_ptr(weightInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? 
@@ -97,7 +97,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ov::pa auto newFQ = std::get<1>(res_tuple); auto dequantize = std::get<2>(res_tuple); if (newFQ != nullptr && dequantize != nullptr) - updateOutput(context, dequantize, newFQ); + updateOutput(dequantize, newFQ); if (updatePrecisions && !fqOnWeightsWasDecomposed) { return false; @@ -338,7 +338,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ov::pa const auto finalDequantization = NetworkHelper::optimizeMultipliesAfter(newMultiplyAfter); ov::copy_runtime_info({ convolution, finalDequantization }, finalDequantization); - updateOutput(context, finalDequantization, convolution); + updateOutput(finalDequantization, convolution); const auto onActiviation = convolution->get_input_node_shared_ptr(0); if (ov::is_type(onActiviation)) { diff --git a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp index 25abd4061ca4d3..5017abd3486071 100644 --- a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp +++ b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp @@ -51,7 +51,7 @@ ConvolutionBackpropDataTransformation::ConvolutionBackpropDataTransformation(con if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -74,10 +74,10 @@ size_t ConvolutionBackpropDataTransformation::getInputChannels(const std::shared return channels.get_length(); } -bool ConvolutionBackpropDataTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool ConvolutionBackpropDataTransformation::transform(ov::pass::pattern::Matcher &m) { auto convolutionBackpropData = m.get_match_root(); - if (!canBeTransformed(context, convolutionBackpropData)) { + if (!canBeTransformed(convolutionBackpropData)) { auto 
weightsInput = convolutionBackpropData->get_input_node_shared_ptr(1); std::shared_ptr reshapeFromWeights = ov::as_type_ptr(weightsInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? @@ -149,7 +149,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con auto newFQ = std::get<1>(res_tuple); auto dequantize = std::get<2>(res_tuple); if (newFQ != nullptr && dequantize != nullptr) - updateOutput(context, dequantize, newFQ); + updateOutput(dequantize, newFQ); dequantization = NetworkHelper::getDequantization(convolutionBackpropData, defaultPrecisions, 1ul); @@ -225,7 +225,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con const auto finalDequantization = NetworkHelper::optimizeMultipliesAfter(newMultiplyAfter); ov::copy_runtime_info({ convolutionBackpropData, finalDequantization }, finalDequantization); - updateOutput(context, finalDequantization, convolutionBackpropData); + updateOutput(finalDequantization, convolutionBackpropData); const auto onActiviation = convolutionBackpropData->get_input_node_shared_ptr(0); if (ov::is_type(onActiviation)) { @@ -245,8 +245,8 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con return true; } -bool ConvolutionBackpropDataTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - return canConvolutionBeTransformed(context, op, defaultPrecisions); +bool ConvolutionBackpropDataTransformation::canBeTransformed(const std::shared_ptr& op) const { + return canConvolutionBeTransformed(op, defaultPrecisions); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/depth_to_space.cpp b/src/common/low_precision_transformations/src/depth_to_space.cpp index eb518d62202840..941ac308c0b5b9 100644 --- a/src/common/low_precision_transformations/src/depth_to_space.cpp +++ b/src/common/low_precision_transformations/src/depth_to_space.cpp @@ -20,15 
+20,15 @@ DepthToSpaceTransformation::DepthToSpaceTransformation(const Params& params) : T if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool DepthToSpaceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool DepthToSpaceTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp index 1a09d9914de3bf..88e544aa238714 100644 --- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp @@ -30,16 +30,16 @@ EliminateFakeQuantizeTransformation::EliminateFakeQuantizeTransformation(const P if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; const auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool EliminateFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool EliminateFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher& m) { const auto root = m.get_match_root(); - if (!canBeTransformed(context, root)) { + if (!canBeTransformed(root)) { return false; } @@ -115,8 +115,8 @@ bool check_intervals(const std::shared_ptr& fakeQuanti } } // namespace -bool EliminateFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool EliminateFakeQuantizeTransformation::canBeTransformed(const 
std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp b/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp index f7cf7033543b40..f5594fc9fcf8cb 100644 --- a/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp @@ -33,8 +33,8 @@ bool EltwiseBaseTransformation::isBroadcasted(const PartialShape& shape) { return true; } -bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool EltwiseBaseTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/fake_quantize.cpp b/src/common/low_precision_transformations/src/fake_quantize.cpp index 8f3f8835ece8b0..4bfb24a57abd65 100644 --- a/src/common/low_precision_transformations/src/fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize.cpp @@ -28,14 +28,14 @@ FakeQuantizeTransformation::FakeQuantizeTransformation(const Params& params) : L return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto layer = ov::as_type_ptr(m.get_match_root()); if (!layer || !QuantizationDetails::outputLayoutIsSupported(layer)) { return false; @@ -44,7 +44,7 @@ bool FakeQuantizeTransformation::transform(TransformationContext& context, ov::p bool wasHandled = false; 
std::shared_ptr fakeQuantize = layer; do { - fakeQuantize = fuseElementwise(context, this, fakeQuantize, updatePrecisions); + fakeQuantize = fuseElementwise(this, fakeQuantize, updatePrecisions); wasHandled = wasHandled || (fakeQuantize != nullptr); } while (fakeQuantize != nullptr); @@ -158,7 +158,6 @@ bool FakeQuantizeTransformation::checkElementwise(const std::shared_ptr& e } std::shared_ptr FakeQuantizeTransformation::fuseElementwise( - TransformationContext& context, MatcherPass* matcherPass, const std::shared_ptr& fakeQuantize, const bool updatePrecisions) { diff --git a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp index d4345aef1aaccd..32040b06f80fba 100644 --- a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp @@ -32,7 +32,7 @@ FakeQuantizeDecompositionTransformation::FakeQuantizeDecompositionTransformation return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -276,7 +276,7 @@ std::tuple, std::shared_ptr> decomposeFakeQuantize( } // namespace } // namespace fq_decomposition -bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool FakeQuantizeDecompositionTransformation::transform(ov::pass::pattern::Matcher& m) { auto node = ov::as_type_ptr(m.get_match_root()); if (!node || !NetworkHelper::isQuantizeSupported(node)) { return false; @@ -427,7 +427,7 @@ bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& c return rewritten; } - updateOutput(context, dequantize, newFakeQuantize); + updateOutput(dequantize, newFakeQuantize); if (precisionsAttribute.value().size() != 1ul) { precisionsAttribute.value() = { dataPrecision.precision }; diff --git 
a/src/common/low_precision_transformations/src/fold_convert.cpp b/src/common/low_precision_transformations/src/fold_convert.cpp index 2308bcc936e220..e5e3a361c2f483 100644 --- a/src/common/low_precision_transformations/src/fold_convert.cpp +++ b/src/common/low_precision_transformations/src/fold_convert.cpp @@ -24,15 +24,15 @@ FoldConvertTransformation::FoldConvertTransformation(const Params& params) : Cle if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; this->register_matcher(matcher, callback); } -bool FoldConvertTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FoldConvertTransformation::transform(ov::pass::pattern::Matcher &m) { const auto subtract = m.get_match_root(); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { return false; } @@ -46,7 +46,7 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ov::pa assert(ov::is_type(resultConstant)); replace_node(convert, resultConstant); - updateOutput(context, resultConstant, convert); + updateOutput(resultConstant, convert); }; foldConvert(0ul); @@ -55,9 +55,9 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ov::pa return true; } -bool FoldConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { +bool FoldConvertTransformation::canBeTransformed(const std::shared_ptr& operation) const { return - CleanupTransformation::canBeTransformed(context, operation) && + CleanupTransformation::canBeTransformed(operation) && ((ov::is_type(operation->get_input_node_ptr(1)) && ov::is_type(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) || (ov::is_type(operation->get_input_node_ptr(0)) && diff --git a/src/common/low_precision_transformations/src/fold_fake_quantize.cpp b/src/common/low_precision_transformations/src/fold_fake_quantize.cpp index 2f275ccb995c4f..3963c1eea20ef1 
100644 --- a/src/common/low_precision_transformations/src/fold_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fold_fake_quantize.cpp @@ -26,20 +26,20 @@ FoldFakeQuantizeTransformation::FoldFakeQuantizeTransformation(const Params& par if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(fakeQuantize, matcher_name); this->register_matcher(m, callback); } -bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FoldFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto fakeQuantize = ov::as_type_ptr(m.get_match_root()); if (fakeQuantize == nullptr) { return false; } - if (!canBeTransformed(context, fakeQuantize)) { + if (!canBeTransformed(fakeQuantize)) { return false; } @@ -76,7 +76,7 @@ bool FoldFakeQuantizeTransformation::isConstantOutput(std::shared_ptr return vecLow == vecHigh; } -bool FoldFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { +bool FoldFakeQuantizeTransformation::canBeTransformed(const std::shared_ptr& op) const { if (!NetworkHelper::isConstantPath(op) && !isConstantOutput(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_convert.cpp b/src/common/low_precision_transformations/src/fuse_convert.cpp index bda3cd8b3d38c8..889233c03236b6 100644 --- a/src/common/low_precision_transformations/src/fuse_convert.cpp +++ b/src/common/low_precision_transformations/src/fuse_convert.cpp @@ -40,7 +40,7 @@ FuseConvertTransformation::FuseConvertTransformation(const Params& params) : Cle if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; this->register_matcher(matcher, callback); @@ -68,9 +68,9 @@ std::shared_ptr removeConvertIfPossibleForSubtract( } // namespace -bool 
FuseConvertTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseConvertTransformation::transform(ov::pass::pattern::Matcher &m) { const auto op = m.get_match_root(); - if (!canBeTransformed(context, op)) { + if (!canBeTransformed(op)) { return false; } @@ -114,8 +114,8 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ov::pa return true; } -bool FuseConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!CleanupTransformation::canBeTransformed(context, op)) { +bool FuseConvertTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!CleanupTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp index e9418da055c929..5f8d9be15eb20b 100644 --- a/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp @@ -15,8 +15,8 @@ namespace low_precision { FuseElementwiseToFakeQuantizeTransformation::FuseElementwiseToFakeQuantizeTransformation(const Params& params) : CleanupTransformation(params) { } -bool FuseElementwiseToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool FuseElementwiseToFakeQuantizeTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp index 56d67cb2edcbab..6b77e42f581af0 100644 --- 
a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp @@ -25,16 +25,16 @@ FuseMultiplyToFakeQuantizeTransformation::FuseMultiplyToFakeQuantizeTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseMultiplyToFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -86,7 +86,7 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext& newFakeQuantize->set_levels(intervalAlignment.as().levels); } - updateOutput(context, newFakeQuantize, multiply); + updateOutput(newFakeQuantize, multiply); return true; } diff --git a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp index 61603cc8826713..73862fc856a944 100644 --- a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp @@ -25,16 +25,16 @@ FuseSubtractToFakeQuantizeTransformation::FuseSubtractToFakeQuantizeTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseSubtractToFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { 
const auto subtract = m.get_match_root(); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { return false; } @@ -81,7 +81,7 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext& replace_node(subtract, newFakeQuantize); NetworkHelper::copyInfo(fakeQuantize, newFakeQuantize); - updateOutput(context, newFakeQuantize, subtract); + updateOutput(newFakeQuantize, subtract); return true; } diff --git a/src/common/low_precision_transformations/src/gather.cpp b/src/common/low_precision_transformations/src/gather.cpp index 1a8aa377cff2aa..4c5959d5c373e0 100644 --- a/src/common/low_precision_transformations/src/gather.cpp +++ b/src/common/low_precision_transformations/src/gather.cpp @@ -95,16 +95,16 @@ GatherTransformation::GatherTransformation(const Params& params) : LayerTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(gather, matcher_name); this->register_matcher(m, callback); } -bool GatherTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool GatherTransformation::transform(ov::pass::pattern::Matcher &m) { auto node = m.get_match_root(); - if (!canBeTransformed(context, m.get_match_root())) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -120,14 +120,14 @@ bool GatherTransformation::transform(TransformationContext& context, ov::pass::p replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, gather, NetworkHelper::getDequantization(gather, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(gather, NetworkHelper::getDequantization(gather, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool GatherTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if 
(!LayerTransformation::canBeTransformed(context, operation)) { +bool GatherTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/group_convolution.cpp b/src/common/low_precision_transformations/src/group_convolution.cpp index 6e2f48cd10e734..feabc004f05144 100644 --- a/src/common/low_precision_transformations/src/group_convolution.cpp +++ b/src/common/low_precision_transformations/src/group_convolution.cpp @@ -25,7 +25,7 @@ GroupConvolutionTransformation::GroupConvolutionTransformation(const Params& par if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -37,15 +37,12 @@ bool GroupConvolutionTransformation::isQuantized(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/src/interpolate.cpp b/src/common/low_precision_transformations/src/interpolate.cpp index f1d9a2d505788a..5559de793500e6 100644 --- a/src/common/low_precision_transformations/src/interpolate.cpp +++ b/src/common/low_precision_transformations/src/interpolate.cpp @@ -46,7 +46,7 @@ InterpolateTransformation::InterpolateTransformation(const Params& params) : Lay if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto matcher = std::make_shared( @@ -56,13 +56,13 @@ InterpolateTransformation::InterpolateTransformation(const Params& params) : Lay this->register_matcher(matcher, callback); } -bool InterpolateTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool InterpolateTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr interpolate = m.get_match_root(); - if (!canBeTransformed(context, m.get_match_root())) { + if (!canBeTransformed(m.get_match_root())) { return false; } interpolate = 
NetworkHelper::separateInStandaloneBranch(interpolate, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -84,8 +84,8 @@ bool InterpolateTransformation::isPrecisionPreserved(std::shared_ptr layer return false; } -bool InterpolateTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool InterpolateTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp index fa014a078a22d3..e7a1af82d95614 100644 --- a/src/common/low_precision_transformations/src/layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/layer_transformation.cpp @@ -45,22 +45,9 @@ LayerTransformation::LayerTransformation(const Params& params) : deqPrecision(params.deqPrecision), defaultPrecisions(params.defaultPrecisions), reshapeIgnorePerTensorQuantizationCheck(params.reshapeIgnorePerTensorQuantizationCheck), - scalingMode(params.scalingMode), - context(nullptr) {} + scalingMode(params.scalingMode) {} -void LayerTransformation::setContext(TransformationContext* context) noexcept { - this->context = context; -} - -void LayerTransformation::setUpdatePrecisions(const bool updatePrecisions) { - this->updatePrecisions = updatePrecisions; -} - -void LayerTransformation::setDefaultPrecisions(const std::vector& defaultPrecisions) { - this->defaultPrecisions = defaultPrecisions; -} - -bool LayerTransformation::canBeTransformed(const 
TransformationContext& context, std::shared_ptr layer) const { +bool LayerTransformation::canBeTransformed(const std::shared_ptr& layer) const { if (!isQuantized(layer, defaultPrecisions)) { return false; } @@ -126,7 +113,7 @@ bool LayerTransformation::canBeTransformedStatic(const std::shared_ptr& la return true; } -bool LayerTransformation::canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const { +bool LayerTransformation::canBeTransformedSpatialDimension(const std::shared_ptr& layer) const { if (!isQuantized(layer, defaultPrecisions)) { OPENVINO_DEBUG("LPT: early exit: not quantized"); return false; @@ -397,7 +384,6 @@ DataPrecision LayerTransformation::getDataPrecision( } std::shared_ptr LayerTransformation::moveDequantizationAfter( - TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool updateOutputPrecision, @@ -408,31 +394,28 @@ std::shared_ptr LayerTransformation::moveDequantizationAfter( updateOutputPrecision, moveSubtract, defaultPrecisions); - updateOutput(context, result.lastDequantization, result.newOperation); + updateOutput(result.lastDequantization, result.newOperation); return result.newOperation; } std::shared_ptr LayerTransformation::moveDequantizationBefore( - TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool moveSubtract) const { const auto result = ov::pass::low_precision::NetworkHelper::moveDequantizationBefore(operation, dequantization, moveSubtract); - updateOutput(context, result.newOperation, result.lastDequantization); + updateOutput(result.newOperation, result.lastDequantization); return result.newOperation; } -bool LayerTransformation::updateOutput( - TransformationContext &context, - std::shared_ptr lastNode, - std::shared_ptr originalNode) const { +bool LayerTransformation::updateOutput(const std::shared_ptr& lastNode, + const std::shared_ptr& 
originalNode) const { bool was_updated = false; for (auto output : lastNode->outputs()) { for (auto input : output.get_target_inputs()) { if (ov::is_type(input.get_node())) { - const std::string originalName = originalNode->get_friendly_name(); + const auto originalName = originalNode->get_friendly_name(); originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix); lastNode->set_friendly_name(originalName); was_updated = true; @@ -442,61 +425,6 @@ bool LayerTransformation::updateOutput( } return was_updated; } - -void LayerTransformation::updateOutput( - TransformationContext& context, - std::shared_ptr lastNode, - std::string originalName) const { - const size_t outputSize = context.model->get_output_size(); - for (size_t i = 0; i < outputSize; ++i) { - std::shared_ptr result = context.model->get_output_op(i); - std::shared_ptr outputNode = result->get_input_node_shared_ptr(0); - if (outputNode.get() == lastNode.get()) { - lastNode->set_friendly_name(originalName); - break; - } - } -} - -void LayerTransformation::addPattern(ov::pass::GraphRewrite& pass, TransformationContext& context, std::shared_ptr patternRoot) { - MATCHER_SCOPE(SingleNodeMatcher); - ov::graph_rewrite_callback internal_callback = [this, &context](ov::pass::pattern::Matcher &m) { - const bool result = transform(context, m); - (void)result; -#ifdef LPT_DISPLAY_PRECISION - if (result) { - auto operationNode = m.get_match_root(); - std::cout << "Operation was transformed: " << - operationNode->get_type_name() << ", " << - operationNode->get_friendly_name() << ", output operation precision: " << - ((operationNode->get_output_size() == 1u) ? operationNode->get_output_element_type(0) : ov::element::Type()) << - std::endl; - } -#endif - return false; - }; - // TODO: better name for matcher? required? 
- auto m = std::make_shared(patternRoot, matcher_name); - auto match_pass = std::make_shared( - m->get_name(), - m, - [m, internal_callback](const std::shared_ptr& node) -> bool { - OPENVINO_DEBUG("Running matcher ", m->get_name(), " on ", node); - OV_PASS_CALLBACK(m); - if (std::dynamic_pointer_cast(m)->match(node->output(0))) { - OPENVINO_DEBUG("Matcher ", m->get_name(), " matched ", node); - bool status = internal_callback(*m.get()); - // explicitly clear Matcher state because it holds pointers to matched nodes - m->clear_state(); - return status; - } - m->clear_state(); - return false; - }, - ov::pass::PassProperty::CHANGE_DYNAMIC_STATE); - pass.add_matcher(match_pass); -} - } // namespace low_precision } // namespace pass } // namespace ov diff --git a/src/common/low_precision_transformations/src/mat_mul.cpp b/src/common/low_precision_transformations/src/mat_mul.cpp index f2d471bb222739..b153173d264a6e 100644 --- a/src/common/low_precision_transformations/src/mat_mul.cpp +++ b/src/common/low_precision_transformations/src/mat_mul.cpp @@ -32,16 +32,16 @@ MatMulTransformation::MatMulTransformation(const Params& params) : LayerTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MatMulTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool MatMulTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr matMul = ov::as_type_ptr(m.get_match_root()); - if ((matMul == nullptr) || !canBeTransformed(context, matMul)) { + if ((matMul == nullptr) || !canBeTransformed(matMul)) { return false; } @@ -174,7 +174,7 @@ bool MatMulTransformation::transform(TransformationContext &context, ov::pass::p NetworkHelper::insertDequantizationAfter(matMul, newMultiply, newMatMul); copy_runtime_info({ newMultiply, matMul }, newMultiply); - updateOutput(context, newMultiply, 
newMatMul); + updateOutput(newMultiply, newMatMul); OPENVINO_DEBUG("LPT: done: ", newMatMul); return true; @@ -184,8 +184,8 @@ bool MatMulTransformation::isPrecisionPreserved(std::shared_ptr layer) con return false; } -bool MatMulTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, layer)) { +bool MatMulTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/max_pool.cpp b/src/common/low_precision_transformations/src/max_pool.cpp index d9b06644037847..ef0a508fac3a65 100644 --- a/src/common/low_precision_transformations/src/max_pool.cpp +++ b/src/common/low_precision_transformations/src/max_pool.cpp @@ -26,15 +26,15 @@ MaxPoolTransformation::MaxPoolTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MaxPoolTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool MaxPoolTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -51,13 +51,13 @@ bool MaxPoolTransformation::canBeTransformed(const TransformationContext& contex return true; } -bool MaxPoolTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool MaxPoolTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr pooling = 
NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/move_fake_quantize.cpp b/src/common/low_precision_transformations/src/move_fake_quantize.cpp index abee9cbd081a0f..54b54a332db561 100644 --- a/src/common/low_precision_transformations/src/move_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/move_fake_quantize.cpp @@ -46,7 +46,7 @@ MoveFakeQuantize::MoveFakeQuantize(const Params& params) : LayerTransformation(p return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared( @@ -55,9 +55,9 @@ MoveFakeQuantize::MoveFakeQuantize(const Params& params) : LayerTransformation(p this->register_matcher(m, callback); } -bool MoveFakeQuantize::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MoveFakeQuantize::transform(ov::pass::pattern::Matcher& m) { const auto fq = m.get_match_root(); - if (!canBeTransformed(context, fq)) { + if (!canBeTransformed(fq)) { return false; } @@ -156,16 +156,16 @@ bool MoveFakeQuantize::transform(TransformationContext& context, ov::pass::patte newConcat->set_friendly_name(concat->get_friendly_name()); NetworkHelper::copyInfo(concat, newConcat); if (!dequantization.empty()) { - moveDequantizationBefore(context, newConcat, dequantization); + moveDequantizationBefore(newConcat, dequantization); return true; } replace_node(fq, newConcat); - updateOutput(context, newConcat, fq); + updateOutput(newConcat, fq); return true; } -bool MoveFakeQuantize::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool 
MoveFakeQuantize::canBeTransformed(const std::shared_ptr& layer) const { auto operation = layer->get_input_node_shared_ptr(0); std::shared_ptr concat; if (is_type(operation)) { diff --git a/src/common/low_precision_transformations/src/multiply.cpp b/src/common/low_precision_transformations/src/multiply.cpp index 4c1f3c073febcf..8dd6cce059d96e 100644 --- a/src/common/low_precision_transformations/src/multiply.cpp +++ b/src/common/low_precision_transformations/src/multiply.cpp @@ -33,16 +33,16 @@ MultiplyTransformation::MultiplyTransformation(const Params& params) : if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MultiplyTransformation::transform(ov::pass::pattern::Matcher& m) { auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -100,7 +100,7 @@ bool MultiplyTransformation::transform(TransformationContext& context, ov::pass: ov::op::TemporaryReplaceOutputType(in2, deqPrecision).get()); replace_node(multiply, new_multiply); - updateOutput(context, new_multiply, multiply); + updateOutput(new_multiply, multiply); return true; } @@ -128,7 +128,7 @@ bool MultiplyTransformation::transform(TransformationContext& context, ov::pass: multiply->get_output_element_type(0)); replace_node(multiply, new_scales); - const auto was_updated = updateOutput(context, new_scales, multiply); + const auto was_updated = updateOutput(new_scales, multiply); NetworkHelper::copyInfo(multiply, new_multiply, !was_updated); return true; diff --git a/src/common/low_precision_transformations/src/multiply_partial.cpp b/src/common/low_precision_transformations/src/multiply_partial.cpp index aea1bf49b8ffc1..e01b09324712ef 100644 --- 
a/src/common/low_precision_transformations/src/multiply_partial.cpp +++ b/src/common/low_precision_transformations/src/multiply_partial.cpp @@ -32,16 +32,16 @@ MultiplyPartialTransformation::MultiplyPartialTransformation(const Params& param if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyPartialTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MultiplyPartialTransformation::transform(ov::pass::pattern::Matcher& m) { auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -168,7 +168,7 @@ bool MultiplyPartialTransformation::transform(TransformationContext& context, ov } replace_node(multiply, newMultiply); - updateOutput(context, newMultiply, multiply); + updateOutput(newMultiply, multiply); if (fullPathIndex != -1) { NetworkHelper::foldDequantization(newMultiply, fullPathIndex, defaultPrecisions); @@ -178,7 +178,7 @@ bool MultiplyPartialTransformation::transform(TransformationContext& context, ov return true; } -bool MultiplyPartialTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool MultiplyPartialTransformation::canBeTransformed(const std::shared_ptr& layer) const { FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 1ul); @@ -193,7 +193,7 @@ bool MultiplyPartialTransformation::canBeTransformed(const TransformationContext return false; } - return EltwiseBaseTransformation::canBeTransformed(context, layer); + return EltwiseBaseTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git 
a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp index 466b7ad6c75e5d..8e52eb38ee8ee1 100644 --- a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp +++ b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp @@ -24,16 +24,16 @@ MultiplyToGroupConvolutionTransformation::MultiplyToGroupConvolutionTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool MultiplyToGroupConvolutionTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -142,8 +142,8 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& return true; } -bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/mvn.cpp b/src/common/low_precision_transformations/src/mvn.cpp index 4c848b69b82661..22dfc2f9816ed0 100644 --- a/src/common/low_precision_transformations/src/mvn.cpp +++ b/src/common/low_precision_transformations/src/mvn.cpp @@ -52,15 +52,15 @@ MVNTransformation::MVNTransformation(const Params& params) : LayerTransformation if (transformation_callback(op)) { return false; } - return 
transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MVNTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool MVNTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } @@ -117,9 +117,9 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s return false; } -bool MVNTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool MVNTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr operation = m.get_match_root(); - if (!canBeTransformed(context, operation)) { + if (!canBeTransformed(operation)) { return false; } @@ -167,7 +167,7 @@ bool MVNTransformation::transform(TransformationContext &context, ov::pass::patt NetworkHelper::insertDequantizationAfter(mvn, newMultiply, newMVN); - updateOutput(context, newMultiply, newMVN); + updateOutput(newMultiply, newMVN); OPENVINO_DEBUG("LPT: done: ", newMVN); return true; diff --git a/src/common/low_precision_transformations/src/normalize_l2.cpp b/src/common/low_precision_transformations/src/normalize_l2.cpp index c9f1cc6a7fe8af..9c30456f84afba 100644 --- a/src/common/low_precision_transformations/src/normalize_l2.cpp +++ b/src/common/low_precision_transformations/src/normalize_l2.cpp @@ -47,15 +47,15 @@ NormalizeL2Transformation::NormalizeL2Transformation(const Params& params) : Lay if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if 
(!LayerTransformation::canBeTransformed(context, operation)) { +bool NormalizeL2Transformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } @@ -97,9 +97,9 @@ bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& co return true; } -bool NormalizeL2Transformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool NormalizeL2Transformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr operation = m.get_match_root(); - if (!canBeTransformed(context, operation)) { + if (!canBeTransformed(operation)) { return false; } @@ -146,7 +146,7 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ov::pa NetworkHelper::insertDequantizationAfter(normalize, newMultiply, newNormalize); ov::copy_runtime_info({ normalize, newMultiply }, newMultiply); - updateOutput(context, newMultiply, newNormalize); + updateOutput(newMultiply, newNormalize); OPENVINO_DEBUG("LPT: done: ", newNormalize); return true; diff --git a/src/common/low_precision_transformations/src/pad.cpp b/src/common/low_precision_transformations/src/pad.cpp index 12310ec5724f6c..c023ee9d14d9ee 100644 --- a/src/common/low_precision_transformations/src/pad.cpp +++ b/src/common/low_precision_transformations/src/pad.cpp @@ -31,7 +31,7 @@ PadTransformation::PadTransformation(const Params& params) : LayerTransformation if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -50,8 +50,8 @@ namespace { } } // namespace -bool PadTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool PadTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -164,14 +164,14 @@ bool 
PadTransformation::transform(TransformationContext& context, ov::pass::patt const auto convertedZero = ov::opset1::Constant::create(dequantization.data.get_element_type(), Shape{}, { padConstantValue }); pad->set_argument(3, convertedZero); - const auto newOperation = moveDequantizationAfter(context, pad, dequantization); + const auto newOperation = moveDequantizationAfter(pad, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool PadTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, op)) { +bool PadTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/prelu.cpp b/src/common/low_precision_transformations/src/prelu.cpp index 46e0d692f0faca..e30bb6fa041074 100644 --- a/src/common/low_precision_transformations/src/prelu.cpp +++ b/src/common/low_precision_transformations/src/prelu.cpp @@ -28,22 +28,22 @@ PReluTransformation::PReluTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool PReluTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool PReluTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr prelu = m.get_match_root(); - if (!canBeTransformed(context, prelu)) { + if (!canBeTransformed(prelu)) { return false; } prelu = NetworkHelper::separateInStandaloneBranch(prelu, defaultPrecisions); const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(prelu, defaultPrecisions, 0); - const auto newOperation = moveDequantizationAfter(context, prelu, dequantization, false, false); + 
const auto newOperation = moveDequantizationAfter(prelu, dequantization, false, false); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -53,8 +53,8 @@ bool PReluTransformation::isPrecisionPreserved(std::shared_ptr op) const n return false; } -bool PReluTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool PReluTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/recurrent_cell.cpp b/src/common/low_precision_transformations/src/recurrent_cell.cpp index cec96044502596..34d851d6a2b464 100644 --- a/src/common/low_precision_transformations/src/recurrent_cell.cpp +++ b/src/common/low_precision_transformations/src/recurrent_cell.cpp @@ -43,7 +43,7 @@ RecurrentCellTransformation::RecurrentCellTransformation(const Params& params) : return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared( @@ -116,7 +116,7 @@ std::vector> get_supported_precisions(std::shar } // namespace -void RecurrentCellTransformation::propagate(TransformationContext& context, const std::shared_ptr node) { +void RecurrentCellTransformation::propagate(const std::shared_ptr node) { if (!isSupportedForPerChannelQuantization(node)) { return; } @@ -126,7 +126,7 @@ void RecurrentCellTransformation::propagate(TransformationContext& context, cons if (dequantization.empty()) { return; } - const auto& new_node = moveDequantizationAfter(context, normalized_node, dequantization); + const auto& new_node = moveDequantizationAfter(normalized_node, dequantization); const auto& new_dequantization = NetworkHelper::getDequantizationBelow(new_node); if (new_dequantization.empty()) { @@ -136,12 +136,12 @@ void RecurrentCellTransformation::propagate(TransformationContext& context, cons for (auto output : 
new_dequantization.multiply->outputs()) { for (auto input : output.get_target_inputs()) { auto child = input.get_node()->shared_from_this(); - propagate(context, child); + propagate(child); } } } -bool RecurrentCellTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool RecurrentCellTransformation::transform(ov::pass::pattern::Matcher& m) { const auto lstm = m.get_match_root(); const auto inputs = get_supported_precisions(lstm); for (const auto& input : inputs) { @@ -179,13 +179,13 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: for (const auto& output : multiply->outputs()) { for (const auto& input : output.get_target_inputs()) { const auto input_node = input.get_node(); - propagate(context, input_node->shared_from_this()); + propagate(input_node->shared_from_this()); } } } } - if (!canBeTransformed(context, lstm)) { + if (!canBeTransformed(lstm)) { return false; } @@ -228,7 +228,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: propagateSkipCleanupAttribute(deq_multiply); this->register_new_node(new_fq); - updateOutput(context, deq_multiply, new_fq); + updateOutput(deq_multiply, new_fq); } else { continue; } @@ -245,7 +245,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: return true; } -bool RecurrentCellTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr lstm) const { +bool RecurrentCellTransformation::canBeTransformed(const std::shared_ptr& lstm) const { const auto inputs = get_supported_precisions(lstm); for (const auto& index : inputs) { const auto& input = lstm->get_input_node_ptr(index.first); diff --git a/src/common/low_precision_transformations/src/reduce_base_transformation.cpp b/src/common/low_precision_transformations/src/reduce_base_transformation.cpp index 5fe679d8c997bf..c39681bc660f21 100644 --- 
a/src/common/low_precision_transformations/src/reduce_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/reduce_base_transformation.cpp @@ -16,8 +16,8 @@ namespace low_precision { ReduceBaseTransformation::ReduceBaseTransformation(const Params& params) : LayerTransformation(params) {} -bool ReduceBaseTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ReduceBaseTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -29,13 +29,13 @@ bool ReduceBaseTransformation::transform(TransformationContext& context, ov::pas // updatePrecision depends on type and parameters of the reduce const bool updatePrecision = getUpdatePrecision(reduce); - const auto newOperation = moveDequantizationAfter(context, reduce, dequantization, updatePrecision); + const auto newOperation = moveDequantizationAfter(reduce, dequantization, updatePrecision); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool ReduceBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceBaseTransformation::canBeTransformed(const std::shared_ptr& reduce) const { const auto dequantization = NetworkHelper::getDequantization(reduce, defaultPrecisions); if (dequantization.empty()) { return false; diff --git a/src/common/low_precision_transformations/src/reduce_max.cpp b/src/common/low_precision_transformations/src/reduce_max.cpp index 4cf9c2ed2100aa..65d021accf3452 100644 --- a/src/common/low_precision_transformations/src/reduce_max.cpp +++ b/src/common/low_precision_transformations/src/reduce_max.cpp @@ -23,19 +23,19 @@ ReduceMaxTransformation::ReduceMaxTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, 
matcher_name); this->register_matcher(m, callback); } -bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceMaxTransformation::canBeTransformed(const std::shared_ptr& reduce) const { if (!ov::is_type(reduce)) { return false; } - if (!ReduceBaseTransformation::canBeTransformed(context, reduce)) { + if (!ReduceBaseTransformation::canBeTransformed(reduce)) { return false; } diff --git a/src/common/low_precision_transformations/src/reduce_mean.cpp b/src/common/low_precision_transformations/src/reduce_mean.cpp index 451a1d4c3804df..55f080587290b9 100644 --- a/src/common/low_precision_transformations/src/reduce_mean.cpp +++ b/src/common/low_precision_transformations/src/reduce_mean.cpp @@ -23,15 +23,15 @@ ReduceMeanTransformation::ReduceMeanTransformation(const Params& params) : Reduc if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceMeanTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - return ov::is_type(reduce) ? ReduceBaseTransformation::canBeTransformed(context, reduce) : false; +bool ReduceMeanTransformation::canBeTransformed(const std::shared_ptr& reduce) const { + return ov::is_type(reduce) ? 
ReduceBaseTransformation::canBeTransformed(reduce) : false; } bool ReduceMeanTransformation::isPrecisionPreserved(std::shared_ptr reduce) const noexcept { diff --git a/src/common/low_precision_transformations/src/reduce_min.cpp b/src/common/low_precision_transformations/src/reduce_min.cpp index d7433c322718c0..6ad0cfea259b38 100644 --- a/src/common/low_precision_transformations/src/reduce_min.cpp +++ b/src/common/low_precision_transformations/src/reduce_min.cpp @@ -22,19 +22,19 @@ ReduceMinTransformation::ReduceMinTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceMinTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceMinTransformation::canBeTransformed(const std::shared_ptr& reduce) const { if (!ov::is_type(reduce)) { return false; } - if (!ReduceBaseTransformation::canBeTransformed(context, reduce)) { + if (!ReduceBaseTransformation::canBeTransformed(reduce)) { return false; } diff --git a/src/common/low_precision_transformations/src/reduce_sum.cpp b/src/common/low_precision_transformations/src/reduce_sum.cpp index 1bc8bf75d27a7f..d28fbdc9c559e7 100644 --- a/src/common/low_precision_transformations/src/reduce_sum.cpp +++ b/src/common/low_precision_transformations/src/reduce_sum.cpp @@ -23,16 +23,16 @@ ReduceSumTransformation::ReduceSumTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceSumTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceSumTransformation::canBeTransformed(const std::shared_ptr& reduce) const { const auto reduceSum 
= ov::as_type_ptr(reduce); - if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(context, reduceSum)) { + if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(reduceSum)) { return false; } diff --git a/src/common/low_precision_transformations/src/relu.cpp b/src/common/low_precision_transformations/src/relu.cpp index 6d39cccc0e0260..ca2de4e05f0b84 100644 --- a/src/common/low_precision_transformations/src/relu.cpp +++ b/src/common/low_precision_transformations/src/relu.cpp @@ -28,22 +28,22 @@ ReluTransformation::ReluTransformation(const Params& params) : LayerTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReluTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool ReluTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr relu = m.get_match_root(); - if (!canBeTransformed(context, relu)) { + if (!canBeTransformed(relu)) { return false; } relu = NetworkHelper::separateInStandaloneBranch(relu, defaultPrecisions); const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(relu, defaultPrecisions, 0); - const auto newOperation = moveDequantizationAfter(context, relu, dequantization); + const auto newOperation = moveDequantizationAfter(relu, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -53,8 +53,8 @@ bool ReluTransformation::isPrecisionPreserved(std::shared_ptr op) const no return true; } -bool ReluTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ReluTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/reshape.cpp 
b/src/common/low_precision_transformations/src/reshape.cpp index 4d9b9c53a782f6..cf24edc18953b4 100644 --- a/src/common/low_precision_transformations/src/reshape.cpp +++ b/src/common/low_precision_transformations/src/reshape.cpp @@ -48,7 +48,7 @@ ReshapeTransformation::ReshapeTransformation(const Params& params) : LayerTransf } } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -146,19 +146,19 @@ void reshapeDequantizationConstant(const std::shared_ptr& r } // namespace -bool ReshapeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool ReshapeTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr reshape = ov::as_type_ptr(m.get_match_root()); if (NetworkHelper::isConstantPath(reshape)) { return false; } - if (!canBeTransformed(context, reshape)) { + if (!canBeTransformed(reshape)) { return false; } reshape = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(reshape, defaultPrecisions)); reshapeDequantizationConstant(reshape, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0)); + const auto newOperation = moveDequantizationAfter(reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -188,8 +188,8 @@ inline size_t getFirstChangedDimension(const PartialShape& shape1, const Partial return i; } -bool ReshapeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ReshapeTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/shuffle_channels.cpp b/src/common/low_precision_transformations/src/shuffle_channels.cpp 
index ab170ea28572e2..67abf9b28db708 100644 --- a/src/common/low_precision_transformations/src/shuffle_channels.cpp +++ b/src/common/low_precision_transformations/src/shuffle_channels.cpp @@ -26,15 +26,15 @@ ShuffleChannelsTransformation::ShuffleChannelsTransformation(const Params& param if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ShuffleChannelsTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ShuffleChannelsTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -73,14 +73,14 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ov replace_node(dequantization.multiplyConstant, shuffledMulConst); dequantization.multiplyConstant = shuffledMulConst; - const auto newOperation = moveDequantizationAfter(context, shuffleChannels, dequantization); + const auto newOperation = moveDequantizationAfter(shuffleChannels, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool ShuffleChannelsTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, op)) { +bool ShuffleChannelsTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/slice.cpp b/src/common/low_precision_transformations/src/slice.cpp index 99c51f4e3f5ac3..082c00f207a37a 100644 --- a/src/common/low_precision_transformations/src/slice.cpp +++ b/src/common/low_precision_transformations/src/slice.cpp @@ -26,27 +26,27 @@ SliceTransformation::SliceTransformation(const Params& 
params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SliceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!SliceTransformation::canBeTransformed(context, m.get_match_root())) { +bool SliceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!SliceTransformation::canBeTransformed(m.get_match_root())) { return false; } const auto strided_slice = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool SliceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool SliceTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/space_to_batch.cpp b/src/common/low_precision_transformations/src/space_to_batch.cpp index 2f0373ffec1068..20a5d1b863a18a 100644 --- a/src/common/low_precision_transformations/src/space_to_batch.cpp +++ b/src/common/low_precision_transformations/src/space_to_batch.cpp @@ -26,15 +26,15 @@ SpaceToBatchTransformation::SpaceToBatchTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool 
SpaceToBatchTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool SpaceToBatchTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -46,13 +46,13 @@ bool SpaceToBatchTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool SpaceToBatchTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool SpaceToBatchTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/split.cpp b/src/common/low_precision_transformations/src/split.cpp index 88deb9f62e444b..35b1ede004730e 100644 --- a/src/common/low_precision_transformations/src/split.cpp +++ b/src/common/low_precision_transformations/src/split.cpp @@ -24,15 +24,15 @@ SplitTransformation::SplitTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SplitTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool SplitTransformation::transform(ov::pass::pattern::Matcher& m) { + if 
(!canBeTransformed(m.get_match_root())) { return false; } @@ -120,7 +120,7 @@ bool SplitTransformation::transform(TransformationContext& context, ov::pass::pa } } - updateOutputs(context, lastNodes, newSplit); + updateOutputs(lastNodes, newSplit); OPENVINO_DEBUG("LPT: done: ", newSplit); return true; @@ -128,12 +128,10 @@ bool SplitTransformation::transform(TransformationContext& context, ov::pass::pa void SplitTransformation::updateOutputs( - TransformationContext& context, std::vector> lastNodes, std::shared_ptr originalNode) const { - //TODO: LPT: during refactoring update is not tested if (lastNodes.size() == 1ul) { - updateOutput(context, lastNodes[0], originalNode); + updateOutput(lastNodes[0], originalNode); } else { const std::string originalName = originalNode->get_friendly_name(); for (size_t i = 0; i < lastNodes.size(); ++i) { @@ -155,7 +153,7 @@ bool SplitTransformation::isPrecisionPreserved(std::shared_ptr layer) cons return true; } -bool SplitTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool SplitTransformation::canBeTransformed(const std::shared_ptr& layer) const { return !NetworkHelper::getDequantization(layer, defaultPrecisions).empty() && layer->get_input_partial_shape(0).rank().is_static(); } diff --git a/src/common/low_precision_transformations/src/squeeze.cpp b/src/common/low_precision_transformations/src/squeeze.cpp index 04bdf62362bddd..2ddef0b81be120 100644 --- a/src/common/low_precision_transformations/src/squeeze.cpp +++ b/src/common/low_precision_transformations/src/squeeze.cpp @@ -26,15 +26,15 @@ SqueezeTransformation::SqueezeTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SqueezeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if 
(!canBeTransformed(context, m.get_match_root())) { +bool SqueezeTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -66,7 +66,7 @@ bool SqueezeTransformation::transform(TransformationContext& context, ov::pass:: replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -76,8 +76,8 @@ bool SqueezeTransformation::isPrecisionPreserved(std::shared_ptr layer) co return true; } -bool SqueezeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(context, layer); +bool SqueezeTransformation::canBeTransformed(const std::shared_ptr& layer) const { + return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/strided_slice.cpp b/src/common/low_precision_transformations/src/strided_slice.cpp index 5d9939e1fe943a..046a1d65af6e50 100644 --- a/src/common/low_precision_transformations/src/strided_slice.cpp +++ b/src/common/low_precision_transformations/src/strided_slice.cpp @@ -107,15 +107,15 @@ StridedSliceTransformation::StridedSliceTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool StridedSliceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { 
- if (!StridedSliceTransformation::canBeTransformed(context, m.get_match_root())) { +bool StridedSliceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!StridedSliceTransformation::canBeTransformed(m.get_match_root())) { return false; } @@ -132,13 +132,13 @@ bool StridedSliceTransformation::transform(TransformationContext& context, ov::p replace_node(dequantization.multiplyConstant, new_mul_const); dequantization.multiplyConstant = new_mul_const; - const auto newOperation = moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool StridedSliceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { +bool StridedSliceTransformation::canBeTransformed(const std::shared_ptr& operation) const { if (!ov::is_type(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/subtract.cpp b/src/common/low_precision_transformations/src/subtract.cpp index d67a8dc1e0e288..b19add9fca1570 100644 --- a/src/common/low_precision_transformations/src/subtract.cpp +++ b/src/common/low_precision_transformations/src/subtract.cpp @@ -34,16 +34,16 @@ SubtractTransformation::SubtractTransformation(const Params& params) : LayerTran if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(subtract, matcher_name); this->register_matcher(m, callback); } -bool SubtractTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool SubtractTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr subtract = ov::as_type_ptr(m.get_match_root()); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { 
return false; } diff --git a/src/common/low_precision_transformations/src/transformation_context.cpp b/src/common/low_precision_transformations/src/transformation_context.cpp deleted file mode 100644 index 7cef253f0e3f3f..00000000000000 --- a/src/common/low_precision_transformations/src/transformation_context.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "low_precision/transformation_context.hpp" - -namespace ov { -namespace pass { -namespace low_precision { - -TransformationContext::TransformationContext() : model(nullptr) {} - -TransformationContext::TransformationContext(std::shared_ptr model) : model(model) { -} - -} // namespace low_precision -} // namespace pass -} // namespace ov diff --git a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp index f5efd99e008b86..8b7a94f5ef966b 100644 --- a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp @@ -14,20 +14,20 @@ using namespace ov; using namespace ov::pass; using namespace ov::pass::low_precision; -bool TransparentBaseTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool TransparentBaseTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr op = m.get_match_root(); - if (!canBeTransformed(context, op)) { + if (!canBeTransformed(op)) { return false; } op = NetworkHelper::separateInStandaloneBranch(op, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool 
TransparentBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool TransparentBaseTransformation::canBeTransformed(const std::shared_ptr& layer) const { return true; } diff --git a/src/common/low_precision_transformations/src/transpose.cpp b/src/common/low_precision_transformations/src/transpose.cpp index 4d8577e40643ff..b210920ab5bc65 100644 --- a/src/common/low_precision_transformations/src/transpose.cpp +++ b/src/common/low_precision_transformations/src/transpose.cpp @@ -26,7 +26,7 @@ TransposeTransformation::TransposeTransformation(const Params& params) : LayerTr if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -83,15 +83,15 @@ void transposeDequantizationConstant(std::shared_ptr& transpose, const std } // namespace -bool TransposeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool TransposeTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr transpose = m.get_match_root(); - if (!canBeTransformed(context, transpose)) { + if (!canBeTransformed(transpose)) { return false; } transpose = NetworkHelper::separateInStandaloneBranch(transpose, defaultPrecisions); transposeDequantizationConstant(transpose, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0)); + const auto newOperation = moveDequantizationAfter(transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -101,8 +101,8 @@ bool TransposeTransformation::isPrecisionPreserved(std::shared_ptr op) con return true; } -bool TransposeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool 
TransposeTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/unsqueeze.cpp b/src/common/low_precision_transformations/src/unsqueeze.cpp index 3ba7a951950a5b..32e3f89ab01e69 100644 --- a/src/common/low_precision_transformations/src/unsqueeze.cpp +++ b/src/common/low_precision_transformations/src/unsqueeze.cpp @@ -26,15 +26,15 @@ UnsqueezeTransformation::UnsqueezeTransformation(const Params& params) : LayerTr if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool UnsqueezeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool UnsqueezeTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -68,7 +68,7 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ov::pass replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -78,8 +78,8 @@ bool UnsqueezeTransformation::isPrecisionPreserved(std::shared_ptr layer) return true; } -bool UnsqueezeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(context, layer); +bool UnsqueezeTransformation::canBeTransformed(const std::shared_ptr& layer) const { + return 
(!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(layer); } diff --git a/src/common/low_precision_transformations/src/variadic_split.cpp b/src/common/low_precision_transformations/src/variadic_split.cpp index fd719cb5fcdf05..e381fd2e58bd48 100644 --- a/src/common/low_precision_transformations/src/variadic_split.cpp +++ b/src/common/low_precision_transformations/src/variadic_split.cpp @@ -26,7 +26,7 @@ VariadicSplitTransformation::VariadicSplitTransformation(const Params& params) : if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); diff --git a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp index 9bd43e8a73fe9b..64c6e15cd81356 100644 --- a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp @@ -49,9 +49,10 @@ WeightableLayerTransformation::WeightableLayerTransformation(const Params& param canBeTransformedParams(canBeTransformedParams) { } -bool WeightableLayerTransformation::canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, - const std::vector& defaultPrecisions) const { - if (!WeightableLayerTransformation::canBeTransformed(context, layer)) { +bool WeightableLayerTransformation::canConvolutionBeTransformed( + const std::shared_ptr& layer, + const ov::element::TypeVector& defaultPrecisions) const { + if (!WeightableLayerTransformation::canBeTransformed(layer)) { return false; } @@ -88,8 +89,8 @@ bool WeightableLayerTransformation::canConvolutionBeTransformed(const Transforma return true; } -bool WeightableLayerTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if 
(!LayerTransformation::canBeTransformed(context, layer)) { +bool WeightableLayerTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/tests/layer_transformation.hpp b/src/common/low_precision_transformations/tests/layer_transformation.hpp index 6ce93863c42a67..83a9faa70e16d3 100644 --- a/src/common/low_precision_transformations/tests/layer_transformation.hpp +++ b/src/common/low_precision_transformations/tests/layer_transformation.hpp @@ -8,7 +8,6 @@ #include "low_precision/rt_info/intervals_alignment_attribute.hpp" #include "low_precision/rt_info/precisions_attribute.hpp" #include "low_precision/layer_transformation.hpp" -#include "low_precision/transformation_context.hpp" #include "low_precision/network_helper.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp index 9a80930160b298..ee1bb78ef4992a 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp +++ b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp @@ -10,7 +10,6 @@ #include "low_precision/markup_bias.hpp" #include "low_precision/markup_can_be_quantized.hpp" #include "low_precision/markup_quantization_granularity.hpp" -#include "low_precision/transformation_context.hpp" // cleanup transformations #include "low_precision/convert.hpp" From e13f71005a714a1180770896eecd21ad29eed53b Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Mon, 20 Jan 2025 22:10:25 +0000 Subject: [PATCH 16/35] [CI] [GHA] Remove `pip cache info` from `setup_python` action (#28561) ### Details: - Port of #28557 --- .github/actions/setup_python/action.yml | 9 --------- .github/workflows/job_build_windows.yml | 1 - 
docs/dev/ci/github_actions/custom_actions.md | 2 -- 3 files changed, 12 deletions(-) diff --git a/.github/actions/setup_python/action.yml b/.github/actions/setup_python/action.yml index ce85be46ced17e..507c52d8a69efa 100644 --- a/.github/actions/setup_python/action.yml +++ b/.github/actions/setup_python/action.yml @@ -15,10 +15,6 @@ inputs: description: 'If the runner is self-hosted' required: false default: 'true' - show-cache-info: - description: 'If the action should show the share space occupied by cache' - required: false - default: 'false' runs: using: 'composite' steps: @@ -75,8 +71,3 @@ runs: $pipVersion = python3 -c "import pip; print(pip.__version__)" Write-Host "Using pip version: $pipVersion" "PIP_CACHE_DIR=${{ inputs.pip-cache-path }}/$pipVersion" >> $env:GITHUB_ENV - - - if: ${{ inputs.show-cache-info == 'true' }} - name: Get pip cache info - shell: bash - run: python3 -m pip cache info diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index f0c150c4ac4db4..988bec1de7f929 100644 --- a/.github/workflows/job_build_windows.yml +++ b/.github/workflows/job_build_windows.yml @@ -80,7 +80,6 @@ jobs: pip-cache-path: ${{ env.PIP_CACHE_PATH }} should-setup-pip-paths: 'true' self-hosted-runner: 'true' - show-cache-info: 'true' - name: Generate product manifest and set CI_BUILD_NUMBER & CI_BUILD_DEV_TAG id: create_manifest diff --git a/docs/dev/ci/github_actions/custom_actions.md b/docs/dev/ci/github_actions/custom_actions.md index d2c2ca149b20b9..e65650aea2b741 100644 --- a/docs/dev/ci/github_actions/custom_actions.md +++ b/docs/dev/ci/github_actions/custom_actions.md @@ -29,14 +29,12 @@ Since `actions/setup-python` does not work on the Linux ARM64 machines, pip-cache-path: ${{ env.PIP_CACHE_PATH }} should-setup-pip-paths: 'true' self-hosted-runner: 'true' - show-cache-info: 'true' ``` where: * `version` - the Python version to install in the `MAJOR.MINOR` format * `pip-cache-path` - the path to the `pip` cache on the 
mounted share. Read more in the [shares and caches](./caches.md) documentation * `should-setup-pip-paths` - indicates whether the action should set up the `PIP_CACHE_DIR` and `PIP_INSTALL_PATH` environment variables for later usage * `self-hosted-runner` - indicates whether the runner is self-hosted. Learn more about [available runners](./runners.md) -* `show-cache-info` - indicates whether the action should show the share space occupied by the `pip` cache ## System Info Print From 5ce87bf1fe1f8181e551340574cfacbea9a942c2 Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Mon, 20 Jan 2025 22:11:38 +0000 Subject: [PATCH 17/35] [CI] [GHA] Add more download errors to rerunner (#28515) ### Tickets: - *159938* - *156593* --- .github/scripts/workflow_rerun/errors_to_look_for.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/scripts/workflow_rerun/errors_to_look_for.json b/.github/scripts/workflow_rerun/errors_to_look_for.json index 8e45212a89a6f0..55b46f9df1b1bb 100644 --- a/.github/scripts/workflow_rerun/errors_to_look_for.json +++ b/.github/scripts/workflow_rerun/errors_to_look_for.json @@ -110,5 +110,13 @@ { "error_text": "download failed after attempts", "ticket": 159547 + }, + { + "error_text": "Failed to connect to github.com port 443: Connection refused", + "ticket": 156593 + }, + { + "error_text": "file DOWNLOAD cannot compute hash on failed download", + "ticket": 156593 } ] \ No newline at end of file From 73d6a3687d5cf13678938fc886e86d7861f257b5 Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Mon, 20 Jan 2025 22:22:06 +0000 Subject: [PATCH 18/35] Revert "[GHA] Use upload-artifact with tag in Build Doc" (#28359) Reverts openvinotoolkit/openvino#28354 Original PR is not needed anymore. 
The culprit was `cache-apt-pkgs-action`, it used `upload-artifact@v3` action which caused the original workflow failure, but now it's fixed https://github.com/awalsh128/cache-apt-pkgs-action/pull/140 and `upload-artifact@v3` will be working till January 30 (https://github.blog/changelog/2024-04-16-deprecation-notice-v3-of-the-artifact-actions/) --- .github/workflows/build_doc.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index df85b1ef3aa385..2ea17b79af7514 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -78,13 +78,13 @@ jobs: echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV - name: 'Upload sphinx.log' - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sphinx_build_log_${{ env.PR_NUMBER }}.log path: build/docs/sphinx.log - name: 'Upload docs html' - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_docs_html_${{ env.PR_NUMBER }}.zip path: build/docs/openvino_docs_html.zip @@ -101,7 +101,7 @@ jobs: - name: 'Upload test results' if: failure() - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_docs_pytest path: build/docs/_artifacts/ From 6eb75bbeff18f3475500d835edb5bfb27a11157c Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Tue, 21 Jan 2025 00:20:17 +0100 Subject: [PATCH 19/35] [LPT] ConcatTransformation: support scalar equal DQ propagation through dynamic dimension (#28350) ### Details: Currently, `ConcatTransformation` doesn't support DQ propagation if `concat->get_out_partial_shape()[axis].is_dynamic()`. However, it is theoretically possible to propagate the DQ if all dequantization constants are **scalar and equal**. This PR introduces this support.
### Tickets: - *CVS-160325* --- .../src/concat.cpp | 118 +++++++++++++----- .../tests/concat_transformation.cpp | 104 +++++++++++++++ 2 files changed, 192 insertions(+), 30 deletions(-) diff --git a/src/common/low_precision_transformations/src/concat.cpp b/src/common/low_precision_transformations/src/concat.cpp index fe39ed8d4f65b2..db77179a229cd6 100644 --- a/src/common/low_precision_transformations/src/concat.cpp +++ b/src/common/low_precision_transformations/src/concat.cpp @@ -82,31 +82,42 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { allDequantizationShiftConvertAreNotZero = false; } - // constant shape must be broadcastable to the shape on data. - auto broadcastElementWiseConst = [](std::shared_ptr operation, const Shape targetShape) { - auto targetShapeConst = std::make_shared(element::i64, Shape{ targetShape.size() }, targetShape); - auto broadcast = fold(operation, targetShapeConst); - return broadcast; + const auto& concat_out_shape = concat->get_output_partial_shape(0); + const auto axis = ov::util::try_normalize_axis(concat->get_axis(), concat_out_shape.rank(), *concat); + const bool scalar_equal_constants_requested = concat_out_shape[axis].is_dynamic(); + + auto adaptConstForConcatenation = [scalar_equal_constants_requested]( + const std::shared_ptr& constant, + const Shape& targetShape) { + if (scalar_equal_constants_requested) { + OPENVINO_ASSERT(targetShape.empty(), "scalar_equal_constants_requested implies targetShape is empty"); + return std::make_shared(*constant, ov::Shape{}); + } else { + auto targetShapeConst = std::make_shared(element::i64, Shape{ targetShape.size() }, targetShape); + auto bcastedConst = ov::as_type_ptr(fold(constant, targetShapeConst)); + OPENVINO_ASSERT(bcastedConst, "adaptConstForConcatenation must return constant"); + return bcastedConst; + } }; - bool someDqInLowPrecision = std::any_of( + const bool someDqInLowPrecision = std::any_of( layerDequantizations.begin(), layerDequantizations.end(), 
[](const FakeQuantizeDequantization& value) { return value.isLowPrecision(); }); - bool someDqInFpPrecision = std::any_of( + const bool someDqInFpPrecision = std::any_of( layerDequantizations.begin(), layerDequantizations.end(), [](const FakeQuantizeDequantization& value) { return !value.isLowPrecision(); }); - bool DqWithDifferentPrecision = someDqInLowPrecision && someDqInFpPrecision; - const auto axis = - ov::util::try_normalize_axis(concat->get_axis(), concat->get_output_partial_shape(0).rank(), *concat); + const bool DqWithDifferentPrecision = someDqInLowPrecision && someDqInFpPrecision; OutputVector dataNodes; NodeVector convertNodes; - NodeVector subConstants; - NodeVector mulConstants; + + using ConstVector = std::vector>; + ConstVector subConstants; + ConstVector mulConstants; std::shared_ptr subtractConvert = nullptr; for (size_t i = 0; i < layerDequantizations.size(); ++i) { const auto& dequantization = layerDequantizations[i]; @@ -121,8 +132,13 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { convertNodes.push_back(dequantization.convert); } - Shape targetShape(concat->get_input_partial_shape(i).rank().get_length(), 1ul); - targetShape[axis] = concat->get_input_partial_shape(i)[axis].get_length(); + const auto targetShape = [&]() { + if (scalar_equal_constants_requested) + return ov::Shape{}; + Shape targetShape(concat->get_input_partial_shape(i).rank().get_length(), 1ul); + targetShape[axis] = concat->get_input_partial_shape(i)[axis].get_length(); + return targetShape; + }(); if (!allDequantizationShiftAreZero) { auto subtractInput = dequantization.subtract == nullptr ? 
@@ -132,13 +148,15 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { deqPrecision), targetShape, std::vector({ 0.f })) : - broadcastElementWiseConst(dequantization.subtractConstant, targetShape); + adaptConstForConcatenation(dequantization.subtractConstant, targetShape); if (allDequantizationShiftConvertAreNotZero) { if (subtractConvert == nullptr && dequantization.subtractConvert != nullptr) { subtractConvert = dequantization.subtractConvert; } } else if (dequantization.subtractConvert != nullptr) { - subtractInput = foldConvert(subtractInput, dequantization.subtractConvert->get_convert_element_type()); + const auto& dstType = dequantization.subtractConvert->get_convert_element_type(); + subtractInput = ov::as_type_ptr(foldConvert(subtractInput, dstType)); + OPENVINO_ASSERT(subtractInput, "foldConvert must finish successfully for the concatenated subtract constant"); NetworkHelper::copyInfo(dequantization.subtractConvert, subtractInput); } subConstants.push_back(subtractInput); @@ -147,7 +165,7 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { if (!allDequantizationMultiplyAreZero) { mulConstants.push_back(dequantization.multiply == nullptr ? 
std::make_shared(deqPrecision, targetShape, std::vector({ 1.0f })) : - broadcastElementWiseConst(dequantization.multiplyConstant, targetShape)); + adaptConstForConcatenation(dequantization.multiplyConstant, targetShape)); } } @@ -162,10 +180,31 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { lastDequantization = convert; } + auto concat_constants_if_needed = [&](const ConstVector& constants) -> std::shared_ptr { + OPENVINO_ASSERT(!constants.empty(), "concat_constants_if_needed expects non empty constants vec"); + if (constants.size() == 1ul) { + return constants[0]; + } + if (scalar_equal_constants_requested) { + if (ov::shape_size(constants[0]->get_shape()) == 1) { + const auto ref_value = constants[0]->cast_vector(); + if (std::all_of(constants.cbegin() + 1, constants.cend(), [&ref_value](const auto& constant) { + return constant->template cast_vector() == ref_value; + })) { + return constants[0]; + } + } + OPENVINO_THROW("in case of dynamic concatenation dim all constants must be scalar and equal"); + } + ov::OutputVector concatInputs; + std::transform(constants.begin(), constants.end(), std::back_inserter(concatInputs), [](const auto& constant) { + return constant->output(0); + }); + return fold(concatInputs, axis); + }; + if (!subConstants.empty()) { - std::shared_ptr subtractNode = subConstants.size() == 1ul ? - subConstants[0] : - ov::pass::low_precision::fold(subConstants, axis); + auto subtractNode = concat_constants_if_needed(subConstants); if (subtractConvert != nullptr) subtractNode = subtractConvert->clone_with_new_inputs({subtractNode}); const auto subtract = std::make_shared( @@ -181,9 +220,7 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = std::make_shared>( opset1::Multiply( lastDequantization, - NetworkHelper::toScalarIfPossible(mulConstants.size() == 1ul ? 
- mulConstants[0] : - ov::pass::low_precision::fold(mulConstants, axis))), + NetworkHelper::toScalarIfPossible(concat_constants_if_needed(mulConstants))), layerDequantizations[0].multiply->get_output_element_type(0)); NetworkHelper::copyInfo({ concat, multiply }, multiply); @@ -216,9 +253,32 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) return false; } + auto base_dq_check = [&](const FakeQuantizeDequantization& dequantization) { + return !dequantization.empty() && (!updatePrecisions || dequantization.isLowPrecision()); + }; + const size_t normalizedAxis = ov::util::try_normalize_axis(axis, outRank, *concat); if (outPShape[normalizedAxis].is_dynamic()) { - return false; + // in case of dynamic dimension we can propagate all dequantizations only if they are all scalar and equal, + // since DQ broadcast is impossible (requested shape is unknown), and only single scalar DQ after Concat can be set + const auto dequantization_ref = NetworkHelper::getDequantization(concat, defaultPrecisions, 0); + if (!base_dq_check(dequantization_ref) || !dequantization_ref.isPerTensor()) + return false; + + auto extract_values = [](const std::shared_ptr& constant) { + return constant ? 
constant->cast_vector() : std::vector(); + }; + const auto ref_shifts = extract_values(dequantization_ref.subtractConstant); + const auto ref_scales = extract_values(dequantization_ref.multiplyConstant); + + for (size_t i = 1ul; i < concat->get_input_size(); i++) { + const auto cur_dequantization = NetworkHelper::getDequantization(concat, defaultPrecisions, i); + if (!base_dq_check(dequantization_ref) || + ref_shifts != extract_values(cur_dequantization.subtractConstant) || + ref_scales != extract_values(cur_dequantization.multiplyConstant)) + return false; + } + return true; } auto checkConstShape = [&normalizedAxis, &outRank](const std::shared_ptr& constant) { @@ -235,7 +295,6 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) }; const auto check_const_precision = []( - const FakeQuantizeDequantization& dequantization, const std::shared_ptr& constant, ov::element::Type& const_precision) { if (constant == nullptr) { @@ -253,9 +312,8 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) for (size_t i = 0ul; i < concat->get_input_size(); i++) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(concat, defaultPrecisions, i); - if (dequantization.empty() || (updatePrecisions && !dequantization.isLowPrecision())) { + if (!base_dq_check(dequantization)) return false; - } if (((dequantization.subtract != nullptr) && (!checkConstShape(dequantization.subtractConstant))) || ((dequantization.multiply != nullptr) && (!checkConstShape(dequantization.multiplyConstant)))) { @@ -268,9 +326,9 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) return false; } - if (!check_const_precision(dequantization, dequantization.subtractConvert, const_precision) || - ((dequantization.subtractConvert == nullptr) && !check_const_precision(dequantization, dequantization.subtractConstant, const_precision)) || - !check_const_precision(dequantization, dequantization.multiplyConstant, 
const_precision)) { + if (!check_const_precision(dequantization.subtractConvert, const_precision) || + ((dequantization.subtractConvert == nullptr) && !check_const_precision(dequantization.subtractConstant, const_precision)) || + !check_const_precision(dequantization.multiplyConstant, const_precision)) { return false; } } diff --git a/src/common/low_precision_transformations/tests/concat_transformation.cpp b/src/common/low_precision_transformations/tests/concat_transformation.cpp index 4d6973f3f440cf..d833e21ad81584 100644 --- a/src/common/low_precision_transformations/tests/concat_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_transformation.cpp @@ -128,6 +128,110 @@ const std::vector testValues = { {ov::element::f32, {128.f}, {0.1f}} } }, + // dynamic concatenation axis, but the same per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {0.1f}} + } + }, + { + ov::element::u8, + {{}, {}}, + ov::element::u8, + {ov::element::f32, {128.f}, {0.1f}} + } + }, + // dynamic concatenation axis, but the same per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {}, {{0.1f}, ov::element::f32, {1, 1, 1}}}, + {ov::element::f32, {}, {{0.1f}, ov::element::f32, {1, 1, 1}}} + } + }, + { + ov::element::u8, + {{}, {}}, + ov::element::u8, + {ov::element::f32, {}, {0.1f}} + } + }, + // dynamic concatenation axis, dq don't match + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {}, {0.1f}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {}, {0.1f}} + }, + ov::element::f32, + {} + } + }, + // dynamic 
concatenation axis, different per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {10.f}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {10.f}} + }, + ov::element::f32, + {} + } + }, + // dynamic output concatenation axis, but one input dim is static + { + {{1, -1, 4, 4}, {1, 3, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {{128.f, 64.f, 128.f}}, {{10.f, 1.f, 10.f}}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {{128.f, 64.f, 128.f}}, {{10.f, 1.f, 10.f}}} + }, + ov::element::f32, + {} + } + }, { {{1, 3, 4, 4}, {1, 3, 4, 4}}, std::int64_t{1}, From e8d01dc46fd8c74feb0ae79ea28809c9acee1290 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 21 Jan 2025 10:52:13 +0800 Subject: [PATCH 20/35] fix CID issue1590313, 1590213, 1588539, 1588497, 1588220 (#28541) ### Details: - *fix CID issue1590313, 1590213, 1588539, 1588497, 1588220* ### Tickets: - *ticket-id* --- .../openvino/runtime/threading/istreams_executor.hpp | 2 +- src/inference/src/dev/threading/cpu_streams_executor.cpp | 4 ++-- src/inference/src/dev/threading/istreams_executor.cpp | 2 +- src/plugins/intel_cpu/src/cpu_streams_calculation.cpp | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp index efb9d41a4dd5a6..18e7216cf22e0d 100644 --- a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp +++ b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp @@ -150,7 +150,7 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public 
ITaskExecutor { _cpu_pinning{cpu_pinning}, _cores_limit{cores_limit}, _streams_info_table{std::move(streams_info_table)}, - _rank{rank}, + _rank{std::move(rank)}, _add_lock(add_lock) { update_executor_config(_add_lock); } diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index a10709aa6db3df..0313c4f5aabc6b 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -36,7 +36,7 @@ struct CPUStreamsExecutor::Impl { : custom::task_scheduler_observer(arena), _mask{std::move(mask)}, _ncpus(ncpus), - _cpu_ids(cpu_ids) {} + _cpu_ids(std::move(cpu_ids)) {} void on_scheduler_entry(bool) override { pin_thread_to_vacant_core(tbb::this_task_arena::current_thread_index(), _threadBindingStep, @@ -167,7 +167,7 @@ struct CPUStreamsExecutor::Impl { _rank = _impl->_config.get_rank(); get_cur_stream_info(stream_id, _impl->_config.get_cpu_pinning(), - org_proc_type_table, + std::move(org_proc_type_table), _impl->_config.get_streams_info_table(), stream_type, concurrency, diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp index 59201baadfd387..663c7d138b397f 100644 --- a/src/inference/src/dev/threading/istreams_executor.cpp +++ b/src/inference/src/dev/threading/istreams_executor.cpp @@ -234,7 +234,7 @@ void IStreamsExecutor::Config::update_executor_config() { if (_thread_preferred_core_type == ov::hint::SchedulingCoreType::ECORE_ONLY) { stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC; stream_info[NUMBER_OF_STREAMS] = _streams; - _streams_info_table.push_back(stream_info); + _streams_info_table.push_back(std::move(stream_info)); } else { int start = proc_type_table.size() > 1 ? 
1 : 0; std::vector core_types; diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 6b68afffa711e7..8b2c7c620923fe 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -514,7 +514,7 @@ std::vector> get_streams_info_table( ALL_PROC); } else if (stream_info[PROC_TYPE] == MAIN_CORE_PROC) { if (stream_info[THREADS_PER_STREAM] == proc_socket_table[0][MAIN_CORE_PROC]) { - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } else { stream_info[PROC_TYPE] = ALL_PROC; streams_info_table.push_back(stream_info); @@ -524,10 +524,10 @@ std::vector> get_streams_info_table( streams_info_table.push_back(stream_info); stream_info[PROC_TYPE] = HYPER_THREADING_PROC; stream_info[THREADS_PER_STREAM] = proc_socket_table[0][HYPER_THREADING_PROC]; - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } } else { - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } } From 15914a9ca0f88206a5a46a6484c19ffa01586e08 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 21 Jan 2025 10:52:49 +0800 Subject: [PATCH 21/35] fix openvino panic when run CPU inference on XEON HBM platform (#28426) ### Details: - *skip HBM numa node on XEON HBM platform* ### Tickets: - *[issues-28335](https://github.com/openvinotoolkit/openvino/issues/28335)* --- src/inference/src/os/lin/lin_system_conf.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index a235227a4b56f0..f809f15a362943 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -140,7 +140,9 @@ CPU::CPU() { } std::string cache_info; std::getline(cache_file, cache_info); - 
node_info_table.emplace_back(std::move(cache_info)); + if (cache_info.size() > 0) { + node_info_table.emplace_back(std::move(cache_info)); + } node_index++; } }; From 0aee40dc6187cca73edbb162f2e1754ce97e260c Mon Sep 17 00:00:00 2001 From: Jade Cho Date: Tue, 21 Jan 2025 13:16:48 +0900 Subject: [PATCH 22/35] [GPU] Fix a group conv unit test fail. (#28511) ### Tickets: - *160644* --- .../cl_kernels/convolution_gpu_imad.cl | 12 ++++++------ .../tests/unit/test_cases/convolution_gpu_test.cpp | 4 +--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl index 0cf873f570cf8e..cf442ee80b9bce 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl @@ -282,12 +282,6 @@ KERNEL (fused_convolution_eltwise_gpu_imad)( out[br * OUT_BLOCK_WIDTH + bc] = TO_ACCUMULATOR_TYPE(IMAD(out[br * OUT_BLOCK_WIDTH + bc], inputs, AS_FILTER_TYPE_4(w[wi]))); - #ifdef ASYMMETRIC_WEIGHTS_QUANTIZATION - ACCUMULATOR_TYPE dotProdAxWZP = 0; - dotProdAxWZP = TO_ACCUMULATOR_TYPE(IMAD(dotProdAxWZP, inputs, AS_FILTER_TYPE_4(weights_zp_val))); - out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAxWZP; - #endif - #if !defined COMPENSATION_TERM && defined ASYMMETRIC_DATA_QUANTIZATION out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAZPxW; #endif @@ -297,6 +291,12 @@ KERNEL (fused_convolution_eltwise_gpu_imad)( defined ASYMMETRIC_WEIGHTS_QUANTIZATION) out[br * OUT_BLOCK_WIDTH + bc] += dotProdAZPxWZP; #endif + + #ifdef ASYMMETRIC_WEIGHTS_QUANTIZATION + ACCUMULATOR_TYPE dotProdAxWZP = 0; + dotProdAxWZP = TO_ACCUMULATOR_TYPE(IMAD(dotProdAxWZP, inputs, AS_FILTER_TYPE_4(weights_zp_val))); + out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAxWZP; + #endif } } wi++; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 30d12c490e3d15..bb952e860d0dfe 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -7685,9 +7685,7 @@ INSTANTIATE_TEST_SUITE_P(convolution_grouped_fsv4_fsv16, TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, false, false, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, false, true, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, false, false, format::b_fs_yx_fsv4, ""), - - // TODO: It will be fix soon, test reference is wrong in new driver. - // TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, true, false, format::b_fs_yx_fsv4, ""), + TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, true, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, false, true, format::b_fs_yx_fsv4, ""), // Format: b_fs_yx_fsv16 From c3bdeaf422ec536d1ae5423c3d2d2bfee337cf98 Mon Sep 17 00:00:00 2001 From: "Min, Byungil" Date: Tue, 21 Jan 2025 13:41:06 +0900 Subject: [PATCH 23/35] [GPU] Fix error onednn grouped size dyn-quan with enabled asymmetric config (#28497) + Fixed runtime error of grouped size dyn-quan of onednn if DynamicQuantizeAsym is enabled + Disable dyn-quan if DynamicQuantizeAsym is enabled with grouped size ### Details: - Disable dyn-quan if DynamicQuantizeAsym is enabled with grouped size ### Tickets: - CVS-160327 Signed-off-by: Min, Byung il --- .../transformations/dynamic_quantize_fully_connected.cpp | 4 +++- .../intel_gpu/src/plugin/transformations_pipeline.cpp | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp 
b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp index a8eb149ff28646..f5607e98ab0f6f 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp @@ -56,7 +56,9 @@ DynamicQuantizeFullyConnected::DynamicQuantizeFullyConnected(uint64_t group_size config.scale_dt = element::f16; config.group_sizes = shape_group_size; - GPU_DEBUG_IF(debug_config->dynamic_quantize_asym) { + // AZP does not support grouped size dyn-quan + // XXX: This is currently wrapped as GPU_DEBUG_IF as dynamic_quantize_asym is not exposed through public API. + GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && group_size == UINT64_MAX) { config.quantization_type = QuantizationType::Asymmetric; config.quantization_dt = element::u8; config.zp_dt = element::u8; // it supports u8 only now diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 9094354a03fbe8..a2bdac78fcb805 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -1077,6 +1077,13 @@ void TransformationsPipeline::apply(std::shared_ptr func) { return true; } + // AZP does not support grouped size dyn-quan + GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && (dynamic_quantization_group_size != UINT64_MAX)) { + GPU_DEBUG_TRACE << root->get_friendly_name() << " dyn_quan is turned off: asym quantization does not support grouped quantization" << + " ('DynamicQuantizeAsym' is enabled with grouped size dyn-quan)" << std::endl; + return true; + } + bool has_wzp = root->get_input_size() > 4; if ((root->get_input_element_type(1) == ov::element::i8 || root->get_input_element_type(1) == ov::element::u8) && has_wzp @@ -1085,6 +1092,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { " asym 8bit 
weight does not support grouped quantization" << std::endl; return true; } + return false; }); manager.register_pass(dynamic_quantization_group_size); From fb1838fbe8d2d1054ff331c5958397b30df5df6d Mon Sep 17 00:00:00 2001 From: Srinjoy Dutta <114402816+srinjoydutta03@users.noreply.github.com> Date: Tue, 21 Jan 2025 10:41:15 +0530 Subject: [PATCH 24/35] [CPU][ARM64] Implement JIT Emitter for Eltwise Less Operation (#28494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Details: - Implemented and added `jit_less_emitter` derived class for element wise less operation - Added entry `Algorithm::EltwiseLess`, in executors/aarch64 as one of the supported algorithms - Added entry in the `get_supported_precisions` and `create_eltwise_emitters` in kernel/aarch64 ### Tests - Passed local tests using `./bin/aarch64/Release/ov_cpu_func_tests --gtest_filter='*smoke*ComparisonLayerTest*Less*'` Screenshot 2025-01-16 at 7 23 39 PM ### Tickets: - Closes #24415 --- .../plugin/aarch64/jit_eltwise_emitters.cpp | 59 +++++++++++++++++++ .../plugin/aarch64/jit_eltwise_emitters.hpp | 28 +++++++++ .../nodes/executors/aarch64/jit_eltwise.cpp | 1 + .../aarch64/jit_uni_eltwise_generic.cpp | 2 + 4 files changed, 90 insertions(+) diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp index a2041718a14875..b1e64cd25ba0b4 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp @@ -1363,6 +1363,65 @@ void jit_is_nan_emitter::register_table_entries() { push_arg_entry_of("zero", 0x00000000, true); } +/// LESS /// +jit_less_emitter::jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node) + : jit_emitter(host, host_isa, node, 
get_arithmetic_binary_exec_precision(node)) { + prepare_table(); +} + +jit_less_emitter::jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc) + : jit_emitter(host, host_isa, exec_prc) { + prepare_table(); +} + +size_t jit_less_emitter::get_inputs_count() const { + return 2; +} + +size_t jit_less_emitter::get_aux_vecs_count() const { + return 1; +} + +size_t jit_less_emitter::get_aux_gprs_count() const { + return 1; +} + +void jit_less_emitter::emit_impl(const std::vector& in_vec_idxs, + const std::vector& out_vec_idxs) const { + if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) { + emit_isa(in_vec_idxs, out_vec_idxs); + } else { + OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel"); + } +} + +template +void jit_less_emitter::emit_isa(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const { + OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string()); + + using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits::TReg; + const TReg src1 = TReg(in_vec_idxs[0]); + const TReg src2 = TReg(in_vec_idxs[1]); + const TReg dst = TReg(out_vec_idxs[0]); + const TReg aux = TReg(aux_vec_idxs[0]); + + h->fcmgt(dst.s, src2.s, src1.s); + h->ld1r(aux.s, table_val2("one")); + h->and_(dst.b16, dst.b16, aux.b16); +} + +void jit_less_emitter::register_table_entries() { + push_arg_entry_of("one", 0x3f800000, true); +} + +std::set> jit_less_emitter::get_supported_precisions(const std::shared_ptr& node) { + return {{element::f32, element::f32}}; +} + /// LESS_EQUAL /// jit_less_equal_emitter::jit_less_equal_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp index c4c70c6651522d..5d0e00e2da42b0 100644 --- 
a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp @@ -608,6 +608,34 @@ class jit_is_inf_emitter : public jit_emitter { bool detect_positive; }; +class jit_less_emitter : public jit_emitter { +public: + jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc = ov::element::f32); + + jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& n); + + size_t get_inputs_count() const override; + + size_t get_aux_vecs_count() const override; + + size_t get_aux_gprs_count() const override; + + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); + +private: + void emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const override; + + template + void emit_isa(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const; + + void register_table_entries() override; +}; + class jit_less_equal_emitter : public jit_emitter { public: jit_less_equal_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, diff --git a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp index d5b893b67bf2b1..8d5e905f10e86a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp @@ -38,6 +38,7 @@ bool JitEltwiseExecutor::isSupported(const Algorithm& algorithm, Algorithm::EltwiseIsFinite, Algorithm::EltwiseIsInf, Algorithm::EltwiseIsNaN, + Algorithm::EltwiseLess, Algorithm::EltwiseLessEqual, Algorithm::EltwiseLogicalAnd, Algorithm::EltwiseLogicalOr, diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp 
b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp index 66db416ec7c732..5e69cfb36b5462 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp @@ -670,6 +670,7 @@ std::shared_ptr jit_uni_eltwise_generic::create_eltwise_emitte OV_CASE(Algorithm::EltwiseHswish, ov::intel_cpu::aarch64::jit_hswish_emitter), OV_CASE(Algorithm::EltwiseIsFinite, ov::intel_cpu::aarch64::jit_is_finite_emitter), OV_CASE(Algorithm::EltwiseIsInf, ov::intel_cpu::aarch64::jit_is_inf_emitter), + OV_CASE(Algorithm::EltwiseLess, ov::intel_cpu::aarch64::jit_less_emitter), OV_CASE(Algorithm::EltwiseLessEqual, ov::intel_cpu::aarch64::jit_less_equal_emitter), OV_CASE(Algorithm::EltwiseLogicalAnd, ov::intel_cpu::aarch64::jit_logical_and_emitter), OV_CASE(Algorithm::EltwiseLogicalOr, ov::intel_cpu::aarch64::jit_logical_or_emitter), @@ -863,6 +864,7 @@ std::set> eltwise_precision_helper::get_supported_pre OV_CASE(Algorithm::EltwiseIsFinite, jit_is_finite_emitter), OV_CASE(Algorithm::EltwiseIsInf, jit_is_inf_emitter), OV_CASE(Algorithm::EltwiseIsNaN, jit_is_nan_emitter), + OV_CASE(Algorithm::EltwiseLess, jit_less_emitter), OV_CASE(Algorithm::EltwiseLessEqual, jit_less_equal_emitter), OV_CASE(Algorithm::EltwiseLogicalAnd, jit_logical_and_emitter), OV_CASE(Algorithm::EltwiseLogicalOr, jit_logical_or_emitter), From 8d5f583bc7e56152440192806b3acda619a997fe Mon Sep 17 00:00:00 2001 From: Gorokhov Dmitriy Date: Tue, 21 Jan 2025 09:49:43 +0400 Subject: [PATCH 25/35] [CPU] Fixed FC dynamic quantization accuracy issue (#28554) ### Details: - Cherry-picks: https://github.com/openvinotoolkit/openvino/pull/28553 --- .../src/x64/matmul_weights_decompression.cpp | 11 ++++++++--- src/plugins/intel_cpu/thirdparty/onednn | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git 
a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp index e2e04501368ac7..1ac681b3b6eff2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp @@ -211,6 +211,11 @@ const std::vector input_shapes_basic_dyn_quant = }; const std::vector weights_precisions_dyn_quant = {ov::element::u8, ov::element::u4}; +const std::vector fusing_params_dyn_quant{ + emptyFusingSpec, + fusingBias, // bias is handled in separate code-path with post-ops + fusingSwish // max amount of post-op regs (which reduces available accum regs) +}; std::vector filter_additional_config_dyn_quant() { std::vector additional_config = { @@ -232,7 +237,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_non_default_dyn_quant_gro ::testing::ValuesIn(decompression_subtract_type), ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_dyn_quant()), - ::testing::ValuesIn(fusing_params), + ::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -249,7 +254,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_sym_non_default_dyn_quant ::testing::Values(DecompressionType::empty), ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_dyn_quant()), - ::testing::ValuesIn(fusing_params), + ::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -265,7 +270,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_mxfp4, // todo: zero points converted to fp32 for reshape == true case ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_basic()), - 
::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); diff --git a/src/plugins/intel_cpu/thirdparty/onednn b/src/plugins/intel_cpu/thirdparty/onednn index c7ecd8fc43610c..1789b1e0ae441d 160000 --- a/src/plugins/intel_cpu/thirdparty/onednn +++ b/src/plugins/intel_cpu/thirdparty/onednn @@ -1 +1 @@ -Subproject commit c7ecd8fc43610c82af317c178d28630bd948cb04 +Subproject commit 1789b1e0ae441de15d793123003a900a35d1dc71 From 6aab9ccf15ab41acae4583798bfd9f151f155c35 Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Tue, 21 Jan 2025 10:13:59 +0200 Subject: [PATCH 26/35] [NPU] Adding extra features for the state tensors (#28414) ### Details: Add the last features to the state tensors: - Update MutableCommandList instead of memcpy if memory was allocated in the same L0 context - set_shape is available for state tensors as well. ### Tickets: - *CVS-160364* --------- Signed-off-by: Bogdan Pereanu --- .../backend/include/zero_infer_request.hpp | 8 + .../backend/include/zero_variable_state.hpp | 77 ++++ .../src/backend/src/zero_infer_request.cpp | 276 ++++++++----- .../src/backend/src/zero_pipeline.cpp | 21 +- .../src/backend/src/zero_variable_state.cpp | 80 ++++ .../intel_npu/common/sync_infer_request.hpp | 2 + .../intel_npu/common/variable_state.hpp | 7 +- .../src/common/src/sync_infer_request.cpp | 7 +- .../intel_npu/utils/zero/zero_utils.hpp | 27 ++ .../functional/behavior/infer_request_run.cpp | 2 +- .../functional/behavior/infer_request_run.hpp | 99 +++++ .../remote_tensor_tests/remote_run.cpp | 2 +- .../remote_tensor_tests/remote_run.hpp | 374 ++++++++++++++++++ .../tests/functional/common/utils.cpp | 33 ++ .../tests/functional/common/utils.hpp | 11 +- 15 files changed, 891 insertions(+), 135 deletions(-) create mode 100644 src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp create mode 100644 src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp diff --git 
a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp index aaaa128518b34f..c40142c75608b8 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp @@ -67,11 +67,19 @@ class ZeroInferRequest final : public SyncInferRequest { const ov::Shape& shape, const ov::Allocator& allocator = {}) const override; + void add_state(const IODescriptor& descriptor, size_t tensorIndex) const override; + + void update_pipeline_if_memory_changed(); + void update_states_if_memory_changed(); + const std::shared_ptr _initStructs; const std::shared_ptr _graph; const Config _config; Logger _logger; + const std::vector& _graphInputDescriptors; + const std::vector& _graphOutputDescriptors; + // A copy of each tensor is needed to maintain the original L0 memory allocation in case the user provides another // memory area for the tensor. mutable std::vector>> _levelZeroInputTensors; diff --git a/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp b/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp new file mode 100644 index 00000000000000..c7c03bcfe4c8d8 --- /dev/null +++ b/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "intel_npu/config/config.hpp" +#include "intel_npu/utils/logger/logger.hpp" +#include "intel_npu/utils/zero/zero_init.hpp" +#include "openvino/runtime/ivariable_state.hpp" + +namespace intel_npu { + +/** + * @brief Interface for zero variable state implementation + * @note In case the memory was allocated in the same level zero context use that memory, otherwise use memcpy at infer + * time. Also, get correct data if remote tensor is used. 
+ */ +class ZeroVariableState final : public ov::IVariableState { +public: + explicit ZeroVariableState(const std::shared_ptr& init_structs, + const std::string& name, + const ov::SoPtr& tensor, + size_t tensor_index, + size_t related_tensor_index, + const Config& config); + + void set_state(const ov::SoPtr& new_state) override; + + void reset() override; + + /** + * @brief Get input tensor index used internally for the state + */ + size_t get_tensor_index() const; + + /** + * @brief Get output tensor index used internally for the state + * @details The related tensors are defined by state input, state output pairs. + */ + size_t get_related_tensor_index() const; + + /** + * @brief Get acknowledge if the tensor was updated + */ + bool tensor_was_updated() const; + + /** + * @brief Reset tensor updated flag + */ + void reset_tensor_updated_flag(); + + /** + * @brief Get acknowledge if the zero tensor was updated + * @details In case the memory was allocated in the same level zero context update the zero tensor + */ + bool zero_tensor_should_be_updated() const; + + /** + * @brief Reset zero tensor updated flag + */ + void reset_zero_tensor_updated_flag(); + + ~ZeroVariableState() override = default; + +private: + std::shared_ptr _init_structs; + size_t _tensor_index; + size_t _related_tensor_index; + + bool _tensor_updated = false; + bool _zero_tensor_updated = false; + + Logger _logger; +}; + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp index b7049f62af6d31..034f69f63e4158 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp @@ -13,6 +13,7 @@ #include "openvino/op/util/op_types.hpp" #include "openvino/runtime/intel_npu/remote_properties.hpp" #include "zero_memory.hpp" +#include "zero_variable_state.hpp" using namespace intel_npu; @@ -63,33 +64,6 @@ void 
check_level_zero_attributes_match(const IODescriptor& ioDescriptor, const A } } -template -Type extract_object(const ov::AnyMap& params, const ov::Property& p) { - auto itrHandle = params.find(p.name()); - ov::Any res = nullptr; - if (itrHandle == params.end()) { - OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); - } - res = itrHandle->second; - return res.as(); -} - -bool memory_was_allocated_in_the_same_l0_context(ze_context_handle_t hContext, const void* ptr) { - ze_memory_allocation_properties_t desc = {}; - desc.stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES; - auto res = intel_npu::zeMemGetAllocProperties(hContext, ptr, &desc, nullptr); - if (res == ZE_RESULT_SUCCESS) { - if (desc.id) { - if ((desc.type & ZE_MEMORY_TYPE_HOST) || (desc.type & ZE_MEMORY_TYPE_DEVICE) || - (desc.type & ZE_MEMORY_TYPE_SHARED)) { - return true; - } - } - } - - return false; -} - } // namespace //------------------------------------------------------------------------------ @@ -101,13 +75,13 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& _graph(compiledModel->get_graph()), _config(config), _logger("ZeroInferRequest", config.get()), + _graphInputDescriptors(_graph->get_input_descriptors()), + _graphOutputDescriptors(_graph->get_output_descriptors()), _levelZeroInputTensors(_metadata.inputs.size(), std::vector>(1, nullptr)), _levelZeroOutputTensors(_metadata.outputs.size(), nullptr), _profilingPool(_initStructs, _graph, zeroProfiling::POOL_SIZE), _profilingQuery(_initStructs, 0) { _logger.debug("ZeroInferRequest::ZeroInferRequest - SyncInferRequest"); - const std::vector& executorInputDescriptors = _graph->get_input_descriptors(); - const std::vector& executorOutputDescriptors = _graph->get_output_descriptors(); auto proftype = config.get(); if (proftype == ov::intel_npu::ProfilingType::INFER) { @@ -127,7 +101,7 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& size_t ioIndex = 0; for (const IODescriptor& inputDescriptor : 
_metadata.inputs) { - check_level_zero_attributes_match(inputDescriptor, executorInputDescriptors.at(ioIndex)); + check_level_zero_attributes_match(inputDescriptor, _graphInputDescriptors.at(ioIndex)); if (!(inputDescriptor.isStateInput || inputDescriptor.isShapeTensor)) { ++ioIndex; @@ -142,7 +116,7 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& ioIndex = 0; for (const IODescriptor& outputDescriptor : _metadata.outputs) { - check_level_zero_attributes_match(outputDescriptor, executorOutputDescriptors.at(ioIndex)); + check_level_zero_attributes_match(outputDescriptor, _graphOutputDescriptors.at(ioIndex)); if (!(outputDescriptor.isStateOutput || outputDescriptor.isShapeTensor)) { ++ioIndex; @@ -203,6 +177,29 @@ void ZeroInferRequest::create_pipeline() { auto groupOrdinal = zeroUtils::findGroupOrdinal(_initStructs->getDevice(), _properties); _logger.debug("ZeroInferRequest::create_pipeline - init completed"); + // Set new tensors and reset variable state flag if memory updated before creating the pipeline + _logger.debug("ZeroInferRequest::create_pipeline - set new tensors and reset variable state flag if memory updated " + "before creating the pipeline"); + for (const auto& variableState : _variableStates) { + auto zeroState = std::dynamic_pointer_cast(variableState._ptr); + + OPENVINO_ASSERT(zeroState != nullptr, "State is not compatible with NPU plugin"); + + if (zeroState->tensor_was_updated()) { + get_user_input(zeroState->get_tensor_index()) = zeroState->get_state(); + _userOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state(); + + zeroState->reset_tensor_updated_flag(); + + if (zeroState->zero_tensor_should_be_updated()) { + zeroState->reset_zero_tensor_updated_flag(); + + get_level_zero_input(zeroState->get_tensor_index()) = zeroState->get_state()._ptr; + _levelZeroOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state()._ptr; + } + } + } + _logger.debug("ZeroInferRequest::create_pipeline - 
constructing pipeline"); // Construct pipeline @@ -228,7 +225,7 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr& tenso bool updateCommandListArg = false; OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation"); - if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context"); levelZeroTensors = tensor; updateCommandListArg = true; @@ -268,7 +265,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr( - extract_object(tensor->get_context()->get_property(), ov::intel_npu::l0_context)); + zeroUtils::extract_object(tensor->get_context()->get_property(), ov::intel_npu::l0_context)); if (_initStructs->getContext() != l0_context) { OPENVINO_THROW("Using different context for creating the tensor is not supported"); } @@ -279,7 +276,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptrget_properties(), ov::intel_npu::mem_handle); + auto data = zeroUtils::extract_object(tensor->get_properties(), ov::intel_npu::mem_handle); OPENVINO_ASSERT(data, "Empty buffer"); OV_ITT_TASK_NEXT(ZERO_SET_REMOTE_TENSOR, "updateCommandList"); @@ -371,7 +368,8 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, bool tensorHasSameL0Context = false; OV_ITT_TASK_NEXT(SET_TENSORS, "check_data_allocation"); - if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensors[i]->data())) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), + tensors[i]->data())) { _logger.debug("ZeroInferRequest::set_tensors - tensor was created in the same L0 context"); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; @@ -390,7 +388,7 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, } else { _logger.debug("ZeroInferRequest::set_tensors - 
remote tensor is used"); - data = extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; } @@ -453,6 +451,112 @@ ov::SoPtr ZeroInferRequest::get_tensor(const ov::Output(levelZeroTensor.at(SINGLE_TENSOR)); + + if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || + is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update input graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_input_descriptors().at(ioIndex).idx, + zeroTensor->data(), + zeroTensor->get_byte_size()); + closePipeline = true; + + if (!inputDescriptor.isStateInput) { + zeroTensor->reset_memory_flag(); + } + } + + ++ioIndex; + } + + ioIndex = 0; + + for (const auto& levelZeroTensor : _levelZeroOutputTensors) { + const auto outputDescriptor = _metadata.outputs.at(ioIndex); + auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor); + + if (outputDescriptor.isShapeTensor || is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update output graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_output_descriptors().at(ioIndex).idx, + zeroTensor->data(), + zeroTensor->get_byte_size()); + closePipeline = true; + + zeroTensor->reset_memory_flag(); + } + + ++ioIndex; + } + + if (closePipeline) { + _pipeline->closeCommandList(); + } +} + +void ZeroInferRequest::update_states_if_memory_changed() { + bool closePipeline = false; + + for (const auto& variableState : _variableStates) { + auto zeroState = 
std::dynamic_pointer_cast(variableState._ptr); + + OPENVINO_ASSERT(zeroState != nullptr, "State is not compatible with NPU plugin"); + + if (zeroState->tensor_was_updated()) { + get_user_input(zeroState->get_tensor_index()) = zeroState->get_state(); + _userOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state(); + + zeroState->reset_tensor_updated_flag(); + + if (zeroState->zero_tensor_should_be_updated()) { + auto remoteTensor = std::dynamic_pointer_cast(zeroState->get_state()._ptr); + + void* userBuffer = !remoteTensor ? zeroState->get_state()->data() + : zeroUtils::extract_object(remoteTensor->get_properties(), + ov::intel_npu::mem_handle); + + _pipeline->updateCommandList(_graphInputDescriptors.at(zeroState->get_tensor_index()).idx, + userBuffer, + zeroState->get_state()->get_byte_size()); + + _pipeline->updateCommandList(_graphOutputDescriptors.at(zeroState->get_related_tensor_index()).idx, + userBuffer, + zeroState->get_state()->get_byte_size()); + + zeroState->reset_zero_tensor_updated_flag(); + + get_level_zero_input(zeroState->get_tensor_index()) = zeroState->get_state()._ptr; + _levelZeroOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state()._ptr; + + closePipeline = true; + } + } + } + + if (closePipeline) { + _pipeline->closeCommandList(); + } +} + void ZeroInferRequest::infer() { if (_config.get()) { OPENVINO_THROW("Only start async is supported when RUN_INFERENCES_SEQUENTIALLY is enabled!"); @@ -476,64 +580,8 @@ void ZeroInferRequest::infer_async() { _pipelineIsCreated = true; } else { if (_initStructs->getMutableCommandListVersion()) { - bool closePipeline = false; - size_t ioIndex = 0; - - for (const auto& levelZeroTensor : _levelZeroInputTensors) { - const auto inputDescriptor = _metadata.inputs.at(ioIndex); - auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor.at(SINGLE_TENSOR)); - - if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || inputDescriptor.isStateInput || - 
is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) { - ++ioIndex; - continue; - } - - if (zeroTensor->memory_address_changed()) { - _logger.debug("Update input graph descriptor with the new tensor"); - OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); - - _pipeline->updateCommandList(_graph->get_input_descriptors().at(ioIndex).idx, - zeroTensor->data(), - zeroTensor->get_byte_size()); - closePipeline = true; - - zeroTensor->reset_memory_flag(); - } - - ++ioIndex; - } - - ioIndex = 0; - - for (const auto& levelZeroTensor : _levelZeroOutputTensors) { - const auto outputDescriptor = _metadata.outputs.at(ioIndex); - auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor); - - if (outputDescriptor.isShapeTensor || outputDescriptor.isStateOutput || - is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) { - ++ioIndex; - continue; - } - - if (zeroTensor->memory_address_changed()) { - _logger.debug("Update output graph descriptor with the new tensor"); - OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); - - _pipeline->updateCommandList(_graph->get_output_descriptors().at(ioIndex).idx, - zeroTensor->data(), - zeroTensor->get_byte_size()); - closePipeline = true; - - zeroTensor->reset_memory_flag(); - } - - ++ioIndex; - } - - if (closePipeline) { - _pipeline->closeCommandList(); - } + update_pipeline_if_memory_changed(); + update_states_if_memory_changed(); } } } @@ -561,10 +609,10 @@ void ZeroInferRequest::infer_async() { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = - !userBatchRemoteTensor - ? userTensor.at(i)->data() - : extract_object(userBatchRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor + ? 
userTensor.at(i)->data() + : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); if (userBuffer != levelZeroBuffer) { if (userBuffer == nullptr || levelZeroBuffer == nullptr) { @@ -586,9 +634,10 @@ void ZeroInferRequest::infer_async() { for (size_t i = 0; i < userTensor.size(); i++) { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = !userBatchRemoteTensor ? userTensor.at(i)->data() - : extract_object(userBatchRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor + ? userTensor.at(i)->data() + : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); std::memcpy(static_cast(levelZeroBuffer) + (i * userTensor.at(i)->get_byte_size()), userBuffer, @@ -601,9 +650,9 @@ void ZeroInferRequest::infer_async() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor.at(SINGLE_TENSOR)._ptr); - void* userBuffer = !userRemoteTensor - ? userTensor.at(SINGLE_TENSOR)->data() - : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userRemoteTensor ? userTensor.at(SINGLE_TENSOR)->data() + : zeroUtils::extract_object(userRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); const auto& levelZeroTensor = get_level_zero_input(inputIndex); if (!is_remote_tensor(levelZeroTensor)) { @@ -652,9 +701,9 @@ void ZeroInferRequest::get_result() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor._ptr); - void* userBuffer = !userRemoteTensor - ? userTensor->data() - : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userRemoteTensor ? 
userTensor->data() + : zeroUtils::extract_object(userRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); const std::shared_ptr& levelZeroTensor = _levelZeroOutputTensors.at(outputIndex); if (!is_remote_tensor(levelZeroTensor)) { @@ -751,6 +800,19 @@ std::shared_ptr ZeroInferRequest::create_tensor(ov::element::Type t return std::make_shared(_initStructs, type, shape, allocator); } +void ZeroInferRequest::add_state(const IODescriptor& descriptor, size_t tensorIndex) const { + OPENVINO_ASSERT(descriptor.relatedDescriptorIndex.has_value(), + "The link between state descriptors is missing, state name: ", + descriptor.nameFromCompiler); + + _variableStates.push_back(std::make_shared(_initStructs, + descriptor.nameFromCompiler, + get_user_input(tensorIndex), + tensorIndex, + *descriptor.relatedDescriptorIndex, + _config)); +} + std::vector ZeroInferRequest::get_raw_profiling_data() const { return _profilingQuery.getData(); } diff --git a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp index 7ada704c9969d8..a01238a899e0dc 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp @@ -15,21 +15,6 @@ #include "intel_npu/utils/zero/zero_types.hpp" #include "zero_remote_tensor.hpp" -namespace { - -template -Type extract_object(const ov::AnyMap& params, const ov::Property& p) { - auto itrHandle = params.find(p.name()); - ov::Any res = nullptr; - if (itrHandle == params.end()) { - OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); - } - res = itrHandle->second; - return res.as(); -} - -} // namespace - namespace intel_npu { Pipeline::Pipeline(const Config& config, @@ -80,7 +65,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(i)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = 
zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value(desc.idx, data); @@ -94,7 +79,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(0)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value( @@ -112,7 +97,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = output_tensors.at(io_index)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value( diff --git a/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp new file mode 100644 index 00000000000000..19cabfb4246e5d --- /dev/null +++ b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "zero_variable_state.hpp" + +#include "intel_npu/config/common.hpp" +#include "intel_npu/utils/zero/zero_utils.hpp" +#include "zero_remote_tensor.hpp" + +namespace intel_npu { + +ZeroVariableState::ZeroVariableState(const std::shared_ptr& init_structs, + const std::string& name, + const ov::SoPtr& tensor, + size_t tensor_index, + size_t related_tensor_index, + const Config& config) + : ov::IVariableState(name), + _init_structs(init_structs), + _tensor_index(tensor_index), + _related_tensor_index(related_tensor_index), + _logger("ZeroVariableState", config.get()) { + m_state = tensor; +} + +void ZeroVariableState::set_state(const ov::SoPtr& new_state) { + m_state = new_state; + _tensor_updated = true; + + if 
(_init_structs->getMutableCommandListVersion()) { + if (!is_remote_tensor(new_state._ptr)) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_init_structs->getContext(), + new_state->data())) { + _logger.debug("ZeroVariableState::set_state - tensor was created in the same L0 context"); + _zero_tensor_updated = true; + } + + return; + } + + _zero_tensor_updated = true; + } +} + +void ZeroVariableState::reset() { + auto remoteTensor = std::dynamic_pointer_cast(m_state._ptr); + + void* userBuffer = !remoteTensor + ? m_state->data() + : zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + + std::memset(userBuffer, 0, m_state->get_byte_size()); +} + +size_t ZeroVariableState::get_tensor_index() const { + return _tensor_index; +} + +size_t ZeroVariableState::get_related_tensor_index() const { + return _related_tensor_index; +} + +bool ZeroVariableState::tensor_was_updated() const { + return _tensor_updated; +} + +void ZeroVariableState::reset_tensor_updated_flag() { + _tensor_updated = false; +} + +bool ZeroVariableState::zero_tensor_should_be_updated() const { + return _zero_tensor_updated; +} + +void ZeroVariableState::reset_zero_tensor_updated_flag() { + _zero_tensor_updated = false; +} + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp index 3c772168c0c93f..f7406413c9f197 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp @@ -167,6 +167,8 @@ class SyncInferRequest : public ov::IInferRequest { const ov::Shape& shape, const ov::Allocator& allocator = {}) const; + virtual void add_state(const IODescriptor& descriptor, const size_t tensorIndex) const; + bool is_batched_input(size_t idx) const; ov::SoPtr& get_user_input(size_t index) const; diff 
--git a/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp index acb83d5b718033..0987f2b44bbb04 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp @@ -11,12 +11,11 @@ namespace intel_npu { class VariableState final : public ov::IVariableState { public: - explicit VariableState(const std::string& name, const std::shared_ptr& tensor) - : ov::IVariableState(name) { + explicit VariableState(const std::string& name, const ov::SoPtr& tensor) : ov::IVariableState(name) { m_state = tensor; } - void set_state(const ov::SoPtr& newState) override { + virtual void set_state(const ov::SoPtr& newState) override { if (newState->get_byte_size() != m_state->get_byte_size()) { OPENVINO_THROW("Byte size mismatch"); } @@ -24,7 +23,7 @@ class VariableState final : public ov::IVariableState { std::memcpy(m_state->data(), newState->data(), newState->get_byte_size()); } - void reset() override { + virtual void reset() override { std::memset(m_state->data(), 0, m_state->get_byte_size()); } diff --git a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp index 17dc6391761e5c..775113ef0d39bf 100644 --- a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp +++ b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp @@ -326,7 +326,7 @@ std::shared_ptr SyncInferRequest::allocate_tensor(const IODescripto } if (descriptor.isStateInput) { - _variableStates.push_back(std::make_shared(descriptor.nameFromCompiler, tensor)); + add_state(descriptor, index); } } else if (_userOutputTensors.at(index) == nullptr) { _userOutputTensors.at(index) = tensor; @@ -341,6 +341,11 @@ std::shared_ptr SyncInferRequest::create_tensor(ov::element::Type t return ov::make_tensor(type, shape, allocator); } +void 
SyncInferRequest::add_state(const IODescriptor& descriptor, const size_t tensorIndex) const { + _variableStates.push_back( + std::make_shared(descriptor.nameFromCompiler, get_user_input(tensorIndex))); +} + bool SyncInferRequest::is_batched_input(size_t idx) const { return _userInputTensors.at(idx).size() > 1; } diff --git a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp index db9dc1c9f51d34..0c2367b680851e 100644 --- a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp +++ b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp @@ -277,5 +277,32 @@ static inline std::string getLatestBuildError(ze_graph_dditable_ext_curr_t& _gra } } +template +static inline Type extract_object(const ov::AnyMap& params, const ov::Property& p) { + auto itrHandle = params.find(p.name()); + ov::Any res = nullptr; + if (itrHandle == params.end()) { + OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); + } + res = itrHandle->second; + return res.as(); +} + +static inline bool memory_was_allocated_in_the_same_l0_context(ze_context_handle_t hContext, const void* ptr) { + ze_memory_allocation_properties_t desc = {}; + desc.stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES; + auto res = intel_npu::zeMemGetAllocProperties(hContext, ptr, &desc, nullptr); + if (res == ZE_RESULT_SUCCESS) { + if (desc.id) { + if ((desc.type & ZE_MEMORY_TYPE_HOST) || (desc.type & ZE_MEMORY_TYPE_DEVICE) || + (desc.type & ZE_MEMORY_TYPE_SHARED)) { + return true; + } + } + } + + return false; +} + } // namespace zeroUtils } // namespace intel_npu diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp index f45e30bb109849..f30fa2bb1416a3 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp +++ 
b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp @@ -11,7 +11,7 @@ using namespace ov::test::behavior; -const std::vector configsInferRequestRunTests = {{ov::log::level(ov::log::Level::ERR)}}; +const std::vector configsInferRequestRunTests = {{}}; INSTANTIATE_TEST_SUITE_P(compatibility_smoke_BehaviorTest, InferRequestRunTests, diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp index 31b55704757b01..ab53a442c16cda 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp @@ -10,6 +10,7 @@ #include #include +#include #include #include "base/ov_behavior_test_utils.hpp" @@ -962,6 +963,104 @@ TEST_P(SetShapeInferRunTests, checkResultsAfterIOBlobReallocation) { } } +TEST_P(SetShapeInferRunTests, checkResultsAfterStateTensorsReallocation) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto dummy_shape = Shape{1, 50, 100, 100}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device); + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + for (auto&& state : inference_request.query_state()) { + state.reset(); + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + 
auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto states = inference_request.query_state(); + for (auto state : states) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + EXPECT_NEAR(0.0, last_state_data[i], 1e-5); + } + } + + // create dummy Tensors to force the driver to allocate memory for the initial tensor somewhere else + [[maybe_unused]] auto l0_host_dummy_tensor_0 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_1 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_2 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_3 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_4 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_5 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_6 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_7 = context.create_host_tensor(ov::element::f32, dummy_shape); + + for (auto item : inference_request.query_state()) { + auto tensor_state = item.get_state(); + auto original_shape = tensor_state.get_shape(); + OV_ASSERT_NO_THROW(tensor_state.set_shape({1, 50, 20, 20})); + OV_ASSERT_NO_THROW(tensor_state.set_shape(original_shape)); + } + + for (auto&& state : inference_request.query_state()) { + state.reset(); + } + + for (auto state : states) { + auto last_state = 
state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + last_state_data[i] = 1.0f; + } + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + for (auto state : states) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + EXPECT_NEAR(input_data[i], last_state_data[i], 1e-5); + } + } +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp index 870f6596dca9ce..d3e537863227e4 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp @@ -10,7 +10,7 @@ using namespace ov::test::behavior; -const std::vector remoteConfigs = {{ov::log::level(ov::log::Level::ERR)}}; +const std::vector remoteConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest, RemoteRunTests, diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp index fa58d4270889ad..c1992b3047996d 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp @@ -434,6 +434,380 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor2) 0); } +TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensors) { + // Skip test according 
to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device); + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor0 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor1 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor0); + states[0].reset(); + states[1].set_state(l0_host_tensor1); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor0.get_size(); + auto state_data = static_cast(l0_host_tensor0.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor1.get_size(); + state_data = static_cast(l0_host_tensor1.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, 
state_data[i], 1e-5); + } + + tensor_state = states[0].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor2 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor3 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor2); + states[1].set_state(l0_host_tensor3); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensorsWithRemoteTensors) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 2, 2, 2}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + 
OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor0 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor1 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor0); + states[0].reset(); + states[1].set_state(l0_host_tensor1); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor0.get_size(); + auto state_data = static_cast(l0_host_tensor0.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor1.get_size(); + state_data = static_cast(l0_host_tensor1.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_state = states[0].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor2 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor3 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor2); + states[1].set_state(l0_host_tensor3); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.get()); + for (size_t i = 0; i < 
tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors0) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto byte_size = tensor_state.get_byte_size(); + 
float* data = new float[byte_size / sizeof(float)]; + ov::Tensor random_tensor{ov::element::f32, tensor_state_shape, data}; + + states[0].set_state(l0_host_tensor); + states[0].reset(); + states[1].set_state(random_tensor); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor.get_size(); + auto state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors1) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto 
shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto byte_size = tensor_state.get_byte_size(); + float* data = new float[byte_size / sizeof(float)]; + ov::Tensor random_tensor{ov::element::f32, tensor_state_shape, data}; + + auto tensor_size = l0_host_tensor.get_size(); + auto state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + states[0].set_state(l0_host_tensor); + states[1].set_state(random_tensor); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + 
states[0].reset(); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/plugins/intel_npu/tests/functional/common/utils.cpp b/src/plugins/intel_npu/tests/functional/common/utils.cpp index 91f78487934e38..b041e694b19ad0 100644 --- a/src/plugins/intel_npu/tests/functional/common/utils.cpp +++ b/src/plugins/intel_npu/tests/functional/common/utils.cpp @@ -7,6 +7,10 @@ #include #include "intel_npu/npu_private_properties.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/sigmoid.hpp" std::string getBackendName(const ov::Core& core) { return core.get_property("NPU", ov::intel_npu::backend_name.name()).as(); @@ -99,3 +103,32 @@ std::vector getRWMandatoryPropertiesValues(std::vector p } return props; } + +std::shared_ptr createModelWithStates(ov::element::Type type, const ov::Shape& shape) { + auto input = std::make_shared(type, shape); + auto mem_i1 = std::make_shared(type, shape, 0); + auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); + auto mul1 = std::make_shared(mem_r1, input); + + auto mem_i2 = std::make_shared(type, shape, 0); + auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); + auto mul2 = std::make_shared(mem_r2, mul1); + auto mem_w2 = std::make_shared(mul2, "c_1-3"); + + auto mem_w1 = std::make_shared(mul2, 
"r_1-3"); + auto sigm = std::make_shared(mul2); + sigm->set_friendly_name("sigmod_state"); + sigm->get_output_tensor(0).set_names({"sigmod_state"}); + mem_r1->set_friendly_name("Memory_1"); + mem_r1->get_output_tensor(0).set_names({"Memory_1"}); + mem_w1->add_control_dependency(mem_r1); + sigm->add_control_dependency(mem_w1); + + mem_r2->set_friendly_name("Memory_2"); + mem_r2->get_output_tensor(0).set_names({"Memory_2"}); + mem_w2->add_control_dependency(mem_r2); + sigm->add_control_dependency(mem_w2); + + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "add_output"); + return function; +} diff --git a/src/plugins/intel_npu/tests/functional/common/utils.hpp b/src/plugins/intel_npu/tests/functional/common/utils.hpp index 4ad54cc016302c..40ac987bd25487 100644 --- a/src/plugins/intel_npu/tests/functional/common/utils.hpp +++ b/src/plugins/intel_npu/tests/functional/common/utils.hpp @@ -6,6 +6,7 @@ #include #include + #include "common_test_utils/unicode_utils.hpp" std::string getBackendName(const ov::Core& core); @@ -18,6 +19,8 @@ std::string removeDeviceNameOnlyID(const std::string& device_name_id); std::vector getRWMandatoryPropertiesValues(std::vector props); +std::shared_ptr createModelWithStates(ov::element::Type type, const ov::Shape& shape); + template ::value || std::is_same::value)>::type> void removeDirFilesRecursive(const std::basic_string& path) { @@ -72,6 +75,8 @@ struct GenericTestCaseNameClass { }; template -constexpr bool GenericTestCaseNameClass::hasGetTestCaseName< - T, std::void_t().getTestCaseName( - std::declval>()))>> = true; +constexpr bool + GenericTestCaseNameClass::hasGetTestCaseName().getTestCaseName( + std::declval>()))>> = + true; From 81ff40997bcc3fccba98fc1609c2a5d7035f3d12 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Tue, 21 Jan 2025 09:58:42 +0100 Subject: [PATCH 27/35] [CPU][TESTS] Remove sse4 instances from oneDNN related tests (#28527) Seems to not worth to verify them considering how slow 
they are --- .../classes/convolution.cpp | 18 -- .../single_layer_tests/classes/pooling.cpp | 3 +- .../single_layer_tests/group_convolution.cpp | 300 +----------------- .../instances/common/convolution.cpp | 3 +- .../instances/x64/convolution.cpp | 6 - .../instances/x64/pooling.cpp | 13 +- .../instances/x64/softmax.cpp | 3 +- .../subgraph_tests/src/common/conv_concat.cpp | 4 - .../subgraph_tests/src/x64/conv_concat.cpp | 11 +- .../src/x64/memory_sharing_test.cpp | 2 - .../functional/utils/convolution_params.hpp | 30 -- 11 files changed, 14 insertions(+), 379 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp index b3c958a2c88a68..ed7fdcff0479d8 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp @@ -190,16 +190,6 @@ void ConvolutionLayerCPUTest::SetUp() { } TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) { - // Skip tests for sse41 convolution where ic or oc cannot be exactly divided by the block size, - // since tails processing for sse41 nspc layout is not supported yet (see 52736). - if (!inFmts.empty() && (inFmts.front() == nwc || inFmts.front() == nhwc || inFmts.front() == ndhwc) && selectedType.find("jit_sse") != std::string::npos) { - auto inpChannels = function->get_parameters().front()->get_partial_shape()[1].get_length(); - auto outChannels = function->get_output_partial_shape(0)[1].get_length(); - if ((inpChannels % 8) || (outChannels % 8)) { - GTEST_SKIP() << "Disabled test due to the sse41 convolution kernel does not support tails for nspc layout." 
<< std::endl; - } - } - if (!priority.empty()) { // Skip tests for brgconv convolution where kernel size = 1x1 if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { @@ -340,10 +330,7 @@ const std::vector& inShapesGemm2D_cache() { const std::vector& CPUParams_2D() { static const std::vector CPUParams_2D = { - conv_sse42_2D, - conv_avx2_2D, conv_avx512_2D, - conv_sse42_2D_nspc, conv_avx2_2D_nspc, conv_avx2_2D_nspc_brgconv, conv_avx512_2D_nspc, @@ -354,7 +341,6 @@ const std::vector& CPUParams_2D() { const std::vector& CPUParams_3D() { static const std::vector CPUParams_3D = { - //conv_sse42_3D, // not supported jit_sse42 for 3d conv_avx2_3D, conv_avx512_3D, conv_avx2_3D_nspc, @@ -479,10 +465,8 @@ const std::vector& inputShapes2d_dynBatch() { const std::vector& CPUParams_1x1_1D() { static const std::vector CPUParams_1x1_1D = { - conv_sse42_1D_1x1, conv_avx2_1D_1x1, conv_avx512_1D_1x1, - conv_sse42_1D_1x1_nspc, conv_avx2_1D_1x1_nspc, conv_avx2_1D_1x1_nspc_brgconv, conv_avx512_1D_1x1_nspc, @@ -567,10 +551,8 @@ const std::vector& CPUParams_GEMM_3D() { const std::vector& CPUParams_1x1_2D() { static const std::vector CPUParams_1x1_2D = { - conv_sse42_2D_1x1, conv_avx2_2D_1x1, conv_avx512_2D_1x1, - conv_sse42_2D_1x1_nspc, conv_avx2_2D_1x1_nspc, conv_avx2_2D_1x1_nspc_brgconv, conv_avx512_2D_1x1_nspc, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp index 7c4854dd334bcf..62352c851435b2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp @@ -787,12 +787,11 @@ const CPUSpecificParams& expectedCpuConfigAnyLayout() { } const std::vector& vecCpuConfigsFusing_4D() { - const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; const auto avx2_nhwc = 
CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"}; const auto acl_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"acl"}, "acl"}; - static const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()}; + static const std::vector vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()}; return vecCpuConfigsFusing_4D; } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp index 64dcf20542c09d..7d9173e472e089 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp @@ -1043,7 +1043,7 @@ const auto groupConvParams_ExplicitPadding_1D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_1D = - {conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx512_1D_nspc}; + {conv_avx2_1D, conv_avx512_1D, conv_avx2_1D_nspc, conv_avx512_1D_nspc}; std::vector inputShapes1d = {{{}, {{2, 64, 7}}}, {// dynamic shapes @@ -1108,7 +1108,7 @@ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_2D = - {conv_sse42_2D, conv_avx2_2D, conv_avx512_2D, conv_sse42_2D_nspc, conv_avx2_2D_nspc, conv_avx512_2D_nspc}; + {conv_avx2_2D, conv_avx512_2D, conv_avx2_2D_nspc, conv_avx512_2D_nspc}; std::vector inputShapes2d = {{{}, {{1, 64, 7, 7}}}, {// dynamic shapes @@ -1197,7 +1197,6 @@ const auto groupConvParams_ExplicitPadding_3D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_3D = { - // conv_sse42_3D, 
// not supported jit_sse42 for 3d conv_avx2_3D, conv_avx512_3D, conv_avx2_3D_nspc, @@ -1247,10 +1246,8 @@ const auto groupConvParams_ExplicitPadding_DW_1D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_1D = {conv_sse42_dw_1D, - conv_avx2_dw_1D, +const std::vector CPUParams_DW_1D = {conv_avx2_dw_1D, conv_avx512_dw_1D, - conv_sse42_dw_1D_nspc, conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc}; @@ -1272,8 +1269,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(inputShapes1dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice( - {conv_sse42_dw_1D, conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_sse42_dw_1D_nspc, - // conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? + {conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? ::testing::ValuesIn(fusingParamsSet), ::testing::Values(empty_plugin_config)), GroupConvolutionLayerCPUTest::getTestCaseName); @@ -1302,10 +1298,8 @@ const auto groupConvParams_ExplicitPadding_DW_2D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_2D = {conv_sse42_dw_2D, - conv_avx2_dw_2D, +const std::vector CPUParams_DW_2D = {conv_avx2_dw_2D, conv_avx512_dw_2D, - conv_sse42_dw_2D_nspc, conv_avx2_dw_2D_nspc, conv_avx512_dw_2D_nspc}; @@ -1411,10 +1405,8 @@ const auto groupConvParams_ExplicitPadding_DW_3D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_3D = {conv_sse42_dw_3D, - conv_avx2_dw_3D, +const std::vector CPUParams_DW_3D = {conv_avx2_dw_3D, conv_avx512_dw_3D, - conv_sse42_dw_3D_nspc, conv_avx2_dw_3D_nspc, conv_avx512_dw_3D_nspc}; @@ -1673,171 +1665,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GEMM_GroupConv, 
::testing::ValuesIn(filterParamsSetForDevice(gemmGroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 GroupConvolution ============= */ -const std::vector sse42_GroupConv = {conv_sse42_2D, conv_sse42_2D_nspc}; -const std::vector JIT_SSE42_GroupConvTestCases = generateSingleGroupConvCPUTestCases( - // 1. jcp.ur_w (=3,<3) - // 2. jcp.ur_w_tail (=0,>0) - // 3. jcp.kw (>7,<=7) - // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; - // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; - // 6. ocb_work - - // jcp.ur_w == 3, jcp.ur_w_tail == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 4}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.ur_w == 3, jcp.ur_w_tail == 0 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 11}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.kw > 7 - makeSingleGroupConvCPUTestCases({3, 8}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_oc == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 8, - 16, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_ic == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 16, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // ocb_work > 1 (ocb_work == 2) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 8, - 40, - 
sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_ic == 2, ocb_work == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 16, - 40, - sse42_GroupConv, - vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, - {2, 2}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 3, - 2, - {129, 129}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 4}, - {1, 2}, - {3, 2}, - {2, 1}, - {1, 0}, - ov::op::PadType::EXPLICIT, - 2, - 1, - {10, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32Default) - - // not supported jit_sse42 for 3d - // makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - // ov::op::PadType::EXPLICIT, - // 3, 2, {33, 33, 33}, 8, 8, cpuParams_sse42_3D), - // makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, - // ov::op::PadType::EXPLICIT, - // 2, 1, {10, 10, 10}, 8, 8, cpuParams_sse42_3D), -); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_GroupConv, - GroupConvolutionLayerCPUTest, - ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); - /* ============= JIT AVX2 GroupConvolution ============= */ const std::vector avx2_GroupConv_2D = {conv_avx2_2D, conv_avx2_2D_nspc}; const std::vector avx2_GroupConv_3D = {conv_avx2_3D, conv_avx2_3D_nspc}; @@ -2130,120 +1957,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_GroupConv, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_GroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 DW GroupConvolution ============= */ -const std::vector sse42_DW_2D = {conv_sse42_dw_2D, conv_sse42_dw_2D_nspc}; -const std::vector sse42_DW_3D = {conv_sse42_dw_3D, conv_sse42_dw_3D_nspc}; -const std::vector JIT_SSE42_DW_GroupConvTestCases = 
generateSingleGroupConvCPUTestCases( - // 1. jcp.ngroups % simd_w (=0,!=0) - // 2. jcp.nb_ch - // 3. jcp.nb_ch_blocking (=2,<2) - // 4. jcp.ur_w == 3 - - // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 8, - 1, - {5, 5}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - // jcp.ngroups % simd_w == 0, jcp.nb_ch == 2, jcp.nb_ch_blocking == 2 (jcp.ngroups == 16) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 16, - 1, - {5, 5}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - // jcp.ngroups % simd_w != 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 2 (jcp.ngroups == 17) TODO: pad channels not - // supported for SSE42 makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, - // ov::op::PadType::VALID, 17, 1, {5, 5}, 1, 1, conv_sse42_DW_2D, vecPrcConnectParamsFP32only), jcp.ow > jcp.ur_w - // (jcp.ow == 7) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 8, - 1, - {5, 9}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, - {2, 2}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 8, - 2, - {129, 129}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - makeSingleGroupConvCPUTestCases({2, 4}, - {1, 2}, - {3, 2}, - {2, 1}, - {1, 0}, - ov::op::PadType::EXPLICIT, - 8, - 1, - {10, 10}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({3, 3, 3}, - {2, 2, 2}, - {1, 1, 1}, - {1, 1, 1}, - {1, 1, 1}, - ov::op::PadType::EXPLICIT, - 8, - 2, - {33, 33, 33}, - 1, - 1, - sse42_DW_3D, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 3, 4}, - {1, 2, 2}, - {3, 1, 2}, - {2, 2, 1}, - {1, 1, 0}, - ov::op::PadType::EXPLICIT, - 8, - 1, - {10, 10, 10}, - 1, - 
1, - sse42_DW_3D, - vecPrcConnectParamsFP32)); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_DW_GroupConv, - GroupConvolutionLayerCPUTest, - ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_DW_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); - /* ============= JIT AVX2 DW GroupConvolution ============= */ const std::vector avx2_DW_2D = {conv_avx2_dw_2D, conv_avx2_dw_2D_nspc}; const std::vector avx2_DW_3D = {conv_avx2_dw_3D, conv_avx2_dw_3D_nspc}; @@ -2494,7 +2207,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_DW_GroupConv, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_DW_GroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX2 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX512 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX2 PLANAR Convolution (not supported with groups) ============= */ diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp index 09f8dc14660392..94683387d1eac0 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp @@ -106,7 +106,6 @@ INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_empty_fusing, ConvolutionLayerCPUT ConvolutionLayerCPUTest::getTestCaseName); const std::vector CPUParams_2D_plain_to_blocked = { - conv_sse42_plain_to_blocked_2D, conv_avx2_plain_to_blocked_2D, conv_avx512_plain_to_blocked_2D, }; @@ -397,4 +396,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest, } // namespace } // namespace Convolution } // namespace test -} // namespace ov \ No newline at end of 
file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp index 741e12031c680c..030f7eb3bc40b8 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp @@ -344,10 +344,8 @@ const auto convParams_ExplicitPadding_1D = ::testing::Combine( ); const std::vector CPUParams_1D_f32 = { - conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, - conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx2_1D_nspc_brgconv, conv_avx512_1D_nspc, @@ -356,10 +354,8 @@ const std::vector CPUParams_1D_f32 = { //Current avx2 I8 fall back on JIT avx2 implement when having src zero point.Not enabling conv_avx2_1D_nspc_brgconv for I8 precision. const std::vector CPUParams_1D_I8 = { - conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, - conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx512_1D_nspc, conv_avx512_1D_nspc_brgconv @@ -424,7 +420,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest, ConvolutionLayerCPUTest::getTestCaseName); const std::vector CPUParams_1D_plain_to_blocked = { - conv_sse42_plain_to_blocked_1D, conv_avx2_plain_to_blocked_1D, conv_avx512_plain_to_blocked_1D, }; @@ -630,7 +625,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP16, ConvolutionLayerCPUTest, /* ============= Jit Planar ============= */ /* ============= Convolution planar params (2D) ============= */ const std::vector CPUParams_Jit_Planar_2D = { - // sse42 is not supported conv_avx2_planar_2D, conv_avx512_planar_2D, }; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp index cfe29692f8414c..6b9e9d3718f556 100644 --- 
a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp @@ -17,9 +17,8 @@ namespace { const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}; const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}; const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"}; -const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; -const std::vector vecCpuConfigs = {sse42, avx, avx512}; +const std::vector vecCpuConfigs = {avx, avx512}; const std::vector paramsMaxV84D_ref = { maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, @@ -50,13 +49,9 @@ const auto avx2_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}; -const auto sse42_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"}; -const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; -const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; - -const std::vector vecCpuConfigsFusing_3D = {sse42_nwc, avx2_nwc, avx512_nwc}; -const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc}; -const std::vector vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc}; +const std::vector vecCpuConfigsFusing_3D = {avx2_nwc, avx512_nwc}; +const std::vector vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc}; +const std::vector vecCpuConfigsFusing_5D = {avx2_ndhwc, avx512_ndhwc}; std::vector fusingParamsSet { emptyFusingSpec, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp index 2a22f629c29661..9f7938310d788f 
100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp @@ -17,8 +17,7 @@ namespace { const auto optimizedCPUSpec = []()-> std::vector{ const auto avx512 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx512"}; const auto avx2 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx2"}; - const auto sse42 = CPUSpecificParams{{}, {}, {"jit"}, "jit_sse42"}; - const std::vector vecCpuConfigs = {avx512, avx2, sse42}; + const std::vector vecCpuConfigs = {avx512, avx2}; auto supportConfigure = CPUTestUtils::filterCPUInfoForDevice(vecCpuConfigs); // only the MAX ISA of vecCpuConfigs will be tested if (supportConfigure.size() > 0) { diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp index 22b94d369c5d8f..2825a3528baf6b 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp @@ -50,7 +50,6 @@ namespace ConvolutionConact { /* ============= Convolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D_nspc, conv_gemm_2D }; @@ -66,7 +65,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D, ConvConcatSubgraphTest, params2D, /* ============= Convolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D_nspc, conv_gemm_3D }; @@ -86,7 +84,6 @@ namespace GroupConvolutionConcat { /* ============= GroupConvolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D_nspc, conv_gemm_2D }; @@ -102,7 +99,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D, ConvConcatSubgraphTest, param /* ============= GroupConvolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D_nspc, conv_gemm_3D }; diff --git 
a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp index 1fdbd5016099e1..3d7ec525c0a105 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp @@ -20,7 +20,6 @@ namespace Kernel_1x1 { /* ============= Kernel_1x1 (2D) ============= */ const std::vector CPUParams2DConv = { - conv_sse42_2D_1x1, conv_avx2_2D_1x1, conv_avx512_2D_1x1 }; @@ -84,7 +83,6 @@ commonConvParams dwConvParams2D = commonConvParams{kernelSize2D(), strides2D(), numOutChannels(), paddingType(), numOutChannels()}; const ov::Shape inputShapesDW2D{1, 32, 16, 16}; const std::vector CPUParams2D = { - conv_sse42_dw_2D, conv_avx2_dw_2D, conv_avx512_dw_2D }; @@ -104,7 +102,6 @@ commonConvParams dwConvParams3D = commonConvParams{kernelSize3D(), strides3D(), numOutChannels(), paddingType(), numOutChannels()}; const ov::Shape inputShapesDW3D{1, 32, 8, 16, 16}; const std::vector CPUParams3D = { - conv_sse42_dw_3D, conv_avx2_dw_3D, conv_avx512_dw_3D }; @@ -158,8 +155,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData3D, ConvConcatSubgraphTest namespace ConvolutionConcat { /* ============= Convolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D, - conv_sse42_2D, conv_avx2_2D, conv_avx512_2D }; @@ -176,7 +171,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D, ConvConcatSubgraphTest, params2D, /* ============= Convolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D, conv_avx2_3D, conv_avx512_3D }; @@ -195,8 +189,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution3D, ConvConcatSubgraphTest, params3D, namespace GroupConvolutionConcat { /* ============= GroupConvolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D, - conv_sse42_2D, conv_avx2_2D, conv_avx512_2D }; @@ -213,7 +205,6 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D, ConvConcatSubgraphTest, param /* ============= GroupConvolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D, conv_avx2_3D, conv_avx512_3D }; @@ -255,4 +246,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData3D, ConvConcatSubgrap } // namespace GroupConvolutionBackpropDataConcat } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp index 2b9214c4c22cd0..36f9b0f1e50a65 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp @@ -33,8 +33,6 @@ TEST_F(EdgeWithSameNameInTwoModels, smoke_CompareWithRef) { std::tie(inFmts, outFmts, priority, selectedType) = conv_avx512_2D; } else if (ov::with_cpu_x86_avx2()) { std::tie(inFmts, outFmts, priority, selectedType) = conv_avx2_2D; - } else if (ov::with_cpu_x86_sse42()) { - std::tie(inFmts, outFmts, priority, selectedType) = conv_sse42_2D; } // first model diff --git a/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp b/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp index 6fc3a8ab9382d4..941a4274598de7 100644 --- a/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp +++ b/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp @@ -7,14 +7,6 @@ #include "cpu_test_utils.hpp" namespace CPUTestUtils { - const auto conv_ref_1D = CPUSpecificParams{{ncw}, {ncw}, {"ref_any"}, "ref_any"}; - const auto conv_ref_2D = CPUSpecificParams{{nchw}, {nchw}, {"ref_any"}, "ref_any"}; - const auto conv_ref_3D = CPUSpecificParams{{ncdhw}, {ncdhw}, {"ref_any"}, "ref_any"}; - - const auto conv_ref_1D_nspc = 
CPUSpecificParams{{nwc}, {nwc}, {"ref_any"}, "ref_any"}; - const auto conv_ref_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"ref_any"}, "ref_any"}; - const auto conv_ref_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"ref_any"}, "ref_any"}; - const auto conv_gemm_1D = CPUSpecificParams{{ncw}, {ncw}, {"jit_gemm"}, "jit_gemm"}; const auto conv_gemm_2D = CPUSpecificParams{{nchw}, {nchw}, {"jit_gemm"}, "jit_gemm"}; const auto conv_gemm_3D = CPUSpecificParams{{ncdhw}, {ncdhw}, {"jit_gemm"}, "jit_gemm"}; @@ -31,24 +23,6 @@ namespace CPUTestUtils { const auto conv_gemm_acl_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"gemm_acl"}, "gemm_acl"}; const auto conv_gemm_acl_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"gemm_acl"}, "gemm_acl"}; - const auto conv_sse42_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_dw_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - - const auto conv_sse42_plain_to_blocked_1D = CPUSpecificParams{{ncw}, {nCw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_plain_to_blocked_2D = CPUSpecificParams{{nchw}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_plain_to_blocked_3D = CPUSpecificParams{{ncdhw}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}; - - const auto conv_sse42_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; - const auto 
conv_sse42_dw_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_avx2_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2"}, "jit_avx2"}; const auto conv_avx2_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2"}; const auto conv_avx2_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"}; @@ -107,22 +81,18 @@ namespace CPUTestUtils { const auto conv_avx512_2D_nspc_brgconv_amx = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"}; const auto conv_avx512_3D_nspc_brgconv_amx = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"}; - const auto conv_sse42_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx512_1D_1x1 = CPUSpecificParams{{nCw16c}, {nCw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; - const auto conv_sse42_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx2_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; const auto conv_avx512_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; const auto conv_avx512_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_1x1"}, "brgconv_avx512_1x1"}; const auto conv_avx512_1D_1x1_nspc_brgconv_amx = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_amx_1x1"}, "brgconv_avx512_amx_1x1"}; - const auto conv_sse42_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; 
const auto conv_avx2_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx512_2D_1x1 = CPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; - const auto conv_sse42_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx2_2D_1x1_nspc_brgconv = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; const auto conv_avx2_3D_1x1_nspc_brgconv = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; From d27f0c41d4086b7ac5bcc65a2c9add2a4910b63b Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Tue, 21 Jan 2025 14:20:28 +0400 Subject: [PATCH 28/35] [GPU] Remove unused PagedAttention inputs causing set_arg error in case of zero buffer (#28577) ### Details: - This change removes unused PagedAttention inputs that were accidentally added during the rebase of the original PR, which caused a set_arg error in the case of a zero buffer - Added related test with a dummy activation function to simulate this behavior --- .../src/graph/impls/ocl/paged_attention.cpp | 22 ------------------- .../kernel_selector/cl_kernels/pa_sdpa_opt.cl | 6 ----- .../test_cases/paged_attention_gpu_test.cpp | 12 +++++++++- 3 files changed, 11 insertions(+), 29 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp index ab7d1a4f2ee1b4..1bcd4b0bb10fe2 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp @@ -214,12 +214,6 @@ struct paged_attention_impl : multi_stage_primitive { if (desc->has_alibi) { args.inputs.push_back(instance.alibi_memory_ptr()); } - - if (desc->has_rotated_blocks) { - 
args.inputs.push_back(instance.rotated_block_indices_memory_ptr()); - args.inputs.push_back(instance.rotation_deltas_memory_ptr()); - args.inputs.push_back(instance.rotation_trig_lut_memory_ptr()); - } } else if (kernel_idx == 2 || kernel_idx == 3) { // Finalization kernel or mixed stage finalization kernel args.inputs = { instance.past_lens_memory_ptr() }; @@ -687,10 +681,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) inputs_number++; - const auto has_rotation = impl_param.input_layouts.size() == 16; - if (has_rotation) - inputs_number += 3; - auto input_idx = 0; params.inputs.resize(inputs_number); params.inputs[input_idx++] = query_tensor; @@ -709,12 +699,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) params.inputs[input_idx++] = alibi_tensor; - if (has_rotation) { - params.inputs[input_idx++] = input_tensors[13]; - params.inputs[input_idx++] = input_tensors[14]; - params.inputs[input_idx++] = input_tensors[15]; - } - if (has_scores_output) { params.outputs.resize(2); params.outputs[1] = convert_data_tensor(impl_param.get_output_layout(1)); @@ -752,12 +736,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(11)}); - if (has_rotation) { - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(13)}); - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(14)}); - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(15)}); - } - if (has_scores_output) out_tensor_to_offset_map.insert({1, out_offsets_map.at(1)}); diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl index ae0f7a666c4309..7a300aaee1a16a 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl @@ -43,12 +43,6 @@ KERNEL(pa_sdpa_opt)( #if HAS_ALIBI 
const __global ALIBI_INPUT_TYPE* alibi_slopes, #endif - -#if HAS_ROTATED_BLOCKS - const __global INPUT7_TYPE* rotated_block_indices, - const __global INPUT8_TYPE* rotation_deltas, - const __global INPUT9_TYPE* rotation_trig_lut, -#endif __global OUTPUT_TYPE* output, #if PAGED_ATTENTION_SCORES_OUTPUT __global SOFTMAX_ACCUMULATOR_TYPE* softmax_results, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp index 7076b863c450d7..cdb927a57ca2bb 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp @@ -5,6 +5,7 @@ #include "test_utils.h" #include "random_generator.hpp" +#include #include #include #include @@ -306,6 +307,12 @@ struct PagedAttentionManager { auto layout = mem->get_layout(); layout.set_partial_shape(ov::PartialShape{ max_context_len[0], head_size }); + if (rotated_block_indices.empty()) { + auto empty_layout = mem->get_layout(); + empty_layout.set_partial_shape(ov::PartialShape{ 0, head_size }); + return test_engine.reinterpret_buffer(*mem, empty_layout); + } + return test_engine.reinterpret_buffer(*mem, layout); } @@ -741,7 +748,7 @@ struct PagedAttentionTest : public ::testing::TestWithParam { if (p.rotation_config.apply_rotation) { pa_inputs.push_back(input_info("rotated_block_indices")); pa_inputs.push_back(input_info("rotation_deltas")); - pa_inputs.push_back(input_info("rotation_trig_lut")); + pa_inputs.push_back(input_info("rotation_trig_lut_modified")); } auto pa_prim = paged_attention("paged_attention", pa_inputs); @@ -782,6 +789,9 @@ struct PagedAttentionTest : public ::testing::TestWithParam { topology.add(input_layout("rotated_block_indices", rotated_block_indices_layout)); topology.add(input_layout("rotation_deltas", rotation_deltas_layout)); topology.add(input_layout("rotation_trig_lut", rotation_trig_lut_layout)); + + // add 
dummy activation operation to simulate an empty PA `rotation_trig_lut` buffer for shapes like [0, head_size] + topology.add(activation("rotation_trig_lut_modified", input_info("rotation_trig_lut"), activation_func::none)); } ExecutionConfig config = get_test_default_config(get_test_engine()); From c99a862597ee03e0c3302fe5a3edf9a21a17413f Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Tue, 21 Jan 2025 11:47:06 +0100 Subject: [PATCH 29/35] [CPU] Fix uninitialized array filed coverity issue with CID 1590270 (#28580) ### Details: - Fix uninitialized array class member field in PlainTensor class ### Tickets: - 160817 --- src/plugins/intel_cpu/src/utils/plain_tensor.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index a27f29c0ab0e1b..497688f831bb90 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -91,8 +91,8 @@ struct precision_of { #define PLAINTENSOR_RANK_MAX 8 struct PlainTensor { - size_t m_strides[PLAINTENSOR_RANK_MAX]; - size_t m_dims[PLAINTENSOR_RANK_MAX]; + size_t m_strides[PLAINTENSOR_RANK_MAX] = {}; + size_t m_dims[PLAINTENSOR_RANK_MAX] = {}; size_t m_rank = 0; std::shared_ptr m_ptr; size_t m_capacity = 0; From bc6e3347e09e30219bb83f2ba08c25da04f4c59f Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 21 Jan 2025 15:03:57 +0400 Subject: [PATCH 30/35] [ONNX] Added support of optional zero-point for DQ-21 (#28562) ### Details: - Added handling of an optional zero-point for DequantizeLinear-21 ### Tickets: - 160914 --- .../frontend/src/op/dequantize_linear.cpp | 2 + ...equantize_linear_21_no_zero_point.prototxt | 55 +++++++++++++++++++ .../onnx/tests/onnx_import_quant.in.cpp | 11 ++++ 3 files changed, 68 insertions(+) create mode 100644 src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt diff --git 
a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index a80e6a77f430e2..4705504699158b 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -255,6 +255,8 @@ ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) { if (zp.get_node_shared_ptr()) { broadcastable_x = std::make_shared(x, zp); + } else { + broadcastable_x = x; } const auto& scaled_x = std::make_shared(broadcastable_x, scale); diff --git a/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt new file mode 100644 index 00000000000000..ab276f3b4d7c78 --- /dev/null +++ b/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt @@ -0,0 +1,55 @@ +ir_version: 3 +producer_name: "OpenVINO ONNX Frontend" +graph { + name: "test_dequantize_21" + initializer { + dims: 6 + dims: 3 + data_type: 21 + name: "data" + raw_data: "\x21\x43\x65\x87\xA9\xCB\xED\xFF\x00" + } + initializer { + dims: 2 + dims: 3 + data_type: 1 + name: "scale" + raw_data: "\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f" + } + node { + input: "data" + input: "scale" + output: "output" + name: "DequantizeNode" + op_type: "DequantizeLinear" + attribute { + name: "axis" + i: 0 + type: INT + } + attribute { + name: "block_size" + i: 3 + type: INT + } + } + output { + name: "output" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 6 + } + dim { + dim_value: 3 + } + } + } + } + } +} +opset_import { + version: 21 +} diff --git a/src/frontends/onnx/tests/onnx_import_quant.in.cpp b/src/frontends/onnx/tests/onnx_import_quant.in.cpp index 793eb73772880a..166898988a59e4 100644 --- a/src/frontends/onnx/tests/onnx_import_quant.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_quant.in.cpp @@ -191,6 +191,17 
@@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_no_zero_point) { test_case.run(); } +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_21_no_zero_point) { + auto model = convert_model("dequantize_linear_21_no_zero_point.onnx"); + + auto test_case = ov::test::TestCase(model, s_device); + + test_case.add_expected_output( + {6, 3}, + std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 0, 0}); + test_case.run(); +} + OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_scalar_zero_scale_uint8) { auto model = convert_model("dequantize_linear_0.onnx"); From cc05aadbd442eec471654dbb482c2265a5aba103 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:05:39 +0400 Subject: [PATCH 31/35] Bump actions/cache from 4.1.2 to 4.2.0 (#28574) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0.
Release notes

Sourced from actions/cache's releases.

v4.2.0

⚠️ Important Changes

The cache backend service has been rewritten from the ground up for improved performance and reliability. actions/cache now integrates with the new cache service (v2) APIs.

The new service will gradually roll out as of February 1st, 2025. The legacy service will also be sunset on the same date. Changes in this release are fully backward compatible.

We are deprecating some versions of this action. We recommend upgrading to version v4 or v3 as soon as possible before February 1st, 2025. (Upgrade instructions below).

If you are using pinned SHAs, please use the SHAs of versions v4.2.0 or v3.4.0

If you do not upgrade, all workflow runs using any of the deprecated actions/cache will fail.

Upgrading to the recommended versions will not break your workflows.

Read more about the change & access the migration guide: reference to the announcement.

Minor changes

Minor and patch version updates for these dependencies:

  • @​actions/core: 1.11.1
  • @​actions/io: 1.1.3
  • @​vercel/ncc: 0.38.3

Full Changelog: https://github.com/actions/cache/compare/v4...v4.2.0

Changelog

Sourced from actions/cache's changelog.

Releases

4.2.0

TLDR; The cache backend service has been rewritten from the ground up for improved performance and reliability. actions/cache now integrates with the new cache service (v2) APIs.

The new service will gradually roll out as of February 1st, 2025. The legacy service will also be sunset on the same date. Changes in this release are fully backward compatible.

We are deprecating some versions of this action. We recommend upgrading to version v4 or v3 as soon as possible before February 1st, 2025. (Upgrade instructions below).

If you are using pinned SHAs, please use the SHAs of versions v4.2.0 or v3.4.0

If you do not upgrade, all workflow runs using any of the deprecated actions/cache will fail.

Upgrading to the recommended versions will not break your workflows.

4.1.2

  • Add GitHub Enterprise Cloud instances hostname filters to inform API endpoint choices - #1474
  • Security fix: Bump braces from 3.0.2 to 3.0.3 - #1475

4.1.1

  • Restore original behavior of cache-hit output - #1467

4.1.0

  • Ensure cache-hit output is set when a cache is missed - #1404
  • Deprecate save-always input - #1452

4.0.2

  • Fixed restore fail-on-cache-miss not working.

4.0.1

  • Updated isGhes check

4.0.0

  • Updated minimum runner version support from node 12 -> node 20

3.4.0

  • Integrated with the new cache service (v2) APIs

3.3.3

  • Updates @​actions/cache to v3.2.3 to fix accidental mutated path arguments to getCacheVersion actions/toolkit#1378
  • Additional audit fixes of npm package(s)

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/cache&package-manager=github_actions&previous-version=4.1.2&new-version=4.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build_doc.yml | 2 +- .github/workflows/job_cpu_functional_tests.yml | 4 ++-- .github/workflows/ovc.yml | 2 +- .github/workflows/windows_conditional_compilation.yml | 2 +- .github/workflows/windows_vs2019_release.yml | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index 2ea17b79af7514..b0739432f29066 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -64,7 +64,7 @@ jobs: - name: Cache documentation id: cache_sphinx_docs - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: build/docs/_build/.doctrees key: sphinx-docs-cache diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml index 568c33d39e307b..74e54d389a8ec5 100644 --- a/.github/workflows/job_cpu_functional_tests.yml +++ b/.github/workflows/job_cpu_functional_tests.yml @@ -90,7 +90,7 @@ jobs: run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }} @@ -110,7 +110,7 @@ jobs: timeout-minutes: 25 - name: Save tests execution time - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 if: github.ref_name == 'master' with: path: ${{ env.PARALLEL_TEST_CACHE }} diff --git a/.github/workflows/ovc.yml b/.github/workflows/ovc.yml index 
3e7dedf50ad51b..a554ef4fadc6d3 100644 --- a/.github/workflows/ovc.yml +++ b/.github/workflows/ovc.yml @@ -28,7 +28,7 @@ jobs: python-version: '3.10' - name: Cache pip - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('src/bindings/python/requirements*.txt') }} diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 0f965eabd3c1ad..6ce104ad07fe9f 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -393,7 +393,7 @@ jobs: run: python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/layer_tests_summary/requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 92d826de1d8394..d909c18633795e 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -581,7 +581,7 @@ jobs: run: python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} @@ -595,7 +595,7 @@ jobs: timeout-minutes: 60 - name: Save tests execution time - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # 
v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 if: github.ref_name == 'master' with: path: ${{ env.PARALLEL_TEST_CACHE }} From 60a3f0cc2a09f08b534ea431df89b26c565c17bf Mon Sep 17 00:00:00 2001 From: Sun Xiaoxia Date: Tue, 21 Jan 2025 20:45:31 +0800 Subject: [PATCH 32/35] fix CID issue 1590212 (#28543) ### Details: - *fix CID issue 1590212* ### Tickets: - *ticket-id* --- src/inference/src/dev/threading/cpu_streams_executor.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index 0313c4f5aabc6b..bb47b813dce05f 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -504,7 +504,12 @@ void CPUStreamsExecutor::cpu_reset() { CPUStreamsExecutor::CPUStreamsExecutor(const IStreamsExecutor::Config& config) : _impl{new Impl{config}} {} CPUStreamsExecutor::~CPUStreamsExecutor() { - cpu_reset(); + try { + cpu_reset(); + } catch (const ov::Exception&) { + // Destructor should not throw - catch needed for static analysis. 
+ OPENVINO_THROW("Reset CPU state error."); + } { std::lock_guard lock(_impl->_mutex); _impl->_isStopped = true; From bad9b106c8e1c4b3c18747b0b55ded4fe78560bd Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Tue, 21 Jan 2025 16:14:25 +0200 Subject: [PATCH 33/35] Removing unused files (#28586) ### Details: - * These files are used only for local testing purposes and were added by mistake* Signed-off-by: Bogdan Pereanu --- .../remote_tensor_tests/d3dx12_core.h | 1389 ----------------- .../remote_tensor_tests/d3dx12_default.h | 15 - 2 files changed, 1404 deletions(-) delete mode 100644 src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h delete mode 100644 src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h deleted file mode 100644 index e20327ccbe3158..00000000000000 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h +++ /dev/null @@ -1,1389 +0,0 @@ -//********************************************************* -// -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License (MIT). 
-// -//********************************************************* - -#pragma once -#ifdef _WIN32 - -# ifndef __cplusplus -# error D3DX12 requires C++ -# endif - -# include - -# include "d3d12.h" -# include "d3dx12_default.h" - - -//------------------------------------------------------------------------------------------------ -# ifndef D3DX12_ASSERT -# ifdef assert -# define D3DX12_ASSERT(x) assert(x) -# else -# define D3DX12_ASSERT(x) -# endif -# endif - -//------------------------------------------------------------------------------------------------ -template -inline ID3D12CommandList* const* CommandListCast(t_CommandListType* const* pp) noexcept { - // This cast is useful for passing strongly typed command list pointers into - // ExecuteCommandLists. - // This cast is valid as long as the const-ness is respected. D3D12 APIs do - // respect the const-ness of their arguments. - return reinterpret_cast(pp); -} - -//------------------------------------------------------------------------------------------------ -inline bool operator==(const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r) noexcept { - return l.TopLeftX == r.TopLeftX && l.TopLeftY == r.TopLeftY && l.Width == r.Width && l.Height == r.Height && - l.MinDepth == r.MinDepth && l.MaxDepth == r.MaxDepth; -} - -//------------------------------------------------------------------------------------------------ -inline bool operator!=(const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RECT : public D3D12_RECT { - CD3DX12_RECT() = default; - explicit CD3DX12_RECT(const D3D12_RECT& o) noexcept : D3D12_RECT(o) {} - explicit CD3DX12_RECT(LONG Left, LONG Top, LONG Right, LONG Bottom) noexcept { - left = Left; - top = Top; - right = Right; - bottom = Bottom; - } -}; - -//------------------------------------------------------------------------------------------------ 
-struct CD3DX12_VIEWPORT : public D3D12_VIEWPORT { - CD3DX12_VIEWPORT() = default; - explicit CD3DX12_VIEWPORT(const D3D12_VIEWPORT& o) noexcept : D3D12_VIEWPORT(o) {} - explicit CD3DX12_VIEWPORT(FLOAT topLeftX, - FLOAT topLeftY, - FLOAT width, - FLOAT height, - FLOAT minDepth = D3D12_MIN_DEPTH, - FLOAT maxDepth = D3D12_MAX_DEPTH) noexcept { - TopLeftX = topLeftX; - TopLeftY = topLeftY; - Width = width; - Height = height; - MinDepth = minDepth; - MaxDepth = maxDepth; - } - explicit CD3DX12_VIEWPORT(_In_ ID3D12Resource* pResource, - UINT mipSlice = 0, - FLOAT topLeftX = 0.0f, - FLOAT topLeftY = 0.0f, - FLOAT minDepth = D3D12_MIN_DEPTH, - FLOAT maxDepth = D3D12_MAX_DEPTH) noexcept { -# if defined(_MSC_VER) || !defined(_WIN32) - const auto Desc = pResource->GetDesc(); -# else - D3D12_RESOURCE_DESC tmpDesc; - const auto& Desc = *pResource->GetDesc(&tmpDesc); -# endif - const UINT64 SubresourceWidth = Desc.Width >> mipSlice; - const UINT64 SubresourceHeight = Desc.Height >> mipSlice; - switch (Desc.Dimension) { - case D3D12_RESOURCE_DIMENSION_BUFFER: - TopLeftX = topLeftX; - TopLeftY = 0.0f; - Width = float(Desc.Width) - topLeftX; - Height = 1.0f; - break; - case D3D12_RESOURCE_DIMENSION_TEXTURE1D: - TopLeftX = topLeftX; - TopLeftY = 0.0f; - Width = (SubresourceWidth ? float(SubresourceWidth) : 1.0f) - topLeftX; - Height = 1.0f; - break; - case D3D12_RESOURCE_DIMENSION_TEXTURE2D: - case D3D12_RESOURCE_DIMENSION_TEXTURE3D: - TopLeftX = topLeftX; - TopLeftY = topLeftY; - Width = (SubresourceWidth ? float(SubresourceWidth) : 1.0f) - topLeftX; - Height = (SubresourceHeight ? 
float(SubresourceHeight) : 1.0f) - topLeftY; - break; - default: - break; - } - - MinDepth = minDepth; - MaxDepth = maxDepth; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_BOX : public D3D12_BOX { - CD3DX12_BOX() = default; - explicit CD3DX12_BOX(const D3D12_BOX& o) noexcept : D3D12_BOX(o) {} - explicit CD3DX12_BOX(LONG Left, LONG Right) noexcept { - left = static_cast(Left); - top = 0; - front = 0; - right = static_cast(Right); - bottom = 1; - back = 1; - } - explicit CD3DX12_BOX(LONG Left, LONG Top, LONG Right, LONG Bottom) noexcept { - left = static_cast(Left); - top = static_cast(Top); - front = 0; - right = static_cast(Right); - bottom = static_cast(Bottom); - back = 1; - } - explicit CD3DX12_BOX(LONG Left, LONG Top, LONG Front, LONG Right, LONG Bottom, LONG Back) noexcept { - left = static_cast(Left); - top = static_cast(Top); - front = static_cast(Front); - right = static_cast(Right); - bottom = static_cast(Bottom); - back = static_cast(Back); - } -}; -inline bool operator==(const D3D12_BOX& l, const D3D12_BOX& r) noexcept { - return l.left == r.left && l.top == r.top && l.front == r.front && l.right == r.right && l.bottom == r.bottom && - l.back == r.back; -} -inline bool operator!=(const D3D12_BOX& l, const D3D12_BOX& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_DEPTH_STENCIL_DESC : public D3D12_DEPTH_STENCIL_DESC { - CD3DX12_DEPTH_STENCIL_DESC() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC(const D3D12_DEPTH_STENCIL_DESC& o) noexcept : D3D12_DEPTH_STENCIL_DESC(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK; - StencilWriteMask = 
D3D12_DEFAULT_STENCIL_WRITE_MASK; - const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - } - explicit CD3DX12_DEPTH_STENCIL_DESC(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - UINT8 stencilReadMask, - UINT8 stencilWriteMask, - D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - StencilReadMask = stencilReadMask; - StencilWriteMask = stencilWriteMask; - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_DEPTH_STENCIL_DESC1 : public D3D12_DEPTH_STENCIL_DESC1 { - CD3DX12_DEPTH_STENCIL_DESC1() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC1(const D3D12_DEPTH_STENCIL_DESC1& o) noexcept : D3D12_DEPTH_STENCIL_DESC1(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC1(const D3D12_DEPTH_STENCIL_DESC& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - StencilReadMask = o.StencilReadMask; - 
StencilWriteMask = o.StencilWriteMask; - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC1(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK; - StencilWriteMask = D3D12_DEFAULT_STENCIL_WRITE_MASK; - const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC1(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - UINT8 stencilReadMask, - UINT8 stencilWriteMask, - D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc, - BOOL depthBoundsTestEnable) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - StencilReadMask = stencilReadMask; - StencilWriteMask = stencilWriteMask; - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = 
frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - DepthBoundsTestEnable = depthBoundsTestEnable; - } - operator D3D12_DEPTH_STENCIL_DESC() const noexcept { - D3D12_DEPTH_STENCIL_DESC D; - D.DepthEnable = DepthEnable; - D.DepthWriteMask = DepthWriteMask; - D.DepthFunc = DepthFunc; - D.StencilEnable = StencilEnable; - D.StencilReadMask = StencilReadMask; - D.StencilWriteMask = StencilWriteMask; - D.FrontFace.StencilFailOp = FrontFace.StencilFailOp; - D.FrontFace.StencilDepthFailOp = FrontFace.StencilDepthFailOp; - D.FrontFace.StencilPassOp = FrontFace.StencilPassOp; - D.FrontFace.StencilFunc = FrontFace.StencilFunc; - D.BackFace.StencilFailOp = BackFace.StencilFailOp; - D.BackFace.StencilDepthFailOp = BackFace.StencilDepthFailOp; - D.BackFace.StencilPassOp = BackFace.StencilPassOp; - D.BackFace.StencilFunc = BackFace.StencilFunc; - return D; - } -}; - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 606) -struct CD3DX12_DEPTH_STENCIL_DESC2 : public D3D12_DEPTH_STENCIL_DESC2 { - CD3DX12_DEPTH_STENCIL_DESC2() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC2& o) noexcept : D3D12_DEPTH_STENCIL_DESC2(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC1& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - FrontFace.StencilReadMask = o.StencilReadMask; - FrontFace.StencilWriteMask = 
o.StencilWriteMask; - - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - BackFace.StencilReadMask = o.StencilReadMask; - BackFace.StencilWriteMask = o.StencilWriteMask; - DepthBoundsTestEnable = o.DepthBoundsTestEnable; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - FrontFace.StencilReadMask = o.StencilReadMask; - FrontFace.StencilWriteMask = o.StencilWriteMask; - - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - BackFace.StencilReadMask = o.StencilReadMask; - BackFace.StencilWriteMask = o.StencilWriteMask; - - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - const D3D12_DEPTH_STENCILOP_DESC1 defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS, - D3D12_DEFAULT_STENCIL_READ_MASK, - D3D12_DEFAULT_STENCIL_WRITE_MASK}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - 
D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - UINT8 frontStencilReadMask, - UINT8 frontStencilWriteMask, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc, - UINT8 backStencilReadMask, - UINT8 backStencilWriteMask, - BOOL depthBoundsTestEnable) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - FrontFace.StencilReadMask = frontStencilReadMask; - FrontFace.StencilWriteMask = frontStencilWriteMask; - - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - BackFace.StencilReadMask = backStencilReadMask; - BackFace.StencilWriteMask = backStencilWriteMask; - - DepthBoundsTestEnable = depthBoundsTestEnable; - } - - operator D3D12_DEPTH_STENCIL_DESC() const noexcept { - D3D12_DEPTH_STENCIL_DESC D; - D.DepthEnable = DepthEnable; - D.DepthWriteMask = DepthWriteMask; - D.DepthFunc = DepthFunc; - D.StencilEnable = StencilEnable; - D.StencilReadMask = FrontFace.StencilReadMask; - D.StencilWriteMask = FrontFace.StencilWriteMask; - D.FrontFace.StencilFailOp = FrontFace.StencilFailOp; - D.FrontFace.StencilDepthFailOp = FrontFace.StencilDepthFailOp; - D.FrontFace.StencilPassOp = FrontFace.StencilPassOp; - D.FrontFace.StencilFunc = FrontFace.StencilFunc; - D.BackFace.StencilFailOp = BackFace.StencilFailOp; - D.BackFace.StencilDepthFailOp = BackFace.StencilDepthFailOp; - D.BackFace.StencilPassOp = BackFace.StencilPassOp; - D.BackFace.StencilFunc = 
BackFace.StencilFunc; - return D; - } -}; -# endif // D3D12_SDK_VERSION >= 606 - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_BLEND_DESC : public D3D12_BLEND_DESC { - CD3DX12_BLEND_DESC() = default; - explicit CD3DX12_BLEND_DESC(const D3D12_BLEND_DESC& o) noexcept : D3D12_BLEND_DESC(o) {} - explicit CD3DX12_BLEND_DESC(CD3DX12_DEFAULT) noexcept { - AlphaToCoverageEnable = FALSE; - IndependentBlendEnable = FALSE; - const D3D12_RENDER_TARGET_BLEND_DESC defaultRenderTargetBlendDesc = { - FALSE, - FALSE, - D3D12_BLEND_ONE, - D3D12_BLEND_ZERO, - D3D12_BLEND_OP_ADD, - D3D12_BLEND_ONE, - D3D12_BLEND_ZERO, - D3D12_BLEND_OP_ADD, - D3D12_LOGIC_OP_NOOP, - D3D12_COLOR_WRITE_ENABLE_ALL, - }; - for (UINT i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) - RenderTarget[i] = defaultRenderTargetBlendDesc; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RASTERIZER_DESC : public D3D12_RASTERIZER_DESC { - CD3DX12_RASTERIZER_DESC() = default; - explicit CD3DX12_RASTERIZER_DESC(const D3D12_RASTERIZER_DESC& o) noexcept : D3D12_RASTERIZER_DESC(o) {} - explicit CD3DX12_RASTERIZER_DESC(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - MultisampleEnable = FALSE; - AntialiasedLineEnable = FALSE; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE cullMode, - BOOL frontCounterClockwise, - INT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - BOOL multisampleEnable, - BOOL antialiasedLineEnable, - UINT forcedSampleCount, - 
D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - MultisampleEnable = multisampleEnable; - AntialiasedLineEnable = antialiasedLineEnable; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } -}; - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 608) -struct CD3DX12_RASTERIZER_DESC1 : public D3D12_RASTERIZER_DESC1 { - CD3DX12_RASTERIZER_DESC1() = default; - explicit CD3DX12_RASTERIZER_DESC1(const D3D12_RASTERIZER_DESC1& o) noexcept - : D3D12_RASTERIZER_DESC1(o) - - {} - explicit CD3DX12_RASTERIZER_DESC1(const D3D12_RASTERIZER_DESC& o) noexcept { - FillMode = o.FillMode; - CullMode = o.CullMode; - FrontCounterClockwise = o.FrontCounterClockwise; - DepthBias = static_cast(o.DepthBias); - DepthBiasClamp = o.DepthBiasClamp; - SlopeScaledDepthBias = o.SlopeScaledDepthBias; - DepthClipEnable = o.DepthClipEnable; - MultisampleEnable = o.MultisampleEnable; - AntialiasedLineEnable = o.AntialiasedLineEnable; - ForcedSampleCount = o.ForcedSampleCount; - ConservativeRaster = o.ConservativeRaster; - } - explicit CD3DX12_RASTERIZER_DESC1(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - MultisampleEnable = FALSE; - AntialiasedLineEnable = FALSE; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC1(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE 
cullMode, - BOOL frontCounterClockwise, - FLOAT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - BOOL multisampleEnable, - BOOL antialiasedLineEnable, - UINT forcedSampleCount, - D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - MultisampleEnable = multisampleEnable; - AntialiasedLineEnable = antialiasedLineEnable; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } - - operator D3D12_RASTERIZER_DESC() const noexcept { - D3D12_RASTERIZER_DESC o; - - o.FillMode = FillMode; - o.CullMode = CullMode; - o.FrontCounterClockwise = FrontCounterClockwise; - o.DepthBias = static_cast(DepthBias); - o.DepthBiasClamp = DepthBiasClamp; - o.SlopeScaledDepthBias = SlopeScaledDepthBias; - o.DepthClipEnable = DepthClipEnable; - o.MultisampleEnable = MultisampleEnable; - o.AntialiasedLineEnable = AntialiasedLineEnable; - o.ForcedSampleCount = ForcedSampleCount; - o.ConservativeRaster = ConservativeRaster; - - return o; - } -}; -# endif // D3D12_SDK_VERSION >= 608 - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 610) -struct CD3DX12_RASTERIZER_DESC2 : public D3D12_RASTERIZER_DESC2 { - CD3DX12_RASTERIZER_DESC2() = default; - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC2& o) noexcept - : D3D12_RASTERIZER_DESC2(o) - - {} - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC1& o) noexcept { - FillMode = o.FillMode; - CullMode = o.CullMode; - FrontCounterClockwise = o.FrontCounterClockwise; - DepthBias = o.DepthBias; - DepthBiasClamp = o.DepthBiasClamp; - SlopeScaledDepthBias = o.SlopeScaledDepthBias; - DepthClipEnable = 
o.DepthClipEnable; - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALIASED; - if (o.MultisampleEnable) { - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_QUADRILATERAL_WIDE; - } else if (o.AntialiasedLineEnable) { - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALPHA_ANTIALIASED; - } - ForcedSampleCount = o.ForcedSampleCount; - ConservativeRaster = o.ConservativeRaster; - } - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC& o) noexcept - : CD3DX12_RASTERIZER_DESC2(CD3DX12_RASTERIZER_DESC1(o)) {} - explicit CD3DX12_RASTERIZER_DESC2(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALIASED; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC2(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE cullMode, - BOOL frontCounterClockwise, - FLOAT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - D3D12_LINE_RASTERIZATION_MODE lineRasterizationMode, - UINT forcedSampleCount, - D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - LineRasterizationMode = lineRasterizationMode; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } - - operator D3D12_RASTERIZER_DESC1() const noexcept { - D3D12_RASTERIZER_DESC1 o; - - o.FillMode = FillMode; - o.CullMode = CullMode; - o.FrontCounterClockwise = FrontCounterClockwise; - o.DepthBias = 
DepthBias; - o.DepthBiasClamp = DepthBiasClamp; - o.SlopeScaledDepthBias = SlopeScaledDepthBias; - o.DepthClipEnable = DepthClipEnable; - o.MultisampleEnable = FALSE; - o.AntialiasedLineEnable = FALSE; - if (LineRasterizationMode == D3D12_LINE_RASTERIZATION_MODE_ALPHA_ANTIALIASED) { - o.AntialiasedLineEnable = TRUE; - } else if (LineRasterizationMode != D3D12_LINE_RASTERIZATION_MODE_ALIASED) { - o.MultisampleEnable = TRUE; - } - o.ForcedSampleCount = ForcedSampleCount; - o.ConservativeRaster = ConservativeRaster; - - return o; - } - operator D3D12_RASTERIZER_DESC() const noexcept { - return static_cast(CD3DX12_RASTERIZER_DESC1(static_cast(*this))); - } -}; -# endif // D3D12_SDK_VERSION >= 610 - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_ALLOCATION_INFO : public D3D12_RESOURCE_ALLOCATION_INFO { - CD3DX12_RESOURCE_ALLOCATION_INFO() = default; - explicit CD3DX12_RESOURCE_ALLOCATION_INFO(const D3D12_RESOURCE_ALLOCATION_INFO& o) noexcept - : D3D12_RESOURCE_ALLOCATION_INFO(o) {} - CD3DX12_RESOURCE_ALLOCATION_INFO(UINT64 size, UINT64 alignment) noexcept { - SizeInBytes = size; - Alignment = alignment; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_HEAP_PROPERTIES : public D3D12_HEAP_PROPERTIES { - CD3DX12_HEAP_PROPERTIES() = default; - explicit CD3DX12_HEAP_PROPERTIES(const D3D12_HEAP_PROPERTIES& o) noexcept : D3D12_HEAP_PROPERTIES(o) {} - CD3DX12_HEAP_PROPERTIES(D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - UINT creationNodeMask = 1, - UINT nodeMask = 1) - noexcept { - Type = D3D12_HEAP_TYPE_CUSTOM; - CPUPageProperty = cpuPageProperty; - MemoryPoolPreference = memoryPoolPreference; - CreationNodeMask = creationNodeMask; - VisibleNodeMask = nodeMask; - } - explicit CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE type, UINT creationNodeMask = 1, UINT nodeMask = 1) noexcept { - Type = 
type; - CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN; - MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN; - CreationNodeMask = creationNodeMask; - VisibleNodeMask = nodeMask; - } - bool IsCPUAccessible() const noexcept { - return Type == D3D12_HEAP_TYPE_UPLOAD || Type == D3D12_HEAP_TYPE_READBACK -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 609) - || Type == D3D12_HEAP_TYPE_GPU_UPLOAD -# endif - || (Type == D3D12_HEAP_TYPE_CUSTOM && (CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE || - CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_BACK)); - } -}; -inline bool operator==(const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r) noexcept { - return l.Type == r.Type && l.CPUPageProperty == r.CPUPageProperty && - l.MemoryPoolPreference == r.MemoryPoolPreference && l.CreationNodeMask == r.CreationNodeMask && - l.VisibleNodeMask == r.VisibleNodeMask; -} -inline bool operator!=(const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_HEAP_DESC : public D3D12_HEAP_DESC { - CD3DX12_HEAP_DESC() = default; - explicit CD3DX12_HEAP_DESC(const D3D12_HEAP_DESC& o) noexcept : D3D12_HEAP_DESC(o) {} - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_HEAP_PROPERTIES properties, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = size; - Properties = properties; - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_HEAP_TYPE type, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = size; - Properties = CD3DX12_HEAP_PROPERTIES(type); - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - 
noexcept { - SizeInBytes = size; - Properties = CD3DX12_HEAP_PROPERTIES(cpuPageProperty, memoryPoolPreference); - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_HEAP_PROPERTIES properties, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = properties; - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_HEAP_TYPE type, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = CD3DX12_HEAP_PROPERTIES(type); - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = CD3DX12_HEAP_PROPERTIES(cpuPageProperty, memoryPoolPreference); - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - bool IsCPUAccessible() const noexcept { - return static_cast(&Properties)->IsCPUAccessible(); - } -}; -inline bool operator==(const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r) noexcept { - return l.SizeInBytes == r.SizeInBytes && l.Properties == r.Properties && l.Alignment == r.Alignment && - l.Flags == r.Flags; -} -inline bool operator!=(const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_CLEAR_VALUE : public D3D12_CLEAR_VALUE { - CD3DX12_CLEAR_VALUE() = default; - explicit CD3DX12_CLEAR_VALUE(const D3D12_CLEAR_VALUE& o) noexcept : D3D12_CLEAR_VALUE(o) {} - CD3DX12_CLEAR_VALUE(DXGI_FORMAT format, const FLOAT color[4]) noexcept { - Format = format; - memcpy(Color, color, 
sizeof(Color)); - } - CD3DX12_CLEAR_VALUE(DXGI_FORMAT format, FLOAT depth, UINT8 stencil) noexcept { - Format = format; - memset(&Color, 0, sizeof(Color)); - /* Use memcpy to preserve NAN values */ - memcpy(&DepthStencil.Depth, &depth, sizeof(depth)); - DepthStencil.Stencil = stencil; - } -}; - -//------------------------------------------------------------------------------------------------ -inline bool operator==(const D3D12_CLEAR_VALUE& a, const D3D12_CLEAR_VALUE& b) noexcept { - if (a.Format != b.Format) - return false; - if (a.Format == DXGI_FORMAT_D24_UNORM_S8_UINT || a.Format == DXGI_FORMAT_D16_UNORM || - a.Format == DXGI_FORMAT_D32_FLOAT || a.Format == DXGI_FORMAT_D32_FLOAT_S8X24_UINT) { - return (a.DepthStencil.Depth == b.DepthStencil.Depth) && (a.DepthStencil.Stencil == b.DepthStencil.Stencil); - } else { - return (a.Color[0] == b.Color[0]) && (a.Color[1] == b.Color[1]) && (a.Color[2] == b.Color[2]) && - (a.Color[3] == b.Color[3]); - } -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RANGE : public D3D12_RANGE { - CD3DX12_RANGE() = default; - explicit CD3DX12_RANGE(const D3D12_RANGE& o) noexcept : D3D12_RANGE(o) {} - CD3DX12_RANGE(SIZE_T begin, SIZE_T end) noexcept { - Begin = begin; - End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RANGE_UINT64 : public D3D12_RANGE_UINT64 { - CD3DX12_RANGE_UINT64() = default; - explicit CD3DX12_RANGE_UINT64(const D3D12_RANGE_UINT64& o) noexcept : D3D12_RANGE_UINT64(o) {} - CD3DX12_RANGE_UINT64(UINT64 begin, UINT64 end) noexcept { - Begin = begin; - End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_RANGE_UINT64 : public D3D12_SUBRESOURCE_RANGE_UINT64 { - CD3DX12_SUBRESOURCE_RANGE_UINT64() = default; - explicit CD3DX12_SUBRESOURCE_RANGE_UINT64(const 
D3D12_SUBRESOURCE_RANGE_UINT64& o) noexcept - : D3D12_SUBRESOURCE_RANGE_UINT64(o) {} - CD3DX12_SUBRESOURCE_RANGE_UINT64(UINT subresource, const D3D12_RANGE_UINT64& range) noexcept { - Subresource = subresource; - Range = range; - } - CD3DX12_SUBRESOURCE_RANGE_UINT64(UINT subresource, UINT64 begin, UINT64 end) noexcept { - Subresource = subresource; - Range.Begin = begin; - Range.End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SHADER_BYTECODE : public D3D12_SHADER_BYTECODE { - CD3DX12_SHADER_BYTECODE() = default; - explicit CD3DX12_SHADER_BYTECODE(const D3D12_SHADER_BYTECODE& o) noexcept : D3D12_SHADER_BYTECODE(o) {} - CD3DX12_SHADER_BYTECODE(_In_ ID3DBlob* pShaderBlob) noexcept { - pShaderBytecode = pShaderBlob->GetBufferPointer(); - BytecodeLength = pShaderBlob->GetBufferSize(); - } - CD3DX12_SHADER_BYTECODE(const void* _pShaderBytecode, SIZE_T bytecodeLength) noexcept { - pShaderBytecode = _pShaderBytecode; - BytecodeLength = bytecodeLength; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILED_RESOURCE_COORDINATE : public D3D12_TILED_RESOURCE_COORDINATE { - CD3DX12_TILED_RESOURCE_COORDINATE() = default; - explicit CD3DX12_TILED_RESOURCE_COORDINATE(const D3D12_TILED_RESOURCE_COORDINATE& o) noexcept - : D3D12_TILED_RESOURCE_COORDINATE(o) {} - CD3DX12_TILED_RESOURCE_COORDINATE(UINT x, UINT y, UINT z, UINT subresource) noexcept { - X = x; - Y = y; - Z = z; - Subresource = subresource; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILE_REGION_SIZE : public D3D12_TILE_REGION_SIZE { - CD3DX12_TILE_REGION_SIZE() = default; - explicit CD3DX12_TILE_REGION_SIZE(const D3D12_TILE_REGION_SIZE& o) noexcept : D3D12_TILE_REGION_SIZE(o) {} - CD3DX12_TILE_REGION_SIZE(UINT numTiles, BOOL useBox, UINT width, UINT16 height, UINT16 depth) 
noexcept { - NumTiles = numTiles; - UseBox = useBox; - Width = width; - Height = height; - Depth = depth; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_TILING : public D3D12_SUBRESOURCE_TILING { - CD3DX12_SUBRESOURCE_TILING() = default; - explicit CD3DX12_SUBRESOURCE_TILING(const D3D12_SUBRESOURCE_TILING& o) noexcept : D3D12_SUBRESOURCE_TILING(o) {} - CD3DX12_SUBRESOURCE_TILING(UINT widthInTiles, - UINT16 heightInTiles, - UINT16 depthInTiles, - UINT startTileIndexInOverallResource) - noexcept { - WidthInTiles = widthInTiles; - HeightInTiles = heightInTiles; - DepthInTiles = depthInTiles; - StartTileIndexInOverallResource = startTileIndexInOverallResource; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILE_SHAPE : public D3D12_TILE_SHAPE { - CD3DX12_TILE_SHAPE() = default; - explicit CD3DX12_TILE_SHAPE(const D3D12_TILE_SHAPE& o) noexcept : D3D12_TILE_SHAPE(o) {} - CD3DX12_TILE_SHAPE(UINT widthInTexels, UINT heightInTexels, UINT depthInTexels) noexcept { - WidthInTexels = widthInTexels; - HeightInTexels = heightInTexels; - DepthInTexels = depthInTexels; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_PACKED_MIP_INFO : public D3D12_PACKED_MIP_INFO { - CD3DX12_PACKED_MIP_INFO() = default; - explicit CD3DX12_PACKED_MIP_INFO(const D3D12_PACKED_MIP_INFO& o) noexcept : D3D12_PACKED_MIP_INFO(o) {} - CD3DX12_PACKED_MIP_INFO(UINT8 numStandardMips, - UINT8 numPackedMips, - UINT numTilesForPackedMips, - UINT startTileIndexInOverallResource) - noexcept { - NumStandardMips = numStandardMips; - NumPackedMips = numPackedMips; - NumTilesForPackedMips = numTilesForPackedMips; - StartTileIndexInOverallResource = startTileIndexInOverallResource; - } -}; - 
-//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_FOOTPRINT : public D3D12_SUBRESOURCE_FOOTPRINT { - CD3DX12_SUBRESOURCE_FOOTPRINT() = default; - explicit CD3DX12_SUBRESOURCE_FOOTPRINT(const D3D12_SUBRESOURCE_FOOTPRINT& o) noexcept - : D3D12_SUBRESOURCE_FOOTPRINT(o) {} - CD3DX12_SUBRESOURCE_FOOTPRINT(DXGI_FORMAT format, UINT width, UINT height, UINT depth, UINT rowPitch) noexcept { - Format = format; - Width = width; - Height = height; - Depth = depth; - RowPitch = rowPitch; - } - explicit CD3DX12_SUBRESOURCE_FOOTPRINT(const D3D12_RESOURCE_DESC& resDesc, UINT rowPitch) noexcept { - Format = resDesc.Format; - Width = UINT(resDesc.Width); - Height = resDesc.Height; - Depth = (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? resDesc.DepthOrArraySize : 1u); - RowPitch = rowPitch; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TEXTURE_COPY_LOCATION : public D3D12_TEXTURE_COPY_LOCATION { - CD3DX12_TEXTURE_COPY_LOCATION() = default; - explicit CD3DX12_TEXTURE_COPY_LOCATION(const D3D12_TEXTURE_COPY_LOCATION& o) noexcept - : D3D12_TEXTURE_COPY_LOCATION(o) {} - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes) noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; - PlacedFootprint = {}; - } - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes, D3D12_PLACED_SUBRESOURCE_FOOTPRINT const& Footprint) - noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT; - PlacedFootprint = Footprint; - } - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes, UINT Sub) noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; - PlacedFootprint = {}; - SubresourceIndex = Sub; - } -}; - -//------------------------------------------------------------------------------------------------ -constexpr UINT D3D12CalcSubresource(UINT MipSlice, - 
UINT ArraySlice, - UINT PlaneSlice, - UINT MipLevels, - UINT ArraySize) noexcept { - return MipSlice + ArraySlice * MipLevels + PlaneSlice * MipLevels * ArraySize; -} - -//------------------------------------------------------------------------------------------------ -inline UINT8 D3D12GetFormatPlaneCount(_In_ ID3D12Device* pDevice, DXGI_FORMAT Format) noexcept { - D3D12_FEATURE_DATA_FORMAT_INFO formatInfo = {Format, 0}; - if (FAILED(pDevice->CheckFeatureSupport(D3D12_FEATURE_FORMAT_INFO, &formatInfo, sizeof(formatInfo)))) { - return 0; - } - return formatInfo.PlaneCount; -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_DESC : public D3D12_RESOURCE_DESC { - CD3DX12_RESOURCE_DESC() = default; - explicit CD3DX12_RESOURCE_DESC(const D3D12_RESOURCE_DESC& o) noexcept : D3D12_RESOURCE_DESC(o) {} - CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION dimension, - UINT64 alignment, - UINT64 width, - UINT height, - UINT16 depthOrArraySize, - UINT16 mipLevels, - DXGI_FORMAT format, - UINT sampleCount, - UINT sampleQuality, - D3D12_TEXTURE_LAYOUT layout, - D3D12_RESOURCE_FLAGS flags) - noexcept { - Dimension = dimension; - Alignment = alignment; - Width = width; - Height = height; - DepthOrArraySize = depthOrArraySize; - MipLevels = mipLevels; - Format = format; - SampleDesc.Count = sampleCount; - SampleDesc.Quality = sampleQuality; - Layout = layout; - Flags = flags; - } - static inline CD3DX12_RESOURCE_DESC Buffer(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_BUFFER, - resAllocInfo.Alignment, - resAllocInfo.SizeInBytes, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags); - } - static inline CD3DX12_RESOURCE_DESC Buffer(UINT64 width, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - UINT64 alignment = 0) noexcept { - return 
CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_BUFFER, - alignment, - width, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex1D(DXGI_FORMAT format, - UINT64 width, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE1D, - alignment, - width, - 1, - arraySize, - mipLevels, - format, - 1, - 0, - layout, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex2D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - UINT sampleCount = 1, - UINT sampleQuality = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE2D, - alignment, - width, - height, - arraySize, - mipLevels, - format, - sampleCount, - sampleQuality, - layout, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex3D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 depth, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE3D, - alignment, - width, - height, - depth, - mipLevels, - format, - 1, - 0, - layout, - flags); - } - inline UINT16 Depth() const noexcept { - return (Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1u); - } - inline UINT16 ArraySize() const noexcept { - return (Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE3D ? 
DepthOrArraySize : 1u); - } - inline UINT8 PlaneCount(_In_ ID3D12Device* pDevice) const noexcept { - return D3D12GetFormatPlaneCount(pDevice, Format); - } - inline UINT Subresources(_In_ ID3D12Device* pDevice) const noexcept { - return static_cast(MipLevels) * ArraySize() * PlaneCount(pDevice); - } - inline UINT CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT PlaneSlice) noexcept { - return D3D12CalcSubresource(MipSlice, ArraySlice, PlaneSlice, MipLevels, ArraySize()); - } -}; -inline bool operator==(const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r) noexcept { - return l.Dimension == r.Dimension && l.Alignment == r.Alignment && l.Width == r.Width && l.Height == r.Height && - l.DepthOrArraySize == r.DepthOrArraySize && l.MipLevels == r.MipLevels && l.Format == r.Format && - l.SampleDesc.Count == r.SampleDesc.Count && l.SampleDesc.Quality == r.SampleDesc.Quality && - l.Layout == r.Layout && l.Flags == r.Flags; -} -inline bool operator!=(const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_DESC1 : public D3D12_RESOURCE_DESC1 { - CD3DX12_RESOURCE_DESC1() = default; - explicit CD3DX12_RESOURCE_DESC1(const D3D12_RESOURCE_DESC1& o) noexcept : D3D12_RESOURCE_DESC1(o) {} - explicit CD3DX12_RESOURCE_DESC1(const D3D12_RESOURCE_DESC& o) noexcept { - Dimension = o.Dimension; - Alignment = o.Alignment; - Width = o.Width; - Height = o.Height; - DepthOrArraySize = o.DepthOrArraySize; - MipLevels = o.MipLevels; - Format = o.Format; - SampleDesc = o.SampleDesc; - Layout = o.Layout; - Flags = o.Flags; - SamplerFeedbackMipRegion = {}; - } - CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION dimension, - UINT64 alignment, - UINT64 width, - UINT height, - UINT16 depthOrArraySize, - UINT16 mipLevels, - DXGI_FORMAT format, - UINT sampleCount, - UINT sampleQuality, - D3D12_TEXTURE_LAYOUT layout, - 
D3D12_RESOURCE_FLAGS flags, - UINT samplerFeedbackMipRegionWidth = 0, - UINT samplerFeedbackMipRegionHeight = 0, - UINT samplerFeedbackMipRegionDepth = 0) - noexcept { - Dimension = dimension; - Alignment = alignment; - Width = width; - Height = height; - DepthOrArraySize = depthOrArraySize; - MipLevels = mipLevels; - Format = format; - SampleDesc.Count = sampleCount; - SampleDesc.Quality = sampleQuality; - Layout = layout; - Flags = flags; - SamplerFeedbackMipRegion.Width = samplerFeedbackMipRegionWidth; - SamplerFeedbackMipRegion.Height = samplerFeedbackMipRegionHeight; - SamplerFeedbackMipRegion.Depth = samplerFeedbackMipRegionDepth; - } - - static inline CD3DX12_RESOURCE_DESC1 Buffer(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_BUFFER, - resAllocInfo.Alignment, - resAllocInfo.SizeInBytes, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Buffer(UINT64 width, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_BUFFER, - alignment, - width, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Tex1D(DXGI_FORMAT format, - UINT64 width, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE1D, - alignment, - width, - 1, - arraySize, - mipLevels, - format, - 1, - 0, - layout, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Tex2D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 arraySize = 1, - UINT16 mipLevels 
= 0, - UINT sampleCount = 1, - UINT sampleQuality = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0, - UINT samplerFeedbackMipRegionWidth = 0, - UINT samplerFeedbackMipRegionHeight = 0, - UINT samplerFeedbackMipRegionDepth = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE2D, - alignment, - width, - height, - arraySize, - mipLevels, - format, - sampleCount, - sampleQuality, - layout, - flags, - samplerFeedbackMipRegionWidth, - samplerFeedbackMipRegionHeight, - samplerFeedbackMipRegionDepth); - } - static inline CD3DX12_RESOURCE_DESC1 Tex3D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 depth, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE3D, - alignment, - width, - height, - depth, - mipLevels, - format, - 1, - 0, - layout, - flags, - 0, - 0, - 0); - } - inline UINT16 Depth() const noexcept { - return (Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1u); - } - inline UINT16 ArraySize() const noexcept { - return (Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE3D ? 
DepthOrArraySize : 1u); - } - inline UINT8 PlaneCount(_In_ ID3D12Device* pDevice) const noexcept { - return D3D12GetFormatPlaneCount(pDevice, Format); - } - inline UINT Subresources(_In_ ID3D12Device* pDevice) const noexcept { - return static_cast(MipLevels) * ArraySize() * PlaneCount(pDevice); - } - inline UINT CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT PlaneSlice) noexcept { - return D3D12CalcSubresource(MipSlice, ArraySlice, PlaneSlice, MipLevels, ArraySize()); - } -}; -inline bool operator==(const D3D12_RESOURCE_DESC1& l, const D3D12_RESOURCE_DESC1& r) noexcept { - return l.Dimension == r.Dimension && l.Alignment == r.Alignment && l.Width == r.Width && l.Height == r.Height && - l.DepthOrArraySize == r.DepthOrArraySize && l.MipLevels == r.MipLevels && l.Format == r.Format && - l.SampleDesc.Count == r.SampleDesc.Count && l.SampleDesc.Quality == r.SampleDesc.Quality && - l.Layout == r.Layout && l.Flags == r.Flags && - l.SamplerFeedbackMipRegion.Width == r.SamplerFeedbackMipRegion.Width && - l.SamplerFeedbackMipRegion.Height == r.SamplerFeedbackMipRegion.Height && - l.SamplerFeedbackMipRegion.Depth == r.SamplerFeedbackMipRegion.Depth; -} -inline bool operator!=(const D3D12_RESOURCE_DESC1& l, const D3D12_RESOURCE_DESC1& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -// Fills in the mipmap and alignment values of pDesc when either members are zero -// Used to replace an implicit field to an explicit (0 mip map = max mip map level) -// If expansion has occured, returns LclDesc, else returns the original pDesc -inline const CD3DX12_RESOURCE_DESC1* D3DX12ConditionallyExpandAPIDesc(CD3DX12_RESOURCE_DESC1& LclDesc, - const CD3DX12_RESOURCE_DESC1* pDesc) { - // Expand mip levels: - if (pDesc->MipLevels == 0 || pDesc->Alignment == 0) { - LclDesc = *pDesc; - if (pDesc->MipLevels == 0) { - auto MaxMipLevels = [](UINT64 uiMaxDimension) -> UINT16 { - UINT16 uiRet = 0; - while 
(uiMaxDimension > 0) { - uiRet++; - uiMaxDimension >>= 1; - } - return uiRet; - }; - auto Max = [](UINT64 const& a, UINT64 const& b) { - return (a < b) ? b : a; - }; - - LclDesc.MipLevels = - MaxMipLevels(Max(LclDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? LclDesc.DepthOrArraySize : 1, - Max(LclDesc.Width, LclDesc.Height))); - } - if (pDesc->Alignment == 0) { - if (pDesc->Layout == D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE || - pDesc->Layout == D3D12_TEXTURE_LAYOUT_64KB_STANDARD_SWIZZLE) { - LclDesc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT; - } else { - LclDesc.Alignment = (pDesc->SampleDesc.Count > 1 ? D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT - : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT); - } - } - return &LclDesc; - } else { - return pDesc; - } -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_VIEW_INSTANCING_DESC : public D3D12_VIEW_INSTANCING_DESC { - CD3DX12_VIEW_INSTANCING_DESC() = default; - explicit CD3DX12_VIEW_INSTANCING_DESC(const D3D12_VIEW_INSTANCING_DESC& o) noexcept - : D3D12_VIEW_INSTANCING_DESC(o) {} - explicit CD3DX12_VIEW_INSTANCING_DESC(CD3DX12_DEFAULT) noexcept { - ViewInstanceCount = 0; - pViewInstanceLocations = nullptr; - Flags = D3D12_VIEW_INSTANCING_FLAG_NONE; - } - explicit CD3DX12_VIEW_INSTANCING_DESC(UINT InViewInstanceCount, - const D3D12_VIEW_INSTANCE_LOCATION* InViewInstanceLocations, - D3D12_VIEW_INSTANCING_FLAGS InFlags) noexcept { - ViewInstanceCount = InViewInstanceCount; - pViewInstanceLocations = InViewInstanceLocations; - Flags = InFlags; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RT_FORMAT_ARRAY : public D3D12_RT_FORMAT_ARRAY { - CD3DX12_RT_FORMAT_ARRAY() = default; - explicit CD3DX12_RT_FORMAT_ARRAY(const D3D12_RT_FORMAT_ARRAY& o) noexcept : D3D12_RT_FORMAT_ARRAY(o) {} - explicit CD3DX12_RT_FORMAT_ARRAY(_In_reads_(NumFormats) const 
DXGI_FORMAT* pFormats, UINT NumFormats) noexcept { - NumRenderTargets = NumFormats; - memcpy(RTFormats, pFormats, sizeof(RTFormats)); - // assumes ARRAY_SIZE(pFormats) == ARRAY_SIZE(RTFormats) - } -}; - -#endif diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h deleted file mode 100644 index 2ae8f5c4bee1f6..00000000000000 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h +++ /dev/null @@ -1,15 +0,0 @@ -//********************************************************* -// -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License (MIT). -// -//********************************************************* - -#pragma once - -#ifdef _WIN32 - -struct CD3DX12_DEFAULT {}; -extern const DECLSPEC_SELECTANY CD3DX12_DEFAULT D3D12_DEFAULT; - -#endif From ca935238a902f72611ad796e65cf0ae9c2eb21c6 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Tue, 21 Jan 2025 16:56:29 +0100 Subject: [PATCH 34/35] [DOCS] adjustments preparing 2025.0 pass 2 (#28454) --- cspell.json | 412 ------------------ .../about-openvino/release-notes-openvino.rst | 98 ++--- .../configurations/genai-dependencies.rst | 4 +- .../get-started/install-openvino.rst | 6 +- .../openvino-workflow-generative.rst | 16 +- .../remote-tensor-api-gpu-plugin.rst | 2 +- .../query-device-properties.rst | 2 +- docs/dev/ov_dependencies.txt | 2 +- docs/sphinx_setup/index.rst | 2 +- .../cpp/benchmark/sync_benchmark/README.md | 6 +- .../benchmark/throughput_benchmark/README.md | 6 +- samples/cpp/hello_reshape_ssd/README.md | 2 +- samples/js/node/notebooks/hello-detection.nnb | 2 +- .../js/node/notebooks/hello-segmentation.nnb | 2 +- samples/js/node/notebooks/hello-world.nnb | 2 +- .../python/benchmark/bert_benchmark/README.md | 2 +- .../python/benchmark/sync_benchmark/README.md | 18 +- .../benchmark/throughput_benchmark/README.md | 18 +- 18 files 
changed, 77 insertions(+), 525 deletions(-) delete mode 100644 cspell.json diff --git a/cspell.json b/cspell.json deleted file mode 100644 index f59d00a6a052f6..00000000000000 --- a/cspell.json +++ /dev/null @@ -1,412 +0,0 @@ -{ - "version": "0.2", - "ignorePaths": [], - "dictionaryDefinitions": [], - "dictionaries": [], - "words": [ - "aarch64", - "acdadcfa", - "acea", - "abmrd", - "acfb", - "acosh", - "Acosh", - "adfcd", - "addcmul", - "addif", - "addmm", - "aeaa", - "agem", - "agew", - "armeabi", - "armhf", - "artefacts", - "ARTEFACTS", - "Asinh", - "asynch", - "Atanh", - "autodoc", - "Autograd", - "autoplugin", - "AUTOPLUGIN", - "autoremove", - "autosummary", - "bace", - "Backprop", - "bblayers", - "Beautif", - "Bilat", - "bindir", - "bitbake", - "BFYX", - "BFXY", - "bkgr", - "brctl", - "Bucketize", - "BUILDDIR", - "buildtools", - "buildsystems", - "BYXF", - "bvalue", - "bvlc", - "caffe", - "caffemodel", - "camvid", - "cbba", - "cbcd", - "cdad", - "cdrom", - "chrpath", - "classov", - "cldnn", - "clumber", - "codepath", - "codepaths", - "coeffs", - "concat", - "Concat", - "Conts", - "constexpr", - "consts", - "Consts", - "conv", - "Convolutional", - "CPPLINT", - "cpplint", - "crbegin", - "crend", - "ctest", - "ctput", - "CVAT", - "cython", - "dadb", - "DANDROID", - "DARM", - "Datumaro", - "datumaro", - "DBUILD", - "DCMAKE", - "ddepth", - "Depthwise", - "dearmor", - "devicesupport", - "dequantization", - "Dequantization", - "deeplabv", - "deeced", - "DENABLE", - "delif", - "denormal", - "DENORMAL", - "denormalized", - "Detectron", - "Dequantize", - "devel", - "devtoolset", - "dgpu", - "diffstat", - "dldt", - "dlstreamer", - "dkms", - "Dockerfiles", - "DOPENVINO", - "downscript", - "doxid", - "doxygen", - "Doxygen", - "doxygensnippet", - "DTHREADING", - "dpkg", - "DPYTHON", - "DSELECTIVE", - "dylib", - "DWORD", - "efficientdet", - "Efficientdet", - "Einsum", - "Elems", - "Elementwise", - "elementwise", - "Eltwise", - "endsphinxdirective", - "enumov", - "emcmake", 
- "emmake", - "emod", - "emom", - "emow", - "Emscripten", - "emscripten", - "emsdk", - "epel", - "ERRORLEVEL", - "evolutionally", - "executionpolicy", - "fafe", - "fdupes", - "flatbuffers", - "FLATBUFFERS", - "frontends", - "Frontends", - "FYXB", - "gaddb", - "GAPI", - "gapi", - "Gaussed", - "gcompoundkernel", - "gcomputation", - "GCPU", - "gcpukernel", - "Gelu", - "GELU", - "Geti", - "getitem", - "gimg", - "gitee", - "gflags", - "globbing", - "gmmlib", - "GNAs", - "gmock", - "gnueabihf", - "googlenet", - "gpgcheck", - "gpgkey", - "graphviz", - "Graphviz", - "groupov", - "gtest", - "hardtanh", - "hashfile", - "HDDL", - "HKLM", - "HOSTTOOLS", - "Hotspots", - "hotspots", - "hostnet", - "hwloc", - "hwquote", - "idbf", - "IDFT", - "iigd", - "ifdef", - "ifdown", - "ifup", - "imgproc", - "imshow", - "inet", - "INTEGRITYCHECK", - "ILSVRC", - "inferenced", - "Informations", - "insmod", - "intelocl", - "INTERPROCEDURAL", - "INSTALLDIR", - "IRDFT", - "jemalloc", - "kaldi", - "Keras", - "keypress", - "keyrings", - "Khronos", - "KROIs", - "Landm", - "landm", - "Latency", - "Lcov", - "ldconfig", - "libc", - "libopencl", - "libopencv", - "libpython", - "libtbb", - "libtbbbind", - "libtpm", - "libvirtd", - "linmac", - "Liskov", - "lowlatency", - "LTSC", - "LSTM", - "makefiles", - "malloc", - "memleaks", - "manylinux", - "maxdepth", - "miktext", - "Mish", - "mklink", - "mmap", - "mobilenet", - "Mobilenet", - "monodepth", - "mozallowfullscreen", - "msallowfullscreen", - "MSVC", - "msvc", - "Multiclass", - "muxed", - "mxnet", - "namespaceov", - "NCHW", - "ncpu", - "netdev", - "netplan", - "ngraph", - "nireq", - "NNCF", - "nncf", - "nocache", - "noglob", - "nohup", - "nlohmann", - "norestart", - "noqueue", - "nproc", - "NUMA", - "numpy", - "Numpy", - "oallowfullscreen", - "ocloc", - "OCSP", - "oneapi", - "onetbb", - "onnx", - "opencl", - "openembedded", - "openvino", - "Opset", - "opset", - "opsets", - "OVMS", - "ovms", - "ovsa", - "OVSA", - "ovsatool", - "OVTF", - "PACKAGECONFIG", - 
"paddlepaddle", - "parameterizable", - "partitioner", - "patchelf", - "passpattern", - "Pexels", - "pdmodel", - "PDPD", - "pkgdata", - "pkgs", - "pkill", - "polylines", - "postproc", - "postprocess", - "preprocess", - "Preprocess", - "protobuf", - "Protobuf", - "PROTOBUF", - "prototxt", - "PSROI", - "Pugi", - "pugixml", - "PUGIXML", - "pypi", - "PYTHONPATH", - "pzstd", - "qcow", - "qlen", - "QSPECTRE", - "Qspectre", - "quantizer", - "Rects", - "Relu", - "relu", - "rcnn", - "RCNN", - "RDFT", - "Redistributable", - "remotesigned", - "repolist", - "reproject", - "reshapable", - "Requantize", - "retval", - "RHODS", - "rmmod", - "runtool", - "scons", - "SCONS", - "segm", - "Selu", - "servercore", - "setuptools", - "setupvars", - "SETX", - "SIMD", - "Softmax", - "skylake", - "sphinxdirective", - "Strided", - "squeezenet", - "SWTPM", - "swtpm", - "TBBBIND", - "TBBROOT", - "Tensro", - "texlive", - "textrm", - "tflite", - "thirdparty", - "Thresholded", - "toctree", - "toolset", - "Torchvision", - "tpmrm", - "tpmstate", - "tput", - "Tunables", - "unet", - "Uninstallation", - "unixio", - "unsharp", - "Unsharp", - "Unsh", - "Unsqueeze", - "Usecase", - "usecases", - "USERPROFILE", - "userspace", - "VAAPI", - "valgrind", - "vcpkg", - "vcvars", - "venv", - "virbr", - "virsh", - "virt", - "virtio", - "VMHWM", - "VMRSS", - "VNNI", - "vtune", - "vtunesummary", - "vtunebottonup", - "WHOLEARCHIVE", - "WDDM", - "WORKDIR", - "WORKSIZE", - "xbyak", - "Xbyak", - "xdot", - "xvfz", - "yocto", - "yolo", - "YOLO", - "yolov", - "Yolov", - "YXFB", - "zstd" - ], - "ignoreWords": [], - "import": [] -} diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index 0134ed15215541..739c411dcbe7e5 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -16,7 +16,7 @@ OpenVINO Release Notes -2024.6 - 18 December 2024 +2025.0 - 05 February 2025 
############################# :doc:`System Requirements <./release-notes-openvino/system-requirements>` | :doc:`Release policy <./release-notes-openvino/release-policy>` | :doc:`Installation Guides <./../get-started/install-openvino>` @@ -26,10 +26,9 @@ OpenVINO Release Notes What's new +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -* OpenVINO 2024.6 release includes updates for enhanced stability and improved LLM performance. -* Introduced support for Intel® Arc™ B-Series Graphics (formerly known as Battlemage). -* Implemented optimizations to improve the inference time and LLM performance on NPUs. -* Improved LLM performance with GenAI API optimizations and bug fixes. +* . +* . + @@ -39,26 +38,19 @@ OpenVINO™ Runtime CPU Device Plugin ----------------------------------------------------------------------------------------------- -* KV cache now uses asymmetric 8-bit unsigned integer (U8) as the default precision, reducing - memory stress for LLMs and increasing their performance. This option can be controlled by - model meta data. -* Quality and accuracy has been improved for selected models with several bug fixes. +* . +* . GPU Device Plugin ----------------------------------------------------------------------------------------------- -* Device memory copy optimizations have been introduced for inference with **Intel® Arc™ B-Series - Graphics** (formerly known as Battlemage). Since it does not utilize L2 cache for copying memory - between the device and host, a dedicated `copy` operation is used, if inputs or results are - not expected in the device memory. -* ChatGLM4 inference on GPU has been optimized. +* . +* . NPU Device Plugin ----------------------------------------------------------------------------------------------- -* LLM performance and inference time has been improved with memory optimizations. - - +* . @@ -98,14 +90,10 @@ Previous 2025 releases .. 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -.. dropdown:: 2024.5 - 20 November 2024 +.. dropdown:: 2024.6 - 18 December 2024 :animate: fade-in-slide-down :color: secondary - **What's new** - - * More GenAI coverage and framework integrations to minimize code changes. - @@ -126,74 +114,44 @@ page. -Discontinued in 2024 +Discontinued in 2025 ----------------------------- * Runtime components: - * Intel® Gaussian & Neural Accelerator (Intel® GNA). Consider using the Neural Processing - Unit (NPU) for low-powered systems like Intel® Core™ Ultra or 14th generation and beyond. - * OpenVINO C++/C/Python 1.0 APIs (see - `2023.3 API transition guide `__ - for reference). - * All ONNX Frontend legacy API (known as ONNX_IMPORTER_API). - * ``PerfomanceMode.UNDEFINED`` property as part of the OpenVINO Python API. + * OpenVINO property Affinity API is no longer available. It has been replaced with CPU + binding configurations (``ov::hint::enable_cpu_pinning``). * Tools: - * Deployment Manager. See :doc:`installation <../get-started/install-openvino>` and - :doc:`deployment <../get-started/install-openvino>` guides for current distribution - options. - * `Accuracy Checker `__. - * `Post-Training Optimization Tool `__ - (POT). Neural Network Compression Framework (NNCF) should be used instead. - * A `Git patch `__ - for NNCF integration with `huggingface/transformers `__. - The recommended approach is to use `huggingface/optimum-intel `__ - for applying NNCF optimization on top of models from Hugging Face. - * Support for Apache MXNet, Caffe, and Kaldi model formats. Conversion to ONNX may be used - as a solution. - * The macOS x86_64 debug bins are no longer provided with the OpenVINO toolkit, starting - with OpenVINO 2024.5. - * Python 3.8 is no longer supported, starting with OpenVINO 2024.5.
- - * As MxNet doesn't support Python version higher than 3.8, according to the - `MxNet PyPI project `__, - it is no longer supported by OpenVINO, either. - - * Discrete Keem Bay support is no longer supported, starting with OpenVINO 2024.5. - * Support for discrete devices (formerly codenamed Raptor Lake) is no longer available for - NPU. + * Intel® Streaming SIMD Extensions (Intel® SSE) are currently not enabled in the binary + package by default. They are still supported in the source code form. + * The OpenVINO™ Development Tools package (pip install openvino-dev) is no longer available + for OpenVINO releases in 2025. + * Model Optimizer is no longer available. Consider using the + :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` + instead. For more details, see the + `model conversion transition guide `__. Deprecated and to be removed in the future -------------------------------------------- -* Intel® Streaming SIMD Extensions (Intel® SSE) will be supported in source code form, but not - enabled in the binary package by default, starting with OpenVINO 2025.0. * Ubuntu 20.04 support will be deprecated in future OpenVINO releases due to the end of standard support. * The openvino-nightly PyPI module will soon be discontinued. End-users should proceed with the Simple PyPI nightly repo instead. More information in `Release Policy `__. -* The OpenVINO™ Development Tools package (pip install openvino-dev) will be removed from - installation options and distribution channels beginning with OpenVINO 2025.0. -* Model Optimizer will be discontinued with OpenVINO 2025.0. Consider using the - :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` - instead. For more details, see the - `model conversion transition guide `__. -* OpenVINO property Affinity API will be discontinued with OpenVINO 2025.0. - It will be replaced with CPU binding configurations (``ov::hint::enable_cpu_pinning``).
- - - +* “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the + future. OpenVINO's dynamic shape models are recommended instead. +* MacOS x86 is no longer recommended for use due to the discontinuation of validation. + Full support will be removed later in 2025. +* The `openvino` namespace of the OpenVINO Python API has been redesigned, removing the nested + `openvino.runtime` module. The old namespace is now considered deprecated and will be + discontinued in 2026.0. - * “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the - future. OpenVINO's dynamic shape models are recommended instead. -* Starting with 2025.0 MacOS x86 is no longer recommended for use due to the discontinuation - of validation. Full support will be removed later in 2025. diff --git a/docs/articles_en/get-started/configurations/genai-dependencies.rst b/docs/articles_en/get-started/configurations/genai-dependencies.rst index 6eec18a74f0f05..13e28107f69d63 100644 --- a/docs/articles_en/get-started/configurations/genai-dependencies.rst +++ b/docs/articles_en/get-started/configurations/genai-dependencies.rst @@ -4,8 +4,8 @@ OpenVINO™ GenAI Dependencies OpenVINO™ GenAI depends on both `OpenVINO `__ and `OpenVINO Tokenizers `__. During OpenVINO™ GenAI installation from PyPi, the same versions of OpenVINO and OpenVINO Tokenizers -are used (e.g. ``openvino==2024.6.0`` and ``openvino-tokenizers==2024.6.0.0`` are installed for -``openvino-genai==2024.6.0``). +are used (e.g. ``openvino==2025.0.0`` and ``openvino-tokenizers==2025.0.0.0`` are installed for +``openvino-genai==2025.0.0``). 
Trying to update any of the dependency packages might result in a version incompatibility due to different Application Binary Interfaces (ABIs), which will result in errors while running diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 401aa79213e6d7..387a0bf2ab37e3 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -1,4 +1,4 @@ -Install OpenVINO™ 2024.6 +Install OpenVINO™ 2025.0 ========================== @@ -23,10 +23,10 @@ Install OpenVINO™ 2024.6 -OpenVINO 2024.6, described here, is not a Long-Term-Support version! +OpenVINO 2025.0, described here, is not a Long-Term-Support version! All currently supported versions are: -* 2024.6 (development) +* 2025.0 (development) * 2023.3 (LTS) diff --git a/docs/articles_en/openvino-workflow-generative.rst b/docs/articles_en/openvino-workflow-generative.rst index a4fa53335988ae..14521f118f6dfc 100644 --- a/docs/articles_en/openvino-workflow-generative.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -40,7 +40,7 @@ options: `Check out the OpenVINO GenAI Quick-start Guide [PDF] `__ - .. tab-item:: Hugging Face integration + .. tab-item:: Optimum Intel (Hugging Face integration) | - Suggested for prototyping and, if the use case is not covered by OpenVINO GenAI, production. | - Bigger footprint and more dependencies. @@ -55,10 +55,16 @@ options: as well as conversion on the fly. For integration with the final product it may offer lower performance, though. -Note that the base version of OpenVINO may also be used to run generative AI. Although it may -offer a simpler environment, with fewer dependencies, it has significant limitations and a more -demanding implementation process. For reference, see -`the article on generative AI usage of OpenVINO 2024.6 `__. + .. 
tab-item:: Base OpenVINO (not recommended) + + Note that the base version of OpenVINO may also be used to run generative AI. Although it may + offer a simpler environment, with fewer dependencies, it has significant limitations and a more + demanding implementation process. + + To learn more, refer to the article for the 2024.6 OpenVINO version: + `Generative AI with Base OpenVINO `__ + + The advantages of using OpenVINO for generative model deployment: diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst index f1018b82cf40ee..ce243dbd87f9ae 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst @@ -621,7 +621,7 @@ Two types of map entries are possible: descriptor and container. Descriptor sets the expected structure and possible parameter values of the map. For possible low-level properties and their description, refer to the header file: -`remote_properties.hpp `__. +`remote_properties.hpp `__. 
Examples ########################################################### diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst index 913d0090b92a52..a704833b374f19 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst @@ -88,7 +88,7 @@ The ``ov::CompiledModel`` class is also extended to support the properties: * ``ov::CompiledModel::set_property`` For documentation about OpenVINO common device-independent properties, refer to -`properties.hpp (GitHub) `__. +`properties.hpp (GitHub) `__. Device-specific configuration keys can be found in a corresponding device folders, for example, ``openvino/runtime/intel_gpu/properties.hpp``. diff --git a/docs/dev/ov_dependencies.txt b/docs/dev/ov_dependencies.txt index cb64e4d5a6534c..71c9c906f9640d 100644 --- a/docs/dev/ov_dependencies.txt +++ b/docs/dev/ov_dependencies.txt @@ -1,6 +1,6 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -#This file provides a comprehensive list of all dependencies of OpenVINO 2024.6 +#This file provides a comprehensive list of all dependencies of OpenVINO 2025.0 #The file is part of the automation pipeline for posting OpenVINO IR models on the HuggingFace Hub, including OneBOM dependency checks. diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index d0da8fa4244dd6..b4e1039248f3a0 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -1,5 +1,5 @@ ============================ -OpenVINO 2024.6 +OpenVINO 2025.0 ============================ .. 
meta:: diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/samples/cpp/benchmark/sync_benchmark/README.md index b1eb079216064d..7cbc0f26624fa6 100644 --- a/samples/cpp/benchmark/sync_benchmark/README.md +++ b/samples/cpp/benchmark/sync_benchmark/README.md @@ -1,6 +1,6 @@ # Sync Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) @@ -8,8 +8,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | -------------------------------| -------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/samples/cpp/benchmark/throughput_benchmark/README.md index 43633498321c1e..bf8e7e6c8b6291 100644 --- a/samples/cpp/benchmark/throughput_benchmark/README.md +++ b/samples/cpp/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. 
Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. @@ -10,8 +10,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| -------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/hello_reshape_ssd/README.md b/samples/cpp/hello_reshape_ssd/README.md index bc346e850cf5ba..1359b07fdf27b5 100644 --- a/samples/cpp/hello_reshape_ssd/README.md +++ b/samples/cpp/hello_reshape_ssd/README.md @@ -9,7 +9,7 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| 
-----------------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [person-detection-retail-0013](https://docs.openvino.ai/2024/omz_models_model_person_detection_retail_0013.html) | +| Validated Models | [person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | | Other language realization | [Python](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/hello-reshape-ssd.html) | diff --git a/samples/js/node/notebooks/hello-detection.nnb b/samples/js/node/notebooks/hello-detection.nnb index 60640b3bd042ea..e5c6f43f92a550 100644 --- a/samples/js/node/notebooks/hello-detection.nnb +++ b/samples/js/node/notebooks/hello-detection.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://docs.openvino.ai/2023.0/omz_models_model_horizontal_text_detection_0001.html) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." 
+ "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/horizontal-text-detection-0001) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-segmentation.nnb b/samples/js/node/notebooks/hello-segmentation.nnb index a7da34a2799edf..31873f1e1528df 100644 --- a/samples/js/node/notebooks/hello-segmentation.nnb +++ b/samples/js/node/notebooks/hello-segmentation.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://docs.openvino.ai/2023.0/omz_models_model_road_segmentation_adas_0001.html) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. The model recognizes four classes: background, road, curb and mark.\n" + "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/road-segmentation-adas-0001) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. 
The model recognizes four classes: background, road, curb and mark.\n" ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-world.nnb b/samples/js/node/notebooks/hello-world.nnb index 83d4ca8bec29f5..4da8eb3b4b334c 100644 --- a/samples/js/node/notebooks/hello-world.nnb +++ b/samples/js/node/notebooks/hello-world.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://docs.openvino.ai/2023.0/omz_models_model_mobilenet_v3_small_1_0_224_tf.html) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " + "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v3-small-1.0-224-tf) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " ], "outputs": [] }, diff --git a/samples/python/benchmark/bert_benchmark/README.md b/samples/python/benchmark/bert_benchmark/README.md index 84ddcba1e598a4..2894c5f33d633b 100644 --- a/samples/python/benchmark/bert_benchmark/README.md +++ b/samples/python/benchmark/bert_benchmark/README.md @@ -1,6 +1,6 @@ # Bert Benchmark Python Sample -This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. 
Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/bert-benchmark.html) diff --git a/samples/python/benchmark/sync_benchmark/README.md b/samples/python/benchmark/sync_benchmark/README.md index 4ce1329277b5b8..c7604386625572 100644 --- a/samples/python/benchmark/sync_benchmark/README.md +++ b/samples/python/benchmark/sync_benchmark/README.md @@ -1,19 +1,19 @@ # Sync Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) ## Requirements -| Options | Values | -| ----------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| ----------------------------| ----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: diff --git a/samples/python/benchmark/throughput_benchmark/README.md b/samples/python/benchmark/throughput_benchmark/README.md index 1ff02319ade062..5214c1190bb5e9 100644 --- a/samples/python/benchmark/throughput_benchmark/README.md +++ 
b/samples/python/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses default model precision which is usually float32. 
@@ -8,14 +8,14 @@ For more detailed information on how this sample works, check the dedicated [art ## Requirements -| Options | Values | -| -------------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html) | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| -------------------------------| -----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf) | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: From 7f56fcd4658c6a427111ac835e809ddd87f0cad2 Mon Sep 17 00:00:00 2001 From: Bharat Jain <152432505+itsbharatj@users.noreply.github.com> Date: Wed, 22 Jan 2025 01:30:07 +0530 Subject: [PATCH 35/35] Fixed copyright line on the codebase (#28593) ### Details: Inconsistencies in the `ending year` of the copyright line of the 
codebase. Identified and made all of them to `2025` Fixed the copyright information for all the files in codebase. --- .github/github_org_control/check_org.py | 2 +- .github/github_org_control/check_pr.py | 2 +- .github/github_org_control/configs.py | 2 +- .github/github_org_control/github_api.py | 2 +- .github/github_org_control/ldap_api.py | 2 +- docs/articles_en/assets/snippets/ov_dynamic_shapes.c | 2 +- docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp | 2 +- docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp | 2 +- docs/openvino_sphinx_theme/setup.py | 2 +- docs/scripts/articles_helper.py | 2 +- docs/scripts/create_mapping.py | 2 +- docs/snippets/example_itask_executor.cpp | 2 +- src/bindings/c/include/openvino/c/ov_remote_context.h | 2 +- src/bindings/c/src/ov_remote_context.cpp | 2 +- src/bindings/c/tests/ov_remote_context_test.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/op_extension.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/op_extension.hpp | 2 +- .../include/low_precision/gather.hpp | 2 +- src/common/low_precision_transformations/src/gather.cpp | 2 +- .../tests/gather_transformation.cpp | 2 +- src/common/snippets/include/snippets/op/reg_spill.hpp | 2 +- src/common/snippets/src/generator.cpp | 2 +- src/common/snippets/src/op/reg_spill.cpp | 2 +- .../transformations/op_conversions/convert_convertlike.hpp | 2 +- .../op_conversions/convert_scatter_nd_update15_downgrade.hpp | 2 +- .../mark_decompression_convert_constant_folding.cpp | 2 +- .../src/transformations/op_conversions/convert_convertlike.cpp | 2 +- .../op_conversions/convert_scatter_nd_update15_downgrade.cpp | 2 +- .../tests/common_optimizations/convert_convertlike.cpp | 2 +- src/common/util/include/openvino/util/cpp_version.hpp | 2 +- src/common/util/include/openvino/util/file_path.hpp | 2 +- src/core/include/openvino/core/preprocess/padding_mode.hpp | 2 +- src/core/src/op/fake_convert.cpp | 2 +- src/core/tests/pass/serialization/from_model.cpp | 2 +- 
src/core/tests/type_prop/col2im.cpp | 2 +- src/core/tests/type_prop/region_yolo.cpp | 2 +- src/core/tests/type_prop/rms_norm.cpp | 2 +- src/core/tests/type_prop/slice_scatter.cpp | 2 +- src/core/tests/type_prop/stft.cpp | 2 +- src/core/tests/type_prop/string_tensor_pack.cpp | 2 +- src/core/tests/type_prop/string_tensor_unpack.cpp | 2 +- src/frontends/ir/tests/meta_data_tests.cpp | 2 +- src/frontends/ir/tests/threading_tests.cpp | 2 +- .../frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp | 2 +- src/frontends/onnx/frontend/src/op/unique.cpp | 2 +- src/frontends/onnx/frontend/src/utils/norm.hpp | 2 +- src/frontends/onnx/frontend/src/utils/split.hpp | 2 +- src/frontends/onnx/tests/conversion.cpp | 2 +- .../paddle/include/openvino/frontend/paddle/extension/op.hpp | 2 +- .../paddle/src/internal/pass/transform_fakequantize.cpp | 2 +- .../paddle/src/internal/pass/transform_fakequantize.hpp | 2 +- src/frontends/paddle/src/op/dequantize_linear.cpp | 2 +- src/frontends/paddle/src/op/generate_proposals_v2.cpp | 2 +- src/frontends/paddle/src/op/quantize_linear.cpp | 2 +- src/frontends/paddle/src/op/round.cpp | 2 +- src/frontends/paddle/src/op/top_k_v2.cpp | 2 +- src/frontends/paddle/tests/conversion.cpp | 2 +- src/frontends/paddle/tests/op_extension.cpp | 2 +- .../paddle/tests/test_models/gen_scripts/generate_roll.py | 2 +- .../paddle/tests/test_models/gen_scripts/generate_round.py | 2 +- src/frontends/pytorch/src/op/any.cpp | 2 +- src/frontends/pytorch/src/op/index_copy_.cpp | 2 +- src/frontends/pytorch/src/op/index_fill_.cpp | 2 +- src/frontends/tensorflow/tests/conversion.cpp | 2 +- src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp | 2 +- src/frontends/tensorflow_lite/tests/conversion.cpp | 2 +- .../tests/frontend/shared/include/conversion_extension.hpp | 2 +- src/frontends/tests/frontend/shared/src/conversion.cpp | 2 +- src/inference/include/openvino/runtime/intel_npu/properties.hpp | 2 +- src/plugins/auto/tests/functional/behavior/auto_func_test.hpp | 
2 +- src/plugins/auto/tests/functional/behavior/io_tensor.hpp | 2 +- src/plugins/auto/tests/unit/include/auto_unit_test.hpp | 2 +- .../auto/tests/unit/infer_request_schedule_policy_test.cpp | 2 +- src/plugins/auto/tests/unit/meta_device_check_test.cpp | 2 +- src/plugins/hetero/tests/functional/hetero_tests.hpp | 2 +- .../behavior/ov_compiled_model/import_export.cpp | 2 +- .../behavior/ov_compiled_model/properties.cpp | 2 +- .../behavior/ov_infer_request/infer_request_dynamic.cpp | 2 +- .../behavior/ov_infer_request/inference_chaining.cpp | 2 +- .../behavior/ov_infer_request/io_tensor.cpp | 2 +- .../behavior/ov_plugin/properties_tests.cpp | 2 +- src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h | 2 +- src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp | 2 +- src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h | 2 +- src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp | 2 +- src/plugins/intel_cpu/src/nodes/common/reorder_prim.h | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/convert.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/convert.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/deconv.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/executor.hpp | 2 +- 
.../intel_cpu/src/nodes/executors/executor_implementation.hpp | 2 +- .../src/nodes/executors/fullyconnected_implementations.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/pooling.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/transpose.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/transpose.hpp | 2 +- .../cpu_opset/common/op/causal_mask_preprocess.cpp | 2 +- .../cpu_opset/common/op/causal_mask_preprocess.hpp | 2 +- .../cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp | 2 +- .../cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp | 2 +- .../transformations/cpu_opset/common/pass/fc_bias_fusion.cpp | 2 +- .../transformations/cpu_opset/common/pass/fc_bias_fusion.hpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_copy_b.cpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_copy_b.hpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_cpu.hpp | 2 +- .../transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp | 2 +- .../transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/factory.cpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/factory.hpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp | 2 +- 
src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp | 2 +- .../src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp | 2 +- .../src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp | 2 +- .../tpp/x64/pass/lowered/set_tpp_leading_dim.cpp | 2 +- .../tpp/x64/pass/lowered/set_tpp_leading_dim.hpp | 2 +- .../src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp | 2 +- src/plugins/intel_cpu/src/utils/enum_class_hash.hpp | 2 +- .../single_layer_tests/classes/convolution_backprop_data.cpp | 2 +- .../single_layer_tests/classes/convolution_backprop_data.hpp | 2 +- .../instances/arm/convolution_backprop_data.cpp | 2 +- .../instances/common/convolution_backprop_data.cpp | 2 +- .../instances/x64/convolution_backprop_data.cpp | 2 +- .../custom/subgraph_tests/src/classes/eltwise_chain.cpp | 2 +- .../custom/subgraph_tests/src/classes/eltwise_chain.hpp | 2 +- .../src/common/index_add_scatter_elements_update.cpp | 2 +- .../custom/subgraph_tests/src/common/inplace_resolve_io.cpp | 2 +- .../subgraph_tests/src/common/merge_transpose_reorder.cpp | 2 +- .../subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp | 2 +- .../behavior/compiled_model/cpu_reservation_test.cpp | 2 +- .../behavior/ov_infer_request/inference.cpp | 2 +- .../low_precision_transformations/x64/gather_transformation.cpp | 2 +- src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp | 2 +- .../tests/unit/streams_info/update_proc_table_test.cpp | 2 +- .../intel_gpu/include/intel_gpu/graph/serialization/utils.hpp | 2 +- src/plugins/intel_gpu/src/graph/paged_attention.cpp 
| 2 +- .../kernels/eltwise/eltwise_kernel_blocked_opt.cpp | 2 +- .../kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp | 2 +- .../kernels/permute/permute_kernel_bfzyx_to_bfyxz.h | 2 +- .../tests/functional/concurrency/gpu_reservation_test.cpp | 2 +- .../fuse_dequantize_to_fq_transformation.cpp | 2 +- .../low_precision_transformations/gather_transformation.cpp | 2 +- .../tests/functional/single_layer_tests/dynamic/broadcast.cpp | 2 +- src/plugins/proxy/tests/proxy_tests.hpp | 2 +- src/plugins/template/tests/functional/op_reference/loop.cpp | 2 +- .../template/tests/functional/op_reference/tensor_iterator.cpp | 2 +- .../behavior/ov_infer_request/batched_tensors.cpp | 2 +- .../behavior/ov_infer_request/inference.cpp | 2 +- .../include/behavior/ov_infer_request/batched_tensors.hpp | 2 +- .../shared/include/behavior/ov_infer_request/inference.hpp | 2 +- .../low_precision_transformations/gather_transformation.hpp | 2 +- .../shared/src/behavior/ov_infer_request/batched_tensors.cpp | 2 +- .../plugin/shared/src/behavior/ov_infer_request/inference.cpp | 2 +- .../fuse_dequantize_to_fake_quantize_transformation.cpp | 2 +- .../src/low_precision_transformations/gather_transformation.cpp | 2 +- .../shared_test_classes/base/utils/calculate_thresholds.hpp | 2 +- .../include/shared_test_classes/base/utils/compare_results.hpp | 2 +- .../include/shared_test_classes/base/utils/generate_inputs.hpp | 2 +- .../include/shared_test_classes/base/utils/ranges.hpp | 2 +- .../shared_test_classes/src/base/utils/generate_inputs.cpp | 2 +- .../ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp | 2 +- src/tests/ov_helpers/ov_lpt_models/src/gather.cpp | 2 +- .../include/common_test_utils/node_builders/broadcast.hpp | 2 +- .../include/common_test_utils/node_builders/reshape.hpp | 2 +- .../test_utils/common_test_utils/src/node_builders/reshape.cpp | 2 +- tests/layer_tests/pytorch_tests/test_constant_pad_nd.py | 2 +- tests/layer_tests/pytorch_tests/test_hardtanh.py | 2 +- 
tests/layer_tests/pytorch_tests/test_index_copy_.py | 2 +- tests/layer_tests/pytorch_tests/test_index_fill_.py | 2 +- tests/layer_tests/pytorch_tests/test_isfinite.py | 2 +- tests/layer_tests/pytorch_tests/test_isinf.py | 2 +- tests/layer_tests/pytorch_tests/test_isnan.py | 2 +- tests/layer_tests/pytorch_tests/test_select_scatter.py | 2 +- tests/layer_tests/pytorch_tests/test_slice_scatter.py | 2 +- tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py | 2 +- tests/time_tests/src/timetests/timetest_infer_api_2.cpp | 2 +- 202 files changed, 202 insertions(+), 202 deletions(-) diff --git a/.github/github_org_control/check_org.py b/.github/github_org_control/check_org.py index ab59d05066a630..7dd256e4c1fef8 100644 --- a/.github/github_org_control/check_org.py +++ b/.github/github_org_control/check_org.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/check_pr.py b/.github/github_org_control/check_pr.py index 08cf6f4b4dbfff..79db5940e24a58 100644 --- a/.github/github_org_control/check_pr.py +++ b/.github/github_org_control/check_pr.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/configs.py b/.github/github_org_control/configs.py index 3df12803c77de0..be93540cd4aa03 100644 --- a/.github/github_org_control/configs.py +++ b/.github/github_org_control/configs.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/github_api.py b/.github/github_org_control/github_api.py index 6f9d14c5376742..581921f3943a1e 100644 --- a/.github/github_org_control/github_api.py +++ b/.github/github_org_control/github_api.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel 
Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/ldap_api.py b/.github/github_org_control/ldap_api.py index c0f7e2c18117ff..3c68242c40cf75 100644 --- a/.github/github_org_control/ldap_api.py +++ b/.github/github_org_control/ldap_api.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c index fa1f3158365ddf..68cbef8ab0159e 100644 --- a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c +++ b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp b/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp index c4e9002a9a61db..63d68516aa1c36 100644 --- a/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp +++ b/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp b/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp index a5271d148190d0..67afd8ea13029c 100644 --- a/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp +++ b/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/docs/openvino_sphinx_theme/setup.py b/docs/openvino_sphinx_theme/setup.py index 28af421d8d4e4b..0776711a7765f2 100644 --- 
a/docs/openvino_sphinx_theme/setup.py +++ b/docs/openvino_sphinx_theme/setup.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from setuptools import setup diff --git a/docs/scripts/articles_helper.py b/docs/scripts/articles_helper.py index 6b01325fa24a95..1065e8b30f85a4 100644 --- a/docs/scripts/articles_helper.py +++ b/docs/scripts/articles_helper.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse diff --git a/docs/scripts/create_mapping.py b/docs/scripts/create_mapping.py index e36bfb53184fbc..b1094dd936f021 100644 --- a/docs/scripts/create_mapping.py +++ b/docs/scripts/create_mapping.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse diff --git a/docs/snippets/example_itask_executor.cpp b/docs/snippets/example_itask_executor.cpp index e951917249f059..0890518e2f86f9 100644 --- a/docs/snippets/example_itask_executor.cpp +++ b/docs/snippets/example_itask_executor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/c/include/openvino/c/ov_remote_context.h b/src/bindings/c/include/openvino/c/ov_remote_context.h index 07ce1cfbe1fd73..b3dbb57f62a886 100644 --- a/src/bindings/c/include/openvino/c/ov_remote_context.h +++ b/src/bindings/c/include/openvino/c/ov_remote_context.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/c/src/ov_remote_context.cpp b/src/bindings/c/src/ov_remote_context.cpp index f1b9d7cbd6aacf..069802346d9cf9 100644 --- a/src/bindings/c/src/ov_remote_context.cpp +++ 
b/src/bindings/c/src/ov_remote_context.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "openvino/c/ov_remote_context.h" diff --git a/src/bindings/c/tests/ov_remote_context_test.cpp b/src/bindings/c/tests/ov_remote_context_test.cpp index d0d278acef94aa..4f13fe3fcebb6c 100644 --- a/src/bindings/c/tests/ov_remote_context_test.cpp +++ b/src/bindings/c/tests/ov_remote_context_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/op_extension.cpp b/src/bindings/python/src/pyopenvino/graph/op_extension.cpp index 9922493efdf28d..70834f313264db 100644 --- a/src/bindings/python/src/pyopenvino/graph/op_extension.cpp +++ b/src/bindings/python/src/pyopenvino/graph/op_extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/op_extension.hpp b/src/bindings/python/src/pyopenvino/graph/op_extension.hpp index 1f5f0e42d0c702..5fd0117218bb6c 100644 --- a/src/bindings/python/src/pyopenvino/graph/op_extension.hpp +++ b/src/bindings/python/src/pyopenvino/graph/op_extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp index 980ec8f1e9b992..73be6a880a80ae 100644 --- a/src/common/low_precision_transformations/include/low_precision/gather.hpp +++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright 
(C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/src/gather.cpp b/src/common/low_precision_transformations/src/gather.cpp index 4c5959d5c373e0..437fae10ec0d1d 100644 --- a/src/common/low_precision_transformations/src/gather.cpp +++ b/src/common/low_precision_transformations/src/gather.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/tests/gather_transformation.cpp b/src/common/low_precision_transformations/tests/gather_transformation.cpp index d710709ca69229..79a581e50d589c 100644 --- a/src/common/low_precision_transformations/tests/gather_transformation.cpp +++ b/src/common/low_precision_transformations/tests/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/include/snippets/op/reg_spill.hpp b/src/common/snippets/include/snippets/op/reg_spill.hpp index 84fe0b4da609c1..93ff1738830964 100644 --- a/src/common/snippets/include/snippets/op/reg_spill.hpp +++ b/src/common/snippets/include/snippets/op/reg_spill.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp index 144fab766e739b..bb6bd636a791ac 100644 --- a/src/common/snippets/src/generator.cpp +++ b/src/common/snippets/src/generator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/src/op/reg_spill.cpp b/src/common/snippets/src/op/reg_spill.cpp index 0eef459a47ac62..f09b2a419cf3fd 100644 --- 
a/src/common/snippets/src/op/reg_spill.cpp +++ b/src/common/snippets/src/op/reg_spill.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp index 5952fc114b76fd..94352953df1e82 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp index 4af9172e6351cb..ea36cfddef4eed 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp b/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp index 7724692be61662..8393e0ac1e97f8 100644 --- a/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp @@ 
-1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp index aa80d1e35af1e4..c04260917ca55d 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp b/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp index 02bb4cbad5a94b..d72721be467a63 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp index 785559e4fef9e6..3ddafc7be0df0c 100644 --- a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp +++ b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/util/include/openvino/util/cpp_version.hpp 
b/src/common/util/include/openvino/util/cpp_version.hpp index c0998588027c2a..b250df6a38b2a2 100644 --- a/src/common/util/include/openvino/util/cpp_version.hpp +++ b/src/common/util/include/openvino/util/cpp_version.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/util/include/openvino/util/file_path.hpp b/src/common/util/include/openvino/util/file_path.hpp index 9080ea5289a51e..34c326e67ec391 100644 --- a/src/common/util/include/openvino/util/file_path.hpp +++ b/src/common/util/include/openvino/util/file_path.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/include/openvino/core/preprocess/padding_mode.hpp b/src/core/include/openvino/core/preprocess/padding_mode.hpp index c1391628e8f50b..5d20859397e837 100644 --- a/src/core/include/openvino/core/preprocess/padding_mode.hpp +++ b/src/core/include/openvino/core/preprocess/padding_mode.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/src/op/fake_convert.cpp b/src/core/src/op/fake_convert.cpp index 517674402ef872..71f7aed3f65e8b 100644 --- a/src/core/src/op/fake_convert.cpp +++ b/src/core/src/op/fake_convert.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/pass/serialization/from_model.cpp b/src/core/tests/pass/serialization/from_model.cpp index b1c3f0bad6212c..9999426d6c6431 100644 --- a/src/core/tests/pass/serialization/from_model.cpp +++ b/src/core/tests/pass/serialization/from_model.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/col2im.cpp b/src/core/tests/type_prop/col2im.cpp index c376cdcf39d264..5532d210f760fc 100644 --- a/src/core/tests/type_prop/col2im.cpp +++ b/src/core/tests/type_prop/col2im.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/region_yolo.cpp b/src/core/tests/type_prop/region_yolo.cpp index 96dc868a0354f3..5eb2b317d35e7c 100644 --- a/src/core/tests/type_prop/region_yolo.cpp +++ b/src/core/tests/type_prop/region_yolo.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/rms_norm.cpp b/src/core/tests/type_prop/rms_norm.cpp index ca7155722241b7..97367d9a6a4959 100644 --- a/src/core/tests/type_prop/rms_norm.cpp +++ b/src/core/tests/type_prop/rms_norm.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/slice_scatter.cpp b/src/core/tests/type_prop/slice_scatter.cpp index fad6dd70349606..2be2d73d6e23fd 100644 --- a/src/core/tests/type_prop/slice_scatter.cpp +++ b/src/core/tests/type_prop/slice_scatter.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/stft.cpp b/src/core/tests/type_prop/stft.cpp index 2969af4e5a43bd..4ee5098797d3c8 100644 --- a/src/core/tests/type_prop/stft.cpp +++ b/src/core/tests/type_prop/stft.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/string_tensor_pack.cpp 
b/src/core/tests/type_prop/string_tensor_pack.cpp index a81aa8eeb1ffd4..4d40f9a3782c15 100644 --- a/src/core/tests/type_prop/string_tensor_pack.cpp +++ b/src/core/tests/type_prop/string_tensor_pack.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/string_tensor_unpack.cpp b/src/core/tests/type_prop/string_tensor_unpack.cpp index afdd44706635f5..37efe08b81120a 100644 --- a/src/core/tests/type_prop/string_tensor_unpack.cpp +++ b/src/core/tests/type_prop/string_tensor_unpack.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/ir/tests/meta_data_tests.cpp b/src/frontends/ir/tests/meta_data_tests.cpp index eba4f38af67913..2af1e0114222b9 100644 --- a/src/frontends/ir/tests/meta_data_tests.cpp +++ b/src/frontends/ir/tests/meta_data_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/ir/tests/threading_tests.cpp b/src/frontends/ir/tests/threading_tests.cpp index 7dc1ca193ddb97..a83d53b5151305 100644 --- a/src/frontends/ir/tests/threading_tests.cpp +++ b/src/frontends/ir/tests/threading_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp index 147e45301316a3..85700bddcfc01c 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation 
+// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/op/unique.cpp b/src/frontends/onnx/frontend/src/op/unique.cpp index bc842624474ccd..c1d0886181af4a 100644 --- a/src/frontends/onnx/frontend/src/op/unique.cpp +++ b/src/frontends/onnx/frontend/src/op/unique.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/utils/norm.hpp b/src/frontends/onnx/frontend/src/utils/norm.hpp index 964becc2f0db04..656dcd8ed1cd1a 100644 --- a/src/frontends/onnx/frontend/src/utils/norm.hpp +++ b/src/frontends/onnx/frontend/src/utils/norm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/frontends/onnx/frontend/src/utils/split.hpp b/src/frontends/onnx/frontend/src/utils/split.hpp index 5cdbaf287eb90b..809d2aec8d2d28 100644 --- a/src/frontends/onnx/frontend/src/utils/split.hpp +++ b/src/frontends/onnx/frontend/src/utils/split.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/frontends/onnx/tests/conversion.cpp b/src/frontends/onnx/tests/conversion.cpp index 237712e60b2725..94d735761b30a6 100644 --- a/src/frontends/onnx/tests/conversion.cpp +++ b/src/frontends/onnx/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp index 68cea85c19cc44..5dc1499a39080e 100644 --- a/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp 
+++ b/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp index 4ab7557c4be2cb..d4d933721ee200 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp index 19abfcbf260d73..23f73f53597a43 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/dequantize_linear.cpp b/src/frontends/paddle/src/op/dequantize_linear.cpp index 271b938c17ab43..f30055bf889bca 100644 --- a/src/frontends/paddle/src/op/dequantize_linear.cpp +++ b/src/frontends/paddle/src/op/dequantize_linear.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/generate_proposals_v2.cpp b/src/frontends/paddle/src/op/generate_proposals_v2.cpp index 2df436357bbb22..47547fd46a778b 100644 --- a/src/frontends/paddle/src/op/generate_proposals_v2.cpp +++ b/src/frontends/paddle/src/op/generate_proposals_v2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 
Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/quantize_linear.cpp b/src/frontends/paddle/src/op/quantize_linear.cpp index 99e12cd4d0efb4..43fcabd3747819 100644 --- a/src/frontends/paddle/src/op/quantize_linear.cpp +++ b/src/frontends/paddle/src/op/quantize_linear.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/round.cpp b/src/frontends/paddle/src/op/round.cpp index f981fa1e841843..5ce02ffe89bde9 100644 --- a/src/frontends/paddle/src/op/round.cpp +++ b/src/frontends/paddle/src/op/round.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/top_k_v2.cpp b/src/frontends/paddle/src/op/top_k_v2.cpp index 8f51920f05d1a2..cfb113c7a55102 100644 --- a/src/frontends/paddle/src/op/top_k_v2.cpp +++ b/src/frontends/paddle/src/op/top_k_v2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "default_opset.hpp" diff --git a/src/frontends/paddle/tests/conversion.cpp b/src/frontends/paddle/tests/conversion.cpp index 9bcbf9b855765c..c2ad29a42a3303 100644 --- a/src/frontends/paddle/tests/conversion.cpp +++ b/src/frontends/paddle/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/tests/op_extension.cpp b/src/frontends/paddle/tests/op_extension.cpp index e8843c10c475bc..cbd05bb1f1d212 100644 --- a/src/frontends/paddle/tests/op_extension.cpp +++ b/src/frontends/paddle/tests/op_extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py index 6c53d8091169fc..356f2809a10237 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py index c19492373a3280..1b95b7c7406d99 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/frontends/pytorch/src/op/any.cpp b/src/frontends/pytorch/src/op/any.cpp index a17b8777e5f916..09941914065bdd 100644 --- a/src/frontends/pytorch/src/op/any.cpp +++ b/src/frontends/pytorch/src/op/any.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/pytorch/src/op/index_copy_.cpp b/src/frontends/pytorch/src/op/index_copy_.cpp index c13b53858a9c00..f8acb4db1749f8 100644 --- a/src/frontends/pytorch/src/op/index_copy_.cpp +++ b/src/frontends/pytorch/src/op/index_copy_.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/pytorch/src/op/index_fill_.cpp b/src/frontends/pytorch/src/op/index_fill_.cpp index a24f3fa2f5b1c7..ee0ac618079c3f 100644 --- 
a/src/frontends/pytorch/src/op/index_fill_.cpp +++ b/src/frontends/pytorch/src/op/index_fill_.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow/tests/conversion.cpp b/src/frontends/tensorflow/tests/conversion.cpp index d705a26a147839..db95a045351779 100644 --- a/src/frontends/tensorflow/tests/conversion.cpp +++ b/src/frontends/tensorflow/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp b/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp index 382f6f1914e334..42a6834cc0007d 100644 --- a/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp +++ b/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow_lite/tests/conversion.cpp b/src/frontends/tensorflow_lite/tests/conversion.cpp index cccae7494f85cb..56484a0b9dcf47 100644 --- a/src/frontends/tensorflow_lite/tests/conversion.cpp +++ b/src/frontends/tensorflow_lite/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tests/frontend/shared/include/conversion_extension.hpp b/src/frontends/tests/frontend/shared/include/conversion_extension.hpp index f9932b7ca5352f..6df8f185b1b83d 100644 --- a/src/frontends/tests/frontend/shared/include/conversion_extension.hpp +++ b/src/frontends/tests/frontend/shared/include/conversion_extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tests/frontend/shared/src/conversion.cpp b/src/frontends/tests/frontend/shared/src/conversion.cpp index 1a545b92708d76..95200314fcd645 100644 --- a/src/frontends/tests/frontend/shared/src/conversion.cpp +++ b/src/frontends/tests/frontend/shared/src/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/inference/include/openvino/runtime/intel_npu/properties.hpp b/src/inference/include/openvino/runtime/intel_npu/properties.hpp index 723a8b26f555d4..4d7d14a7ebf389 100644 --- a/src/inference/include/openvino/runtime/intel_npu/properties.hpp +++ b/src/inference/include/openvino/runtime/intel_npu/properties.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp index 69ab036f22e0af..b49fd6f43e243d 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp index c4e000395f3eac..63942f86272d4c 100644 --- a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp 
b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index 0b39b8e57dc8d2..af6aa58c163f4e 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp index cf1ccda20491d3..f8946664579bdf 100644 --- a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp +++ b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/src/plugins/auto/tests/unit/meta_device_check_test.cpp b/src/plugins/auto/tests/unit/meta_device_check_test.cpp index 7881899d925f8c..36f5f57dd31229 100644 --- a/src/plugins/auto/tests/unit/meta_device_check_test.cpp +++ b/src/plugins/auto/tests/unit/meta_device_check_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/hetero_tests.hpp b/src/plugins/hetero/tests/functional/hetero_tests.hpp index 98c2d487761b73..b3bb85ba78a842 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.hpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp index 
7b6730c4169109..9624edcf80b4d2 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "behavior/compiled_model/import_export.hpp" diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp index 11bd48ab42e8c0..4907c9af2f0420 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 711471b9855277..1a2d20d2f61052 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 
ed7d1fe42bdf8a..117b095fe2df87 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index daffaaae81f873..3f4e45f265f16d 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 5b691e8ec83328..9f0f1e72cff6ce 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h index c26cc6aa33a251..dac07c252e902a 100644 --- a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h +++ b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 
Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp index 93f5278b06a4a8..afc057bbe29e0b 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h index b35de8e25fcae9..da0aa4284382dd 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp index 2ee4c0a23bbdab..5223f6d90c0279 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h index 33e12b4045abf9..27774eb4557602 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp 
b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp index 7a8e431b606227..b104ca7d44aa24 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp index 1aae396f25a0fe..dfb09c3a896a04 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp index 6c3799da70bfda..e6b8242210c57d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp index cf11633e662e07..3dae01f201e2dd 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp index 85cd64e26c643c..56c5ba02d4f147 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp +++ 
b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp index 44731d0648d039..5b28c47e2e11fb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp index 45e71acd476bb6..e725a3b244e02a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp index c342f5106c221d..55c66c6fa1cbbb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp index 9afcfac56b14e9..849d7122d45726 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp +++ 
b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp index 38ceb9922eff70..25e3dcf6ae6421 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp index 31ffd979662f8c..649e0d0f058bc0 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0mvn // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp index 95ff85bb8bf851..02a044d89b6959 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp index 5b9479bdf502b6..bfc54e11d42934 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// 
Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp index f970d79c3ed1b2..3d976fa94d8c1e 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp index 1d0e4c877ff8e5..cf0e8fed14be2b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp index 375016038f2b68..6189fac38c06d5 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index 792aacf54a118a..3b702844850744 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: 
Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp index 2f1ca6600bbd14..f89e35409008a4 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp index 21ae249757bf9c..43a567b874a4ac 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp index 42be857ba9dead..3e43bda24119ee 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp index 2c66b9ce56af14..d64e1faf5fa5c8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp index 2b08dc2a320b5d..0ff25d1f3dd59f 
100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp index 99a55d79f58177..b96a2e3c655de7 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp index 59d0447965a803..1cff58995addce 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp index 325ae17f161c93..5af8ad8b48d32e 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp index 1cf34912e2293f..a9e2ca13c1621c 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp index ddf4cf20034d92..278e6e7b16d668 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp index 6e569e62b65a19..2f1101a7a1ec54 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp index a0ea60b9a20a63..9cc83310cd8059 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "causal_mask_preprocess.hpp" diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp index 7628aea386e4e7..19636d0529c681 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp @@ -1,4 +1,4 
@@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp index e2bcac397af164..7e562104f99d08 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp index 4a46a042722a12..16fcc4dd03c24e 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp index d92d2d3627b65b..4d913c4ced102c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp index 5fadd183dfd694..b3ccb3f36d85cf 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp index df05ce5d539f46..7267e4355de1d6 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index bf327784503352..e789c59e21dc4d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp index ddc21e8ddb59d3..b70e8fe122aea7 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 
Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp index 48456b8220300a..e0a87ca288bac1 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp index 245f83c13c3466..177c6a466765e4 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp index 4c5f2925ef0735..ba10db13dbdc98 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp index 4a147f79b2a37e..cda7f58afebea8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp +++ 
b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp index 44aaf251bc201f..b9491d556c8292 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp index e0e890a347a026..173c12173d7835 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp index 9cfcc2f6226205..c7292770d3eb63 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp index d9ecc3629f2430..558bc0216879c8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp 
@@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp index 11fc73b949a55c..b928620706c8b0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp index f66e913f85b6e7..07ed321abc7ff5 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp index 98a107380aa7d4..5855481efd1d60 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp index c619d7b6ab1937..9807dbfafa31d0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 
Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp index 571e292104d132..c042373f054fa2 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp index 6e1d9f110c6aec..2f00abb213dfb5 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp index 63dd44ca133fa0..bd2c96f7db696b 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp index 0b68074c657c15..faaa20c46f1ad3 100644 --- 
a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp index 42c30bb112263c..c1b981275face0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp index 6be200c30b7c1c..e49b48ccbfb47e 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp index 06ca575f314b4b..f5188df53aeb28 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 
// diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp index 2a7e712ab1baea..4e8defeca762c8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp b/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp index 7e17af42d05a97..28796c752decfc 100644 --- a/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp +++ b/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp index 415515ef7f40a2..651e75024987ab 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp index 615bb99225b952..3e797759d21ee0 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp index 6229f9a30d3c45..f13ec1e98faa46 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp index 538d9f48f88114..3af974dc80d35a 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp index 3263396b52521d..4576f283d43534 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp index 760f9ccd6214cf..9ff85a02db4495 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp index 17954b438abd73..885435ba8c2dc3 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp index 0b8086b48a1110..388d7050851e05 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel 
Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp index 2d7442f945630f..26e9e9d59dbec2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp index 81686bd5cd4888..15d4dbc3786ccd 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp index 2da18851bd0031..9166f70660ee3c 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp index 78ee401d169cbb..87a19ede785eba 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp index 50f6d09b4271d2..558896557dd58d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp index 1cbd7152fbccfb..ba70b281461308 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff 
--git a/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp b/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp index a41cb4c4300d42..f520b0b53feae8 100644 --- a/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp +++ b/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp index 08c0cf2c9089e9..8fc607229b79d0 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp index ae912fa9c7519c..72b4b870e74846 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/graph/paged_attention.cpp b/src/plugins/intel_gpu/src/graph/paged_attention.cpp index 48ae46d83de34a..c656cb1f284ae0 100644 --- a/src/plugins/intel_gpu/src/graph/paged_attention.cpp +++ b/src/plugins/intel_gpu/src/graph/paged_attention.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "paged_attention_inst.h" diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp index d95b008171db24..7ecce23a56777f 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp index e2723ed6841746..b969c986aa81fd 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h index 36acd93d225a45..4479e45d1a4a3b 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp index 07d4879257185c..fb9711e7605859 100644 --- 
a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp index 69c8ffe19e56ba..50e3f68f190594 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp index dbd5e3476a7a58..973a899ef01829 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index a3d9a1a9d3465d..1f9cb18db521c7 100644 --- 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/proxy/tests/proxy_tests.hpp b/src/plugins/proxy/tests/proxy_tests.hpp index 014c9ba51aa6b1..075c12c0d7dfa1 100644 --- a/src/plugins/proxy/tests/proxy_tests.hpp +++ b/src/plugins/proxy/tests/proxy_tests.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/template/tests/functional/op_reference/loop.cpp b/src/plugins/template/tests/functional/op_reference/loop.cpp index ffdbc0b8dc6ee2..430b9ee1c76560 100644 --- a/src/plugins/template/tests/functional/op_reference/loop.cpp +++ b/src/plugins/template/tests/functional/op_reference/loop.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp index a2b3d199adf1bc..e6dcdc8900353b 100644 --- a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp +++ b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp index d8e9b3a6284d52..a97fcc4d719a00 100644 --- 
a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp index 2490bcb1a33cc2..e85c54f853b3cd 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp index 91baf94a800241..ec6ecba28d1f23 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp index 492a0a528298fc..79f73403e27252 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp @@ -1,4 +1,4 @@ -// 
Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp index 69cc1c804257f0..2a461cfddb24db 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index b0b926967d1e1a..21fb8dd6b6a9c3 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp index 8ba8d4ee933781..d84519f897986b 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp index fac36d8f56b863..6f5dc0648fcb69 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp index be0e5144163f19..839b9f05f97429 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp index afce941a948a81..f57d8f4caf89ac 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp index 1b04cf83b01b3c..5acab8dfa6e815 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp index d2930be59d5eac..1ac793ca97faa5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index 362258598a1344..3805fde5ce9bfb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index ae963375fc7f5d..b8f41c30b55993 100644 --- 
a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp index cfc92209501e6f..a8b5fcd15100f4 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp index 9d6e8b175b018d..f7f5b8a5716ed7 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp index c3e9cb4ae2cd07..4576caf18b89fd 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp 
b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp index 3c13af77d110ca..44e5ed2303db4c 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp index 7ea8196f39eaf0..8876076d1bed0c 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py b/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py index 7a92983bb1819d..56c2417e7dfea1 100644 --- a/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py +++ b/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/pytorch_tests/test_hardtanh.py b/tests/layer_tests/pytorch_tests/test_hardtanh.py index d0c4c1aac1a38d..728a0cf1d6db42 100644 --- a/tests/layer_tests/pytorch_tests/test_hardtanh.py +++ b/tests/layer_tests/pytorch_tests/test_hardtanh.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import platform diff --git a/tests/layer_tests/pytorch_tests/test_index_copy_.py b/tests/layer_tests/pytorch_tests/test_index_copy_.py index 
725c95936664cf..bd9f26814e1082 100644 --- a/tests/layer_tests/pytorch_tests/test_index_copy_.py +++ b/tests/layer_tests/pytorch_tests/test_index_copy_.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/layer_tests/pytorch_tests/test_index_fill_.py b/tests/layer_tests/pytorch_tests/test_index_fill_.py index 878dda7ab3bd7e..18c08669df1695 100644 --- a/tests/layer_tests/pytorch_tests/test_index_fill_.py +++ b/tests/layer_tests/pytorch_tests/test_index_fill_.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/layer_tests/pytorch_tests/test_isfinite.py b/tests/layer_tests/pytorch_tests/test_isfinite.py index a72125799c8a49..00419cb89ceca8 100644 --- a/tests/layer_tests/pytorch_tests/test_isfinite.py +++ b/tests/layer_tests/pytorch_tests/test_isfinite.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/layer_tests/pytorch_tests/test_isinf.py b/tests/layer_tests/pytorch_tests/test_isinf.py index 72e6ae1f198ea2..cd33fa6acf8473 100644 --- a/tests/layer_tests/pytorch_tests/test_isinf.py +++ b/tests/layer_tests/pytorch_tests/test_isinf.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/layer_tests/pytorch_tests/test_isnan.py b/tests/layer_tests/pytorch_tests/test_isnan.py index 6645546c00707d..150c92ba92bdf6 100644 --- a/tests/layer_tests/pytorch_tests/test_isnan.py +++ b/tests/layer_tests/pytorch_tests/test_isnan.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git 
a/tests/layer_tests/pytorch_tests/test_select_scatter.py b/tests/layer_tests/pytorch_tests/test_select_scatter.py index 112675264c74a5..c2a881ece0e358 100644 --- a/tests/layer_tests/pytorch_tests/test_select_scatter.py +++ b/tests/layer_tests/pytorch_tests/test_select_scatter.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/pytorch_tests/test_slice_scatter.py b/tests/layer_tests/pytorch_tests/test_slice_scatter.py index 0d291f6bb4d3aa..1357a06c645ef7 100644 --- a/tests/layer_tests/pytorch_tests/test_slice_scatter.py +++ b/tests/layer_tests/pytorch_tests/test_slice_scatter.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py index 392469646b2803..8e6a64c141fa2c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2024 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/time_tests/src/timetests/timetest_infer_api_2.cpp b/tests/time_tests/src/timetests/timetest_infer_api_2.cpp index 67943bf27a68f9..08bf0edb6279e8 100644 --- a/tests/time_tests/src/timetests/timetest_infer_api_2.cpp +++ b/tests/time_tests/src/timetests/timetest_infer_api_2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include