diff --git a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c index 68cbef8ab0159e..454122a9b7f8db 100644 --- a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c +++ b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c @@ -148,7 +148,7 @@ ov_compiled_model_create_infer_request(compiled_model, &infer_request); //! [ov_dynamic_shapes:set_input_tensor] ov_output_const_port_t* input_port = NULL; -ov_element_type_e type = UNDEFINED; +ov_element_type_e type = DYNAMIC; ov_shape_t input_shape_1; ov_tensor_t* input_tensor_1 = NULL; ov_tensor_t* output_tensor = NULL; diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index 4050f54f867969..93ca527f51d325 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ b/samples/cpp/benchmark_app/main.cpp @@ -529,7 +529,7 @@ int main(int argc, char* argv[]) { if (result != config.end()) device_config = result->second; size_t batchSize = FLAGS_b; - ov::element::Type type = ov::element::undefined; + ov::element::Type type = ov::element::dynamic; std::string topology_name = ""; std::vector app_inputs_info; std::string output_name; @@ -660,14 +660,14 @@ int main(int argc, char* argv[]) { std::const_pointer_cast(model)->outputs()); } - const auto input_precision = FLAGS_ip.empty() ? ov::element::undefined : getPrecision2(FLAGS_ip); - const auto output_precision = FLAGS_op.empty() ? ov::element::undefined : getPrecision2(FLAGS_op); + const auto input_precision = FLAGS_ip.empty() ? ov::element::dynamic : getPrecision2(FLAGS_ip); + const auto output_precision = FLAGS_op.empty() ? ov::element::dynamic : getPrecision2(FLAGS_op); const auto& inputs = model->inputs(); for (size_t i = 0; i < inputs.size(); i++) { const auto& item = inputs[i]; - auto iop_precision = ov::element::undefined; - auto type_to_set = ov::element::undefined; + auto iop_precision = ov::element::dynamic; + auto type_to_set = ov::element::dynamic; std::string name; try { // Some tensors might have no names, get_any_name will throw exception in that case. @@ -676,10 +676,9 @@ int main(int argc, char* argv[]) { iop_precision = getPrecision2(user_precisions_map.at(item.get_any_name())); } catch (...) { } - - if (iop_precision != ov::element::undefined) { + if (iop_precision != ov::element::dynamic) { type_to_set = iop_precision; - } else if (input_precision != ov::element::undefined) { + } else if (input_precision != ov::element::dynamic) { type_to_set = input_precision; } else if (!name.empty() && app_inputs_info[0].at(name).is_image()) { // image input, set U8 @@ -687,7 +686,7 @@ int main(int argc, char* argv[]) { } auto& in = preproc.input(item.get_any_name()); - if (type_to_set != ov::element::undefined) { + if (type_to_set != ov::element::dynamic) { in.tensor().set_element_type(type_to_set); if (!name.empty()) { @@ -707,17 +706,16 @@ int main(int argc, char* argv[]) { const auto& outs = model->outputs(); for (size_t i = 0; i < outs.size(); i++) { const auto& item = outs[i]; - auto iop_precision = ov::element::undefined; + auto iop_precision = ov::element::dynamic; try { // Some tensors might have no names, get_any_name will throw exception in that case. // -iop option will not work for those tensors. iop_precision = getPrecision2(user_precisions_map.at(item.get_any_name())); } catch (...) 
{ } - - if (iop_precision != ov::element::undefined) { + if (iop_precision != ov::element::dynamic) { preproc.output(i).tensor().set_element_type(iop_precision); - } else if (output_precision != ov::element::undefined) { + } else if (output_precision != ov::element::dynamic) { preproc.output(i).tensor().set_element_type(output_precision); } } diff --git a/src/bindings/c/include/openvino/c/ov_common.h b/src/bindings/c/include/openvino/c/ov_common.h index 12ad83181198f1..219617331018f4 100644 --- a/src/bindings/c/include/openvino/c/ov_common.h +++ b/src/bindings/c/include/openvino/c/ov_common.h @@ -169,33 +169,33 @@ typedef enum { * src/core/include/openvino/core/type/element_type.hpp */ typedef enum { - UNDEFINED = 0U, //!< Undefined element type - DYNAMIC, //!< Dynamic element type - BOOLEAN, //!< boolean element type - BF16, //!< bf16 element type - F16, //!< f16 element type - F32, //!< f32 element type - F64, //!< f64 element type - I4, //!< i4 element type - I8, //!< i8 element type - I16, //!< i16 element type - I32, //!< i32 element type - I64, //!< i64 element type - U1, //!< binary element type - U2, //!< u2 element type - U3, //!< u3 element type - U4, //!< u4 element type - U6, //!< u6 element type - U8, //!< u8 element type - U16, //!< u16 element type - U32, //!< u32 element type - U64, //!< u64 element type - NF4, //!< nf4 element type - F8E4M3, //!< f8e4m3 element type - F8E5M3, //!< f8e5m2 element type - STRING, //!< string element type - F4E2M1, //!< f4e2m1 element type - F8E8M0, //!< f8e8m0 element type + UNDEFINED = 0U, //!< Undefined element type + DYNAMIC = UNDEFINED, //!< Dynamic element type + BOOLEAN, //!< boolean element type + BF16, //!< bf16 element type + F16, //!< f16 element type + F32, //!< f32 element type + F64, //!< f64 element type + I4, //!< i4 element type + I8, //!< i8 element type + I16, //!< i16 element type + I32, //!< i32 element type + I64, //!< i64 element type + U1, //!< binary element type + U2, //!< u2 element type + U3, //!< u3 element type + U4, //!< u4 element type + U6, //!< u6 element type + U8, //!< u8 element type + U16, //!< u16 element type + U32, //!< u32 element type + U64, //!< u64 element type + NF4, //!< nf4 element type + F8E4M3, //!< f8e4m3 element type + F8E5M3, //!< f8e5m2 element type + STRING, //!< string element type + F4E2M1, //!< f4e2m1 element type + F8E8M0, //!< f8e8m0 element type } ov_element_type_e; /** diff --git a/src/bindings/c/src/ov_tensor.cpp b/src/bindings/c/src/ov_tensor.cpp index 9c887dfdaba105..bcd52734c86d16 100644 --- a/src/bindings/c/src/ov_tensor.cpp +++ b/src/bindings/c/src/ov_tensor.cpp @@ -6,7 +6,7 @@ #include "common.h" const std::map element_type_map = { - {ov_element_type_e::UNDEFINED, ov::element::undefined}, + {ov_element_type_e::UNDEFINED, ov::element::dynamic}, {ov_element_type_e::DYNAMIC, ov::element::dynamic}, {ov_element_type_e::BOOLEAN, ov::element::boolean}, {ov_element_type_e::BF16, ov::element::bf16}, diff --git a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py index e2fcb75e0f2fdf..5fa69b99fb178b 100644 --- a/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py +++ b/src/bindings/python/src/openvino/frontend/tensorflow/node_decoder.py @@ -20,7 +20,7 @@ def tf_type_to_ov_type(tf_type_int): try: ret_type = Type(numpy_type) except: - ret_type = Type.undefined + ret_type = Type.dynamic return ret_type @@ -169,7 +169,7 @@ def get_attribute(self, name): return OVAny(Type.dynamic) 
return OVAny(tf_type_to_ov_type(variable_value.dtype)) else: - return OVAny(Type.undefined) + return OVAny(Type.dynamic) return OVAny(tf_type_to_ov_type(type_num)) if name == "value": diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp index bf730f3ae89eb6..e08dfdc8c398ac 100644 --- a/src/bindings/python/src/pyopenvino/core/common.cpp +++ b/src/bindings/python/src/pyopenvino/core/common.cpp @@ -510,8 +510,7 @@ ov::Tensor tensor_from_pointer(py::array& array, const ov::Shape& shape, const o if (type_helpers::get_ov_type(array) == ov::element::string) { OPENVINO_THROW("SHARED MEMORY MODE FOR THIS TENSOR IS NOT APPLICABLE! String types can be only copied."); } - - auto element_type = (type == ov::element::undefined) ? Common::type_helpers::get_ov_type(array) : type; + auto element_type = (type == ov::element::dynamic) ? Common::type_helpers::get_ov_type(array) : type; if (array_helpers::is_contiguous(array)) { return ov::Tensor(element_type, shape, const_cast(array.data(0)), {}); diff --git a/src/bindings/python/src/pyopenvino/core/tensor.cpp b/src/bindings/python/src/pyopenvino/core/tensor.cpp index 0ed8e753469e7a..8722a56900f5ca 100644 --- a/src/bindings/python/src/pyopenvino/core/tensor.cpp +++ b/src/bindings/python/src/pyopenvino/core/tensor.cpp @@ -42,7 +42,7 @@ void regclass_Tensor(py::module m) { }), py::arg("array"), py::arg("shape"), - py::arg("type") = ov::element::undefined, + py::arg("type") = ov::element::dynamic, py::keep_alive<1, 2>(), R"( Another Tensor's special constructor. @@ -76,7 +76,7 @@ void regclass_Tensor(py::module m) { }), py::arg("array"), py::arg("shape"), - py::arg("type") = ov::element::undefined, + py::arg("type") = ov::element::dynamic, py::keep_alive<1, 2>(), R"( Another Tensor's special constructor. diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index eda38c43759339..564ebcd3745ae8 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -93,7 +93,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { [](ov::preprocess::PreProcessSteps& self, ov::element::Type type = {}) { return &self.convert_element_type(type); }, - py::arg_v("type", ov::element::undefined, "openvino.Type.undefined"), + py::arg_v("type", ov::element::dynamic, "openvino.Type.dynamic"), R"( Converts input tensor element type to specified type. Input tensor must have openvino.Type data type. @@ -239,7 +239,7 @@ static void regclass_graph_PostProcessSteps(py::module m) { [](ov::preprocess::PostProcessSteps& self, ov::element::Type type = {}) { return &self.convert_element_type(type); }, - py::arg_v("type", ov::element::undefined, "openvino.Type.undefined"), + py::arg_v("type", ov::element::dynamic, "openvino.Type.dynamic"), R"( Converts tensor element type to specified type. Tensor must have openvino.Type data type. 
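The binding updates above and the test changes that follow treat the former "undefined" type as fully dynamic. A minimal C++ sketch of that behavior, grounded in the assertions of the updated Python test below rather than in any core change shown in this patch (illustrative only, not part of the diff):

    #include <cassert>
    #include <openvino/core/type/element_type.hpp>

    int main() {
        // The type formerly reported as "undefined" now behaves as dynamic:
        // it is dynamic rather than static, and has zero size and bitwidth,
        // matching the expectations in the updated test_undefined_ovtype below.
        const ov::element::Type t = ov::element::dynamic;
        assert(t.is_dynamic());
        assert(!t.is_static());
        assert(t.size() == 0);
        assert(t.bitwidth() == 0);
        return 0;
    }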
diff --git a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp index 66e0eda890e309..830209eab5c93d 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp +++ b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp @@ -31,7 +31,7 @@ void regclass_graph_Type(py::module m) { :rtype: ov.Type )"); - type.attr("undefined") = ov::element::undefined; + type.attr("undefined") = ov::element::dynamic; type.attr("dynamic") = ov::element::dynamic; type.attr("boolean") = ov::element::boolean; type.attr("f16") = ov::element::f16; diff --git a/src/bindings/python/tests/test_runtime/test_type.py b/src/bindings/python/tests/test_runtime/test_type.py index 78a660769614a8..a960e60e019096 100644 --- a/src/bindings/python/tests/test_runtime/test_type.py +++ b/src/bindings/python/tests/test_runtime/test_type.py @@ -92,25 +92,17 @@ def test_basic_ovtypes(ovtype, def test_undefined_ovtype(): ov_type = Type.undefined - assert ov_type.is_static() is True - assert ov_type.is_dynamic() is False + assert ov_type.is_static() is False + assert ov_type.is_dynamic() is True assert ov_type.is_real() is False - assert ov_type.real is False assert ov_type.is_integral() is True - assert ov_type.integral is True assert ov_type.is_signed() is False - assert ov_type.signed is False assert ov_type.is_quantized() is False - assert ov_type.quantized is False - assert ov_type.get_type_name() == "undefined" - assert ov_type.type_name == "undefined" - assert ov_type.get_size() == 0 + assert ov_type.get_type_name() == "dynamic" assert ov_type.size == 0 - - # Note: might depend on the system - import sys - assert ov_type.bitwidth == sys.maxsize * 2 + 1 - assert ov_type.get_bitwidth() == sys.maxsize * 2 + 1 + assert ov_type.get_size() == 0 + assert ov_type.bitwidth == 0 + assert ov_type.get_bitwidth() == 0 def test_dynamic_ov_type(): diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index 8b046de904f1e6..6494a212175765 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -32,7 +32,7 @@ namespace precision_set { class LP_TRANSFORMATIONS_API DataPrecision { public: - DataPrecision() : precision(element::undefined), min(0.f), max(0.f), hasZeroPoint(false) {} + DataPrecision() : precision(element::dynamic), min(0.f), max(0.f), hasZeroPoint(false) {} explicit DataPrecision(const element::Type& precision) { this->precision = precision; @@ -48,10 +48,9 @@ class LP_TRANSFORMATIONS_API DataPrecision { hasZeroPoint(hasZeroPoint) {} bool empty() const noexcept { - assert( - ((precision == element::undefined) && (min == 0.f) && (max == 0.f) && (!hasZeroPoint)) || - ((precision != element::undefined) && (max != 0.f))); - return (precision == element::undefined) && (min == 0.f) && (max == 0.f) && (!hasZeroPoint); + assert(((precision == element::dynamic) && (min == 0.f) && (max == 0.f) && (!hasZeroPoint)) || + ((precision != element::dynamic) && (max != 0.f))); + return (precision == element::dynamic) && (min == 0.f) && (max == 0.f) && (!hasZeroPoint); } static bool isSupported(const element::Type& precision) { diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp 
b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index d4a3ba6d429044..6c53d468ec02fc 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -186,9 +186,8 @@ class LP_TRANSFORMATIONS_API NetworkHelper { static size_t getParentOutputIndex(const std::shared_ptr& parent, const std::shared_ptr& child); - static FakeQuantizeDequantizationValues createEmptyValues( - const FakeQuantizeDequantization& dequantization, - const element::Type& precision = element::undefined); + static FakeQuantizeDequantizationValues createEmptyValues(const FakeQuantizeDequantization& dequantization, + const element::Type& precision = element::dynamic); static bool isZeroConst(const std::shared_ptr& node); static bool checkZeroPoint(const std::shared_ptr& node, const DataPrecision& dataPrecision = DataPrecision()); @@ -293,8 +292,13 @@ std::shared_ptr NetworkHelper::setOutDataPrecision(std::shared_ptr std::shared_ptr make_op_pattern(const ov::NodeVector& args) { - return std::make_shared(element::undefined, PartialShape{}, - [](std::shared_ptr n) {return !!ov::as_type_ptr(n); }, args); + return std::make_shared( + element::dynamic, + PartialShape{}, + [](std::shared_ptr n) { + return !!ov::as_type_ptr(n); + }, + args); } template diff --git a/src/common/low_precision_transformations/src/concat.cpp b/src/common/low_precision_transformations/src/concat.cpp index db77179a229cd6..d33d95ccd65fa2 100644 --- a/src/common/low_precision_transformations/src/concat.cpp +++ b/src/common/low_precision_transformations/src/concat.cpp @@ -300,7 +300,7 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) if (constant == nullptr) { return true; } - if (const_precision == element::undefined) { + if (const_precision == element::dynamic) { const_precision = constant->get_element_type(); return true; } @@ -320,7 +320,7 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) return false; } - if (precision == element::undefined) { + if (precision == element::dynamic) { precision = dequantization.data.get_element_type(); } else if (precision != dequantization.data.get_element_type()) { return false; diff --git a/src/common/low_precision_transformations/src/fake_quantize.cpp b/src/common/low_precision_transformations/src/fake_quantize.cpp index 4bfb24a57abd65..d03178d212dea8 100644 --- a/src/common/low_precision_transformations/src/fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize.cpp @@ -93,7 +93,7 @@ std::shared_ptr getConstant(const std::shared_ptr& eltwi bool all_precisions_equal(const std::shared_ptr& node) { const auto& inputs = node->inputs(); - const auto first_input_precision = inputs.empty() ? element::undefined : inputs[0].get_element_type(); + const auto first_input_precision = inputs.empty() ? 
element::dynamic : inputs[0].get_element_type(); if (!inputs.empty()) { const auto first_input_precision = inputs[0].get_element_type(); if (std::any_of( @@ -109,7 +109,7 @@ bool all_precisions_equal(const std::shared_ptr& node) { const auto& outputs = node->outputs(); if (!outputs.empty()) { const auto first_output_precision = outputs[0].get_element_type(); - if ((first_input_precision != element::undefined) && (first_input_precision != first_output_precision)) { + if ((first_input_precision != element::dynamic) && (first_input_precision != first_output_precision)) { return false; } diff --git a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp index 32040b06f80fba..88fe98f134bda4 100644 --- a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp @@ -306,7 +306,8 @@ bool FakeQuantizeDecompositionTransformation::transform(ov::pass::pattern::Match const DataPrecision expectedDataPrecision = fq_decomposition::getDataPrecisionByOutputPortAndFakeQuantize(layer); // TODO: need test to compose FakeQuantize - if ((expectedDataPrecision.precision == element::undefined) || (expectedDataPrecision.precision == outputPrecision)) { + if ((expectedDataPrecision.precision == element::dynamic) || + (expectedDataPrecision.precision == outputPrecision)) { return rewritten; } @@ -363,7 +364,7 @@ bool FakeQuantizeDecompositionTransformation::transform(ov::pass::pattern::Match // if IntervalsAlignment attribute is defined then, the attribute defines decomposition parameters, // if IntervalsAlignment attribute is not defined, then FakeQuantize operation intervals define decomposition parameters - if (dataPrecision.precision == element::undefined) { + if (dataPrecision.precision == element::dynamic) { element::Type precision; const auto levels = layer->get_levels(); const std::vector outputLowValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp index e7a1af82d95614..3a2fdf9e7d7c04 100644 --- a/src/common/low_precision_transformations/src/layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/layer_transformation.cpp @@ -268,7 +268,7 @@ LayerTransformation::PrecisionDetails LayerTransformation::getPrecisionDetails( unsignedPrecision = !signedPrecision; } - element::Type resultPrecision = element::undefined; + element::Type resultPrecision = element::dynamic; // if zero point exists then result precision has to be defined by client code if (!hasZeroPoint) { if (signedPrecision && (!unsignedPrecision)) { @@ -335,8 +335,7 @@ DataPrecision LayerTransformation::getDataPrecision( printDequantizationInfo(layer); #endif PrecisionDetails precisionDetailsAtOutputIntervals = getPrecisionDetails(quantizationDetails); - - if (precisionDetailsAtOutputIntervals.precision != element::undefined) { + if (precisionDetailsAtOutputIntervals.precision != element::dynamic) { // FakeQuantize optimal precision not deined if (!requiredPrecisions.empty()) { const auto foundIt = std::find(requiredPrecisions.begin(), requiredPrecisions.end(), precisionDetailsAtOutputIntervals.precision); diff --git a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp 
b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp index 8e52eb38ee8ee1..2187e5da14ffe1 100644 --- a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp +++ b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp @@ -54,7 +54,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(ov::pass::pattern::Matc dequantization = NetworkHelper::foldDequantization(multiply, inputIndex, defaultPrecisions); } - element::Type weightsPrecision = element::undefined; + element::Type weightsPrecision = element::dynamic; if (updatePrecisions) { // try to find restrictions on weights for GroupConvolution if (restrictions.size() > 1ul) { @@ -65,7 +65,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(ov::pass::pattern::Matc } // if restrictions are absent precisions attribute is used - if (weightsPrecision == element::undefined) { + if (weightsPrecision == element::dynamic) { const auto precisionsAttribute = getAttribute(multiply->input(inputIndex == 0ul ? 1ul : 0ul)); const auto precisions = precisionsAttribute == nullptr ? defaultPrecisions : diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index afb7e19c13e7ad..1b75a714dfe666 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp +++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -1368,7 +1368,7 @@ std::shared_ptr NetworkHelper::normalizeDequantizationShap } FakeQuantizeDequantizationValues NetworkHelper::createEmptyValues(const FakeQuantizeDequantization& dequantization, const element::Type& prc) { - const auto precision = prc == element::undefined ? dequantization.getPrecision() : prc; + const auto precision = prc.is_dynamic() ? dequantization.getPrecision() : prc; const std::shared_ptr multiplyConstant = dequantization.multiply ? dequantization.multiplyConstant->get_element_type() != precision ? 
foldConvert(dequantization.multiplyConstant->output(0), precision) : @@ -1897,4 +1897,4 @@ bool NetworkHelper::checkConstantNotInf(const std::shared_ptr constant_nod } } // namespace low_precision } // namespace pass -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/common/low_precision_transformations/src/recurrent_cell.cpp b/src/common/low_precision_transformations/src/recurrent_cell.cpp index 34d851d6a2b464..34fcc866c3d5fe 100644 --- a/src/common/low_precision_transformations/src/recurrent_cell.cpp +++ b/src/common/low_precision_transformations/src/recurrent_cell.cpp @@ -104,11 +104,17 @@ bool isSupportedForPerChannelQuantization(const std::shared_ptr& node) { std::vector> get_supported_precisions(std::shared_ptr lstm) { // pair fields: // 0 - input number, - // 1 - input type, `element::undefined` - any precision + // 1 - input type, `element::dynamic` - any precision if (is_type(lstm)) { - return std::vector>{ {0, element::u8}, { 1, element::u8 }, { 4, element::undefined }, { 5, element::undefined } }; + return std::vector>{{0, element::u8}, + {1, element::u8}, + {4, element::dynamic}, + {5, element::dynamic}}; } else if (is_type(lstm)) { - return std::vector>{ {0, element::u8}, { 1, element::u8 }, { 3, element::undefined }, { 4, element::undefined } }; + return std::vector>{{0, element::u8}, + {1, element::u8}, + {3, element::dynamic}, + {4, element::dynamic}}; } OPENVINO_THROW("unsupported operation type: ", lstm->get_type_name()); @@ -163,7 +169,8 @@ bool RecurrentCellTransformation::transform(ov::pass::pattern::Matcher& m) { defaultPrecisions : precisionsAttribute.as().value(); const auto& dataPrecision = getDataPrecision(fq, quantizationDetails, precisions); - if (dataPrecision.empty() || ((input.second != element::undefined) && (dataPrecision.precision != input.second))) { + if (dataPrecision.empty() || + ((input.second != element::dynamic) && (dataPrecision.precision != input.second))) { return false; } @@ -257,7 +264,7 @@ bool RecurrentCellTransformation::canBeTransformed(const std::shared_ptr& if (dequantization.empty()) { continue; } - if ((index.second != element::undefined) && (dequantization.data.get_element_type() != index.second)) { + if ((index.second != element::dynamic) && (dequantization.data.get_element_type() != index.second)) { return false; } } diff --git a/src/common/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp b/src/common/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp index cdd334d00909e3..1b8a26f80b1836 100644 --- a/src/common/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp +++ b/src/common/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp @@ -118,8 +118,7 @@ ov::Any IntervalsAlignmentAttribute::create( fakeQuantize->get_levels(), outputLowValues, outputHighValues); - - if (preferablePrecision.precision != element::undefined) { + if (preferablePrecision.precision != element::dynamic) { attribute.value().preferablePrecisions.insert(preferablePrecision.precision); } diff --git a/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp b/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp index fd1f724e6275d8..d278cb3cd72334 100644 --- a/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_fq_tranformation.cpp @@ -116,7 +116,6 @@ class ConcatWithFQTransformation : public 
LayerTransformation, if (!testValues.actual.dequantization2.multiply.empty()) { testValues.actual.dequantization2.multiply.outPrecision = precision; } - actualFunction = ov::builder::subgraph::ConcatFunction::get(precision, shape, testValues.actual.fakeQuantize1, @@ -126,7 +125,7 @@ class ConcatWithFQTransformation : public LayerTransformation, testValues.actual.convert2, testValues.actual.dequantization2, {}, - ov::element::undefined, + ov::element::dynamic, {}, testValues.axis, testValues.addNotPrecisionPreservedOperation); diff --git a/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp b/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp index 61c06d97f08935..e5821b301cfc38 100644 --- a/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_with_not_quantized_parent_transformation.cpp @@ -150,7 +150,7 @@ class ConcatWithNotQuantizedParentTransformation : testValues.actual.dequantization2, true, {}, - ov::element::undefined, + ov::element::dynamic, {}, testValues.axis, testValues.addNotPrecisionPreservedOperation); diff --git a/src/common/low_precision_transformations/tests/gather_transformation.cpp b/src/common/low_precision_transformations/tests/gather_transformation.cpp index 79a581e50d589c..81b6e0644e12a9 100644 --- a/src/common/low_precision_transformations/tests/gather_transformation.cpp +++ b/src/common/low_precision_transformations/tests/gather_transformation.cpp @@ -111,7 +111,6 @@ namespace testValues1 { const std::vector opset_version = {1, 7, 8}; const std::vector inputShapes3D = {{3, 3, 4}, {-1, -1, -1}}; - const std::vector testValues = { // U8: per-tensor quantization {{1}, @@ -149,13 +148,13 @@ const std::vector testValues = { LayerTransformation::createParamsU8I8(), {ov::element::u8, {{ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1}, false, 1ul, element::u8, true}, {{0.1f}, ov::element::f32, {1, 3, 1}}}}, {ov::element::u8, {{}, {}, {}}, ov::element::u8, {{ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1}, false, 1ul, element::u8, true}, {{0.1f}, ov::element::f32, {1, 3, 1}}}}}, // U8: per-channel quantization, gather axis match with channel {{1}, diff --git a/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp b/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp index 11583d0a907485..c8e8e3eb26245d 100644 --- a/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp +++ b/src/common/low_precision_transformations/tests/move_fake_quantize_transformation.cpp @@ -130,7 +130,7 @@ class MoveFakeQuantizeTransformation : public LayerTransformation, {ov::PrecisionPreservedAttribute(true), ov::IntervalsAlignmentAttribute(interval, 256), ov::QuantizationAlignmentAttribute(false)}, - ov::element::undefined, + ov::element::dynamic, testValues.axis, oneInputWithSplit); diff --git a/src/common/low_precision_transformations/tests/multiply_transformation.cpp b/src/common/low_precision_transformations/tests/multiply_transformation.cpp index c05d8f7cb40cad..a27206c74c7023 100644 --- a/src/common/low_precision_transformations/tests/multiply_transformation.cpp +++ 
b/src/common/low_precision_transformations/tests/multiply_transformation.cpp @@ -34,7 +34,7 @@ class MultiplyBranch { }; inline std::ostream& operator<<(std::ostream& out, const MultiplyBranch& branch) { - if (branch.input_precision != element::undefined) { + if (branch.input_precision != element::dynamic) { out << "_input=" << branch.input_precision; } if (!branch.constant.empty()) { @@ -83,8 +83,8 @@ class MultiplyTransformationTestValues { expected(std::move(expected)) {} }; -const ov::element::Type MultiplyTransformationTestValues::input_precision = ov::element::undefined; -const ov::element::Type MultiplyTransformationTestValues::model_precision = ov::element::undefined; +const ov::element::Type MultiplyTransformationTestValues::input_precision = ov::element::dynamic; +const ov::element::Type MultiplyTransformationTestValues::model_precision = ov::element::dynamic; typedef std::tuple< ov::element::Type, // model precision @@ -824,4 +824,4 @@ INSTANTIATE_TEST_SUITE_P( MultiplyTransformation::getTestCaseName); } // namespace broadcast_left -} // namespace \ No newline at end of file +} // namespace diff --git a/src/common/low_precision_transformations/tests/pad_transformation.cpp b/src/common/low_precision_transformations/tests/pad_transformation.cpp index 3187abdc869120..bf6c70f11d81d8 100644 --- a/src/common/low_precision_transformations/tests/pad_transformation.cpp +++ b/src/common/low_precision_transformations/tests/pad_transformation.cpp @@ -336,7 +336,7 @@ const std::vector deqWithSub = { ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {3.f} } }, @@ -346,7 +346,7 @@ const std::vector deqWithSub = { ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {3.f} } } diff --git a/src/common/low_precision_transformations/tests/precision_details_test.cpp b/src/common/low_precision_transformations/tests/precision_details_test.cpp index 36d680314aa1b5..df92007ab45106 100644 --- a/src/common/low_precision_transformations/tests/precision_details_test.cpp +++ b/src/common/low_precision_transformations/tests/precision_details_test.cpp @@ -33,7 +33,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsI8levels255WithZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(i8levels255WithZeroPoint); - ASSERT_EQ(ov::element::undefined, precisionDetails.precision); + ASSERT_EQ(ov::element::dynamic, precisionDetails.precision); ASSERT_TRUE(precisionDetails.hasNegativeOutput); ASSERT_TRUE(precisionDetails.hasZeroPoint); } @@ -60,7 +60,7 @@ TEST_F(PrecisionDetailsTests, getPrecisionDetailsU8levels256WithZeroPoint) { LayerTransformation::Params params = LayerTransformation::Params(); FakeQuantizeTransformation fakeQuantizeTransformation(params); const LayerTransformation::PrecisionDetails precisionDetails = fakeQuantizeTransformation.getPrecisionDetails(u8levels256WithZeroPoint); - ASSERT_EQ(ov::element::undefined, precisionDetails.precision); + ASSERT_EQ(ov::element::dynamic, precisionDetails.precision); ASSERT_FALSE(precisionDetails.hasNegativeOutput); ASSERT_TRUE(precisionDetails.hasZeroPoint); } diff --git 
a/src/common/low_precision_transformations/tests/reshape_transformation.cpp b/src/common/low_precision_transformations/tests/reshape_transformation.cpp index 7b7f58c743d487..28289955bc3ccf 100644 --- a/src/common/low_precision_transformations/tests/reshape_transformation.cpp +++ b/src/common/low_precision_transformations/tests/reshape_transformation.cpp @@ -91,7 +91,6 @@ TEST_P(ReshapeTransformation, CompareFunctions) { ASSERT_TRUE(LayerTransformation::allNamesAreUnique(actualFunction)) << "Not all names are unique"; } - const std::vector testValues = { // U8: no subtract 3D -> 4D: channels are not affected {{1, 384, 1024}, @@ -170,11 +169,11 @@ const std::vector testValues = { {1, 2, 2, 10, 10}, LayerTransformation::createParamsU8I8(), {ov::element::u8, - {{ov::element::f32}, {{128.f}, element::undefined, {1, 4, 1, 1}, false, 1ul, element::u8, true}, {3.f}}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {1, 4, 1, 1}, false, 1ul, element::u8, true}, {3.f}}}, {ov::element::u8, {{}, {}, {}}, ov::element::u8, - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::u8, true}, {3.f}}}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::u8, true}, {3.f}}}}, // U8: with subtract 3D -> 4D: channels are not affected, dynamic batch {{-1, 3, 20}, {0, 3, 4, 5}, diff --git a/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp b/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp index a0a4ae654a94cd..7cdf9b40628127 100644 --- a/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp +++ b/src/common/low_precision_transformations/tests/shuffle_channels_transformation.cpp @@ -148,7 +148,7 @@ const std::vector testValues = { ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {3.f} } }, @@ -158,7 +158,7 @@ const std::vector testValues = { ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {3.f} } } diff --git a/src/common/low_precision_transformations/tests/split_transformation.cpp b/src/common/low_precision_transformations/tests/split_transformation.cpp index 66f69951dd980f..0b2ecb650946a7 100644 --- a/src/common/low_precision_transformations/tests/split_transformation.cpp +++ b/src/common/low_precision_transformations/tests/split_transformation.cpp @@ -99,7 +99,6 @@ TEST_P(SplitTransformation, CompareFunctions) { } const std::vector precisions = {ov::element::f32, ov::element::f16}; - const std::vector testValues = { // U8 per tensor quantization {{1, 3, 16, 16}, @@ -122,15 +121,14 @@ const std::vector testValues = { size_t{2}, LayerTransformation::createParamsU8I8(), // ActualValues - {ov::element::u8, - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::u8, true}, {3.f}}}, + {ov::element::u8, {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::u8, true}, {3.f}}}, // ExpectedValues {ov::element::u8, {}, ov::element::u8, { - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::u8, true}, {3.f}}, - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::u8, true}, {3.f}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::u8, true}, {3.f}}, + {{ov::element::f32}, {{128.f}, 
element::dynamic, {}, false, 1ul, element::u8, true}, {3.f}}, }}}, // U8 per tensor quantization / int8 subtraction with Convert from fp16 -> fp32 {{1, 3, 16, 16}, @@ -139,14 +137,14 @@ const std::vector testValues = { LayerTransformation::createParamsU8I8(), // ActualValues {ov::element::u8, - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::f16, true}, {3.f}}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::f16, true}, {3.f}}}, // ExpectedValues {ov::element::u8, {}, ov::element::u8, { - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::f16, true}, {3.f}}, - {{ov::element::f32}, {{128.f}, element::undefined, {}, false, 1ul, element::f16, true}, {3.f}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::f16, true}, {3.f}}, + {{ov::element::f32}, {{128.f}, element::dynamic, {}, false, 1ul, element::f16, true}, {3.f}}, }}}, {{-1, -1, -1, -1}, std::int64_t{2}, diff --git a/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp b/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp index e665f5a7706772..3dc1e80a33c7f8 100644 --- a/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp +++ b/src/common/low_precision_transformations/tests/strided_slice_transformation.cpp @@ -197,7 +197,6 @@ const std::vector inputShapes = { {1, 3, 24, 24}, {-1, -1, -1, -1} }; - const std::vector stridedSliceTransformationTestValues = { // U8: channel slice, per-tensor quantization { @@ -237,7 +236,7 @@ const std::vector stridedSliceTransformati ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {3.f} } }, @@ -247,7 +246,7 @@ const std::vector stridedSliceTransformati ov::element::u8, { {ov::element::f32}, - {{128.f}, element::undefined, {}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {}, false, 1ul, element::u8, true}, {3.f} } } diff --git a/src/common/low_precision_transformations/tests/transpose_transformation.cpp b/src/common/low_precision_transformations/tests/transpose_transformation.cpp index 76943c7297c136..b44066ab7a0df2 100644 --- a/src/common/low_precision_transformations/tests/transpose_transformation.cpp +++ b/src/common/low_precision_transformations/tests/transpose_transformation.cpp @@ -100,8 +100,7 @@ const std::vector testValues = { // U8: per-tensor quantization {{0, 1, 3, 2}, LayerTransformation::createParamsU8I8(), - {ov::element::u8, - {{ov::element::f32}, {{128}, ov::element::f32, {}, true, 1, ov::element::u8, true}, {0.1f}}}, + {ov::element::u8, {{ov::element::f32}, {{128}, ov::element::f32, {}, true, 1, ov::element::u8, true}, {0.1f}}}, {ov::element::u8, {{}, {}, {}}, ov::element::u8, @@ -130,13 +129,13 @@ const std::vector testValues = { LayerTransformation::createParamsU8I8(), {ov::element::u8, {{ov::element::f32}, - {{128.f}, element::undefined, {1, 3, 1, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 3, 1, 1}, false, 1ul, element::u8, true}, {{0.1f}, ov::element::f32, {1, 3, 1, 1}}}}, {ov::element::u8, {{}, {}, {}}, ov::element::u8, {{ov::element::f32}, - {{128.f}, element::undefined, {1, 1, 3, 1}, false, 1ul, element::u8, true}, + {{128.f}, element::dynamic, {1, 1, 3, 1}, false, 1ul, element::u8, true}, {{0.1f}, ov::element::f32, {1, 1, 3, 1}}}}}, // U8: per-tensor quantization, transpose channel dimension {{0, 3, 1, 2}, 
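Across the low-precision-transformation sources and tests above, ov::element::dynamic now serves as the "no precision specified" placeholder that element::undefined used to provide, with the fallback chosen when the sentinel is seen. A short sketch of that sentinel-and-fallback idiom; pick_precision is a hypothetical helper for illustration, not taken from the patch:

    #include <openvino/core/type/element_type.hpp>

    // dynamic acts as the "unset" sentinel: fall back to a concrete precision
    // when none was requested, mirroring checks such as
    // `precision == element::dynamic` in the hunks above.
    ov::element::Type pick_precision(const ov::element::Type& requested,
                                     const ov::element::Type& fallback) {
        return requested == ov::element::dynamic ? fallback : requested;
    }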
diff --git a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp index 2002a5fa5327f3..9755f01759ebe9 100644 --- a/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp +++ b/src/common/low_precision_transformations/tests/unit/layer_transformation_get_data_precision.cpp @@ -127,7 +127,7 @@ TEST(smoke_LPT_LayerTransformation, getDataPrecision_reqNone_I8zp_to_undefzp) { auto const dequantization = ov::pass::low_precision::QuantizationDetails::getDetails(fakeQuantize); auto const precisionDetails = ov::pass::low_precision::LayerTransformation::getDataPrecision(fakeQuantize, dequantization, {}); - ASSERT_EQ(element::undefined, precisionDetails.precision); + ASSERT_EQ(element::dynamic, precisionDetails.precision); ASSERT_EQ(0.f, precisionDetails.min); ASSERT_EQ(0.f, precisionDetails.max); ASSERT_EQ(false, precisionDetails.hasZeroPoint); @@ -143,7 +143,7 @@ TEST(smoke_LPT_LayerTransformation, getDataPrecision_reqNone_U8zp_to_undefzp) { auto const dequantization = ov::pass::low_precision::QuantizationDetails::getDetails(fakeQuantize); auto const precisionDetails = ov::pass::low_precision::LayerTransformation::getDataPrecision(fakeQuantize, dequantization, {}); - ASSERT_EQ(element::undefined, precisionDetails.precision); + ASSERT_EQ(element::dynamic, precisionDetails.precision); ASSERT_EQ(0.f, precisionDetails.min); ASSERT_EQ(0.f, precisionDetails.max); ASSERT_EQ(false, precisionDetails.hasZeroPoint); diff --git a/src/common/offline_transformations/src/compress_quantize_weigths.cpp b/src/common/offline_transformations/src/compress_quantize_weigths.cpp index c3f4095410cc3f..68e4adc4a3d619 100644 --- a/src/common/offline_transformations/src/compress_quantize_weigths.cpp +++ b/src/common/offline_transformations/src/compress_quantize_weigths.cpp @@ -177,7 +177,7 @@ ov::pass::CompressWeightsWithFakeQuantize::CompressWeightsWithFakeQuantize() { auto levels = fq->get_levels(); if (levels <= 2 || levels > 256) return false; - auto low_precision_type = element::undefined; + auto low_precision_type = element::dynamic; // Currently we support two weights quantize types: i4, u4, i8, u8 // we determine that the weights should be cast to u4, u8 inside compute_scale_and_zero_point if (levels <= 16) { diff --git a/src/common/snippets/src/op/brgemm.cpp b/src/common/snippets/src/op/brgemm.cpp index 1bfb0229cfe8ac..6610a7a8539be8 100644 --- a/src/common/snippets/src/op/brgemm.cpp +++ b/src/common/snippets/src/op/brgemm.cpp @@ -93,13 +93,13 @@ ov::element::Type Brgemm::get_output_type(const ov::element::Type& in_type0, con } else if (is_int8) { return element::i32; } else { - return element::undefined; + return element::dynamic; } } ov::element::Type Brgemm::get_output_type() const { auto output_type = get_output_type(get_input_element_type(0), get_input_element_type(1)); - if (output_type == element::undefined) { + if (output_type == element::dynamic) { OPENVINO_THROW("BrgemmCPU node has incompatible input element types: " + get_input_element_type(0).get_type_name() + " and " + diff --git a/src/common/snippets/src/pass/mha_tokenization.cpp b/src/common/snippets/src/pass/mha_tokenization.cpp index 272961f6a9efcc..6ab797aaccfb6c 100644 --- a/src/common/snippets/src/pass/mha_tokenization.cpp +++ b/src/common/snippets/src/pass/mha_tokenization.cpp @@ -209,7 +209,7 @@ bool ov::snippets::pass::TokenizeMHASnippets::is_matmul0_supported(const 
std::sh return false; const auto matmul_prc = op::Brgemm::get_output_type(matmul->get_input_element_type(0), matmul->get_input_element_type(1)); - return matmul_prc != element::undefined; + return matmul_prc != element::dynamic; } ov::snippets::pass::TokenizeMHASnippets::TokenizeMHASnippets(const SnippetsTokenization::Config& config) { @@ -311,8 +311,7 @@ ov::snippets::pass::TokenizeMHASnippets::TokenizeMHASnippets(const SnippetsToken const auto matmul1_out_type = op::Brgemm::get_output_type(matmul1->get_input_element_type(0), matmul1->get_input_element_type(1)); - if (matmul1_out_type == element::undefined || - !is_supported_tensor(matmul1->get_input_tensor(0)) || + if (matmul1_out_type == element::dynamic || !is_supported_tensor(matmul1->get_input_tensor(0)) || !is_supported_tensor(matmul1->get_input_tensor(1))) return false; diff --git a/src/common/snippets/tests/src/pass/precision_propagation.cpp b/src/common/snippets/tests/src/pass/precision_propagation.cpp index aa75a0fd9ec828..aa7531eb15070a 100644 --- a/src/common/snippets/tests/src/pass/precision_propagation.cpp +++ b/src/common/snippets/tests/src/pass/precision_propagation.cpp @@ -152,7 +152,7 @@ std::vector test_cases { { {}, {}, - {element::i8, element::undefined}, + {element::i8, element::dynamic}, {} } }, @@ -212,7 +212,7 @@ std::vector test_cases { { {}, {}, - {element::f32, element::undefined}, + {element::f32, element::dynamic}, {} } }, @@ -222,7 +222,7 @@ std::vector test_cases { { {element::f32, element::f32}, {}, - {element::undefined, element::f32}, + {element::dynamic, element::f32}, { {element::f32, element::f32}, {element::bf16, element::bf16} @@ -244,7 +244,7 @@ std::vector test_cases { { {element::f32, element::f32}, {}, - {element::undefined, element::f32}, + {element::dynamic, element::f32}, { {element::f32, element::f32}, {element::bf16, element::bf16} diff --git a/src/common/transformations/include/ov_ops/dynamic_quantize.hpp b/src/common/transformations/include/ov_ops/dynamic_quantize.hpp index 2eb79322b84e28..0fd7b51329445c 100644 --- a/src/common/transformations/include/ov_ops/dynamic_quantize.hpp +++ b/src/common/transformations/include/ov_ops/dynamic_quantize.hpp @@ -34,9 +34,9 @@ class TRANSFORMATIONS_API DynamicQuantize : public ov::op::Op { /// \brief Structure that specifies attributes for interpolation struct Attributes { QuantizationType quantization_type = QuantizationType::Symmetric; - element::Type quantization_dt = element::undefined; - element::Type scale_dt = element::undefined; - element::Type zp_dt = element::undefined; + element::Type quantization_dt = element::dynamic; + element::Type scale_dt = element::dynamic; + element::Type zp_dt = element::dynamic; std::vector group_sizes = {}; std::vector scales_zp_output_order = {}; diff --git a/src/common/transformations/include/ov_ops/fully_connected.hpp b/src/common/transformations/include/ov_ops/fully_connected.hpp index de5a9e72984ad4..ff658476439029 100644 --- a/src/common/transformations/include/ov_ops/fully_connected.hpp +++ b/src/common/transformations/include/ov_ops/fully_connected.hpp @@ -21,11 +21,11 @@ class TRANSFORMATIONS_API FullyConnected : public ov::op::Op { FullyConnected(const ov::Output& A, const ov::Output& B, const ov::Output& bias, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); FullyConnected(const ov::Output& A, const ov::Output& B, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = 
ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor& visitor) override; diff --git a/src/common/transformations/include/ov_ops/fully_connected_compressed.hpp b/src/common/transformations/include/ov_ops/fully_connected_compressed.hpp index c5f33ff700f9d8..cc1e9324df0021 100644 --- a/src/common/transformations/include/ov_ops/fully_connected_compressed.hpp +++ b/src/common/transformations/include/ov_ops/fully_connected_compressed.hpp @@ -23,13 +23,13 @@ class TRANSFORMATIONS_API FullyConnectedCompressed : public FullyConnected { const ov::Output& bias, const ov::Output& weight_scales, const ov::Output& weight_zero_points, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); FullyConnectedCompressed(const ov::Output& X, const ov::Output& W, const ov::Output& bias, const ov::Output& weight_scales, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; diff --git a/src/common/transformations/include/ov_ops/fully_connected_quantized.hpp b/src/common/transformations/include/ov_ops/fully_connected_quantized.hpp index 471291a1ece3ea..3448b9236da6e3 100644 --- a/src/common/transformations/include/ov_ops/fully_connected_quantized.hpp +++ b/src/common/transformations/include/ov_ops/fully_connected_quantized.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API FullyConnectedQuantized : public FullyConnected { const ov::Output& input_zero_points, const ov::Output& output_scales, const ov::Output& output_zero_points, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); void validate_and_infer_types() override; diff --git a/src/common/transformations/include/ov_ops/fully_connected_quantized_legacy.hpp b/src/common/transformations/include/ov_ops/fully_connected_quantized_legacy.hpp index 7c7b3d4f162061..076034863fc8c3 100644 --- a/src/common/transformations/include/ov_ops/fully_connected_quantized_legacy.hpp +++ b/src/common/transformations/include/ov_ops/fully_connected_quantized_legacy.hpp @@ -23,13 +23,13 @@ class TRANSFORMATIONS_API FullyConnectedQuantizedLegacy : public FullyConnected const ov::Output& bias, const ov::Output& deq_scales, const ov::Output& deq_zero_points, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); FullyConnectedQuantizedLegacy(const ov::Output& X, const ov::Output& W, const ov::Output& bias, const ov::Output& deq_scales, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; diff --git a/src/common/transformations/include/ov_ops/glu.hpp b/src/common/transformations/include/ov_ops/glu.hpp index ab2c646d7a1896..5ae74bd25ec191 100644 --- a/src/common/transformations/include/ov_ops/glu.hpp +++ b/src/common/transformations/include/ov_ops/glu.hpp @@ -33,7 +33,7 @@ class TRANSFORMATIONS_API GLU : public ov::op::Op { int64_t split_lengths, const GluType glu_type, const size_t split_to_glu_idx, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor& visitor) override; diff --git 
a/src/common/transformations/include/ov_ops/rms.hpp b/src/common/transformations/include/ov_ops/rms.hpp index 3cebf4caaf8daf..0e4b43ca07fe6f 100644 --- a/src/common/transformations/include/ov_ops/rms.hpp +++ b/src/common/transformations/include/ov_ops/rms.hpp @@ -27,7 +27,7 @@ class TRANSFORMATIONS_API RMS : public ov::op::Op { RMS(const Output& data, const Output& gamma, double epsilson, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor& visitor) override; diff --git a/src/common/transformations/include/ov_ops/type_relaxed.hpp b/src/common/transformations/include/ov_ops/type_relaxed.hpp index 7f00d1c8702d3a..1259e89a172f6e 100644 --- a/src/common/transformations/include/ov_ops/type_relaxed.hpp +++ b/src/common/transformations/include/ov_ops/type_relaxed.hpp @@ -29,7 +29,7 @@ class OPENVINO_API TypeRelaxedBase { m_output_data_types(_output_data_types) {} /// \return Data type that will be set for output with a given index outputIndex. - /// If output with a specified index outputIndex hasn't been set before, element::undefined will returned. + /// If output with a specified index outputIndex hasn't been set before, element::dynamic will returned. /// Undefined means no type override happens for a given outputIndex and it will deduced as original /// operation defineds in its infer function. /// @@ -37,11 +37,11 @@ class OPENVINO_API TypeRelaxedBase { /// get_output_element_type returns the result of type inference, so it is completely deduced from /// an operation inputs and attributes, and get_overridden_output_type returns value of the attribute that /// is used to deduce output type. In some cases they don't match: get_overridden_output_type may return - /// element::undefined for some index i, and get_output_element_type will return some real type for + /// element::dynamic for some index i, and get_output_element_type will return some real type for /// the same index i. const element::Type& get_overridden_output_type(size_t outputIndex = 0) const { if (outputIndex >= m_output_data_types.size()) { - return element::undefined; + return element::dynamic; } return m_output_data_types[outputIndex]; } @@ -52,17 +52,17 @@ class OPENVINO_API TypeRelaxedBase { /// is changed according to a given outputIndex value. void set_overridden_output_type(const element::Type& element_type, size_t outputIndex = 0) { if (outputIndex >= m_output_data_types.size()) { - m_output_data_types.resize(outputIndex + 1, element::undefined); + m_output_data_types.resize(outputIndex + 1, element::dynamic); } m_output_data_types[outputIndex] = element_type; } /// \return Data type that will be set for input when original shape/type inference function is called. - /// If index inputIndex hasn't been set before, element::undefined will returned. Undefined means that + /// If index inputIndex hasn't been set before, element::dynamic will returned. Undefined means that /// the type from input tensor descriptor is used for a given index. const element::Type& get_origin_input_type(size_t inputIndex = 0) const { if (inputIndex >= m_input_data_types.size()) { - return element::undefined; + return element::dynamic; } return m_input_data_types[inputIndex]; } @@ -74,7 +74,7 @@ class OPENVINO_API TypeRelaxedBase { /// at inputIndex position are undefined. 
void set_origin_input_type(const element::Type& element_type, size_t inputIndex = 0) { if (inputIndex >= m_input_data_types.size()) { - m_input_data_types.resize(inputIndex + 1, element::undefined); + m_input_data_types.resize(inputIndex + 1, element::dynamic); } m_input_data_types[inputIndex] = element_type; } @@ -193,7 +193,7 @@ bool TypeRelaxed::evaluate(ov::TensorVector& outputs, const ov::TensorVe for (size_t i = 0; i < BaseOp::get_input_size(); ++i) { const auto expected_input_type = get_origin_input_type(i); - if (inputs[i].get_element_type() == expected_input_type || expected_input_type == element::undefined) { + if (inputs[i].get_element_type() == expected_input_type || expected_input_type == element::dynamic) { casted_inputs[i] = inputs[i]; } else { if (convert == nullptr) { @@ -214,7 +214,7 @@ bool TypeRelaxed::evaluate(ov::TensorVector& outputs, const ov::TensorVe ov::TensorVector original_outputs(BaseOp::get_output_size()); for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { const auto expected_output_type = get_overridden_output_type(i); - if (expected_output_type == element::undefined || expected_output_type == m_original_output_data_types[i]) { + if (expected_output_type == element::dynamic || expected_output_type == m_original_output_data_types[i]) { original_outputs[i] = outputs[i]; } else { auto partial_shape = BaseOp::get_output_partial_shape(i); @@ -230,7 +230,7 @@ bool TypeRelaxed::evaluate(ov::TensorVector& outputs, const ov::TensorVe for (size_t i = 0; i < BaseOp::get_output_size(); ++i) { const auto expected_output_type = get_overridden_output_type(i); - if (expected_output_type != element::undefined && + if (expected_output_type != element::dynamic && original_outputs[i].get_element_type() != expected_output_type) { if (convert == nullptr) { convert = std::make_shared(); @@ -298,7 +298,7 @@ std::shared_ptr TypeRelaxed::clone_with_new_inputs(const OutputVec OutputVector fake_new_inputs; for (size_t i = 0; i < BaseOp::get_input_size(); ++i) { auto origin_input_type = get_origin_input_type(i); - if (origin_input_type == element::undefined) + if (origin_input_type == element::dynamic) origin_input_type = BaseOp::get_input_element_type(i); fake_new_inputs.push_back(std::make_shared(origin_input_type, new_args[i].get_partial_shape())); } diff --git a/src/common/transformations/src/ov_ops/fully_connected.cpp b/src/common/transformations/src/ov_ops/fully_connected.cpp index 8bb5ba98511ce1..acebe028e19e22 100644 --- a/src/common/transformations/src/ov_ops/fully_connected.cpp +++ b/src/common/transformations/src/ov_ops/fully_connected.cpp @@ -24,7 +24,7 @@ FullyConnected::FullyConnected(const ov::Output& A, FullyConnected::FullyConnected(const ov::Output& A, const ov::Output& B, const ov::element::Type output_type) - : FullyConnected(A, B, std::make_shared(element::undefined, Shape{0}), output_type) {} + : FullyConnected(A, B, std::make_shared(element::dynamic, Shape{0}), output_type) {} bool FullyConnected::visit_attributes(ov::AttributeVisitor& visitor) { visitor.on_attribute("output_type", m_output_type); @@ -52,8 +52,7 @@ void FullyConnected::validate_and_infer_types() { auto out_shapes = ov::op::v0::shape_infer(&op, std::vector{get_input_partial_shape(0), get_input_partial_shape(1)}); - - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? 
get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/common/transformations/src/ov_ops/fully_connected_compressed.cpp b/src/common/transformations/src/ov_ops/fully_connected_compressed.cpp index 65d75c4fa17f65..a4719f3eb02398 100644 --- a/src/common/transformations/src/ov_ops/fully_connected_compressed.cpp +++ b/src/common/transformations/src/ov_ops/fully_connected_compressed.cpp @@ -35,7 +35,7 @@ FullyConnectedCompressed::FullyConnectedCompressed(const ov::Output& X, W, bias, weight_scales, - std::make_shared(element::undefined, Shape{0}), + std::make_shared(element::dynamic, Shape{0}), output_type) {} std::shared_ptr FullyConnectedCompressed::clone_with_new_inputs(const ov::OutputVector& new_args) const { diff --git a/src/common/transformations/src/ov_ops/fully_connected_quantized_legacy.cpp b/src/common/transformations/src/ov_ops/fully_connected_quantized_legacy.cpp index e8f6beec8cbb60..c7865b21a5427f 100644 --- a/src/common/transformations/src/ov_ops/fully_connected_quantized_legacy.cpp +++ b/src/common/transformations/src/ov_ops/fully_connected_quantized_legacy.cpp @@ -34,7 +34,7 @@ FullyConnectedQuantizedLegacy::FullyConnectedQuantizedLegacy(const ov::Output(element::undefined, Shape{0}), + std::make_shared(element::dynamic, Shape{0}), output_type) {} std::shared_ptr FullyConnectedQuantizedLegacy::clone_with_new_inputs(const ov::OutputVector& new_args) const { @@ -61,8 +61,7 @@ void FullyConnectedQuantizedLegacy::validate_and_infer_types() { auto out_shapes = ov::op::v0::shape_infer(&op, std::vector{get_input_partial_shape(0), get_input_partial_shape(1)}); - - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/common/transformations/src/ov_ops/glu.cpp b/src/common/transformations/src/ov_ops/glu.cpp index 5aac8489101bc5..8c0ae3c2850d2b 100644 --- a/src/common/transformations/src/ov_ops/glu.cpp +++ b/src/common/transformations/src/ov_ops/glu.cpp @@ -37,7 +37,7 @@ bool GLU::visit_attributes(ov::AttributeVisitor& visitor) { } void GLU::validate_and_infer_types() { - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type; const auto input_shapes = ov::util::get_node_input_partial_shapes(*this); const auto output_shapes = shape_infer(this, input_shapes); diff --git a/src/common/transformations/src/ov_ops/rms.cpp b/src/common/transformations/src/ov_ops/rms.cpp index 885494336a1c45..bce1d496289f5f 100644 --- a/src/common/transformations/src/ov_ops/rms.cpp +++ b/src/common/transformations/src/ov_ops/rms.cpp @@ -22,7 +22,7 @@ bool RMS::visit_attributes(ov::AttributeVisitor& visitor) { } void RMS::validate_and_infer_types() { - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? 
get_input_element_type(0) : m_output_type; set_output_type(0, output_type, get_input_partial_shape(0)); } diff --git a/src/common/transformations/src/transformations/common_optimizations/change_placeholder_types.cpp b/src/common/transformations/src/transformations/common_optimizations/change_placeholder_types.cpp index 96ef5a1a854f27..57f4a34fd217a1 100644 --- a/src/common/transformations/src/transformations/common_optimizations/change_placeholder_types.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/change_placeholder_types.cpp @@ -40,7 +40,7 @@ bool ov::pass::ChangePlaceholderTypes::run_on_model(const shared_ptr& m_params_with_custom_types.end()) continue; - element::Type legacy_type = element::undefined; + element::Type legacy_type = element::dynamic; bool all_castable_or_shapeof = true; for (const auto& target_input : param->get_output_target_inputs(0)) { all_castable_or_shapeof &= is_node_casts_to_float_or_shapeof(target_input.get_node()); @@ -56,7 +56,7 @@ bool ov::pass::ChangePlaceholderTypes::run_on_model(const shared_ptr& } // set OldApiMapElementType only if legacy_type is defined - if (legacy_type != element::undefined) { + if (legacy_type != element::dynamic) { set_old_api_map_element_type(param, ov::OldApiMapElementType(legacy_type)); } } diff --git a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp index c04260917ca55d..c8b81c3cbf92aa 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp @@ -28,7 +28,7 @@ ov::pass::ConvertConvertLike::ConvertConvertLike() { auto like = cvtlike->input_value(1); const element::Type& dest_type = like.get_element_type(); - if (dest_type == element::dynamic || dest_type == element::undefined) + if (dest_type == element::dynamic) return false; auto cvt = std::make_shared(cvtlike->input_value(0), dest_type); diff --git a/src/common/transformations/src/transformations/op_conversions/convert_convertpromotetypes.cpp b/src/common/transformations/src/transformations/op_conversions/convert_convertpromotetypes.cpp index 2e3ce342e6f63a..4e6dad1a1c0c7b 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_convertpromotetypes.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_convertpromotetypes.cpp @@ -14,7 +14,7 @@ ov::pass::ConvertConvertPromoteTypes::ConvertConvertPromoteTypes() { MATCHER_SCOPE(ConvertConvertPromoteTypes); auto has_static_defined_type = [](const Output& output) -> bool { - return !pattern::type_matches_any({element::dynamic, element::undefined})(output); + return !pattern::type_matches_any({element::dynamic})(output); }; auto convert_promote_types = pattern::wrap_type(has_static_defined_type); diff --git a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_compressed.cpp b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_compressed.cpp index 0c44dc27367144..18355d384c7a87 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_compressed.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_compressed.cpp @@ -157,7 +157,7 @@ ov::pass::ConvertFullyConnectedToFullyConnectedCompressed::ConvertFullyConnected } fc_input_zp = - with_zero_point ? 
fc_input_zp : std::make_shared(element::undefined, Shape{0}); + with_zero_point ? fc_input_zp : std::make_shared(element::dynamic, Shape{0}); ov::disable_constant_folding(fc_input_zp); result_nodes.push_back(fc_input_zp); diff --git a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp index 8b8ca7971c07f9..a5c542aa6e0c15 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_fc_to_quantized_legacy.cpp @@ -52,7 +52,7 @@ ov::pass::ConvertFCToFCQuantizedLegacy::ConvertFCToFCQuantizedLegacy() { ov::as_type_ptr(pattern_map.at(fully_connected_m).get_node_shared_ptr()); ov::NodeVector new_ops; - auto zp = std::make_shared(element::undefined, Shape{0}); + auto zp = std::make_shared(element::dynamic, Shape{0}); new_ops.push_back(zp); auto fc_quantized = diff --git a/src/common/transformations/src/transformations/rt_info/original_precision_attribute.cpp b/src/common/transformations/src/transformations/rt_info/original_precision_attribute.cpp index 79d08c1c693103..6986d18bacbd28 100644 --- a/src/common/transformations/src/transformations/rt_info/original_precision_attribute.cpp +++ b/src/common/transformations/src/transformations/rt_info/original_precision_attribute.cpp @@ -25,6 +25,6 @@ element::Type_t ov::get_original_precision(const std::shared_ptr& node) { if (it != rt_info.end()) { return it->second.as(); } else { - return element::Type_t::undefined; + return element::Type_t::dynamic; } } diff --git a/src/common/transformations/tests/type_relaxed_tests.cpp b/src/common/transformations/tests/type_relaxed_tests.cpp index 8331e80ae13ad6..5d1d61e0f514c0 100644 --- a/src/common/transformations/tests/type_relaxed_tests.cpp +++ b/src/common/transformations/tests/type_relaxed_tests.cpp @@ -170,10 +170,9 @@ TEST_F(TypeRelaxedTests, notSupportedTypeOverridePartially) { auto param1 = make_shared(some_type, shape); auto param2 = make_shared(overriden_type, ov::PartialShape{1}); auto op = ov::opset1::Reshape(param1, ov::op::TemporaryReplaceOutputType(param2, orig_type).get(), false); - auto relaxed_op = - make_shared>(op, - TypeVector{element::undefined, orig_type}, - TypeVector{}); + auto relaxed_op = make_shared>(op, + TypeVector{element::dynamic, orig_type}, + TypeVector{}); auto result = make_shared(relaxed_op); model = make_shared(ov::ResultVector{result}, ov::ParameterVector{param1, param2}); @@ -226,16 +225,16 @@ TEST_F(TypeRelaxedTests, setGetTypes) { ASSERT_EQ(element::u8, relaxed_op->get_output_element_type(0)); // internally set types for opset1::Add inference wasn't set when TypeRelaxed created, check it - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(0)); - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(1)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(0)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(1)); // if we access elements outside really existing inputs, it should give undefined as well - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(2)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(2)); // number of inputs for the operation node shouldn't change after that ASSERT_EQ(2, relaxed_op->get_input_size()); // similar checks for outputs - ASSERT_EQ(element::undefined, relaxed_op->get_overridden_output_type(0)); - 
ASSERT_EQ(element::undefined, relaxed_op->get_overridden_output_type(1)); + ASSERT_EQ(element::dynamic, relaxed_op->get_overridden_output_type(0)); + ASSERT_EQ(element::dynamic, relaxed_op->get_overridden_output_type(1)); ASSERT_EQ(1, relaxed_op->get_output_size()); // previous checks for input/output indices that are out of number of real inputs/outputs @@ -284,17 +283,17 @@ TEST_F(TypeRelaxedTests, setGetTypes) { ASSERT_EQ(1, relaxed_op->get_output_size()); // lets try to reset types to undefined again and make sure that all original types are restored - relaxed_op->set_origin_input_type(element::undefined, 0); - relaxed_op->set_origin_input_type(element::undefined, 1); - relaxed_op->set_overridden_output_type(element::undefined, 0); + relaxed_op->set_origin_input_type(element::dynamic, 0); + relaxed_op->set_origin_input_type(element::dynamic, 1); + relaxed_op->set_overridden_output_type(element::dynamic, 0); model->validate_nodes_and_infer_types(); ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(0)); ASSERT_EQ(element::u8, relaxed_op->get_input_element_type(1)); ASSERT_EQ(element::u8, relaxed_op->get_output_element_type(0)); - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(0)); - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(1)); - ASSERT_EQ(element::undefined, relaxed_op->get_origin_input_type(0)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(0)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(1)); + ASSERT_EQ(element::dynamic, relaxed_op->get_origin_input_type(0)); } ASSERT_EQ(4, model->get_ops().size()); diff --git a/src/common/transformations/tests/utils/convert_precision.cpp b/src/common/transformations/tests/utils/convert_precision.cpp index c1190a53fb17a3..188a31d6e3483f 100644 --- a/src/common/transformations/tests/utils/convert_precision.cpp +++ b/src/common/transformations/tests/utils/convert_precision.cpp @@ -943,7 +943,7 @@ TEST(TransformationTests, ConvertPrecision_LogicalNot) { tr = op; ASSERT_TRUE(tr != nullptr); ASSERT_EQ(tr->get_origin_input_type(0), element::boolean); - ASSERT_EQ(tr->get_origin_input_type(1), element::undefined); + ASSERT_EQ(tr->get_origin_input_type(1), element::dynamic); } TEST(TransformationTests, ConvertPrecision_Select) { diff --git a/src/core/dev_api/openvino/op/paged_attention.hpp b/src/core/dev_api/openvino/op/paged_attention.hpp index acc6e8b52b0f80..51c008d6060276 100644 --- a/src/core/dev_api/openvino/op/paged_attention.hpp +++ b/src/core/dev_api/openvino/op/paged_attention.hpp @@ -21,7 +21,7 @@ class OPENVINO_API PagedAttentionExtension : public ov::op::Op { void set_out_type(int index, const ov::element::Type& output_type); protected: - std::vector m_output_type = {ov::element::undefined, ov::element::undefined}; + std::vector m_output_type = {ov::element::dynamic, ov::element::dynamic}; }; } // namespace op diff --git a/src/core/dev_api/openvino/op/rms_norm.hpp b/src/core/dev_api/openvino/op/rms_norm.hpp index dce13ecf2692a7..d704feae68a881 100644 --- a/src/core/dev_api/openvino/op/rms_norm.hpp +++ b/src/core/dev_api/openvino/op/rms_norm.hpp @@ -27,7 +27,7 @@ class OPENVINO_API RMSNorm : public ov::op::Op { RMSNorm(const Output& data, const Output& axes, double epsilson, - const ov::element::Type& compute_type = ov::element::undefined); + const ov::element::Type& compute_type = ov::element::dynamic); /// \brief Constructs an RMSNorm operation with scaling. 
/// @@ -40,7 +40,7 @@ class OPENVINO_API RMSNorm : public ov::op::Op { const Output& axes, const Output& scale, double epsilson, - const ov::element::Type& compute_type = ov::element::undefined); + const ov::element::Type& compute_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -51,7 +51,7 @@ class OPENVINO_API RMSNorm : public ov::op::Op { private: double m_epsilon{0}; - ov::element::Type m_compute_type{ov::element::undefined}; + ov::element::Type m_compute_type{ov::element::dynamic}; }; } // namespace internal diff --git a/src/core/include/openvino/core/type/element_type.hpp b/src/core/include/openvino/core/type/element_type.hpp index b454d886107e7c..7878d45822a905 100644 --- a/src/core/include/openvino/core/type/element_type.hpp +++ b/src/core/include/openvino/core/type/element_type.hpp @@ -37,33 +37,34 @@ namespace element { /// \brief Enum to define possible element types /// \ingroup ov_element_cpp_api enum class Type_t { - undefined, //!< Undefined element type - dynamic, //!< Dynamic element type - boolean, //!< boolean element type - bf16, //!< bf16 element type - f16, //!< f16 element type - f32, //!< f32 element type - f64, //!< f64 element type - i4, //!< i4 element type - i8, //!< i8 element type - i16, //!< i16 element type - i32, //!< i32 element type - i64, //!< i64 element type - u1, //!< binary element type - u2, //!< u2 element type - u3, //!< u3 element type - u4, //!< u4 element type - u6, //!< u6 element type - u8, //!< u8 element type - u16, //!< u16 element type - u32, //!< u32 element type - u64, //!< u64 element type - nf4, //!< nf4 element type - f8e4m3, //!< f8e4m3 element type - f8e5m2, //!< f8e5m2 element type - string, //!< string element type - f4e2m1, //!< f4e2m1 element type - f8e8m0, //!< f8e8m0 element type + dynamic, //!< Dynamic element type + undefined OPENVINO_ENUM_DEPRECATED("This type is deprecated and will be removed in 2026.0. Use dynamic instead.") = + dynamic, //!< Undefined element type + boolean, //!< boolean element type + bf16, //!< bf16 element type + f16, //!< f16 element type + f32, //!< f32 element type + f64, //!< f64 element type + i4, //!< i4 element type + i8, //!< i8 element type + i16, //!< i16 element type + i32, //!< i32 element type + i64, //!< i64 element type + u1, //!< binary element type + u2, //!< u2 element type + u3, //!< u3 element type + u4, //!< u4 element type + u6, //!< u6 element type + u8, //!< u8 element type + u16, //!< u16 element type + u32, //!< u32 element type + u64, //!< u64 element type + nf4, //!< nf4 element type + f8e4m3, //!< f8e4m3 element type + f8e5m2, //!< f8e5m2 element type + string, //!< string element type + f4e2m1, //!< f4e2m1 element type + f8e8m0, //!< f8e8m0 element type }; /// \brief Base class to define element type @@ -132,14 +133,17 @@ class OPENVINO_API Type { std::string to_string() const; private: - Type_t m_type{Type_t::undefined}; + Type_t m_type{Type_t::dynamic}; }; using TypeVector = std::vector; /// \brief undefined element type /// \ingroup ov_element_cpp_api +OPENVINO_SUPPRESS_DEPRECATED_START +OPENVINO_DEPRECATED("This type is deprecated and will be removed in 2026.0. 
Use dynamic instead.") inline constexpr Type undefined(Type_t::undefined); +OPENVINO_SUPPRESS_DEPRECATED_END /// \brief dynamic element type /// \ingroup ov_element_cpp_api inline constexpr Type dynamic(Type_t::dynamic); diff --git a/src/core/include/openvino/op/constant.hpp b/src/core/include/openvino/op/constant.hpp index 6247c66b820502..ce235c6fe1d567 100644 --- a/src/core/include/openvino/op/constant.hpp +++ b/src/core/include/openvino/op/constant.hpp @@ -162,7 +162,6 @@ class OPENVINO_API Constant : public Op { case Type_t::f8e8m0: fill_data(value); break; - case Type_t::undefined: case Type_t::dynamic: OPENVINO_THROW("unsupported type"); } @@ -750,7 +749,6 @@ class OPENVINO_API Constant : public Op { case Type_t::f8e8m0: write_buffer(source); break; - case Type_t::undefined: case Type_t::dynamic: OPENVINO_THROW("unsupported type"); } diff --git a/src/core/reference/include/openvino/reference/concat.hpp b/src/core/reference/include/openvino/reference/concat.hpp index b0a50ff77adcf6..44195f3b1ca226 100644 --- a/src/core/reference/include/openvino/reference/concat.hpp +++ b/src/core/reference/include/openvino/reference/concat.hpp @@ -17,7 +17,7 @@ void concat(const std::vector& args, const Shape& out_shape, int64_t concatenation_axis, size_t elem_size, - const ov::element::Type& elem_type = ov::element::Type_t::undefined); + const ov::element::Type& elem_type = ov::element::Type_t::dynamic); } // namespace reference } // namespace ov diff --git a/src/core/shape_inference/include/utils.hpp b/src/core/shape_inference/include/utils.hpp index 7d28309c489361..ee2f94a9b76e10 100644 --- a/src/core/shape_inference/include/utils.hpp +++ b/src/core/shape_inference/include/utils.hpp @@ -331,7 +331,7 @@ inline element::Type get_input_const_element_type(const ov::Node* const op, size } else if (const auto& constant = ov::util::get_constant_from_source(op->input_value(port))) { return constant->get_element_type(); } else { - return element::undefined; + return element::dynamic; } } diff --git a/src/core/src/bound_evaluate.cpp b/src/core/src/bound_evaluate.cpp index da1616e003a81c..134f326cc5436e 100644 --- a/src/core/src/bound_evaluate.cpp +++ b/src/core/src/bound_evaluate.cpp @@ -72,7 +72,7 @@ bool are_equal(const ov::Tensor& lhs, const ov::Tensor& rhs) { } bool is_type_allocable(const element::Type& type) { - return type != element::undefined && type.is_static(); + return type != element::dynamic && type.is_static(); } /** diff --git a/src/core/src/constant_fold_utils.cpp b/src/core/src/constant_fold_utils.cpp index f8d81bd287631e..ea361846b61467 100644 --- a/src/core/src/constant_fold_utils.cpp +++ b/src/core/src/constant_fold_utils.cpp @@ -167,7 +167,7 @@ std::shared_ptr ov::util::convert_to_supported_precision(Node* const n for (size_t i = 0; i < num_inputs; i++) { const auto& origin_type = type_relaxed->get_origin_input_type(i); origin_input_types.push_back(origin_type); - if (origin_type == element::undefined && has_original_input_precision(node->input(i))) { + if ((origin_type == element::dynamic) && has_original_input_precision(node->input(i))) { type_relaxed->set_origin_input_type(get_original_input_precision(node->input(i)), i); } } diff --git a/src/core/src/node.cpp b/src/core/src/node.cpp index 642b233fa5526c..a1da08c73106c2 100644 --- a/src/core/src/node.cpp +++ b/src/core/src/node.cpp @@ -726,7 +726,7 @@ bool ov::Node::constant_fold(OutputVector& output_values, const OutputVector& in TensorVector output_tensors; for (const auto& output : outputs()) { const auto& et = 
output.get_element_type(); - if (et != element::undefined && et.is_static()) { + if (et.is_static()) { output_tensors.emplace_back(output); } else { output_tensors.emplace_back(); diff --git a/src/core/src/op/paged_attention.cpp b/src/core/src/op/paged_attention.cpp index 4d2cdc3e1fdac2..11d3769dec6ec8 100644 --- a/src/core/src/op/paged_attention.cpp +++ b/src/core/src/op/paged_attention.cpp @@ -206,14 +206,13 @@ void PagedAttentionExtension::validate_and_infer_types() { out_ps[1] = Dimension::dynamic(); } } - - if (m_output_type[0] == ov::element::undefined) { + if (m_output_type[0].is_dynamic()) { set_output_type(0, get_input_element_type(0), out_ps); } else { set_output_type(0, m_output_type[0], out_ps); } - if (m_output_type[1] == ov::element::undefined) { + if (m_output_type[1].is_dynamic()) { set_output_type(1, get_input_element_type(0), {Dimension::dynamic()}); } else { set_output_type(1, m_output_type[1], {Dimension::dynamic()}); diff --git a/src/core/src/op/range.cpp b/src/core/src/op/range.cpp index f4d9a8d73caf51..575f04f9500154 100644 --- a/src/core/src/op/range.cpp +++ b/src/core/src/op/range.cpp @@ -167,11 +167,11 @@ void Range::validate_and_infer_types() { result_et != element::boolean, "Element type for start, stop, and step, must not be boolean."); - NODE_VALIDATION_CHECK(this, - result_et != element::Type_t::u1 && result_et != element::Type_t::i4 && - result_et != element::Type_t::u4 && result_et != element::Type_t::undefined, - "Internal OpenVINO error: unsupported element type: ", - result_et); + NODE_VALIDATION_CHECK( + this, + result_et != element::Type_t::u1 && result_et != element::Type_t::i4 && result_et != element::Type_t::u4, + "Internal OpenVINO error: unsupported element type: ", + result_et); if (result_et == element::Type_t::dynamic) { set_output_type(0, result_et, ov::PartialShape::dynamic(1)); diff --git a/src/core/src/op/type_relaxed.cpp b/src/core/src/op/type_relaxed.cpp index 41dde8f43689f5..8a7f5f515e792c 100644 --- a/src/core/src/op/type_relaxed.cpp +++ b/src/core/src/op/type_relaxed.cpp @@ -24,7 +24,7 @@ void TypeRelaxedBase::remember_input_data_types(Node& node, element::TypeVector& // Reset input data types to m_output_data_type. 
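The call-site updates above (node.cpp, paged_attention.cpp, and the type_relaxed.cpp changes that follow) lean on the fact that, with undefined folded into dynamic, comparing against the sentinel value and calling is_static()/is_dynamic() say the same thing. Below is a minimal sketch of that equivalence, assuming the patched default constructor of ov::element::Type; the function and variable names are illustrative only.

#include <cassert>

#include "openvino/core/type/element_type.hpp"

// Illustrative only: local names are assumptions for the sketch.
void element_type_sentinel_sketch() {
    ov::element::Type unset;                       // default-constructed: Type_t::dynamic after this patch
    ov::element::Type concrete = ov::element::f32;

    // Old "x == element::undefined" checks can be written either way now.
    assert(unset == ov::element::dynamic);
    assert(unset.is_dynamic());
    assert(!unset.is_static());

    // Old "x != element::undefined" checks become is_static().
    assert(concrete.is_static());
    assert(concrete != ov::element::dynamic);
}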
for (size_t i = 0; i < node.get_input_size(); ++i) { auto origin_input_type = get_origin_input_type(i); - if (origin_input_type != element::undefined) { + if (origin_input_type.is_static()) { ov::descriptor::set_tensor_type(node.get_input_tensor(i), origin_input_type, node.get_input_partial_shape(i)); @@ -50,7 +50,7 @@ void TypeRelaxedBase::restore_input_data_types(Node& node, const element::TypeVe // Override (some) output types for (size_t i = 0; i < node.get_output_size(); ++i) { auto overridden_output_type = get_overridden_output_type(i); - if (overridden_output_type != element::undefined) { + if (overridden_output_type.is_static()) { node.set_output_type(i, overridden_output_type, node.get_output_partial_shape(i)); } } @@ -136,11 +136,11 @@ std::unordered_map> convert_input_type auto& input = inputs[i]; const auto& fake_type = input.get_element_type(); const auto& original_type = types[i]; - if (original_type == fake_type || original_type == element::undefined) + if (original_type == fake_type || original_type == element::dynamic) continue; // this input type wasn't changed if (parameter == nullptr || convert == nullptr) { - parameter = std::make_shared(element::undefined, PartialShape()); - convert = std::make_shared(parameter, element::undefined); + parameter = std::make_shared(element::dynamic, PartialShape()); + convert = std::make_shared(parameter, element::dynamic); } ov::op::convert_types(parameter, convert, input, original_type); original_inputs[i] = {parameter->get_output_tensor(0).get_lower_value(), @@ -190,8 +190,8 @@ bool convert_outputs_to_fake_type(ov::TensorVector& outputs, ov::TensorVector& o if (fake_type == original_type) continue; if (parameter == nullptr || convert == nullptr) { - parameter = std::make_shared(element::undefined, PartialShape()); - convert = std::make_shared(parameter, element::undefined); + parameter = std::make_shared(element::dynamic, PartialShape()); + convert = std::make_shared(parameter, element::dynamic); } reset_convert(parameter, convert, original_outputs[i], fake_type, is_upper); TensorVector local_outputs = {outputs[i]}; diff --git a/src/core/src/pass/serialize.cpp b/src/core/src/pass/serialize.cpp index d468e03db967fa..dac81ccfe9b8de 100644 --- a/src/core/src/pass/serialize.cpp +++ b/src/core/src/pass/serialize.cpp @@ -730,7 +730,6 @@ std::string get_opset_name(const ov::Node* n) { std::string get_precision_name(const ov::element::Type& elem_type) { switch (elem_type) { - case ::ov::element::Type_t::undefined: case ::ov::element::Type_t::dynamic: return "UNSPECIFIED"; case ::ov::element::Type_t::f16: diff --git a/src/core/src/pass/visualize_tree.cpp b/src/core/src/pass/visualize_tree.cpp index b74a8fcebe388a..3d8fd760d9f37b 100644 --- a/src/core/src/pass/visualize_tree.cpp +++ b/src/core/src/pass/visualize_tree.cpp @@ -446,7 +446,6 @@ static std::string get_value(const std::shared_ptr& consta std::stringstream ss; ss << "[ "; switch (constant->get_output_element_type(0)) { - case ov::element::Type_t::undefined: case ov::element::Type_t::dynamic: case ov::element::Type_t::u1: case ov::element::Type_t::u2: diff --git a/src/core/src/preprocess/preprocess_steps_impl.cpp b/src/core/src/preprocess/preprocess_steps_impl.cpp index b37f5211bcc514..f8db306188ed57 100644 --- a/src/core/src/preprocess/preprocess_steps_impl.cpp +++ b/src/core/src/preprocess/preprocess_steps_impl.cpp @@ -699,7 +699,7 @@ void PostStepsList::add_convert_impl(const element::Type& type) { return std::make_tuple(node, false); } OPENVINO_ASSERT( - !t.is_dynamic() && t != 
element::undefined, + t.is_static(), "Can't convert to dynamic/unknown element type, consider using of InputTensorInfo::set_element_type"); auto convert = std::make_shared(node, t); return std::make_tuple(Output(convert), true); diff --git a/src/core/src/type/element_type.cpp b/src/core/src/type/element_type.cpp index bd61d1c985ea97..d6a92945f99055 100644 --- a/src/core/src/type/element_type.cpp +++ b/src/core/src/type/element_type.cpp @@ -45,16 +45,6 @@ struct TypeInfo { return m_cname != nullptr && m_type_name != nullptr; } }; -; - -constexpr TypeInfo type_info(size_t bitwidth, - bool is_real, - bool is_signed, - bool is_quantized, - const char* cname, - const char* type_name) { - return {bitwidth, is_real, is_signed, is_quantized, cname, type_name, nullptr, 0}; -} template constexpr TypeInfo type_info(size_t bitwidth, @@ -67,7 +57,7 @@ constexpr TypeInfo type_info(size_t bitwidth, return {bitwidth, is_real, is_signed, is_quantized, cname, type_name, aliases.data(), aliases.size()}; } -constexpr auto undefined_aliases = util::make_array("UNSPECIFIED"); +constexpr auto dynamic_aliases = util::make_array("UNSPECIFIED", "undefined"); constexpr auto boolean_aliases = util::make_array("BOOL"); constexpr auto bf16_aliases = util::make_array("BF16"); constexpr auto f16_aliases = util::make_array("FP16"); @@ -95,14 +85,7 @@ constexpr auto f4e2m1_aliases = util::make_array("F4E2M1"); constexpr auto f8e8m0_aliases = util::make_array("F8E8M0"); static constexpr std::array types_info = { - type_info(std::numeric_limits::max(), - false, - false, - false, - "undefined", - "undefined", - undefined_aliases), // undefined - type_info(0, false, false, false, "dynamic", "dynamic"), // dynamic + type_info(0, false, false, false, "dynamic", "dynamic", dynamic_aliases), // dynamic type_info(8, false, true, false, "char", "boolean", boolean_aliases), // boolean type_info(16, true, true, false, "bfloat16", "bf16", bf16_aliases), // bf16 type_info(16, true, true, false, "float16", "f16", f16_aliases), // f16 @@ -319,6 +302,7 @@ size_t Type::bitwidth() const { namespace ov { template <> OPENVINO_API EnumNames& EnumNames::get() { + OPENVINO_SUPPRESS_DEPRECATED_START static auto enum_names = EnumNames("element::Type_t", {{"undefined", element::Type_t::undefined}, {"dynamic", element::Type_t::dynamic}, @@ -347,6 +331,7 @@ OPENVINO_API EnumNames& EnumNames::get() { {"string", element::Type_t::string}, {"f4e2m1", element::Type_t::f4e2m1}, {"f8e8m0", element::Type_t::f8e8m0}}); + OPENVINO_SUPPRESS_DEPRECATED_END return enum_names; } diff --git a/src/core/tests/element_type.cpp b/src/core/tests/element_type.cpp index 3afc7469fee454..a5c56294f62a34 100644 --- a/src/core/tests/element_type.cpp +++ b/src/core/tests/element_type.cpp @@ -77,9 +77,10 @@ TEST(element_type, from_string) { EXPECT_EQ(element::Type("F4E2M1"), element::f4e2m1); EXPECT_EQ(element::Type("f8e8m0"), element::f8e8m0); EXPECT_EQ(element::Type("F8E8M0"), element::f8e8m0); - + OPENVINO_SUPPRESS_DEPRECATED_START EXPECT_EQ(element::Type("undefined"), element::undefined); EXPECT_EQ(element::Type("UNSPECIFIED"), element::undefined); + OPENVINO_SUPPRESS_DEPRECATED_END EXPECT_EQ(element::Type("dynamic"), element::dynamic); EXPECT_THROW(element::Type("some_string"), ov::Exception); diff --git a/src/core/tests/pass/constant_folding.cpp b/src/core/tests/pass/constant_folding.cpp index 51b49798c40946..874d78703ece51 100644 --- a/src/core/tests/pass/constant_folding.cpp +++ b/src/core/tests/pass/constant_folding.cpp @@ -3925,7 +3925,7 @@ TEST(constant_folding, 
gather_with_dynamic_shapes_in_data_input) { } TEST(constant_folding, parameter_with_unspecified_type_from_host_tensor) { - auto param = std::make_shared(element::undefined, ov::PartialShape{}); + auto param = std::make_shared(element::dynamic, ov::PartialShape{}); auto res = std::make_shared(param); auto model = std::make_shared(ov::ResultVector{res}, ov::ParameterVector{param}); EXPECT_NO_THROW(run_constant_folding(model)); diff --git a/src/core/tests/preprocess.cpp b/src/core/tests/preprocess.cpp index 96f23149b6a68d..fe6fa1e91a4589 100644 --- a/src/core/tests/preprocess.cpp +++ b/src/core/tests/preprocess.cpp @@ -184,9 +184,9 @@ TEST(pre_post_process, preprocess_assert_input_without_index) { TEST(pre_post_process, convert_element_type_from_unknown) { auto f = create_simple_function(element::i32, Shape{1, 3, 224, 224}); auto p = PrePostProcessor(f); - ASSERT_THROW(p.input().preprocess().convert_element_type(element::dynamic).convert_element_type(element::i32); - f = p.build(); - , ov::AssertFailure); + + ASSERT_NO_THROW(p.input().preprocess().convert_element_type(element::dynamic).convert_element_type(element::i32); + f = p.build();); } TEST(pre_post_process, scale_not_float) { diff --git a/src/core/tests/type_prop/range.cpp b/src/core/tests/type_prop/range.cpp index ef82b17636da8c..09352d995b2eb9 100644 --- a/src/core/tests/type_prop/range.cpp +++ b/src/core/tests/type_prop/range.cpp @@ -508,7 +508,7 @@ TEST(type_prop, range_v4_invalid_inputs_elem_type) { // invalid element type for step scalar try { auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::undefined, Shape{}); + auto stop = make_shared(element::dynamic, Shape{}); auto step = make_shared(element::boolean, Shape{}); auto range = make_shared(start, stop, step, element::i32); FAIL() << "Exception expected"; diff --git a/src/frontends/ir/src/ir_deserializer.cpp b/src/frontends/ir/src/ir_deserializer.cpp index 33e77d147557b0..549cbc3d1a41f3 100644 --- a/src/frontends/ir/src/ir_deserializer.cpp +++ b/src/frontends/ir/src/ir_deserializer.cpp @@ -756,7 +756,7 @@ ov::GenericLayerParams ov::XmlDeserializer::parse_generic_params(const pugi::xml port.dims.emplace_back(dim); } - ov::element::Type type(ov::element::Type_t::undefined); + ov::element::Type type(ov::element::Type_t::dynamic); // Input port hasn't precision if (!input) { const std::string& preStr = pugixml::get_str_attr(parentNode, "precision"); @@ -834,9 +834,7 @@ std::shared_ptr ov::XmlDeserializer::create_node(const std::vector(inputs[i].get_node_shared_ptr()) && - ov::element::Type_t::undefined == inputs[i].get_element_type()) + if (is_type(inputs[i].get_node_shared_ptr()) && (inputs[i].get_element_type().is_dynamic())) OPENVINO_THROW(params.type, " layer ", params.name, diff --git a/src/frontends/onnx/frontend/src/core/node.cpp b/src/frontends/onnx/frontend/src/core/node.cpp index b11dd022976f6b..184faf38fa42bc 100644 --- a/src/frontends/onnx/frontend/src/core/node.cpp +++ b/src/frontends/onnx/frontend/src/core/node.cpp @@ -245,7 +245,7 @@ std::shared_ptr Node::Impl::get_attribute_as_constant(cons T default_value, ov::element::Type type) const { const auto value = get_attribute_value(name, default_value); - return std::make_shared(type == ov::element::undefined ? ov::element::from() : type, + return std::make_shared(type == ov::element::dynamic ? 
ov::element::from() : type, ov::Shape{}, value); } @@ -254,7 +254,7 @@ template std::shared_ptr Node::Impl::get_attribute_as_constant(const std::string& name, ov::element::Type type) const { const auto value = get_attribute_value(name); - return std::make_shared(type == ov::element::undefined ? ov::element::from() : type, + return std::make_shared(type == ov::element::dynamic ? ov::element::from() : type, ov::Shape{}, value); } @@ -271,9 +271,7 @@ std::shared_ptr Node::Impl::get_attribute_as_constant>(name); - return ov::op::v0::Constant::create(type == ov::element::undefined ? ov::element::i64 : type, - {value.size()}, - value); + return ov::op::v0::Constant::create(type == ov::element::dynamic ? ov::element::i64 : type, {value.size()}, value); } template <> @@ -288,9 +286,7 @@ std::shared_ptr Node::Impl::get_attribute_as_constant(cons std::vector default_value, ov::element::Type type) const { const auto value = get_attribute_value>(name, default_value); - return ov::op::v0::Constant::create(type != ov::element::undefined ? type : ov::element::i64, - {value.size()}, - value); + return ov::op::v0::Constant::create(type != ov::element::dynamic ? type : ov::element::i64, {value.size()}, value); } Node::Node(const NodeProto& node_proto, Graph* graph) diff --git a/src/frontends/onnx/frontend/src/input_model.cpp b/src/frontends/onnx/frontend/src/input_model.cpp index 236fa19a9bb79d..d8d464862207cb 100644 --- a/src/frontends/onnx/frontend/src/input_model.cpp +++ b/src/frontends/onnx/frontend/src/input_model.cpp @@ -241,7 +241,7 @@ ov::element::Type InputModel::get_element_type(const ov::frontend::Place::Ptr& p return m_editor->get_input_type(tensor_name); } // now we can return the concrete element type only for model inputs - return ov::element::undefined; + return ov::element::dynamic; } std::shared_ptr InputModel::decode() { diff --git a/src/frontends/onnx/frontend/src/op/clip.cpp b/src/frontends/onnx/frontend/src/op/clip.cpp index 972f65bf3a6085..be8c9430f01a8e 100644 --- a/src/frontends/onnx/frontend/src/op/clip.cpp +++ b/src/frontends/onnx/frontend/src/op/clip.cpp @@ -57,7 +57,6 @@ std::shared_ptr get_constant_lowest_of_type(ov::element::T OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u32); OPENVINO_TYPE_TO_LOWEST_CONST(ov::element::u64); - case ov::element::undefined: case ov::element::dynamic: default: return nullptr; diff --git a/src/frontends/paddle/src/op/fill_any_like.cpp b/src/frontends/paddle/src/op/fill_any_like.cpp index ac1acf77af4ee3..d629f10af25323 100644 --- a/src/frontends/paddle/src/op/fill_any_like.cpp +++ b/src/frontends/paddle/src/op/fill_any_like.cpp @@ -12,9 +12,9 @@ namespace paddle { namespace op { NamedOutputs fill_any_like(const NodeContext& node) { auto x = node.get_input("X"); - auto dtype = node.get_attribute("dtype", element::undefined); + auto dtype = node.get_attribute("dtype", element::dynamic); const auto value = node.get_attribute("value"); - if (dtype == element::undefined) { + if (dtype.is_dynamic()) { // when type does not define, use the input type dtype = x.get_element_type(); } diff --git a/src/frontends/tensorflow/src/op/var_handle.cpp b/src/frontends/tensorflow/src/op/var_handle.cpp index 667828686b1646..aed80886ed7771 100644 --- a/src/frontends/tensorflow/src/op/var_handle.cpp +++ b/src/frontends/tensorflow/src/op/var_handle.cpp @@ -78,7 +78,7 @@ OutputVector translate_varhandle_op(const NodeContext& node) { auto var_index = model->get_variables_index(); auto ov_type = node.get_attribute("dtype"); std::shared_ptr const_node; - if (ov_type == 
element::undefined) { + if (ov_type == element::dynamic) { const_node = std::make_shared(); } else if (var_index.get() == nullptr) { auto ov_shape = node.get_attribute("shape").get_shape(); diff --git a/src/frontends/tensorflow_common/include/helper_ops/unsupported_constant.hpp b/src/frontends/tensorflow_common/include/helper_ops/unsupported_constant.hpp index 2f4bce2f54b06c..3dfcd9414ebc76 100644 --- a/src/frontends/tensorflow_common/include/helper_ops/unsupported_constant.hpp +++ b/src/frontends/tensorflow_common/include/helper_ops/unsupported_constant.hpp @@ -29,7 +29,7 @@ class UnsupportedConstant : public InternalOperation { } void validate_and_infer_types() override { - set_output_type(0, ov::element::undefined, ov::PartialShape::dynamic()); + set_output_type(0, ov::element::dynamic, ov::PartialShape::dynamic()); } }; diff --git a/src/frontends/tensorflow_common/src/op/const.cpp b/src/frontends/tensorflow_common/src/op/const.cpp index d132af699d454d..4029a4a2bfa8d7 100644 --- a/src/frontends/tensorflow_common/src/op/const.cpp +++ b/src/frontends/tensorflow_common/src/op/const.cpp @@ -20,8 +20,7 @@ OutputVector translate_const_op(const NodeContext& node) { auto ov_type = node.get_attribute_as_any("dtype"); std::shared_ptr const_node; - if (!ov_type.is() || ov_type.as() == ov::element::dynamic || - ov_type.as() == ov::element::undefined) { + if (!ov_type.is() || ov_type.as() == ov::element::dynamic) { const_node = std::make_shared(); } else { auto tensor = node.get_attribute("value"); diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 632dc4af6d426c..88f0989007c522 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -54,15 +54,14 @@ class ViewTensor : public ITensor { m_strides_once{}, m_ptr{ptr} { OPENVINO_ASSERT(shape_size(shape) == 0 || m_ptr != nullptr); - OPENVINO_ASSERT(m_element_type != element::undefined && m_element_type.is_static()); + OPENVINO_ASSERT(m_element_type.is_static()); } void* data(const element::Type& element_type) const override { - if (element_type != element::undefined && element_type != element::dynamic && - (element_type.bitwidth() != get_element_type().bitwidth() || - element_type.is_real() != get_element_type().is_real() || - (element_type == element::string && get_element_type() != element::string) || - (element_type != element::string && get_element_type() == element::string))) { + if (element_type.is_static() && (element_type.bitwidth() != get_element_type().bitwidth() || + element_type.is_real() != get_element_type().is_real() || + (element_type == element::string && get_element_type() != element::string) || + (element_type != element::string && get_element_type() == element::string))) { OPENVINO_THROW("Tensor data with element type ", get_element_type(), ", is not representable as pointer to ", diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 8ec7b0b3b8d18f..fe9bb0011d6641 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -238,7 +238,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (hasHardwareSupport(ov::element::f16)) { inferencePrecision = ov::element::f16; } - } else if (one_of(prec, element::f32, element::undefined)) { + } else if (one_of(prec, element::f32, element::dynamic)) { inferencePrecision = prec; } else { OPENVINO_THROW("invalid value"); @@ -402,7 +402,7 @@ void Config::readProperties(const ov::AnyMap& prop, const 
ModelType modelType) { inferencePrecision = ov::element::bf16; } } else { - inferencePrecision = ov::element::undefined; + inferencePrecision = ov::element::dynamic; } } // enable ACL fast math in PERFORMANCE mode diff --git a/src/plugins/intel_cpu/src/cpu_tensor.cpp b/src/plugins/intel_cpu/src/cpu_tensor.cpp index 548bc2755c5ccc..1644d26b8799bd 100644 --- a/src/plugins/intel_cpu/src/cpu_tensor.cpp +++ b/src/plugins/intel_cpu/src/cpu_tensor.cpp @@ -85,7 +85,7 @@ void Tensor::update_strides() const { } void* Tensor::data(const element::Type& element_type) const { - if (element_type != element::undefined && element_type != element::dynamic) { + if (element_type.is_static()) { OPENVINO_ASSERT(element_type == get_element_type(), "Tensor data with element type ", get_element_type(), diff --git a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp index ea36ca54cf7e27..bba89bab199a4a 100644 --- a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp +++ b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp @@ -80,7 +80,7 @@ std::optional DnnlExtensionUtils::ElementTypeToDataType return memory::data_type::f8_e5m2; case ov::element::f4e2m1: return memory::data_type::f4_e2m1; - case ov::element::undefined: + case ov::element::dynamic: return memory::data_type::undef; default: { return {}; @@ -128,7 +128,7 @@ ov::element::Type DnnlExtensionUtils::DataTypeToElementType(const dnnl::memory:: case memory::data_type::f4_e2m1: return ov::element::f4e2m1; case memory::data_type::undef: - return ov::element::undefined; + return ov::element::dynamic; default: { OPENVINO_THROW("Unsupported data type."); } diff --git a/src/plugins/intel_cpu/src/edge.cpp b/src/plugins/intel_cpu/src/edge.cpp index 1c8fb0919103f0..62d2aea9fee524 100644 --- a/src/plugins/intel_cpu/src/edge.cpp +++ b/src/plugins/intel_cpu/src/edge.cpp @@ -242,8 +242,7 @@ Edge::ReorderStatus Edge::needReorder() { bool optimized = false; auto inputPortDesc = getInputPortDesc(); auto outPortDesc = getOutputPortDesc(); - - if (inputPortDesc->getMemDesc()->getPrecision() == element::undefined) { + if (inputPortDesc->getMemDesc()->getPrecision() == element::dynamic) { return ReorderStatus::No; } @@ -467,7 +466,7 @@ const MemoryDesc& Edge::getOriginalDesc() const { *this, " must be accessed through the memory object"); - if (getInputDesc().getPrecision() == element::undefined) { + if (getInputDesc().getPrecision() == element::dynamic) { return getInputDesc(); } diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 6c02870fd2185e..ce9d267969d125 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -1967,8 +1967,7 @@ void Graph::EnforceInferencePrecision() { CPU_DEBUG_CAP_ENABLE(EnforceInferPrcDebug inferPrecDebug); const auto inferPrec = getConfig().inferencePrecision; - - if (one_of(inferPrec, element::f32, element::undefined, ov::element::f16)) { + if (one_of(inferPrec, element::f32, element::dynamic, ov::element::f16)) { return; // nothing to do, only precision reduction is currently allowed } #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) diff --git a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h index dac07c252e902a..687cc922e89a49 100644 --- a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h +++ b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h @@ -37,7 +37,7 @@ class EmptyMemoryDesc : public
MemoryDesc { }; ov::element::Type getPrecision() const override { - return ov::element::undefined; + return ov::element::dynamic; } size_t getOffsetPadding() const override { @@ -57,7 +57,7 @@ class EmptyMemoryDesc : public MemoryDesc { } MemoryDescPtr cloneWithNewPrecision(const ov::element::Type prec) const override { - OPENVINO_ASSERT(prec == ov::element::undefined, + OPENVINO_ASSERT(prec == ov::element::dynamic, "Clone an empty memory desc with defined precision: ", prec, " is prohibited"); diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index 2614b6af85db5d..57c260c62b721d 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -1573,7 +1573,7 @@ std::vector Node::getOutputPrecisions() const { ov::element::Type Node::getRuntimePrecision() const { // Base implementation consider precision only on data path and // assumes it is placed on 0-th port (which is true for almost all layers) - ov::element::Type runtimePrecision = ov::element::undefined; + ov::element::Type runtimePrecision = ov::element::dynamic; auto inputPrecisions = getInputPrecisions(); if (!inputPrecisions.empty()) { runtimePrecision = inputPrecisions[0]; @@ -1995,7 +1995,7 @@ void Node::addSupportedPrimDesc(const std::vector& inPortConfi for (size_t i = 0; i < inPortConfigs.size(); i++) { auto shape = inPortConfigs[i].shape.getRank() == 0 ? getInputShapeAtPort(i) : inPortConfigs[i].shape; auto prc = - inPortConfigs[i].prc == ov::element::undefined ? getOriginalInputPrecisionAtPort(i) : inPortConfigs[i].prc; + (inPortConfigs[i].prc == ov::element::dynamic) ? getOriginalInputPrecisionAtPort(i) : inPortConfigs[i].prc; if (!fill_port(inPortConfigs[i], shape, prc, config.inConfs)) { return; } @@ -2003,7 +2003,7 @@ void Node::addSupportedPrimDesc(const std::vector& inPortConfi for (size_t i = 0; i < outPortConfigs.size(); i++) { auto dims = outPortConfigs[i].shape.getRank() == 0 ? getOutputShapeAtPort(i) : outPortConfigs[i].shape; - auto prc = outPortConfigs[i].prc == ov::element::undefined ? getOriginalOutputPrecisionAtPort(i) + auto prc = (outPortConfigs[i].prc == ov::element::dynamic) ? 
getOriginalOutputPrecisionAtPort(i) : outPortConfigs[i].prc; if (!fill_port(outPortConfigs[i], dims, prc, config.outConfs)) { return; diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 6ac03b91b39934..9acccf9c223433 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -60,7 +60,7 @@ class PortConfigurator { inPlace(inPlace) {} PortConfigurator(ov::intel_cpu::LayoutType blockedDescType, - ov::element::Type prc = ov::element::undefined, + ov::element::Type prc = ov::element::dynamic, bool constant = false, int inPlace = -1) : blockedDescCreator(getBlockedDescCreator(blockedDescType)), diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.cpp b/src/plugins/intel_cpu/src/nodes/bucketize.cpp index 8ab671cf554894..fa9e1e30660841 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.cpp +++ b/src/plugins/intel_cpu/src/nodes/bucketize.cpp @@ -77,8 +77,8 @@ void Bucketize::initSupportedPrimitiveDescriptors() { inline constexpr uint32_t getElementsMask(ov::element::Type precision1, ov::element::Type precision2, - ov::element::Type precision3 = ov::element::undefined, - ov::element::Type precision4 = ov::element::undefined) { + ov::element::Type precision3 = ov::element::dynamic, + ov::element::Type precision4 = ov::element::dynamic) { return static_cast(ov::element::Type_t(precision1)) | (static_cast(ov::element::Type_t(precision2)) << 8) | (static_cast(ov::element::Type_t(precision3)) << 16) | diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp index b580927f862798..089beb49d8401d 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/conv.cpp @@ -342,7 +342,7 @@ bool Convolution::canBeExecutedInInt8() const { } ov::element::Type Convolution::fusedEltwisePrecision(const NodePtr& fusingNode) const { - if (sumPrc != ov::element::undefined) { + if (sumPrc != ov::element::dynamic) { return sumPrc; } diff --git a/src/plugins/intel_cpu/src/nodes/conv.h b/src/plugins/intel_cpu/src/nodes/conv.h index bb4e2499a74408..222a1134c453f5 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.h +++ b/src/plugins/intel_cpu/src/nodes/conv.h @@ -182,7 +182,7 @@ class Convolution : public Node { MemoryPtr legacyOutputCompensationMemPtr; MemoryPtr stockInputZeroPointsMemPtr; dnnl::memory::data_type outputDataType = dnnl::memory::data_type::undef; - ov::element::Type sumPrc = ov::element::undefined; + ov::element::Type sumPrc = ov::element::dynamic; bool useJitPlanar = false; // TODO: migrate on convolution_auto algorithm for x64 #if defined(OPENVINO_ARCH_X86_64) diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index 6fa0cffa95017f..6bdd35274203dd 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -172,8 +172,7 @@ static bool useDynamicQuantizationImpl(size_t dqGroupSize, !((one_of(weightsDesc->getPrecision(), ov::element::i8, ov::element::i4) && !zpPtr))) { return false; } - - if (zpPtr && !one_of(zpPtr->getDesc().getPrecision(), ov::element::u8, ov::element::u4, ov::element::undefined)) { + if (zpPtr && !one_of(zpPtr->getDesc().getPrecision(), ov::element::u8, ov::element::u4, ov::element::dynamic)) { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp 
b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index 91c7a8ca41655b..6cfa60d264ca2b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -72,7 +72,7 @@ static const TypeMapping dnnlFCTypeMapping { {{_f16, _bf16, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, // quantization configuration // int8 inner_product does not support f16 output and bias - {{_u8 | _i8, _i8, _u8 | _i8 | _i32 | _bf16 | _f32 | _undefined, _u8 | _i8 | _i32 | _bf16 | _f32}, pt(bypass(), bypass(), bypass(), bypass())}, + {{_u8 | _i8, _i8, _u8 | _i8 | _i32 | _bf16 | _f32 | _dynamic, _u8 | _i8 | _i32 | _bf16 | _f32}, pt(bypass(), bypass(), bypass(), bypass())}, {{_u8 | _i8, _i8, _f16, _u8 | _i8 | _i32 | _bf16 | _f32}, pt(bypass(), bypass(), just(), bypass())}, {{_u8 | _i8, _i8, _any, _any}, pt(bypass(), bypass(), just(), just())}, // compresses int weights (@todo more strict requrements for output precision?) @@ -131,7 +131,7 @@ static const TypeMapping dnnlMatMulTypeMapping { {{_bf16, _f16, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, {{_f16, _bf16, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, // quantization configuration - {{_u8 | _i8, _i8, _u8|_i8|_i32|_bf16|_f16|_f32|_undefined, _u8|_i8|_i32|_bf16|_f16|_f32}, pt(bypass(), bypass(), bypass(), bypass())}, + {{_u8 | _i8, _i8, _u8|_i8|_i32|_bf16|_f16|_f32|_dynamic, _u8|_i8|_i32|_bf16|_f16|_f32}, pt(bypass(), bypass(), bypass(), bypass())}, {{_u8 | _i8, _i8, _any, _any}, pt(bypass(), bypass(), just(), just())}, // compresses int weights {{_f32 | _bf16 | _f16, _u8 | _i8, _any, _any}, pt(bypass(), bypass(), use<0>(), use<0>())}, diff --git a/src/plugins/intel_cpu/src/nodes/executors/type_mask.hpp b/src/plugins/intel_cpu/src/nodes/executors/type_mask.hpp index ef9fdac7f19208..c39062619e59a5 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/type_mask.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/type_mask.hpp @@ -13,30 +13,29 @@ namespace ov { namespace intel_cpu { struct TypeMask { enum Value : uint64_t { - _undefined = 1 << 0, - _dynamic = 1 << 1, - _boolean = 1 << 2, - _bf16 = 1 << 3, - _f16 = 1 << 4, - _f32 = 1 << 5, - _f64 = 1 << 6, - _i4 = 1 << 7, - _i8 = 1 << 8, - _i16 = 1 << 9, - _i32 = 1 << 10, - _i64 = 1 << 11, - _u1 = 1 << 12, - _u4 = 1 << 13, - _u8 = 1 << 14, - _u16 = 1 << 15, - _u32 = 1 << 16, - _u64 = 1 << 17, - _nf4 = 1 << 18, - _f8e4m3 = 1 << 19, - _f8e5m2 = 1 << 20, - _string = 1 << 21, - _f4e2m1 = 1 << 22, - _f8e8m0 = 1 << 23, + _dynamic = 1 << 0, + _boolean = 1 << 1, + _bf16 = 1 << 2, + _f16 = 1 << 3, + _f32 = 1 << 4, + _f64 = 1 << 5, + _i4 = 1 << 6, + _i8 = 1 << 7, + _i16 = 1 << 8, + _i32 = 1 << 9, + _i64 = 1 << 10, + _u1 = 1 << 11, + _u4 = 1 << 12, + _u8 = 1 << 13, + _u16 = 1 << 14, + _u32 = 1 << 15, + _u64 = 1 << 16, + _nf4 = 1 << 17, + _f8e4m3 = 1 << 18, + _f8e5m2 = 1 << 19, + _string = 1 << 20, + _f4e2m1 = 1 << 21, + _f8e8m0 = 1 << 22, }; TypeMask(const ov::element::Type precision) : value(generateMask(precision)), precision(precision) {} @@ -61,8 +60,6 @@ struct TypeMask { case ov::element::typeM: \ return _##typeM; switch (type) { - CASE(undefined) - CASE(dynamic) CASE(boolean) CASE(bf16) CASE(f16) @@ -86,17 +83,16 @@ struct TypeMask { CASE(f4e2m1) CASE(f8e8m0) default: - return _undefined; + return _dynamic; } #undef CASE } }; namespace TypeMaskAlias { -constexpr ov::element::Type fxx(ov::element::Type_t::undefined); +constexpr 
ov::element::Type fxx(ov::element::Type_t::dynamic); #define DEFINE_TYPE_ALIAS(x) constexpr auto x = TypeMask::Value::x // use underscore for naming to avoid conflicts with Precision aliases -DEFINE_TYPE_ALIAS(_undefined); DEFINE_TYPE_ALIAS(_dynamic); DEFINE_TYPE_ALIAS(_boolean); DEFINE_TYPE_ALIAS(_bf16); diff --git a/src/plugins/intel_cpu/src/nodes/eye.h b/src/plugins/intel_cpu/src/nodes/eye.h index 7bc20113a3af14..9686f10289cf9b 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.h +++ b/src/plugins/intel_cpu/src/nodes/eye.h @@ -43,7 +43,7 @@ class Eye : public Node { static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: - ov::element::Type outType = ov::element::Type_t::undefined; + ov::element::Type outType = ov::element::Type_t::dynamic; template void executeSpecified(); template diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 16c8c1b662efc4..a270a00b87e87e 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -182,7 +182,7 @@ FullyConnected::FullyConnected(const std::shared_ptr& op, const GraphC m_atoi[ARG_BIAS] = BIAS; auto mapArgToInput = [&op](std::unordered_map& argToInput, size_t argId, size_t inputId) { - if (op->get_input_size() > inputId && op->input(inputId).get_element_type() != ov::element::undefined) { + if (op->get_input_size() > inputId && op->input(inputId).get_element_type() != ov::element::dynamic) { argToInput[argId] = inputId; } }; @@ -508,7 +508,7 @@ static bool useSparseWeightsDecompression(const NodePtr& weightsInput, } void FullyConnected::initSupportedPrimitiveDescriptors() { - attrs.withBias = getOriginalInputPrecisionAtPort(BIAS) != ov::element::undefined; + attrs.withBias = getOriginalInputPrecisionAtPort(BIAS) != ov::element::dynamic; attrs.sparseWeights = useSparseWeightsDecompression(getParentEdgeAt(WEIGHTS)->getParent(), getOriginalInputPrecisionAtPort(DATA), @@ -528,7 +528,7 @@ void FullyConnected::initSupportedPrimitiveDescriptors() { VecMemoryDescs srcDescs; const auto& creatorsMap = BlockedDescCreator::getCommonCreators(); for (size_t i = 0; i < srcTypes.size(); i++) { - if (srcTypes[i] == element::undefined) { + if (srcTypes[i] == element::dynamic) { srcDescs.push_back(MemoryDescUtils::makeEmptyDesc()); continue; } diff --git a/src/plugins/intel_cpu/src/nodes/input.cpp b/src/plugins/intel_cpu/src/nodes/input.cpp index a09197e763b9f5..f11e533d91c134 100644 --- a/src/plugins/intel_cpu/src/nodes/input.cpp +++ b/src/plugins/intel_cpu/src/nodes/input.cpp @@ -245,8 +245,7 @@ Input::Input(const std::shared_ptr& op, const GraphContext::CPtr& cont void Input::cloneBlobIfRequired() { const auto prec = m_constOp->get_element_type(); - - if (prec == ov::element::dynamic && shape_size(m_constOp->get_shape()) == 0) { + if (prec == ov::element::dynamic && shape_size(m_constOp->get_shape()) == 0) { memoryPtr = MemoryDescUtils::makeEmptyMemory(context); return; } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/jit_eltwise_common.cpp b/src/plugins/intel_cpu/src/nodes/kernels/jit_eltwise_common.cpp index 0977a559fb132a..cc19958f00ecc9 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/jit_eltwise_common.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/jit_eltwise_common.cpp @@ -32,7 +32,7 @@ ov::element::Type eltwise_precision_helper::get_precision(const size_t inputs_nu const ov::element::Type
(&src_prc)[MAX_ELTWISE_INPUTS], const std::vector& eltwise_data, const std::vector& exec_precisions_priority) { - ov::element::Type exec_prc = ov::element::undefined; + ov::element::Type exec_prc = ov::element::dynamic; std::set> supported_precision_intersection = get_supported_precisions(eltwise_data.front().algo); @@ -79,7 +79,7 @@ ov::element::Type eltwise_precision_helper::get_precision(const size_t inputs_nu } } - if (exec_prc == ov::element::undefined) { + if (exec_prc == ov::element::dynamic) { OPENVINO_THROW("Eltwise jitter failed to specify execution precision for Eltwise node"); } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp index 81ccd2c77602c0..77c54b7263952e 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp @@ -1250,7 +1250,7 @@ struct MHAHelper { # if defined(OPENVINO_ARCH_X86_64) std::shared_ptr _gemv; # endif - ov::element::Type _fastpath_valid_prec = ov::element::undefined; + ov::element::Type _fastpath_valid_prec = ov::element::dynamic; // second token for bhl loop PlainTensor _weight_bhl; PlainTensor _output_bhl; diff --git a/src/plugins/intel_cpu/src/nodes/normalize.h b/src/plugins/intel_cpu/src/nodes/normalize.h index e531e0c8a8851c..58a06ba0365b94 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.h +++ b/src/plugins/intel_cpu/src/nodes/normalize.h @@ -110,8 +110,8 @@ class NormalizeL2 : public Node { bool cornerCase = false; float eps = 1e-10f; - ov::element::Type input_prec = ov::element::undefined; - ov::element::Type output_prec = ov::element::undefined; + ov::element::Type input_prec = ov::element::dynamic; + ov::element::Type output_prec = ov::element::dynamic; size_t src_data_size = 0lu; size_t dst_data_size = 0lu; }; diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.h b/src/plugins/intel_cpu/src/nodes/scaled_attn.h index 6efcd4ebcc5d1e..7b263da3e454f2 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.h +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.h @@ -54,7 +54,7 @@ class ScaledDotProductAttention : public Node { return real_order; } struct SDPAQuantParam { - ov::element::Type precision = ov::element::undefined; + ov::element::Type precision = ov::element::dynamic; size_t groupSize = 0; }; ov::element::Type getKVCachePrecision(); diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index e61f677ef80c54..8d9128c9e00a3b 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -83,9 +83,9 @@ ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphCon dataSize(0lu), indicesSize(0lu), axisSize(0lu), - dataPrec(ov::element::undefined), - indicesPrec(ov::element::undefined), - axisPrec(ov::element::undefined) { + dataPrec(ov::element::dynamic), + indicesPrec(ov::element::dynamic), + axisPrec(ov::element::dynamic) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/leaky_relu.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/leaky_relu.cpp index a30dedc67f6363..861b85dae6e121 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/leaky_relu.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/leaky_relu.cpp @@ -24,7 +24,7 @@ std::shared_ptr ov::intel_cpu::LeakyReluNode::clone_with_new_inputs(co void ov::intel_cpu::LeakyReluNode::validate_and_infer_types() { INTERNAL_OP_SCOPE(LeakyReluNode_validate_and_infer_types); set_output_type(0, - m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type, + m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type, get_input_partial_shape(0)); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.cpp index e0f6d465ff3112..06a82d5d201770 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.cpp @@ -36,7 +36,7 @@ std::shared_ptr ov::intel_cpu::PowerStaticNode::clone_with_new_inputs( void ov::intel_cpu::PowerStaticNode::validate_and_infer_types() { INTERNAL_OP_SCOPE(PowerStaticNode_validate_and_infer_types); set_output_type(0, - m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type, + m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type, get_input_partial_shape(0)); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.hpp index a210702677ab4c..2be0645ee3a3f5 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/power_static.hpp @@ -19,7 +19,7 @@ class PowerStaticNode : public ov::op::Op { const float& power, const float& scale, const float& shift, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); void validate_and_infer_types() override; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp index c32c6e0b6a8b97..90fcf3beeeb00d 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp @@ -147,7 +147,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { fc_input_a = create_transpose(fc_input_a, matmul->get_friendly_name() + "/transpose_a"); } - auto bias = std::make_shared(element::undefined, Shape{0}); + auto bias = std::make_shared(element::dynamic, Shape{0}); new_ops.push_back(bias); auto fc = std::make_shared(fc_input_a, diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/interaction.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/interaction.cpp index 30c003b0e468ba..4676c05a898ced 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/interaction.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/interaction.cpp @@ -45,7 +45,7 @@ void ov::intel_cpu::InteractionNode::validate_and_infer_types() { if (feature.is_static()) { output_feature_size = input_size * (input_size - 1) / 2 + feature.get_length(); } - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? 
get_input_element_type(0) : m_output_type; m_output_type = output_type; PartialShape output_shape = ov::PartialShape::dynamic(2); output_shape[0] = batch; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp index 1acf3c465338f8..440de361fc0c08 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp @@ -22,9 +22,9 @@ ov::intel_cpu::MHANode::MHANode(const ov::Output& in0, m_output_type(output_type), mul_scales(std::move(mul_scales)), is_mul_first(is_mul_first), - fq0_output_type(ov::element::undefined), - fq1_output_type(ov::element::undefined), - fq2_output_type(ov::element::undefined) { + fq0_output_type(ov::element::dynamic), + fq1_output_type(ov::element::dynamic), + fq2_output_type(ov::element::dynamic) { validate_and_infer_types(); } @@ -107,12 +107,7 @@ void ov::intel_cpu::MHANode::validate_and_infer_types() { std::vector matmul1_output_shapes = shape_infer(matmul1.get(), matmul1_input_shapes); const auto output_shape = transpose(matmul1_output_shapes[0].get_shape(), {0, 2, 1, 3}); - - set_output_type(0, - m_output_type == ov::element::undefined || m_output_type == ov::element::dynamic - ? get_input_element_type(0) - : m_output_type, - output_shape); + set_output_type(0, m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type, output_shape); } bool ov::intel_cpu::MHANode::visit_attributes(ov::AttributeVisitor& visitor) { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.cpp index 89987fabd606a5..2fcd63f4458959 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mha_fusion.cpp @@ -490,8 +490,8 @@ ov::intel_cpu::MHAQuantFusion::MHAQuantFusion() { fq0_scale, fq1_scale, fq2_scale, - ov::element::undefined, - fq0_node ? fq0_node->get_output_element_type(0) : ov::element::undefined, + ov::element::dynamic, + fq0_node ? 
fq0_node->get_output_element_type(0) : ov::element::dynamic, fq1_node->get_output_element_type(0), transpose3_node->get_output_element_type(0)); mha->set_friendly_name(m.get_match_root()->get_friendly_name()); @@ -671,8 +671,8 @@ ov::intel_cpu::MHAQuantFusion2::MHAQuantFusion2() { std::vector(), fq1_scale, fq0_node->get_output_element_type(0), - ov::element::undefined, - ov::element::undefined, + ov::element::dynamic, + ov::element::dynamic, transpose3_node->get_output_element_type(0)); mha->set_friendly_name(m.get_match_root()->get_friendly_name()); std::vector> merged = { diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index e789c59e21dc4d..da248b67a94102 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -79,7 +79,7 @@ class BrgemmCopyB : public snippets::modifier::MemoryAccess, public ov::op::Op { void validate_element_type(const ov::element::Type& element_type); BRGEMM_TYPE m_type = BRGEMM_TYPE::REPACKING_ONLY; - element::Type m_src_type = ov::element::undefined; // src element type of the corresponding BRGEMM + element::Type m_src_type = ov::element::dynamic; // src element type of the corresponding BRGEMM }; } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp index ad3685b5731a72..ad5629ce29995a 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp @@ -60,7 +60,7 @@ bool EnforcePrecision::run_on_model(const std::shared_ptr& f) { if ((supported_precisions[index] == target) && (actual_precisions[index] == source)) { // actual input precision has to be enforced: at least one port has to be handled port_has_to_be_handled = true; - } else if ((supported_precisions[index] != element::undefined) && + } else if ((supported_precisions[index] != element::dynamic) && (supported_precisions[index] != actual_precisions[index])) { // actual input precision is not enforced but not supported, operation has to be ignored op_is_appropriate = false; diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index cb683e27490e24..041520e436a809 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -408,7 +408,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis map.insert({ov::element::bf16, ov::element::f32}); } // TODO: Remove 'hasHardwareSupport' when all nodes are able to handle f16 properly. - if (!one_of(config.inferencePrecision, element::f16, element::undefined) || !hasHardwareSupport(element::f16)) { + if (!one_of(config.inferencePrecision, element::f16, element::dynamic) || !hasHardwareSupport(element::f16)) { map.insert({ov::element::f16, ov::element::f32}); } return map; @@ -621,12 +621,12 @@ void Transformations::PreLpt(const std::vector& defaultPrecis // snippets pipeline as well, where MVN is decomposed to simple ops, these simple ops will not // tokenized into subgraph again. 
// CVS-134277 to fully enable GN as snippets to disable this GroupNormalizationDecomposition entirly. - if (node->is_dynamic() || !one_of(config.inferencePrecision, element::f32, element::undefined) || + if (node->is_dynamic() || !one_of(config.inferencePrecision, element::f32, element::dynamic) || config.snippetsMode == Config::SnippetsMode::Disable) return false; if (config.snippetsMode != Config::SnippetsMode::IgnoreCallback) { const auto group_norm = ov::as_type_ptr(node); - if (!group_norm || !implication(config.inferencePrecision == element::undefined, + if (!group_norm || !implication((config.inferencePrecision == element::dynamic), group_norm->get_element_type() == element::f32)) return false; const auto num_groups = static_cast(group_norm->get_num_groups()); @@ -988,7 +988,7 @@ void Transformations::MainSnippets(void) { // - CPU Node Subgraph requires bf16 on output when inference precision is bf16. // To avoid situations when Transpose is not alone node between MatMul and Result, // Plugin disables Transpose tokenization on output - bool mha_token_enable_transpose_on_output = one_of(config.inferencePrecision, element::f32, element::undefined); + bool mha_token_enable_transpose_on_output = one_of(config.inferencePrecision, element::f32, element::dynamic); size_t concurrency = config.streamExecutorConfig.get_threads_per_stream(); if (concurrency == 0) { concurrency = parallel_get_max_threads(); @@ -1046,9 +1046,9 @@ void Transformations::MainSnippets(void) { // CPU Plugin Subgraph supports f32, bf16, quantized and fp16(on avx_512_core_amx_fp16 target) BRGEMM const auto is_infer_prc_supported_by_MHA = (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2) && - one_of(config.inferencePrecision, ov::element::f32, element::undefined)) || + one_of(config.inferencePrecision, ov::element::f32, element::dynamic)) || (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && - one_of(config.inferencePrecision, ov::element::bf16, ov::element::f32, element::undefined)) || + one_of(config.inferencePrecision, ov::element::bf16, ov::element::f32, element::dynamic)) || (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_amx_fp16) && one_of(config.inferencePrecision, ov::element::f16)); const bool isMHASupported = !is_LLM && is_infer_prc_supported_by_MHA; @@ -1070,7 +1070,7 @@ void Transformations::MainSnippets(void) { const auto in_type0 = matmul->get_input_element_type(0); const auto in_type1 = matmul->get_input_element_type(1); const auto is_fp32 = (in_type0 == ov::element::f32 && in_type1 == ov::element::f32 && - one_of(config.inferencePrecision, element::f32, element::undefined)); + one_of(config.inferencePrecision, element::f32, element::dynamic)); const auto is_fp16 = (in_type0 == ov::element::f16 || in_type1 == ov::element::f16) || (in_type0 == element::f32 && in_type1 == ov::element::f32 && config.inferencePrecision == ov::element::f16); @@ -1121,16 +1121,16 @@ void Transformations::MainSnippets(void) { ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || - ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + 
ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || + ov::is_type(n) || ov::is_type(n) || ov::is_type(n)); #else // CPU Plugin support Swish in Subgraph via conversion to SwichCPU which assumes second input to be constant, diff --git a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp index dac0c2f1c10bb2..d2dcda73ae6ff5 100644 --- a/src/plugins/intel_cpu/src/utils/cpu_utils.hpp +++ b/src/plugins/intel_cpu/src/utils/cpu_utils.hpp @@ -130,7 +130,7 @@ inline ov::element::Type normalizeToSupportedPrecision(ov::element::Type precisi break; } default: { - precision = ov::element::undefined; + precision = ov::element::dynamic; } } diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 9a5081842f1410..3632e6b589f40e 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -331,9 +331,7 @@ std::ostream& operator<<(std::ostream& os, const Node& c_node) { auto pmem = input_node->getMemoryPtr(); void* data = pmem->getData(); auto shape = pmem->getDesc().getShape().getDims(); - - if (shape_size(shape) <= 8 && pmem->getDesc().getPrecision() != ov::element::undefined) { - auto type = pmem->getDesc().getPrecision(); + if (auto type = pmem->getDesc().getPrecision(); shape_size(shape) <= 8 && type.is_static()) { auto tensor = ov::Tensor(type, shape, data); auto constop = std::make_shared(tensor); comma = ""; diff --git a/src/plugins/intel_cpu/src/utils/general_utils.h b/src/plugins/intel_cpu/src/utils/general_utils.h index 5b1df13e4994ba..abe3833709399a 100644 --- a/src/plugins/intel_cpu/src/utils/general_utils.h +++ b/src/plugins/intel_cpu/src/utils/general_utils.h @@ -148,7 +148,7 @@ inline ov::element::Type getMaxPrecision(std::vector precisio }); } - return ov::element::undefined; + return ov::element::dynamic; } inline std::vector split(const std::string& str, char delim) { diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index 67480d4c0f9b69..c349933fcafdf4 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -60,7 +60,7 @@ inline void assert_dt(ov::element::Type dt) { template struct precision_of { - static constexpr ov::element::Type_t value = ov::element::Type_t::undefined; + static constexpr ov::element::Type_t value = ov::element::Type_t::dynamic; }; template <> @@ -98,7 +98,7 @@ struct PlainTensor { size_t m_capacity = 0; size_t m_element_size = 0; size_t m_offset = 0; - ov::element::Type_t m_dt = ov::element::Type_t::undefined; + ov::element::Type_t m_dt = ov::element::Type_t::dynamic; MemoryPtr m_mem; // hold memory ptr reference operator bool() const { diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp index fd9b5084a0c768..ee819ad4646c81 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp @@ -183,7 +183,7 @@ TEST_F(OVClassConfigTestCPU, 
smoke_CpuExecNetworkCheckKVCachePrecision) { core.set_property(deviceName, ov::hint::kv_cache_precision(ov::element::f32)); ov::CompiledModel compiledModel = core.compile_model(model, deviceName); - auto kv_cache_precision_value = ov::element::undefined; + auto kv_cache_precision_value = ov::element::dynamic; OV_ASSERT_NO_THROW(kv_cache_precision_value = compiledModel.get_property(ov::hint::kv_cache_precision)); ASSERT_EQ(kv_cache_precision_value, ov::element::f32); } @@ -195,8 +195,8 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkFinetuneKVCachePrecision) { core.set_property(deviceName, ov::value_cache_precision(ov::element::u4)); ov::CompiledModel compiledModel = core.compile_model(model, deviceName); - auto key_cache_precision_value = ov::element::undefined; - auto value_cache_precision_value = ov::element::undefined; + auto key_cache_precision_value = ov::element::dynamic; + auto value_cache_precision_value = ov::element::dynamic; OV_ASSERT_NO_THROW(key_cache_precision_value = compiledModel.get_property(ov::key_cache_precision)); OV_ASSERT_NO_THROW(value_cache_precision_value = compiledModel.get_property(ov::value_cache_precision)); ASSERT_EQ(key_cache_precision_value, ov::element::f16); @@ -235,7 +235,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckAccuracyModeKVCachePrecisi ASSERT_NO_THROW(core.set_property(deviceName, ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); ov::CompiledModel compiledModel = core.compile_model(model, deviceName); - auto kv_cache_precision_value = ov::element::undefined; + auto kv_cache_precision_value = ov::element::dynamic; ASSERT_NO_THROW(kv_cache_precision_value = compiledModel.get_property(ov::hint::kv_cache_precision)); ASSERT_EQ(kv_cache_precision_value, ov::element::f32); } @@ -264,7 +264,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckExecutionModeIsAvailableIn TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelInferencePrecisionHasHigherPriorityThanCoreInferencePrecision) { ov::Core ie; - auto inference_precision_value = ov::element::undefined; + auto inference_precision_value = ov::element::dynamic; OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); @@ -280,7 +280,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckCoreInferencePrecisionHasHigherPriorityThanModelPerformanceExecutionMode) { ov::Core ie; auto execution_mode_value = ov::hint::ExecutionMode::ACCURACY; - auto inference_precision_value = ov::element::undefined; + auto inference_precision_value = ov::element::dynamic; OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::inference_precision(ov::element::f32))); @@ -299,7 +299,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkCheckModelInferencePrecisionHasHigherPriorityThanCorePerformanceExecutionMode) { ov::Core ie; auto execution_mode_value = ov::hint::ExecutionMode::PERFORMANCE; - auto inference_precision_value = ov::element::undefined; + auto inference_precision_value = ov::element::dynamic; const auto inference_precision_expected = bf16_if_can_be_emulated; OV_ASSERT_NO_THROW(ie.set_property("CPU", ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY))); diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index f8a9558b308dad..3a5568b616ac3d 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -196,11 +196,10 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigEnableProfiling) { const auto bf16_if_can_be_emulated = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32; using ExpectedModeAndType = std::pair; -const std::map expectedTypeByMode { - {ov::hint::ExecutionMode::PERFORMANCE, {ov::hint::ExecutionMode::PERFORMANCE, - expected_precision_for_performance_mode}}, - {ov::hint::ExecutionMode::ACCURACY, {ov::hint::ExecutionMode::ACCURACY, - ov::element::undefined}}, +const std::map expectedTypeByMode{ + {ov::hint::ExecutionMode::PERFORMANCE, + {ov::hint::ExecutionMode::PERFORMANCE, expected_precision_for_performance_mode}}, + {ov::hint::ExecutionMode::ACCURACY, {ov::hint::ExecutionMode::ACCURACY, ov::element::dynamic}}, }; TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeExpectCorrespondingInferencePrecision) { @@ -208,7 +207,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeExpectCorrespondi const auto inference_precision_default = expected_precision_for_performance_mode; const auto execution_mode_default = ov::hint::ExecutionMode::PERFORMANCE; auto execution_mode_value = ov::hint::ExecutionMode::PERFORMANCE; - auto inference_precision_value = ov::element::undefined; + auto inference_precision_value = ov::element::dynamic; // check default values OV_ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); @@ -242,7 +241,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginSetConfigExecutionModeAndInferencePreci }; auto expect_inference_precision = [&](const ov::element::Type expected_value) { - auto inference_precision_value = ov::element::undefined;; + auto inference_precision_value = ov::element::dynamic; OV_ASSERT_NO_THROW(inference_precision_value = ie.get_property("CPU", ov::hint::inference_precision)); ASSERT_EQ(inference_precision_value, expected_value); }; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.hpp index 817cc2155f2212..a1ee05c744ee08 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/activation.hpp @@ -36,7 +36,7 @@ class ActivationLayerCPUTest : public testing::WithParamInterfacesecond.as() : ov::element::undefined; + ov::element::Type inference_precision = + (it != configuration.end()) ? 
it->second.as() : ov::element::dynamic; if (inference_precision == ov::element::bf16) { selectedType += "_BF16"; rel_threshold = 1e-2f; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp index 651e75024987ab..260520071aa179 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp @@ -107,7 +107,7 @@ void DeconvolutionLayerCPUTest::configure_model() { if (i > 0) { continue; } - if (inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -115,7 +115,7 @@ void DeconvolutionLayerCPUTest::configure_model() { { auto results = function->get_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } @@ -197,8 +197,8 @@ void DeconvolutionLayerCPUTest::SetUp() { std::tie(kernel, stride, padBegin, padEnd, dilation, convOutChannels, padType, outPadding) = basicParamsSet; auto it = configuration.find(ov::hint::inference_precision.name()); - ov::element::Type inference_precision = (it != configuration.end()) ? - it->second.as() : ov::element::undefined; + ov::element::Type inference_precision = + (it != configuration.end()) ? it->second.as() : ov::element::dynamic; if (inference_precision == ov::element::bf16) { inType = outType = prec = ElementType::bf16; rel_threshold = 1e-2f; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/interpolate.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/interpolate.cpp index 3b36a2fef062bc..ce9465c4e0d91e 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/interpolate.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/interpolate.cpp @@ -115,7 +115,7 @@ void InterpolateLayerCPUTest::configure_model() { if (i > 0) { continue; } - if (inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -123,7 +123,7 @@ void InterpolateLayerCPUTest::configure_model() { { auto results = function->get_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/matmul.cpp index 50b6255b1cfae6..64e035b8396814 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/matmul.cpp @@ -118,8 +118,8 @@ void MatMulLayerCPUTest::SetUp() { configuration.insert(additionalConfig.begin(), additionalConfig.end()); auto it = additionalConfig.find(ov::hint::inference_precision.name()); - ov::element::Type inference_precision = (it != additionalConfig.end()) ? - it->second.as() : ov::element::undefined; + ov::element::Type inference_precision = + (it != additionalConfig.end()) ? 
it->second.as() : ov::element::dynamic; if (inference_precision == ov::element::bf16) { inType = outType = netType = ElementType::bf16; rel_threshold = abs_threshold = 1e-2f; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/gather_tree.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/gather_tree.cpp index a21b20731d1081..052c7499231d41 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/gather_tree.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/gather_tree.cpp @@ -155,8 +155,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUStatic, ::testing::Combine(::testing::ValuesIn(inputStaticShapes), ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU)), GatherTreeLayerCPUTest::getTestCaseName); @@ -165,8 +165,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicParameter, ::testing::Combine(::testing::ValuesIn(inputDynamicShapesParameter), ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU)), GatherTreeLayerCPUTest::getTestCaseName); @@ -175,8 +175,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GatherTreeCPUDynamicConstant, ::testing::Combine(::testing::ValuesIn(inputDynamicShapesConstant), ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU)), GatherTreeLayerCPUTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/grn.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/grn.cpp index 87edeb98665cff..34d3c4cde44f49 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/grn.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/grn.cpp @@ -84,8 +84,8 @@ const std::vector dataInputDynamicShapes = {{{-1, -1}, {{5, 17}, {10 INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUStatic, GRNLayerCPUTest, ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::ValuesIn(dataInputStaticShapes), ::testing::ValuesIn(biases), ::testing::Values(ov::test::utils::DEVICE_CPU)), @@ -94,8 +94,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUStatic, INSTANTIATE_TEST_SUITE_P(smoke_GRNCPUDynamic, GRNLayerCPUTest, ::testing::Combine(::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::ValuesIn(dataInputDynamicShapes), ::testing::ValuesIn(biases), ::testing::Values(ov::test::utils::DEVICE_CPU)), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp index 7d9173e472e089..09c20dc658b4ad 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp @@ -173,7 +173,7 @@ class GroupConvolutionLayerCPUTest : public testing::WithParamInterface inputShapes1dDW = {{{}, {{2, 32, 7}}}, {2, 32, 7}, {1, 32, 9}}}}; -INSTANTIATE_TEST_SUITE_P( - smoke_GroupConv_1D_DW_FP32, - GroupConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine(groupConvParams_ExplicitPadding_DW_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1dDW), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice( - {conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? - ::testing::ValuesIn(fusingParamsSet), - ::testing::Values(empty_plugin_config)), - GroupConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_DW_FP32, + GroupConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1dDW), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice( + {conv_avx2_dw_1D, + conv_avx512_dw_1D})), // todo: [AV] what about conv_avx2_dw_1D_nspc, + // conv_avx512_dw_1D_nspc? + ::testing::ValuesIn(fusingParamsSet), + ::testing::Values(empty_plugin_config)), + GroupConvolutionLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_1D_DW_BF16, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_1D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes1dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice( @@ -1314,8 +1314,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_FP32, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_2D)), @@ -1346,8 +1346,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_FP32_Brdgmm, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D_Brdgmm, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice(BrdgmmCPUSpec())), @@ -1359,8 +1359,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_BF16, GroupConvolutionLayerCPUTest, 
::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice({conv_avx512_dw_2D, @@ -1373,8 +1373,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_BF16_Brdgmm, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D_Brdgmm, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDeviceSupportBF16(BrdgmmCPUSpec())), @@ -1386,8 +1386,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_2D_DW_FP16_Brdgmm, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_2D_Brdgmm, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice(BrdgmmCPUSpec())), @@ -1421,8 +1421,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_DW_FP32, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_3D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes3dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice(CPUParams_DW_3D)), @@ -1444,8 +1444,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConv_3D_DW_FP32_Brdgmm, GroupConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(groupConvParams_ExplicitPadding_DW_3D_Brdgmm, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes3dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice(BrdgmmCPUSpec())), @@ -1495,8 +1495,8 @@ std::vector makeSingleGroupConvCPUTestCases( groupConvLayerTestsParamsSet basicParamsSet(specificParams, ElementType::f32, - ElementType::undefined, - ElementType::undefined, + ElementType::dynamic, + ElementType::dynamic, inputShapes, ov::test::utils::DEVICE_CPU); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution_backprop_data.cpp index 5747b98684361a..12e75150a58578 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution_backprop_data.cpp @@ -154,7 +154,7 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterface 0) { continue; } - if 
(inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -162,7 +162,7 @@ class GroupDeconvolutionLayerCPUTest : public testing::WithParamInterfaceget_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } @@ -728,4 +728,4 @@ INSTANTIATE_TEST_SUITE_P(nightly_GroupDeconv_2D_AutoPadding_FP32, } // namespace } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/eltwise.cpp index bcf639706b7372..cd5b4fb6338b08 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/eltwise.cpp @@ -42,22 +42,18 @@ static const std::vector> bitwise_in_shapes_4D = { const auto params_4D_bitwise = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ - ov::test::utils::EltwiseTypes::BITWISE_AND, - ov::test::utils::EltwiseTypes::BITWISE_OR, - ov::test::utils::EltwiseTypes::BITWISE_XOR - }), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR}), ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc, nhwc }, { nhwc }, {}, "ref"), - CPUSpecificParams({ nchw, nchw }, { nchw }, {}, "ref") - }), + ::testing::ValuesIn( + {CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, "ref"), CPUSpecificParams({nchw, nchw}, {nchw}, {}, "ref")}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); @@ -66,82 +62,71 @@ INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise, EltwiseLayerCPUTest, const auto params_4D_bitwise_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ - ov::test::utils::EltwiseTypes::BITWISE_AND, - ov::test::utils::EltwiseTypes::BITWISE_OR, - ov::test::utils::EltwiseTypes::BITWISE_XOR - }), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR}), ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i16, 
ov::element::Type_t::u16, ov::element::Type_t::u32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc, nhwc }, { nhwc }, {}, "ref_I32$/"), - CPUSpecificParams({ nchw, nchw }, { nchw }, {}, "ref_I32$/") - }), + ::testing::ValuesIn({CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, "ref_I32$/"), + CPUSpecificParams({nchw, nchw}, {nchw}, {}, "ref_I32$/")}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_i32, EltwiseLayerCPUTest, params_4D_bitwise_i32, EltwiseLayerCPUTest::getTestCaseName); - const auto params_4D_bitwise_NOT = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_NOT}), + ::testing::ValuesIn({ov::test::utils::InputLayerType::CONSTANT}), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc }, { nhwc }, {}, "ref"), - CPUSpecificParams({ nchw }, { nchw }, {}, "ref") - }), + ::testing::ValuesIn({CPUSpecificParams({nhwc}, {nhwc}, {}, "ref"), CPUSpecificParams({nchw}, {nchw}, {}, "ref")}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT, EltwiseLayerCPUTest, params_4D_bitwise_NOT, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_bitwise_NOT_i32 = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i16 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc }, { nhwc }, {}, "ref_I32$/"), - CPUSpecificParams({ nchw }, { nchw }, {}, "ref_I32$/") - }), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D_bitwise_NOT_i32 = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(bitwise_in_shapes_4D), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_NOT}), + ::testing::ValuesIn({ov::test::utils::InputLayerType::CONSTANT}), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i16}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + 
::testing::Values(ov::AnyMap())), + ::testing::ValuesIn({CPUSpecificParams({nhwc}, {nhwc}, {}, "ref_I32$/"), + CPUSpecificParams({nchw}, {nchw}, {}, "ref_I32$/")}), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT_i32, EltwiseLayerCPUTest, params_4D_bitwise_NOT_i32, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_int_jit = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), - ::testing::ValuesIn({ utils::EltwiseTypes::ADD, utils::EltwiseTypes::MULTIPLY }), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn({ ElementType::i8, ElementType::u8, ElementType::f16, ElementType::i32, ElementType::f32 }), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); + ::testing::Combine( + ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), + ::testing::ValuesIn({utils::EltwiseTypes::ADD, utils::EltwiseTypes::MULTIPLY}), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn({ElementType::i8, ElementType::u8, ElementType::f16, ElementType::i32, ElementType::f32}), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_int_jit, EltwiseLayerCPUTest, params_4D_int_jit, EltwiseLayerCPUTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/matmul.cpp index 4afdd90427b06e..a3faee03a430e6 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/matmul.cpp @@ -27,16 +27,17 @@ std::vector fusingParamsSet2D_smoke { fusingTanh }; -const auto testParams2D_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_smoke), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); +const auto testParams2D_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_smoke), + 
::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerCPUTest, testParams2D_smoke, MatMulLayerCPUTest::getTestCaseName); @@ -45,17 +46,17 @@ std::vector fusingParamsSet2D_smoke_f16 { fusingBias, fusingRelu }; -const auto testParams2D_smoke_f16 = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values( - ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_smoke_f16), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); +const auto testParams2D_smoke_f16 = ::testing::Combine( + ::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::Values(ElementType::f16), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_smoke_f16), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_f16, MatMulLayerCPUTest, testParams2D_smoke_f16, MatMulLayerCPUTest::getTestCaseName); std::vector fusingParamsSet3D_smoke { @@ -66,8 +67,8 @@ std::vector fusingParamsSet3D_smoke { }; const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::Values(utils::InputLayerType::CONSTANT), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(emptyAdditionalConfig())); @@ -76,14 +77,14 @@ std::vector fusingParamsSet3D_smoke_f16 { fusingBias, fusingRelu }; -const auto fullyConnectedParams3D_smoke_f16 = ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values( - ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))); +const auto fullyConnectedParams3D_smoke_f16 = + ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), + ::testing::Values(ElementType::f16), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))); const auto testParams3D_smoke = ::testing::Combine(fullyConnectedParams3D_smoke, ::testing::Values(MatMulNodeType::FullyConnected), ::testing::ValuesIn(fusingParamsSet3D_smoke), @@ -108,16 +109,17 @@ std::vector fusingParamsSet4D_smoke { fusingTanh }; -const auto testParams4D_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet4D_smoke), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); +const auto testParams4D_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet4D_smoke), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); INSTANTIATE_TEST_SUITE_P(smoke_FC_4D, MatMulLayerCPUTest, testParams4D_smoke, MatMulLayerCPUTest::getTestCaseName); std::vector fusingParamsSet4D_smoke_f16 { @@ -125,17 +127,17 @@ std::vector fusingParamsSet4D_smoke_f16 { fusingRelu }; -const auto testParams4D_smoke_f16 = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS), - ::testing::Values(ElementType::f16), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values( - ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet4D_smoke_f16), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); +const auto testParams4D_smoke_f16 = ::testing::Combine( + ::testing::Combine(::testing::ValuesIn(IS), + ::testing::Values(ElementType::f16), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::AnyMap({ov::hint::inference_precision(ov::element::f16)}))), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet4D_smoke_f16), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParamsFC()))); INSTANTIATE_TEST_SUITE_P(smoke_FC_4D_f16, MatMulLayerCPUTest, testParams4D_smoke_f16, MatMulLayerCPUTest::getTestCaseName); } // namespace matmul diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp index 45ecc774b5dbf9..578be8119419cb 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/reduce.cpp @@ -35,19 +35,17 @@ std::vector cpuParams_5D = { CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}), }; -const auto params_MultiAxis_5D = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); +const auto params_MultiAxis_5D = testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + 
testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); const std::vector> axes5D_ref = { {0} @@ -61,33 +59,30 @@ std::vector> config_infer_prec_f32 = { {{ov::hint::inference_precision.name(), ov::element::f32}} }; -const auto params_MultiAxis_5D_ref = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D_ref), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(config_infer_prec_f32)); - -const auto params_MultiAxis_5D_ZeroDim_ref = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D_ZeroDim)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); +const auto params_MultiAxis_5D_ref = testing::Combine(testing::Combine(testing::ValuesIn(axes5D_ref), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(config_infer_prec_f32)); + +const auto params_MultiAxis_5D_ZeroDim_ref = + testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D_ZeroDim)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_ref)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); //There are dedicated instences of smoke_Reduce_MultiAxis_5D_CPU test in arm and x64 folders //because ACL does not support 0 as reduction axis diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/bitwise_shift.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/bitwise_shift.cpp index db1f59195abed7..adf84267d8b386 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/bitwise_shift.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/bitwise_shift.cpp @@ -31,8 +31,8 @@ const auto params_4D_bitwise_shift = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, 
ov::element::Type_t::i32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::ValuesIn( @@ -53,8 +53,8 @@ const auto params_4D_bitwise_shift_i32_cast = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::ValuesIn({CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, "ref_I32$/"), @@ -80,8 +80,8 @@ const auto params_4D_bitwise_shift_overflow_i32_cast = ::testing::Combine( ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn( {ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32, ov::element::Type_t::i32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::ValuesIn({CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, "ref_I32$/"), @@ -106,8 +106,8 @@ const auto params_4D_bitwise_shift_overflow_8 = ::testing::Combine( ::testing::ValuesIn(secondaryInputTypes()), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::ValuesIn( @@ -136,8 +136,8 @@ const auto params_5D_1D_bitwise_shift = ::testing::Combine( ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::Values(CPUSpecificParams({ncdhw, x}, {ncdhw}, {}, {})), @@ -157,8 +157,8 @@ const auto params_5D_1D_bitwise_shift_cast_i32 = ::testing::Combine( ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::Values(CPUSpecificParams({ncdhw, x}, {ncdhw}, {}, "ref_I32$/")), @@ -182,8 
+182,8 @@ const auto params_4D_1D_bitwise_shift = ::testing::Combine( ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::Values(CPUSpecificParams({nchw, x}, {nchw}, {}, {})), @@ -203,8 +203,8 @@ const auto params_4D_1D_bitwise_shift_cast_i32 = ::testing::Combine( ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), ::testing::ValuesIn({ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32}), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), ::testing::Values(CPUSpecificParams({nchw, x}, {nchw}, {}, "ref_I32$/")), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp index 94683387d1eac0..78e47e3aed49c7 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp @@ -11,19 +11,19 @@ using namespace CPUTestUtils; namespace ov { namespace test { namespace Convolution { +OPENVINO_SUPPRESS_DEPRECATED_START /* ============= Convolution (Gemm 1D) ============= */ -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), - ::testing::ValuesIn(fusingParamsSetWithEmpty()), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm1D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), + ::testing::ValuesIn(fusingParamsSetWithEmpty()), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); std::vector inputShapesPlain2Blocked3d = { @@ -53,8 +53,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32, ConvolutionLayerCPUTest, ::testing::Combine( convParams_ExplicitPadding_GEMM_2D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inShapesGemm2D_cache()), 
::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), @@ -62,18 +62,17 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32, ConvolutionLayerCPUTest, ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_empty_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_empty_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); /* ============= Convolution (2D) ============= */ @@ -82,8 +81,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_empty_fusing, ConvolutionLayerCPUTes ::testing::Combine( convParams_ExplicitPadding_2D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2d_cache()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), @@ -96,8 +95,8 @@ INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_empty_fusing, ConvolutionLayerCPUT ::testing::Combine( convParams_ExplicitPadding_2D_dilated(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), @@ -115,8 +114,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_FP32, ConvolutionLayerCPUT ::testing::Combine( convParams_ExplicitPadding_2D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesPlain2Blocked2d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_2D_plain_to_blocked)), @@ -129,8 +128,8 @@ INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_FP32_dilated, ConvolutionLayerCP ::testing::Combine( convParams_ExplicitPadding_2D_dilated(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesPlain2Blocked2d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), 
::testing::ValuesIn(filterCPUInfo(CPUParams_2D_plain_to_blocked)), @@ -174,8 +173,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_reorder_Conv_2D, ConvolutionLayerCPUTest, ::testing::Combine( convParams_Reorder_2D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes_Reorder_2D), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo({conv_avx512_2D_1x1})), @@ -189,8 +188,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32, ConvolutionLayerCPUTest, ::testing::Combine( convParams_ExplicitPadding_3D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes3d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), @@ -203,8 +202,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP32_fusingScaleShiftAndFakeQuantizePerCh ::testing::Combine( convParams_ExplicitPadding_3D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes3d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), @@ -217,8 +216,8 @@ INSTANTIATE_TEST_SUITE_P(Conv_3D_FP32_dilated, ConvolutionLayerCPUTest, ::testing::Combine( convParams_ExplicitPadding_3D_dilated(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes3d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), @@ -236,8 +235,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_FP32, ConvolutionLayerCPUT ::testing::Combine( convParams_ExplicitPadding_3D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesPlain2Blocked3d), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_3D_plain_to_blocked)), @@ -250,8 +249,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_PlainToBlocked_BF16, ConvolutionLayerCPUT ::testing::Combine( convParams_ExplicitPadding_3D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesPlain2Blocked3d), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_3D})), @@ -264,8 +263,8 @@ INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_FP32_dilated, ConvolutionLayerCP ::testing::Combine( convParams_ExplicitPadding_3D_dilated(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), 
::testing::ValuesIn(inputShapesPlain2Blocked3d), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_3D_plain_to_blocked)), @@ -278,8 +277,8 @@ INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_3D_BF16_dilated, ConvolutionLayerCP ::testing::Combine( convParams_ExplicitPadding_3D_dilated(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesPlain2Blocked3d), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_3D})), @@ -294,8 +293,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32_empty_fusing, ConvolutionLayerCP ::testing::Combine( convParams_ExplicitPadding_1x1_1D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes1d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())), @@ -310,8 +309,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32_empty_fusing, ConvolutionLayerCP ::testing::Combine( convParams_ExplicitPadding_1x1_2D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())), @@ -336,8 +335,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_AutoPad_FP32, ConvolutionLayerCPUTest, ::testing::Combine( convParams_AutoPadding_2D, ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes2d()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), @@ -385,7 +384,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest, convParams_1D, ::testing::Values(ElementType::f32), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inShapes), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::Values(CPUSpecificParams{{}, {}, {}, CPUTestsBase::any_type}), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/eltwise.cpp index a71fcaf5ddd06a..a420ea994b9123 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/eltwise.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/eltwise.cpp @@ -12,259 +12,242 @@ using namespace CPUTestUtils; namespace ov { namespace test { namespace Eltwise { - -const auto params_4D = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - 
::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder, EltwiseLayerCPUTest, params_4D, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_Snippets = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), - ::testing::ValuesIn(eltwiseOpTypesBinInpSnippets()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(additional_config()[0])), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(true)); - +const auto params_4D_Snippets = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), + ::testing::ValuesIn(eltwiseOpTypesBinInpSnippets()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(additional_config()[0])), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(true)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_Snippets, EltwiseLayerCPUTest, params_4D_Snippets, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_emptyCPUSpec = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), - ::testing::ValuesIn(eltwiseOpTypesDiffInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::Values(emptyCPUSpec), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D_emptyCPUSpec = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), + ::testing::ValuesIn(eltwiseOpTypesDiffInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + 
::testing::ValuesIn(additional_config())), + ::testing::Values(emptyCPUSpec), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_emptyCPUSpec, EltwiseLayerCPUTest, params_4D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_5D = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder, EltwiseLayerCPUTest, params_5D, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D_emptyCPUSpec = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), - ::testing::ValuesIn(eltwiseOpTypesDiffInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::Values(emptyCPUSpec), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_5D_emptyCPUSpec = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), + ::testing::ValuesIn(eltwiseOpTypesDiffInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::Values(emptyCPUSpec), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D, EltwiseLayerCPUTest, params_5D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_1D_constant_mode = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - 
::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_1D_parameter_mode = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter, EltwiseLayerCPUTest, params_4D_1D_parameter_mode, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_1D_constant = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + 
::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_1D_parameter = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Parameter, EltwiseLayerCPUTest, params_5D_1D_parameter, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_dyn_const = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapes_4D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D_dyn_const = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapes_4D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_4D_dyn_const, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_dyn_param = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param()), - 
::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D_dyn_param = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_4D_dyn_param, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D_dyn_const = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_5D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_5D_dyn_const = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_5D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_const, EltwiseLayerCPUTest, params_5D_dyn_const, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D_dyn_param = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_5D_dyn_param()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_5D_dyn_param = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_5D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + 
::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param, EltwiseLayerCPUTest, params_5D_dyn_param, EltwiseLayerCPUTest::getTestCaseName); const auto params_fma_4D = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_fusing_4D())), - ::testing::Values(utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), - ::testing::ValuesIn({fusingMultiplyAddPerChannel}), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_fusing_4D())), + ::testing::Values(utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::ValuesIn({fusingMultiplyAddPerChannel}), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_fma_4D, EltwiseLayerCPUTest, params_fma_4D, EltwiseLayerCPUTest::getTestCaseName); const auto params_fma_5D = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_fusing_5D())), - ::testing::Values(utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), - ::testing::ValuesIn({fusingMultiplyAddPerChannel}), - ::testing::Values(false)); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_fusing_5D())), + ::testing::Values(utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D())), + ::testing::ValuesIn({fusingMultiplyAddPerChannel}), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_fma_5D, EltwiseLayerCPUTest, params_fma_5D, EltwiseLayerCPUTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/matmul.cpp index 9b5d7287875d7c..5606f4f6e83270 100644 --- 
a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/matmul.cpp @@ -286,8 +286,8 @@ const std::vector IS_Dynamic_nightly = { const auto matMulParams = ::testing::Combine(::testing::ValuesIn(IS), ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(additionalConfig())); @@ -300,12 +300,12 @@ const auto testParams = ::testing::Combine(matMulParams, INSTANTIATE_TEST_SUITE_P(smoke_MM_Static, MatMulLayerCPUTest, testParams, MatMulLayerCPUTest::getTestCaseName); const auto matMulParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto testParamsDynamic = ::testing::Combine(matMulParamsDynamic, ::testing::Values(MatMulNodeType::MatMul), @@ -315,12 +315,12 @@ const auto testParamsDynamic = ::testing::Combine(matMulParamsDynamic, INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic, MatMulLayerCPUTest, testParamsDynamic, MatMulLayerCPUTest::getTestCaseName); const auto matMulParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_nightly), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto testParamsDynamic_nightly = ::testing::Combine(matMulParamsDynamic_nightly, ::testing::Values(MatMulNodeType::MatMul), @@ -332,4 +332,4 @@ INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerCPUTest, testParamsDynam } // namespace matmul } // namespace MatMul } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp index f962c20e1566f5..b70b97de89cb34 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/reduce.cpp @@ -47,75 +47,67 @@ std::vector cpuParams_4D = { }; /* ================================ 1.1 No fusion - Arithmetic ================================ */ -const auto params_OneAxis = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::ValuesIn(opTypes()), - 
testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_OneAxis_dynamic = testing::Combine( - testing::Combine( - testing::Values(1), // ACL supports reduce against static dims only - testing::ValuesIn(opTypes()), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dynamic_3dims)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_4D = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); +const auto params_OneAxis = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::ValuesIn(opTypes()), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_OneAxis_dynamic = + testing::Combine(testing::Combine(testing::Values(1), // ACL supports reduce against static dims only + testing::ValuesIn(opTypes()), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dynamic_3dims)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_4D = testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); const auto params_MultiAxis_4D_dynamic = testing::Combine( - testing::Combine( - testing::Values(std::vector{0, 1}), // ACL supports reduce against static dims only - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dynamic_2dims)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_Int32 = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - 
testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypesInt32()), - testing::Values(ElementType::i32), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_Int32)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); + testing::Combine(testing::Values(std::vector{0, 1}), // ACL supports reduce against static dims only + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dynamic_2dims)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_Int32 = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypesInt32()), + testing::Values(ElementType::i32), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_Int32)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_CPU, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/riscv64/shl/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/riscv64/shl/matmul.cpp index 63011cdfc0781a..fad718d03e7088 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/riscv64/shl/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/riscv64/shl/matmul.cpp @@ -44,17 +44,17 @@ const std::vector& FC_2DParams() { return params; } - -const auto testParams3D_SHL_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(FC_2DParams()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(filterSpecificParams_SHL())); +const auto testParams3D_SHL_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(FC_2DParams()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterSpecificParams_SHL())); INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_SHL, MatMulLayerCPUTest, testParams3D_SHL_smoke, MatMulLayerCPUTest::getTestCaseName); const std::vector& FC_3DParams() { @@ -83,18 +83,19 @@ const std::vector& FC_3DParams() { return params; } -const auto testParams2D_SHL_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(FC_3DParams()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(filterSpecificParams_SHL())); +const auto testParams2D_SHL_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(FC_3DParams()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(filterSpecificParams_SHL())); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_SHL, MatMulLayerCPUTest, testParams2D_SHL_smoke, MatMulLayerCPUTest::getTestCaseName); } // namespace } // namespace MatMul } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp index 030f7eb3bc40b8..57a9a13d4e1af1 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp @@ -59,26 +59,25 @@ const std::vector fusingParamsSetBF16{ const std::vector fusingParamsSetFP16 = fusingParamsSetBF16; /* ============= Convolution (Gemm 1D) ============= */ -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), - ::testing::ValuesIn(fusingParamsSetWithEmpty()), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm1D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), + ::testing::ValuesIn(fusingParamsSetWithEmpty()), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32_ImproperPriorityList, ConvolutionLayerCPUTest, ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inShapesGemm1D()), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfo({conv_gemm_1D})), @@ -86,116 +85,109 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_FP32_ImproperPriorityList, ::testing::Values(empty_plugin_config)), 
ConvolutionLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo({conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc? - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm1D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32_fusing, 
ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D_cache()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm2D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_1D_GEMM_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm1D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo({conv_gemm_1D})), // todo: [AV] what about conv_gemm_1D_nspc? + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_GEMM_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm1D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_1D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_I8_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D(), + ::testing::Values(ElementType::f32), + 
::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_BF16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_GEMM_FP32_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D_cache()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_GEMM_FP32_dilated_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm2D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_2D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); const std::vector fusingParamsSet_dynBatch{ @@ -205,131 +197,122 @@ const std::vector fusingParamsSet_dynBatch{ fusingReluScaleShift }; -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_dynBatch, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_dynBatch()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), - ::testing::ValuesIn(fusingParamsSet_dynBatch), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d_cache()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - 
convParams_ExplicitPadding_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_dynBatch, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d_dynBatch()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::ValuesIn(fusingParamsSet_dynBatch), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP32_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d_cache()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + 
::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_I8_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); /* ============= Kernel_1x1 (1D) ============= */ -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32_fusing, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())), - ::testing::ValuesIn(fusingParamsSetWithoutEmpty), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP32_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + 
::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP32_fusing, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())), + ::testing::ValuesIn(fusingParamsSetWithoutEmpty), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_1D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1x1_2D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); /* ============= Convolution (1D) ============= */ @@ -361,62 +344,61 @@ const std::vector CPUParams_1D_I8 = { conv_avx512_1D_nspc_brgconv }; -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_f32)), - ::testing::ValuesIn(fusingParamsSetWithEmpty()), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D, - conv_avx512_1D_nspc_brgconv, conv_avx512_1D_nspc_brgconv_amx})), // todo: [AV] what about conv_avx512_1D_nspc? 
- ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_1D_nspc_brgconv, - conv_avx512_1D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_I8)), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_f32)), + ::testing::ValuesIn(fusingParamsSetWithEmpty()), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_1D_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16( + {conv_avx512_1D, + conv_avx512_1D_nspc_brgconv, + conv_avx512_1D_nspc_brgconv_amx})), // todo: [AV] what about conv_avx512_1D_nspc? 
+ ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_FP16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16( + {conv_avx512_1D_nspc_brgconv, conv_avx512_1D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_I8)), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); const std::vector CPUParams_1D_plain_to_blocked = { @@ -446,182 +428,184 @@ std::vector inputShapesPlain2Blocked1d = { } }; -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_plain_to_blocked)), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked1d), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_1D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc, - conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_2D_nspc_brgconv, - conv_avx512_2D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, conv_avx512_2D_nspc, - conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_FP16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_2D_nspc_brgconv, - conv_avx512_2D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_2D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesPlain2Blocked2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D_1x1, conv_avx512_2D_1x1_nspc, - conv_avx512_1D_1x1_nspc_brgconv, conv_avx512_1D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - 
-INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_1x1_FP16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_1D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes1d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_1D_1x1_nspc_brgconv, - conv_avx512_1D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_1x1, conv_avx512_2D_1x1_nspc, - conv_avx512_2D_1x1_nspc_brgconv, conv_avx512_2D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_1x1_2D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_2D_1x1_nspc_brgconv, - conv_avx512_2D_1x1_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesPlain2Blocked1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_1D_plain_to_blocked)), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_PlainToBlocked_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesPlain2Blocked1d), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_1D})), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_2D_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, + conv_avx512_2D_nspc, + 
conv_avx512_2D_nspc_brgconv, + conv_avx512_2D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_FP16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16( + {conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + Conv_2D_BF16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D, + conv_avx512_2D_nspc, + conv_avx512_2D_nspc_brgconv, + conv_avx512_2D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_FP16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16( + {conv_avx512_2D_nspc_brgconv, conv_avx512_2D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_PlainToBlocked_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesPlain2Blocked2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_PlainToBlocked_2D_BF16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_2D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesPlain2Blocked2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo({conv_avx512_plain_to_blocked_2D})), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_1D_1x1_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + 
::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_1D_1x1, + conv_avx512_2D_1x1_nspc, + conv_avx512_1D_1x1_nspc_brgconv, + conv_avx512_1D_1x1_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_1D_1x1_FP16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_1D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes1d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_1D_1x1_nspc_brgconv, + conv_avx512_1D_1x1_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_2D_1x1_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_2D_1x1, + conv_avx512_2D_1x1_nspc, + conv_avx512_2D_1x1_nspc_brgconv, + conv_avx512_2D_1x1_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_2D_1x1_FP16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_1x1_2D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_2D_1x1_nspc_brgconv, + conv_avx512_2D_1x1_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + /* ============= Jit Planar ============= */ /* ============= Convolution planar params (2D) ============= */ const std::vector CPUParams_Jit_Planar_2D = { @@ -649,117 +633,109 @@ const auto convParams_Planar_ExplicitPadding_2D_dilated = ::testing::Combine( ::testing::Values(ov::op::PadType::EXPLICIT) ); -INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_Jit_Planar_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_2D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_2D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_2D_dilated, - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes2d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_Jit_Planar_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_Planar_ExplicitPadding_2D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)), + ::testing::Values(emptyFusingSpec, fusingRelu), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_2D_Jit_Planar_FP32_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_Planar_ExplicitPadding_2D_dilated, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes2d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_2D)), + ::testing::Values(emptyFusingSpec, fusingRelu), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); /* ============= Convolution (GEMM 3D) ============= */ -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_I8, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_I8_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::i8), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::Values(fusingSum), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_I8_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_3D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_I8, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + 
+INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_FP32_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_I8_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::i8), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::Values(fusingSum), + ::testing::Values(empty_plugin_config)), ConvolutionLayerCPUTest::getTestCaseName); /* ============= Convolution planar params (3D) ============= */ @@ -789,120 +765,118 @@ const auto convParams_Planar_ExplicitPadding_3D_dilated = ::testing::Combine( ::testing::Values(ov::op::PadType::EXPLICIT) ); -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_GEMM_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inShapesGemm3D()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_BF16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc, - conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP16, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - 
::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_3D_nspc_brgconv, - conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_BF16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, conv_avx512_3D_nspc, - conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetBF16), - ::testing::Values(cpu_bf16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_FP16_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_ExplicitPadding_3D_dilated(), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16({conv_avx512_3D_nspc_brgconv, - conv_avx512_3D_nspc_brgconv_amx})), - ::testing::ValuesIn(fusingParamsSetFP16), - ::testing::Values(cpu_f16_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_Jit_Planar_FP32, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_3D, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(empty_plugin_config)), - ConvolutionLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(Conv_3D_Jit_Planar_FP32_dilated, ConvolutionLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - convParams_Planar_ExplicitPadding_3D_dilated, - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes3d()), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)), - ::testing::Values(emptyFusingSpec, fusingRelu), - ::testing::Values(empty_plugin_config)), +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_GEMM_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_GEMM_BF16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_GEMM_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + 
::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inShapesGemm3D()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_GEMM_3D())), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Conv_3D_BF16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, + conv_avx512_3D_nspc, + conv_avx512_3D_nspc_brgconv, + conv_avx512_3D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_FP16, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16( + {conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + Conv_3D_BF16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice_BF16({conv_avx512_3D, + conv_avx512_3D_nspc, + conv_avx512_3D_nspc_brgconv, + conv_avx512_3D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetBF16), + ::testing::Values(cpu_bf16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_FP16_dilated, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_ExplicitPadding_3D_dilated(), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16( + {conv_avx512_3D_nspc_brgconv, conv_avx512_3D_nspc_brgconv_amx})), + ::testing::ValuesIn(fusingParamsSetFP16), + ::testing::Values(cpu_f16_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Conv_3D_Jit_Planar_FP32, + ConvolutionLayerCPUTest, + ::testing::Combine(::testing::Combine(convParams_Planar_ExplicitPadding_3D, + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes3d()), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)), + ::testing::Values(emptyFusingSpec, fusingRelu), + ::testing::Values(empty_plugin_config)), + ConvolutionLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(Conv_3D_Jit_Planar_FP32_dilated, + 
+                         ConvolutionLayerCPUTest,
+                         ::testing::Combine(::testing::Combine(convParams_Planar_ExplicitPadding_3D_dilated,
+                                                               ::testing::Values(ElementType::f32),
+                                                               ::testing::Values(ElementType::dynamic),
+                                                               ::testing::Values(ElementType::dynamic),
+                                                               ::testing::ValuesIn(inputShapes3d()),
+                                                               ::testing::Values(ov::test::utils::DEVICE_CPU)),
+                                            ::testing::ValuesIn(filterCPUInfo(CPUParams_Jit_Planar_3D)),
+                                            ::testing::Values(emptyFusingSpec, fusingRelu),
+                                            ::testing::Values(empty_plugin_config)),
                          ConvolutionLayerCPUTest::getTestCaseName);
 
 } // namespace
diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/eltwise.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/eltwise.cpp
index 505dafa5d9a469..76b6753a6c7ac8 100644
--- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/eltwise.cpp
+++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/eltwise.cpp
@@ -150,108 +150,102 @@ const std::vector<fusingSpecificParams> fusingParamsSet_x64{
     fusingFQPerChannelSigmoidFQPerTensor
 };
 
-const auto params_4D_Blocked_Blocked = ::testing::Combine(
-    ::testing::Combine(
-        ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())),
-        ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::ValuesIn(secondaryInputTypes()),
-        ::testing::ValuesIn(opTypes()),
-        ::testing::ValuesIn(netType()),
-        ::testing::Values(ov::element::undefined),
-        ::testing::Values(ov::element::undefined),
-        ::testing::Values(ov::test::utils::DEVICE_CPU),
-        ::testing::ValuesIn(additional_config())),
-    ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
-    ::testing::Values(emptyFusingSpec),
-    ::testing::ValuesIn(enforceSnippets()));
+const auto params_4D_Blocked_Blocked =
+    ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())),
+                                          ::testing::ValuesIn(eltwiseOpTypesBinInp()),
+                                          ::testing::ValuesIn(secondaryInputTypes()),
+                                          ::testing::ValuesIn(opTypes()),
+                                          ::testing::ValuesIn(netType()),
+                                          ::testing::Values(ov::element::dynamic),
+                                          ::testing::Values(ov::element::dynamic),
+                                          ::testing::Values(ov::test::utils::DEVICE_CPU),
+                                          ::testing::ValuesIn(additional_config())),
+                       ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())),
+                       ::testing::Values(emptyFusingSpec),
+                       ::testing::ValuesIn(enforceSnippets()));
 
 INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_MemOrder_Blocked_Blocked,
                          EltwiseLayerCPUTest,
                          params_4D_Blocked_Blocked,
                          EltwiseLayerCPUTest::getTestCaseName);
 
 const auto params_4D_fusing = ::testing::Combine(
-    ::testing::Combine(
-        ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())),
-        ::testing::ValuesIn(eltwiseOpTypesBinInp()),
-        ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
-        ::testing::ValuesIn(opTypes()),
-        ::testing::Values(ElementType::f32),
-        ::testing::Values(ov::element::undefined),
-        ::testing::Values(ov::element::undefined),
-        ::testing::Values(ov::test::utils::DEVICE_CPU),
-        ::testing::ValuesIn(additional_config())),
-    ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())),
-    ::testing::ValuesIn(fusingParamsSet_x64),
-    ::testing::ValuesIn(enforceSnippets()));
+    ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())),
+                       ::testing::ValuesIn(eltwiseOpTypesBinInp()),
+                       ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
+                       ::testing::ValuesIn(opTypes()),
+                       ::testing::Values(ElementType::f32),
::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Fusing, EltwiseLayerCPUTest, params_4D_fusing, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_fusing_blocked_blocked = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Fusing_Blocked_Blocked, EltwiseLayerCPUTest, params_4D_fusing_blocked_blocked, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_blocked_blocked_fusing = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_fusing())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_Fusing, EltwiseLayerCPUTest, params_4D_blocked_blocked_fusing, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_emptyCPUSpec = ::testing::Combine( - 
::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), - ::testing::ValuesIn(eltwiseOpTypesDiffInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::Values(emptyCPUSpec), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); +const auto params_4D_emptyCPUSpec = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D())), + ::testing::ValuesIn(eltwiseOpTypesDiffInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::Values(emptyCPUSpec), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_emptyCPUSpec_x64, EltwiseLayerCPUTest, params_4D_emptyCPUSpec, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D_Blocked_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); +const auto params_5D_Blocked_Blocked = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_Blocked_Blocked, EltwiseLayerCPUTest::getTestCaseName); @@ -260,375 +254,354 @@ const std::vector fusingParamsSet_I32{ fusingMultiplyAddPerChannel }; -const auto params_5D_emptyCPUSpec_I32 = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), - ::testing::ValuesIn(eltwiseOpTypesI32()), - ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::i32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::Values(emptyCPUSpec), - ::testing::ValuesIn(fusingParamsSet_I32), - ::testing::ValuesIn(enforceSnippets())); +const auto 
params_5D_emptyCPUSpec_I32 = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D())), + ::testing::ValuesIn(eltwiseOpTypesI32()), + ::testing::ValuesIn(secondaryInputTypes()), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::i32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::Values(emptyCPUSpec), + ::testing::ValuesIn(fusingParamsSet_I32), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_I32, EltwiseLayerCPUTest, params_5D_emptyCPUSpec_I32, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_Blocked_Planar = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Blocked_Planar())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Planar())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Blocked_Planar())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Planar())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Planar, EltwiseLayerCPUTest, params_4D_Blocked_Planar, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_Planar_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Planar_Blocked())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_Planar_Blocked())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); 
INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Planar_Blocked, EltwiseLayerCPUTest, params_4D_Planar_Blocked, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_Blocked_Planar = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Blocked_Planar())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Planar())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Blocked_Planar())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Planar())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_Blocked_Planar, EltwiseLayerCPUTest, params_5D_Blocked_Planar, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_Planar_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Planar_Blocked())), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Planar_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_Planar_Blocked())), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Planar_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_Planar_Blocked_x64, EltwiseLayerCPUTest, params_5D_Planar_Blocked, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_1D_constant_mode = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - 
::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode_x64())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Constant_mode_x64())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Constant_x64, EltwiseLayerCPUTest, params_4D_1D_constant_mode, EltwiseLayerCPUTest::getTestCaseName); const auto params_4D_1D_parameter_mode = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_4D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_1D_Parameter_mode())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_1D_Parameter_x64, EltwiseLayerCPUTest, params_4D_1D_parameter_mode, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_1D_constant = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), + 
::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_constant())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Constant_x64, EltwiseLayerCPUTest, params_5D_1D_constant, EltwiseLayerCPUTest::getTestCaseName); const auto params_5D_1D_parameter = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), - ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inShapes_5D_1D())), + ::testing::Values(ov::test::utils::EltwiseTypes::ADD, ov::test::utils::EltwiseTypes::MULTIPLY), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_1D_parameter())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_1D_Parameter_x64, EltwiseLayerCPUTest, params_5D_1D_parameter, EltwiseLayerCPUTest::getTestCaseName); //// ============================================ 4D ============================================ -const auto params_4D_planar_dyn_const = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapes_4D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); +const auto params_4D_planar_dyn_const = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapes_4D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + 
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Planar_MemOrder_dyn_const_x64, EltwiseLayerCPUTest, params_4D_planar_dyn_const, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_per_channel_dyn_const = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapes_4D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_per_channel_dyn_const = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapes_4D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_PerChannel_MemOrder_dyn_const_x64, EltwiseLayerCPUTest, params_4D_per_channel_dyn_const, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_blocked_blocked_dyn_const = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapes_4D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_blocked_blocked_dyn_const = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapes_4D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_MemOrder_dyn_const_x64, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_const, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_planar_dyn_param = 
::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), - ::testing::Values(emptyFusingSpec), - ::testing::ValuesIn(enforceSnippets())); +const auto params_4D_planar_dyn_param = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), + ::testing::Values(emptyFusingSpec), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Planar_MemOrder_dyn_param_x64, EltwiseLayerCPUTest, params_4D_planar_dyn_param, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_perchannel_dyn_param = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_perchannel_dyn_param = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_PerChannel_MemOrder_dyn_param_x64, EltwiseLayerCPUTest, params_4D_perchannel_dyn_param, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_blocked_blocked_dyn_param = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - 
::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_blocked_blocked_dyn_param = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Blocked_Blocked_MemOrder_dyn_param_x64, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_param, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_planar_dyn_param_fusing = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param_fusing()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::ValuesIn(enforceSnippets())); +const auto params_4D_planar_dyn_param_fusing = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param_fusing()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Planar())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::ValuesIn(enforceSnippets())); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_planar_dyn_param_fusing, EltwiseLayerCPUTest, params_4D_planar_dyn_param_fusing, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_perchannel_dyn_param_fusing = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param_fusing()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_perchannel_dyn_param_fusing = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param_fusing()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + 
::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_PerChannel())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_perchannel_dyn_param_fusing, EltwiseLayerCPUTest, params_4D_perchannel_dyn_param_fusing, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_dyn_param_fusing_Blocked_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param_fusing()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_dyn_param_fusing_Blocked_Blocked = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param_fusing()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_dyn_param_fusing_Blocked_Blocked, EltwiseLayerCPUTest, params_4D_dyn_param_fusing_Blocked_Blocked, EltwiseLayerCPUTest::getTestCaseName); -const auto params_4D_blocked_blocked_dyn_param_fusing = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_4D_dyn_param_fusing()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::Values(ElementType::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), - ::testing::ValuesIn(fusingParamsSet_x64), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_4D_blocked_blocked_dyn_param_fusing = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_4D_dyn_param_fusing()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::Values(ElementType::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + 
::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D_Blocked_Blocked())), + ::testing::ValuesIn(fusingParamsSet_x64), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_blocked_blocked_dyn_param_fusing, EltwiseLayerCPUTest, params_4D_blocked_blocked_dyn_param_fusing, EltwiseLayerCPUTest::getTestCaseName); //// ============================================ 5D ============================================ -const auto params_5D_dyn_const_Blocked_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_5D_dyn_const()), - ::testing::ValuesIn(eltwiseOpTypesBinInp()), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_5D_dyn_const_Blocked_Blocked = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_5D_dyn_const()), + ::testing::ValuesIn(eltwiseOpTypesBinInp()), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_const_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_dyn_const_Blocked_Blocked, EltwiseLayerCPUTest::getTestCaseName); -const auto params_5D_dyn_param_Blocked_Blocked = ::testing::Combine( - ::testing::Combine( - ::testing::Values(inShapes_5D_dyn_param()), - ::testing::ValuesIn(eltwiseOpTypesBinDyn()), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::ValuesIn(opTypes()), - ::testing::ValuesIn(netType()), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additional_config())), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); // CPU Plugin supports only planar layout for dynamic Subgraphs +const auto params_5D_dyn_param_Blocked_Blocked = + ::testing::Combine(::testing::Combine(::testing::Values(inShapes_5D_dyn_param()), + ::testing::ValuesIn(eltwiseOpTypesBinDyn()), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::ValuesIn(opTypes()), + ::testing::ValuesIn(netType()), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additional_config())), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D_Blocked_Blocked())), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); 
// CPU Plugin supports only planar layout for dynamic Subgraphs INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_5D_MemOrder_dyn_param_Blocked_Blocked, EltwiseLayerCPUTest, params_5D_dyn_param_Blocked_Blocked, EltwiseLayerCPUTest::getTestCaseName); @@ -662,91 +635,74 @@ static const std::vector> bitwise_in_shapes_4D = { const auto params_4D_bitwise = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ - ov::test::utils::EltwiseTypes::BITWISE_AND, - ov::test::utils::EltwiseTypes::BITWISE_OR, - ov::test::utils::EltwiseTypes::BITWISE_XOR - }), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR}), ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc, nhwc }, { nhwc }, {}, {}), - CPUSpecificParams({ nchw, nchw }, { nchw }, {}, {}) - }), + ::testing::ValuesIn( + {CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, {}), CPUSpecificParams({nchw, nchw}, {nchw}, {}, {})}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise, EltwiseLayerCPUTest, params_4D_bitwise, EltwiseLayerCPUTest::getTestCaseName); - const auto params_4D_bitwise_i32 = ::testing::Combine( ::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ - ov::test::utils::EltwiseTypes::BITWISE_AND, - ov::test::utils::EltwiseTypes::BITWISE_OR, - ov::test::utils::EltwiseTypes::BITWISE_XOR - }), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_AND, + ov::test::utils::EltwiseTypes::BITWISE_OR, + ov::test::utils::EltwiseTypes::BITWISE_XOR}), ::testing::ValuesIn(secondaryInputTypes()), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i16, ov::element::Type_t::u16, ov::element::Type_t::u32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc, nhwc }, { nhwc }, {}, "*_I32"), - CPUSpecificParams({ nchw, nchw }, { nchw }, {}, "*_I32") - }), + ::testing::ValuesIn( + {CPUSpecificParams({nhwc, nhwc}, {nhwc}, {}, "*_I32"), CPUSpecificParams({nchw, nchw}, {nchw}, {}, "*_I32")}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_i32, EltwiseLayerCPUTest, params_4D_bitwise_i32, EltwiseLayerCPUTest::getTestCaseName); - const auto params_4D_bitwise_NOT = ::testing::Combine( 
::testing::Combine( ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_NOT}), + ::testing::ValuesIn({ov::test::utils::InputLayerType::CONSTANT}), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::u8, ov::element::Type_t::i32}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc }, { nhwc }, {}, {}), - CPUSpecificParams({ nchw }, { nchw }, {}, {}) - }), + ::testing::ValuesIn({CPUSpecificParams({nhwc}, {nhwc}, {}, {}), CPUSpecificParams({nchw}, {nchw}, {}, {})}), ::testing::Values(emptyFusingSpec), ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT, EltwiseLayerCPUTest, params_4D_bitwise_NOT, EltwiseLayerCPUTest::getTestCaseName); - -const auto params_4D_bitwise_NOT_i32 = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(bitwise_in_shapes_4D), - ::testing::ValuesIn({ ov::test::utils::EltwiseTypes::BITWISE_NOT }), - ::testing::ValuesIn({ ov::test::utils::InputLayerType::CONSTANT }), - ::testing::ValuesIn({ ov::test::utils::OpType::VECTOR }), - ::testing::ValuesIn({ ov::element::Type_t::i16 }), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::element::Type_t::undefined), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(ov::AnyMap())), - ::testing::ValuesIn({ - CPUSpecificParams({ nhwc }, { nhwc }, {}, "*_I32"), - CPUSpecificParams({ nchw }, { nchw }, {}, "*_I32") - }), - ::testing::Values(emptyFusingSpec), - ::testing::Values(false)); +const auto params_4D_bitwise_NOT_i32 = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(bitwise_in_shapes_4D), + ::testing::ValuesIn({ov::test::utils::EltwiseTypes::BITWISE_NOT}), + ::testing::ValuesIn({ov::test::utils::InputLayerType::CONSTANT}), + ::testing::ValuesIn({ov::test::utils::OpType::VECTOR}), + ::testing::ValuesIn({ov::element::Type_t::i16}), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::element::Type_t::dynamic), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(ov::AnyMap())), + ::testing::ValuesIn({CPUSpecificParams({nhwc}, {nhwc}, {}, "*_I32"), + CPUSpecificParams({nchw}, {nchw}, {}, "*_I32")}), + ::testing::Values(emptyFusingSpec), + ::testing::Values(false)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_4D_Bitwise_NOT_i32, EltwiseLayerCPUTest, params_4D_bitwise_NOT_i32, EltwiseLayerCPUTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/matmul.cpp index 83faa2c06ec6f6..c9910641411794 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/matmul.cpp @@ -67,12 +67,12 @@ const std::vector 
matmulFusingParamsNightly { }; const auto matMulParams_x64 = ::testing::Combine(::testing::ValuesIn(IS_x64), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto testParams_Static_IS_x64 = ::testing::Combine(matMulParams_x64, ::testing::Values(MatMulNodeType::MatMul), @@ -82,12 +82,12 @@ const auto testParams_Static_IS_x64 = ::testing::Combine(matMulParams_x64, INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_IS_x64, MatMulLayerCPUTest, testParams_Static_IS_x64, MatMulLayerCPUTest::getTestCaseName); const auto matMulParams_x64_FP16 = ::testing::Combine(::testing::ValuesIn(IS_x64), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams_Static_IS_x64_FP16 = ::testing::Combine(matMulParams_x64_FP16, ::testing::Values(MatMulNodeType::MatMul), @@ -96,53 +96,57 @@ const auto testParams_Static_IS_x64_FP16 = ::testing::Combine(matMulParams_x64_F INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_IS_x64_FP16, MatMulLayerCPUTest, testParams_Static_IS_x64_FP16, MatMulLayerCPUTest::getTestCaseName); -const auto testParams2D_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_smoke), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); - -const auto testParams2DBF16_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2DBF16), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); - -const auto testParams2DFP16_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)), - ::testing::Values(MatMulNodeType::FullyConnected), - 
::testing::ValuesIn(fusingParamsSet2DFP16), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16(specificParams_FP16))); +const auto testParams2D_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_smoke), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); + +const auto testParams2DBF16_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2DBF16), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); + +const auto testParams2DFP16_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2DFP16), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16(specificParams_FP16))); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerCPUTest, testParams2D_smoke, MatMulLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_BF16, MatMulLayerCPUTest, testParams2DBF16_smoke, MatMulLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP16, MatMulLayerCPUTest, testParams2DFP16_smoke, MatMulLayerCPUTest::getTestCaseName); -const auto testParams2D_nightly = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values((emptyAdditionalConfig()))), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_nightly), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); +const auto testParams2D_nightly = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values((emptyAdditionalConfig()))), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_nightly), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); std::vector filterAdditionalConfig_Brgemm() { #ifndef OV_CPU_WITH_MLAS @@ -241,21 +245,23 @@ std::vector fusingParamsSet2D_Brgemm_smoke { fusingReluScaleShift }; -const auto fullyConnectedParams2D_Brgemm_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_smoke), - 
::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); - -const auto fullyConnectedParams2D_Brgemm_FP16_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); +const auto fullyConnectedParams2D_Brgemm_smoke = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); + +const auto fullyConnectedParams2D_Brgemm_FP16_smoke = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams2D_Brgemm_smoke = ::testing::Combine(fullyConnectedParams2D_Brgemm_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -286,8 +292,8 @@ const std::vector IS_brgemm_smoke = { const auto matMulBrgemmParams_smoke = ::testing::Combine(::testing::ValuesIn(IS_brgemm_smoke), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -300,12 +306,12 @@ const auto testBrgemmParams_smoke = ::testing::Combine(matMulBrgemmParams_smoke, INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Static, MatMulLayerCPUTest, testBrgemmParams_smoke, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmParams_FP16_smoke = ::testing::Combine(::testing::ValuesIn(IS_brgemm_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmParams_FP16_smoke = ::testing::Combine(matMulBrgemmParams_FP16_smoke, ::testing::Values(MatMulNodeType::MatMul), @@ -329,12 +335,12 @@ const std::vector IS_brgemm_nightly = { }; const auto matMulBrgemmParams_nightly = ::testing::Combine(::testing::ValuesIn(IS_brgemm_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - 
::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); const auto testBrgemmParams_nightly = ::testing::Combine(matMulBrgemmParams_nightly, ::testing::Values(MatMulNodeType::MatMul), @@ -344,12 +350,12 @@ const auto testBrgemmParams_nightly = ::testing::Combine(matMulBrgemmParams_nigh INSTANTIATE_TEST_SUITE_P(nightly_MM_Brgemm_Static, MatMulLayerCPUTest, testBrgemmParams_nightly, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmParams_FP16_nightly = ::testing::Combine(::testing::ValuesIn(IS_brgemm_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmParams_FP16_nightly = ::testing::Combine(matMulBrgemmParams_FP16_nightly, ::testing::Values(MatMulNodeType::MatMul), @@ -419,8 +425,8 @@ const std::vector IS_Brgemm_Dynamic = { const auto matMulBrgemmParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Brgemm_Dynamic), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -433,12 +439,12 @@ const auto testBrgemmParamsDynamic = ::testing::Combine(matMulBrgemmParamsDynami INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Dynamic, MatMulLayerCPUTest, testBrgemmParamsDynamic, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmParamsDynamic_FP16 = ::testing::Combine(::testing::ValuesIn(IS_Brgemm_Dynamic), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmParamsDynamic_FP16 = ::testing::Combine(matMulBrgemmParamsDynamic_FP16, ::testing::Values(MatMulNodeType::MatMul), @@ -497,12 +503,12 @@ const auto testParams_x64 = ::testing::Combine(matMulParams_x64, INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_Fusing_x64, MatMulLayerCPUTest, testParams_x64, MatMulLayerCPUTest::getTestCaseName); const auto matMulParamsDynamicFusing = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - 
::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto testParamsDynamicFusing = ::testing::Combine(matMulParamsDynamicFusing, ::testing::Values(MatMulNodeType::MatMul), @@ -512,12 +518,12 @@ const auto testParamsDynamicFusing = ::testing::Combine(matMulParamsDynamicFusin INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic_Fusing, MatMulLayerCPUTest, testParamsDynamicFusing, MatMulLayerCPUTest::getTestCaseName); const auto matMulParamsDynamicFusing_FP16 = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParamsDynamicFusing_FP16 = ::testing::Combine(matMulParamsDynamicFusing_FP16, ::testing::Values(MatMulNodeType::MatMul), @@ -528,8 +534,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic_Fusing_FP16, MatMulLayerCPUTest, testP const auto matMulParamsBrgemmDynamicFusing = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::Values(utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_CPU), ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); @@ -541,13 +547,14 @@ const auto testParamsBrgemmDynamicFusing = ::testing::Combine(matMulParamsBrgemm INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Dynamic_Fusing, MatMulLayerCPUTest, testParamsBrgemmDynamicFusing, MatMulLayerCPUTest::getTestCaseName); -const auto matMulParamsBrgemmDynamicFusing_FP16 = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); +const auto matMulParamsBrgemmDynamicFusing_FP16 = + ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParamsBrgemmDynamicFusing_FP16 = ::testing::Combine(matMulParamsBrgemmDynamicFusing_FP16, ::testing::Values(MatMulNodeType::MatMul), @@ -589,12 +596,12 @@ const std::vector IS_brgemm_Amx_smoke = { }; const auto matMulBrgemmAmxParams_smoke = ::testing::Combine(::testing::ValuesIn(IS_brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); std::vector matmulBrgemmAmxFusingParams { emptyFusingSpec, @@ -612,12 +619,12 @@ const auto testBrgemmAmxParams_smoke = ::testing::Combine(matMulBrgemmAmxParams_ INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Amx_Static, MatMulLayerCPUTest, testBrgemmAmxParams_smoke, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmAmxParams_FP16_smoke = ::testing::Combine(::testing::ValuesIn(IS_brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmAmxParams_FP16_smoke = ::testing::Combine(matMulBrgemmAmxParams_FP16_smoke, ::testing::Values(MatMulNodeType::MatMul), @@ -627,12 +634,12 @@ const auto testBrgemmAmxParams_FP16_smoke = ::testing::Combine(matMulBrgemmAmxPa INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Amx_Static_FP16, MatMulLayerCPUTest, testBrgemmAmxParams_FP16_smoke, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmAmxParams_nightly = ::testing::Combine(::testing::ValuesIn(IS_brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); const auto testBrgemmAmxParams_nightly = ::testing::Combine(matMulBrgemmAmxParams_nightly, ::testing::Values(MatMulNodeType::MatMul), @@ -642,12 +649,12 @@ const auto testBrgemmAmxParams_nightly = ::testing::Combine(matMulBrgemmAmxParam INSTANTIATE_TEST_SUITE_P(nightly_MM_Brgemm_Amx_Static, MatMulLayerCPUTest, testBrgemmAmxParams_nightly, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmAmxParams_FP16_nightly = ::testing::Combine(::testing::ValuesIn(IS_brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + 
::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmAmxParams_FP16_nightly = ::testing::Combine(matMulBrgemmAmxParams_FP16_nightly, ::testing::Values(MatMulNodeType::MatMul), @@ -657,12 +664,12 @@ const auto testBrgemmAmxParams_FP16_nightly = ::testing::Combine(matMulBrgemmAmx INSTANTIATE_TEST_SUITE_P(nightly_MM_Brgemm_Amx_Static_FP16, MatMulLayerCPUTest, testBrgemmAmxParams_FP16_nightly, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmAmxParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Brgemm_Dynamic), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); const auto testBrgemmAmxParamsDynamic = ::testing::Combine(matMulBrgemmAmxParamsDynamic, ::testing::Values(MatMulNodeType::MatMul), @@ -672,12 +679,12 @@ const auto testBrgemmAmxParamsDynamic = ::testing::Combine(matMulBrgemmAmxParams INSTANTIATE_TEST_SUITE_P(smoke_MM_Brgemm_Amx_Dynamic, MatMulLayerCPUTest, testBrgemmAmxParamsDynamic, MatMulLayerCPUTest::getTestCaseName); const auto matMulBrgemmAmxParamsDynamic_FP16 = ::testing::Combine(::testing::ValuesIn(IS_Brgemm_Dynamic), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testBrgemmAmxParamsDynamic_FP16 = ::testing::Combine(matMulBrgemmAmxParamsDynamic_FP16, ::testing::Values(MatMulNodeType::MatMul), @@ -718,13 +725,14 @@ std::vector filterSpecificParams_Brgconv1x1() { return specificParams; } -const auto fullyConnectedParams2D_Brgconv1x1_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_Brgconv1x1_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())); +const auto fullyConnectedParams2D_Brgconv1x1_smoke = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgconv1x1_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())); const auto testParams2D_Brgconv1x1_smoke = ::testing::Combine(fullyConnectedParams2D_Brgconv1x1_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -771,13 +779,14 @@ const std::vector IS3D_Brgconv1x1_smoke = { }, }; -const auto fullyConnectedParams3D_Brgconv1x1_smoke = 
::testing::Combine(::testing::ValuesIn(IS3D_Brgconv1x1_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())); +const auto fullyConnectedParams3D_Brgconv1x1_smoke = + ::testing::Combine(::testing::ValuesIn(IS3D_Brgconv1x1_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())); const auto testParams3D_Brgconv1x1_smoke = ::testing::Combine(fullyConnectedParams3D_Brgconv1x1_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -786,13 +795,14 @@ const auto testParams3D_Brgconv1x1_smoke = ::testing::Combine(fullyConnectedPara INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_Brgconv1x1, MatMulLayerCPUTest, testParams3D_Brgconv1x1_smoke, MatMulLayerCPUTest::getTestCaseName); -const auto fullyConnectedParams2D_Brgemm_Amx_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); +const auto fullyConnectedParams2D_Brgemm_Amx_smoke = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_Amx_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); const auto testParams2D_Brgemm_Amx_smoke = ::testing::Combine(fullyConnectedParams2D_Brgemm_Amx_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -801,13 +811,14 @@ const auto testParams2D_Brgemm_Amx_smoke = ::testing::Combine(fullyConnectedPara INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_Brgemm_Amx, MatMulLayerCPUTest, testParams2D_Brgemm_Amx_smoke, MatMulLayerCPUTest::getTestCaseName); -const auto fullyConnectedParams2D_FP16_Brgemm_Amx_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_Amx_smoke), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); +const auto fullyConnectedParams2D_FP16_Brgemm_Amx_smoke = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_Amx_smoke), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams2D_FP16_Brgemm_Amx_smoke = ::testing::Combine(fullyConnectedParams2D_FP16_Brgemm_Amx_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -839,13 +850,14 @@ const std::vector IS2D_Brgemm_nightly = { }, }; -const auto fullyConnectedParams2D_Brgemm_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), - ::testing::Values(ElementType::f32), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); +const auto fullyConnectedParams2D_Brgemm_nightly = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_Brgemm())); const auto testParams2D_Brgemm_nightly = ::testing::Combine(fullyConnectedParams2D_Brgemm_nightly, ::testing::Values(MatMulNodeType::FullyConnected), @@ -854,13 +866,14 @@ const auto testParams2D_Brgemm_nightly = ::testing::Combine(fullyConnectedParams INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_Brgemm, MatMulLayerCPUTest, testParams2D_Brgemm_nightly, MatMulLayerCPUTest::getTestCaseName); -const auto fullyConnectedParams2D_FP16_Brgemm_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); +const auto fullyConnectedParams2D_FP16_Brgemm_nightly = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams2D_FP16_Brgemm_nightly = ::testing::Combine(fullyConnectedParams2D_FP16_Brgemm_nightly, ::testing::Values(MatMulNodeType::FullyConnected), @@ -869,13 +882,14 @@ const auto testParams2D_FP16_Brgemm_nightly = ::testing::Combine(fullyConnectedP INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_Brgemm_FP16, MatMulLayerCPUTest, testParams2D_FP16_Brgemm_nightly, MatMulLayerCPUTest::getTestCaseName); -const auto fullyConnectedParams2D_Brgemm_Amx_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); +const auto fullyConnectedParams2D_Brgemm_Amx_nightly = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(filterAdditionalConfig_BrgemmAmx())); const auto testParams2D_Brgemm_Amx_nightly = ::testing::Combine(fullyConnectedParams2D_Brgemm_Amx_nightly, ::testing::Values(MatMulNodeType::FullyConnected), @@ -884,13 +898,14 @@ const auto testParams2D_Brgemm_Amx_nightly = ::testing::Combine(fullyConnectedPa INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_Brgemm_Amx, MatMulLayerCPUTest, testParams2D_Brgemm_Amx_nightly, MatMulLayerCPUTest::getTestCaseName); -const auto fullyConnectedParams2D_FP16_Brgemm_Amx_nightly = 
::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); +const auto fullyConnectedParams2D_FP16_Brgemm_Amx_nightly = + ::testing::Combine(::testing::ValuesIn(IS2D_Brgemm_nightly), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams2D_FP16_Brgemm_Amx_nightly = ::testing::Combine(fullyConnectedParams2D_FP16_Brgemm_Amx_nightly, ::testing::Values(MatMulNodeType::FullyConnected), @@ -899,27 +914,29 @@ const auto testParams2D_FP16_Brgemm_Amx_nightly = ::testing::Combine(fullyConnec INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_Brgemm_Amx_FP16, MatMulLayerCPUTest, testParams2D_FP16_Brgemm_Amx_nightly, MatMulLayerCPUTest::getTestCaseName); -const auto testParams2DBF16_nightly = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2DBF16), - ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); - -const auto testParams2DFP16_nightly = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2DFP16), - ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16(specificParams_FP16))); +const auto testParams2DBF16_nightly = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2DBF16), + ::testing::ValuesIn(filterCPUInfo(filterSpecificParams()))); + +const auto testParams2DFP16_nightly = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2DFP16), + ::testing::ValuesIn(filterCPUInfoForDeviceWithFP16(specificParams_FP16))); INSTANTIATE_TEST_SUITE_P(nightly_FC_2D, MatMulLayerCPUTest, testParams2D_nightly, MatMulLayerCPUTest::getTestCaseName); 
INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_BF16, MatMulLayerCPUTest, testParams2DBF16_nightly, MatMulLayerCPUTest::getTestCaseName); @@ -949,20 +966,20 @@ std::vector fusingParamsSet3DFP16 { }; const auto fullyConnectedParams3DBF16_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto fullyConnectedParams3DFP16_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams3DBF16_smoke = ::testing::Combine(fullyConnectedParams3DBF16_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -978,12 +995,12 @@ INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_BF16, MatMulLayerCPUTest, testParams3DBF16_ INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_FP16, MatMulLayerCPUTest, testParams3DFP16_smoke, MatMulLayerCPUTest::getTestCaseName); const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())); const auto testParams3D_smoke = ::testing::Combine(fullyConnectedParams3D_smoke, ::testing::Values(MatMulNodeType::FullyConnected), @@ -1027,28 +1044,28 @@ const std::vector IS3D_nightly = { }; const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())); + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())); const auto fullyConnectedParams3DBF16_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly), - ::testing::ValuesIn(netPRCs()), - ::testing::Values(ElementType::undefined), - 
::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::ValuesIn(additionalConfig())); + ::testing::ValuesIn(netPRCs()), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::ValuesIn(additionalConfig())); const auto fullyConnectedParams3DFP16_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly), - ::testing::Values(ov::element::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(cpu_f16_plugin_config)); + ::testing::Values(ov::element::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(cpu_f16_plugin_config)); const auto testParams3D_nightly = ::testing::Combine(fullyConnectedParams3D_nightly, ::testing::Values(MatMulNodeType::FullyConnected), @@ -1089,13 +1106,14 @@ const fusingSpecificParams matmulFullDynInputsFusingParams[] = { fusingAddPerChannel }; -const auto matMulParamsDynamicFusingFullUndefShapes = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())); +const auto matMulParamsDynamicFusingFullUndefShapes = + ::testing::Combine(::testing::ValuesIn(IS_Dynamic_Fusing), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())); const auto testParamsDynamicFusingFullUndefShapes = ::testing::Combine(matMulParamsDynamicFusingFullUndefShapes, ::testing::Values(MatMulNodeType::MatMul), @@ -1134,16 +1152,17 @@ const std::vector& notFuseSmoke() { return params; } -const auto notFuseTestParamsSmoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(notFuseSmoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn({fusingFakeQuantizePerBatch, fusingFakeQuantizeFullTensor}), - ::testing::ValuesIn({CPUSpecificParams{{}, {}, {""}, "any_type"}})); +const auto notFuseTestParamsSmoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(notFuseSmoke()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn({fusingFakeQuantizePerBatch, fusingFakeQuantizeFullTensor}), + ::testing::ValuesIn({CPUSpecificParams{{}, 
{}, {""}, "any_type"}})); INSTANTIATE_TEST_SUITE_P(smoke_FC, FCNotFuseFQCPUTest, notFuseTestParamsSmoke, FCNotFuseFQCPUTest::getTestCaseName); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/mlas/matmul.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/mlas/matmul.cpp index 108b45e50deb06..b40269a0b46239 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/mlas/matmul.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/mlas/matmul.cpp @@ -26,31 +26,33 @@ std::vector fusingParamsSet3D_MLAS_smoke { fusingMultiplyPerChannel }; -const auto testParams3D_MLAS_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS3D_smoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet3D_MLAS_smoke), - ::testing::ValuesIn(filterSpecificParams_MLAS())); +const auto testParams3D_MLAS_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS3D_smoke()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet3D_MLAS_smoke), + ::testing::ValuesIn(filterSpecificParams_MLAS())); INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_MLAS, MatMulLayerCPUTest, testParams3D_MLAS_smoke, MatMulLayerCPUTest::getTestCaseName); std::vector fusingParamsSet2D_MLAS_nightly { fusingScaleShift }; -const auto testParams2D_MLAS_nightly = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_MLAS_nightly), - ::testing::ValuesIn(filterSpecificParams_MLAS())); +const auto testParams2D_MLAS_nightly = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_nightly()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_MLAS_nightly), + ::testing::ValuesIn(filterSpecificParams_MLAS())); INSTANTIATE_TEST_SUITE_P(nightly_FC_2D_MLAS, MatMulLayerCPUTest, testParams2D_MLAS_nightly, MatMulLayerCPUTest::getTestCaseName); @@ -60,19 +62,20 @@ std::vector fusingParamsSet2D_MLAS_smoke { fusingMultiplyPerChannel }; -const auto testParams2D_MLAS_smoke = ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), - ::testing::Values(ElementType::f32), - ::testing::Values(ElementType::undefined), - 
::testing::Values(ElementType::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_CPU), - ::testing::Values(emptyAdditionalConfig())), - ::testing::Values(MatMulNodeType::FullyConnected), - ::testing::ValuesIn(fusingParamsSet2D_MLAS_smoke), - ::testing::ValuesIn(filterSpecificParams_MLAS())); +const auto testParams2D_MLAS_smoke = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(IS2D_smoke()), + ::testing::Values(ElementType::f32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_CPU), + ::testing::Values(emptyAdditionalConfig())), + ::testing::Values(MatMulNodeType::FullyConnected), + ::testing::ValuesIn(fusingParamsSet2D_MLAS_smoke), + ::testing::ValuesIn(filterSpecificParams_MLAS())); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_MLAS, MatMulLayerCPUTest, testParams2D_MLAS_smoke, MatMulLayerCPUTest::getTestCaseName); #endif } // namespace } // namespace MatMul } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp index af4319d66a6efe..1301385a3a0616 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/reduce.cpp @@ -192,159 +192,141 @@ const std::vector fusingParamsSet_LowPrecision { }; /* ================================ 1.1 No fusion - Arithmetic ================================ */ -const auto params_OneAxis = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::ValuesIn(opTypes()), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_4D = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_5D = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_4D_Hybrid = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - 
testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_5D_Hybrid = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_6D = testing::Combine( - testing::Combine( - testing::ValuesIn(axes6D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_6D_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_Int32 = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypesInt32()), - testing::Values(ElementType::i32), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_Int32_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_NativeInt32 = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypesNativeInt32()), - testing::Values(ElementType::i32), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_NativeInt32_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_NativeInt32Gather = testing::Combine( - testing::Combine( - testing::ValuesIn(axesGather), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn(reductionTypesNativeInt32()), - testing::Values(ElementType::i32), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_NativeInt32Gather_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_NHWC_SmallChannel = testing::Combine( - testing::Combine( - testing::ValuesIn(axesHW), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_SmallChannel_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_NHWC_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); - -const auto params_SingleBatch = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - 
testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypes()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_SingleBatch_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_NHWC_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfig())); +const auto params_OneAxis = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::ValuesIn(opTypes()), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_4D = testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_5D = testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_4D_Hybrid = + testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_5D_Hybrid = + testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_6D = testing::Combine(testing::Combine(testing::ValuesIn(axes6D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_6D_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_Int32 = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + 
testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypesInt32()), + testing::Values(ElementType::i32), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_Int32_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_NativeInt32 = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypesNativeInt32()), + testing::Values(ElementType::i32), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_NativeInt32_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_NativeInt32Gather = + testing::Combine(testing::Combine(testing::ValuesIn(axesGather), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn(reductionTypesNativeInt32()), + testing::Values(ElementType::i32), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_NativeInt32Gather_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_NHWC_SmallChannel = + testing::Combine(testing::Combine(testing::ValuesIn(axesHW), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_SmallChannel_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_NHWC_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); + +const auto params_SingleBatch = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypes()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_SingleBatch_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_NHWC_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfig())); INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_CPU, @@ -424,89 +406,82 @@ INSTANTIATE_TEST_SUITE_P( ); /* ================================ 1.2 No fusion - Logical ================================ */ -const auto params_OneAxis_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::ValuesIn(opTypes()), - testing::ValuesIn(keepDims()), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_4D_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - 
testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_5D_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_4D_Hybrid_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_5D_Hybrid_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_6D_Logical = testing::Combine( - testing::Combine( - testing::ValuesIn(axes6D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::ValuesIn(keepDims()), - testing::ValuesIn((reductionLogicalTypes)), - testing::Values(ElementType::boolean), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_6D_dyn)), - testing::Values(emptyCPUSpec), - testing::Values(emptyFusingSpec), - testing::ValuesIn(additionalConfigFP32())); +const auto params_OneAxis_Logical = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::ValuesIn(opTypes()), + testing::ValuesIn(keepDims()), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_4D_Logical = + testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_5D_Logical = + testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + 
testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_4D_Hybrid_Logical = + testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_5D_Hybrid_Logical = + testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_6D_Logical = + testing::Combine(testing::Combine(testing::ValuesIn(axes6D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::ValuesIn(keepDims()), + testing::ValuesIn((reductionLogicalTypes)), + testing::Values(ElementType::boolean), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_6D_dyn)), + testing::Values(emptyCPUSpec), + testing::Values(emptyFusingSpec), + testing::ValuesIn(additionalConfigFP32())); INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_Logical_CPU, @@ -551,103 +526,95 @@ INSTANTIATE_TEST_SUITE_P( ); /* ================================ 2.1 Fusion - KeepDims ================================ */ -const auto params_OneAxis_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::ValuesIn(opTypes()), - testing::Values(true), - testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::Values(emptyCPUSpec), - testing::ValuesIn(fusingParamsSet), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_3D_fusing = testing::Combine( - testing::Combine( - testing::Values(axes()[2]), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::Values(ov::test::utils::ReductionType::Sum), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_3D_fuse_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_3D)), - testing::Values(fusingFakeQuantizePerChannelRelu), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_4D_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axesND()), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypesFusing), - 
testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::ValuesIn(fusingParamsSet), - testing::ValuesIn(additionalConfig())); - -const auto params_MultiAxis_5D_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5D), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), - testing::ValuesIn(fusingParamsSet), - testing::ValuesIn(additionalConfig())); - -const auto params_LowPrecision_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axesNDFusing), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), - testing::ValuesIn(fusingParamsSet_LowPrecision), - testing::ValuesIn(additionalConfig())); - -const auto params_DimZero_Arithmetic_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axesZeroDimFusing), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypesArithmetic()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), - testing::Values(emptyCPUSpec), - testing::ValuesIn(fusingParamsFullSet), - testing::ValuesIn(additionalConfig())); - -const auto params_DimZero_Compare_fusing = testing::Combine( - testing::Combine( - testing::ValuesIn(axesZeroDimFusing), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(true), - testing::ValuesIn(reductionTypesCompare()), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), - testing::Values(emptyCPUSpec), - testing::ValuesIn(fusingParamsFullSet), - testing::ValuesIn(additionalConfigFP32())); +const auto params_OneAxis_fusing = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::ValuesIn(opTypes()), + testing::Values(true), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsSet), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_3D_fusing = + testing::Combine(testing::Combine(testing::Values(axes()[2]), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::Values(ov::test::utils::ReductionType::Sum), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_3D_fuse_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_3D)), + testing::Values(fusingFakeQuantizePerChannelRelu), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_4D_fusing = + 
testing::Combine(testing::Combine(testing::ValuesIn(axesND()), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::ValuesIn(fusingParamsSet), + testing::ValuesIn(additionalConfig())); + +const auto params_MultiAxis_5D_fusing = + testing::Combine(testing::Combine(testing::ValuesIn(axes5D), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_5D)), + testing::ValuesIn(fusingParamsSet), + testing::ValuesIn(additionalConfig())); + +const auto params_LowPrecision_fusing = + testing::Combine(testing::Combine(testing::ValuesIn(axesNDFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_4D)), + testing::ValuesIn(fusingParamsSet_LowPrecision), + testing::ValuesIn(additionalConfig())); + +const auto params_DimZero_Arithmetic_fusing = + testing::Combine(testing::Combine(testing::ValuesIn(axesZeroDimFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesArithmetic()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsFullSet), + testing::ValuesIn(additionalConfig())); + +const auto params_DimZero_Compare_fusing = + testing::Combine(testing::Combine(testing::ValuesIn(axesZeroDimFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(true), + testing::ValuesIn(reductionTypesCompare()), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_Dynmic_ZeroDim)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsFullSet), + testing::ValuesIn(additionalConfigFP32())); INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_fusing_CPU, @@ -699,47 +666,43 @@ INSTANTIATE_TEST_SUITE_P( ); /* ================================ 2.2 Fusion - KeepNoDims ================================ */ -const auto params_OneAxis_fusing_KeepNoDims = testing::Combine( - testing::Combine( - testing::ValuesIn(axes()), - testing::ValuesIn(opTypes()), - testing::Values(false), - testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::Values(emptyCPUSpec), - testing::ValuesIn(fusingParamsSet_KeepNoDims), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_4D_Hybrid_fusing_KeepNoDims = testing::Combine( - testing::Combine( - testing::ValuesIn(axesNDFusing), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - 
testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_dyn)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), - testing::ValuesIn(fusingParamsSet_KeepNoDims), - testing::ValuesIn(additionalConfigFP32())); - -const auto params_MultiAxis_5D_Hybrid_fusing_KeepNoDims = testing::Combine( - testing::Combine( - testing::ValuesIn(axes5DFusing), - testing::Values(ov::test::utils::OpType::VECTOR), - testing::Values(false), - testing::ValuesIn(reductionTypesFusing), - testing::ValuesIn(inpOutPrc()), - testing::Values(ElementType::undefined), - testing::Values(ElementType::undefined), - testing::ValuesIn(inputShapes_5D)), - testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), - testing::ValuesIn(fusingParamsSet_KeepNoDims), - testing::ValuesIn(additionalConfigFP32())); +const auto params_OneAxis_fusing_KeepNoDims = testing::Combine(testing::Combine(testing::ValuesIn(axes()), + testing::ValuesIn(opTypes()), + testing::Values(false), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::Values(emptyCPUSpec), + testing::ValuesIn(fusingParamsSet_KeepNoDims), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_4D_Hybrid_fusing_KeepNoDims = + testing::Combine(testing::Combine(testing::ValuesIn(axesNDFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_dyn)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_4D)), + testing::ValuesIn(fusingParamsSet_KeepNoDims), + testing::ValuesIn(additionalConfigFP32())); + +const auto params_MultiAxis_5D_Hybrid_fusing_KeepNoDims = + testing::Combine(testing::Combine(testing::ValuesIn(axes5DFusing), + testing::Values(ov::test::utils::OpType::VECTOR), + testing::Values(false), + testing::ValuesIn(reductionTypesFusing), + testing::ValuesIn(inpOutPrc()), + testing::Values(ElementType::dynamic), + testing::Values(ElementType::dynamic), + testing::ValuesIn(inputShapes_5D)), + testing::ValuesIn(filterCPUSpecificParams(cpuParams_HybridLayout_5D)), + testing::ValuesIn(fusingParamsSet_KeepNoDims), + testing::ValuesIn(additionalConfigFP32())); INSTANTIATE_TEST_SUITE_P( smoke_Reduce_OneAxis_fusing_KeepNoDims_CPU, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/nonzero.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/nonzero.cpp index f93deea3a9b295..78ec189b29074f 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/nonzero.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/nonzero.cpp @@ -30,7 +30,7 @@ class NonZeroLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - ElementType netType = ElementType::undefined; + ElementType netType = ElementType::dynamic; InputShape inputShape; std::tie(inputShape, netType) = basicParamsSet; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box.cpp index 
c355ec24113105..04aee1b1e12df9 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box.cpp @@ -197,16 +197,16 @@ const auto layerSpecificParams = ::testing::Combine( ::testing::ValuesIn(variances), ::testing::ValuesIn(scale_all_sizes)); -INSTANTIATE_TEST_SUITE_P(smoke_PriorBox, PriorBoxLayerCPUTest, - ::testing::Combine( - layerSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(ov::test::ElementType::undefined), - ::testing::ValuesIn(inputShape), - ::testing::ValuesIn(imageShape), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - PriorBoxLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_PriorBox, + PriorBoxLayerCPUTest, + ::testing::Combine(layerSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::ElementType::dynamic), + ::testing::Values(ov::test::ElementType::dynamic), + ::testing::ValuesIn(inputShape), + ::testing::ValuesIn(imageShape), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + PriorBoxLayerCPUTest::getTestCaseName); } // namespace } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box_clustered.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box_clustered.cpp index a33a5f19c74c87..3dafbc76b0e959 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box_clustered.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/prior_box_clustered.cpp @@ -189,17 +189,16 @@ const std::vector imageShapes = { {{{50, 100}, {50, 100}}, {{50, 50}, {100, 100}}} }; -INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, PriorBoxClusteredLayerCPUTest, - ::testing::Combine( - layerSpeficParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::ElementType::undefined), - ::testing::Values(ov::test::ElementType::undefined), - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(imageShapes), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - PriorBoxClusteredLayerCPUTest::getTestCaseName -); +INSTANTIATE_TEST_SUITE_P(smoke_PriorBoxClustered, + PriorBoxClusteredLayerCPUTest, + ::testing::Combine(layerSpeficParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::ElementType::dynamic), + ::testing::Values(ov::test::ElementType::dynamic), + ::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(imageShapes), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + PriorBoxClusteredLayerCPUTest::getTestCaseName); } // namespace } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/psroi_pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/psroi_pooling.cpp index 504f27eba33b0e..22df26f13342ab 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/psroi_pooling.cpp @@ -82,7 +82,7 @@ class PSROIPoolingLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - auto netPrecision = ElementType::undefined; + auto netPrecision = ElementType::dynamic; InputShape inputShape; std::tie(inputShape, netPrecision) = basicParamsSet; init_input_shapes({inputShape}); diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp index 9fdf819001e634..ab93c3ad607e88 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/topk.cpp @@ -272,32 +272,32 @@ std::vector cpuParams = {CPUSpecificParams({nChw16c, x}, {nCh CPUSpecificParams({nhwc, x}, {nhwc, nhwc}, {}, {})}; INSTANTIATE_TEST_SUITE_P(smoke_TopK, - TopKLayerCPUTest, - ::testing::Combine(::testing::Combine(::testing::ValuesIn(k), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::ValuesIn(additionalConfig)), - TopKLayerCPUTest::getTestCaseName); + TopKLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(k), + ::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypeStable), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), + ::testing::ValuesIn(additionalConfig)), + TopKLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TopK_dynamic, - TopKLayerCPUTest, - ::testing::Combine(::testing::Combine(::testing::Values(1), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesDynamic)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::ValuesIn(additionalConfig)), - TopKLayerCPUTest::getTestCaseName); + TopKLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypeStable), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesDynamic)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), + ::testing::ValuesIn(additionalConfig)), + TopKLayerCPUTest::getTestCaseName); const std::vector k_int32 = {1, 5, 7, 9}; @@ -309,32 +309,32 @@ std::vector inputShapesDynamic_int32 = { {{9, {5, 10}, 9, {5, 10}}, {{9, 9, 9, 9}, {9, 10, 9, 10}}}}; INSTANTIATE_TEST_SUITE_P(smoke_TopK_int32, - TopKLayerCPUTest, - ::testing::Combine(::testing::Combine(::testing::ValuesIn(k_int32), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::Values(ElementType::i32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes_int32)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::Values(additionalConfig[0])), - TopKLayerCPUTest::getTestCaseName); + TopKLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(k_int32), + ::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypeStable), + ::testing::Values(ElementType::i32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapes_int32)), + 
::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), + ::testing::Values(additionalConfig[0])), + TopKLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_TopK_int32_dynamic, - TopKLayerCPUTest, - ::testing::Combine(::testing::Combine(::testing::Values(1), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::Values(ElementType::i32), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesDynamic_int32)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::Values(additionalConfig[0])), - TopKLayerCPUTest::getTestCaseName); + TopKLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(1), + ::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypeStable), + ::testing::Values(ElementType::i32), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), + ::testing::ValuesIn(inputShapesDynamic_int32)), + ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), + ::testing::Values(additionalConfig[0])), + TopKLayerCPUTest::getTestCaseName); std::vector inputShapes_bubble_BLK_on_channel_horiz = { {{}, {{2, 2, 2, 2}}}, @@ -351,8 +351,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(modes), ::testing::ValuesIn(sortTypeStable), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes_bubble_BLK_on_channel_horiz)), ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})), ::testing::ValuesIn(additionalConfig)), @@ -366,8 +366,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(modes), ::testing::ValuesIn(sortTypeStable), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesDynamic_bubble_BLK_on_channel_horiz)), ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})), ::testing::ValuesIn(additionalConfig)), @@ -387,8 +387,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(SortMode::MAX), ::testing::Values(std::tuple(SortType::SORT_INDICES, false)), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapes_top1)), ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})), ::testing::ValuesIn(additionalConfig)), @@ -402,8 +402,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::Values(SortMode::MAX), ::testing::Values(std::tuple(SortType::SORT_INDICES, false)), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), + ::testing::Values(ElementType::dynamic), + ::testing::Values(ElementType::dynamic), ::testing::ValuesIn(inputShapesDynamic_top1)), ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})), ::testing::ValuesIn(additionalConfig)), diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/undefined_et.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/undefined_et.hpp index f57adb2d86a6f6..f76b44a76cd7d6 100644 --- 
a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/undefined_et.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/include/undefined_et.hpp @@ -27,7 +27,7 @@ class UndefinedEtSubgraphTest : public testing::WithParamInterface& target_shapes) override; hint::ExecutionMode m_mode; - element::Type m_data_et = element::undefined; + element::Type m_data_et = element::dynamic; }; } // namespace test diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp index 70812fd2580f1a..1586c45308fed5 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/arm/matmul_weights_decompression.cpp @@ -44,7 +44,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights, ::testing::Combine(::testing::ValuesIn(input_shapes), ::testing::ValuesIn(weights_precisions), ::testing::ValuesIn(decompression_precisions), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), ::testing::Values(true), ::testing::Values(DecompressionType::full), ::testing::Values(DecompressionType::full), @@ -73,7 +73,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases, ::testing::Combine(::testing::ValuesIn(input_shapes_corner_cases), ::testing::ValuesIn(weights_precisions), ::testing::ValuesIn(decompression_precisions_corner_cases), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), ::testing::ValuesIn(transpose_weights), ::testing::Values(DecompressionType::full), ::testing::ValuesIn(decompression_subtract_type), diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp index 9ff85a02db4495..ddc2e49ef391bc 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp @@ -44,7 +44,7 @@ std::string EltwiseChainTest::getTestCaseName(const testing::TestParamInfo disabledTestPatterns() { // TODO: 141068 R"(smoke_Snippets_FQDecomposition.*netPRC=f16_D=CPU.*)", // Issue: 160737 - R"(.*smoke_LPT/ConvolutionQDqTransformation.CompareWithRefImpl/f32_\[(1,3,4,4|4,3,4,4)\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32__.*_f32_\[\]_1_1_undefined__\{, 15\}_f32_\[\]__255_\[1,1,1,1\]_\{ -128 \}_\{ 127 \}__i8___f32__\{ -128 \}_.*_1_1_i8_.*)", - R"(.*smoke_LPT/GroupConvolutionQDqTransformation.CompareWithRefImpl/f32_\[1,6,24,24\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32_.*_undefinedoutput_original_f32_multiplyAfter=(false|true).*)", + R"(.*smoke_LPT/ConvolutionQDqTransformation.CompareWithRefImpl/f32_\[(1,3,4,4|4,3,4,4)\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32__.*_f32_\[\]_1_1_dynamic__\{, 15\}_f32_\[\]__255_\[1,1,1,1\]_\{ -128 \}_\{ 127 \}__i8___f32__\{ -128 \}_.*_1_1_i8_.*)", + 
R"(.*smoke_LPT/GroupConvolutionQDqTransformation.CompareWithRefImpl/f32_\[1,6,24,24\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32_.*_dynamicoutput_original_f32_multiplyAfter=(false|true).*)", // Issue: 160734 R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[1\]_\{ -18.7 \}_\{ 18.7 \}\}.*)", // Issue: 160735 @@ -261,7 +261,7 @@ std::vector disabledTestPatterns() { R"(.*nightly_FC_3D_BF16/MatMulLayerCPUTest.CompareWithRefs/FullyConnected_IS=\[\?.\?.50\]_\[50.7\]_TS=\(\(1.2.50\)_\(1.10.50\)_\(1.2.50\)_\(2.2.50\)\)_\(\(50.7\)_\(50.7\)_\(50.7\)_\(50.7\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", R"(.*smoke_MM_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=jit_gemm.*)", R"(.*(nightly|smoke)_MM_Brgemm_Static/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\]_\[\]_TS=\(\(55.12\)\)_\(\(12.55\)\)_.*config=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)", - R"(.*smoke_MM_Brgemm_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_netPRC=f32_inPRC=undefined_outPRC=undefined_trgDev=CPUconfig=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)", + R"(.*smoke_MM_Brgemm_Dynamic_Fusing/MatMulLayerCPUTest.CompareWithRefs/MatMul_IS=\[\?.\?\]_\[\?.33\]_TS=\(\(16.12\)_\(33.7\)_\(16.12\)\)_\(\(12.33\)_\(7.33\)_\(12.33\)\)_transpose_a=0_transpose_b=0_secondaryInputType=PARAMETER_netPRC=f32_inPRC=dynamic_outPRC=dynamic_trgDev=CPUconfig=\(INFERENCE_PRECISION_HINT=bf16_\)_Fused=Multiply\(PerChannel\)_primitive=brgemm_avx512.*)", // Issue: 140389 R"(.*FQLayerDQBias.smoke_CompareWithRefs.*)", R"(.*smoke_matmulBrgemmInt8/MatmulBrgemmInt8Test.CompareWithRefs.*MatMul.*InputType=i8_OutputType=i8.*)", @@ -290,7 +290,7 @@ std::vector disabledTestPatterns() { #elif defined(OPENVINO_ARCH_ARM64) || defined(OPENVINO_ARCH_ARM) { retVector.emplace_back( - R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=undefined_OutType=undefined_trgDev=CPU.*)"); + R"(smoke_CompareWithRefs_static_check_collapsing/EltwiseLayerTest.Inference/IS.*_eltwise_op_type=Div_secondary_input_type=PARAMETER_opType=VECTOR_model_type=i32_InType=dynamic_OutType=dynamic_trgDev=CPU.*)"); // Issue: 123321 retVector.emplace_back( R"(.*smoke_RNNSequenceCommonZeroClip/RNNSequenceTest.Inference.*hidden_size=1.*relu.*direction=reverse.*)"); @@ -470,13 +470,13 @@ std::vector disabledTestPatterns() { #if defined(_WIN32) retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNormTransposeOnWeights/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_type=fake_quantize_intervals_type=per_(tensor|channel)_transpose_on_weights=true_device=CPU.*)"); 
retVector.emplace_back(R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -12.7 \}_output_high\{ 12.8 \}_precision=\}_fq_on_weights=\{_255_\[1,1,1,1\]_\{ -12.7 \}_\{ 12.7 \}\}.*)"); - retVector.emplace_back(R"(.*smoke_LPT/FuseDequantizeToFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_0_undefined_\[\]_f32__\{\}_\{\}__\{ 0.01, 0.1, 1 \}_f32_\[1,3\]_1_1_.*)"); + retVector.emplace_back(R"(.*smoke_LPT/FuseDequantizeToFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_0_dynamic_\[\]_f32__\{\}_\{\}__\{ 0.01, 0.1, 1 \}_f32_\[1,3\]_1_1_.*)"); retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_quantize_.*)"); retVector.emplace_back(R"(.*smoke_QuantizedConvolutionBatchNorm/QuantizedConvolutionBatchNorm.CompareWithRefs/conv_type=convolution_backprop_quantize_type=(quantize_dequantize_intervals|compressed_weights_intervals).*)"); retVector.emplace_back(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"); retVector.emplace_back(R"(.*smoke_LPT/MatMulTransformation.CompareWithRefImpl/f32_CPU_\[(1|8|1,1,1),4,12,2\]_level=256_shape=\[\]_input_low=\{ (0|-12.8) \}_input_high=\{ (25.5|12.7) \}_output_low=\{ (0|-12.8) \}_output_high\{ (25.5|12.7) \}_.*)"); retVector.emplace_back( - R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompression.CompareWithRefs/data_shape=\[\?.\?.\?\]_\(\[1,1,4096\]\)_weights_shape=\[4096,4096\]_group_size=128_weights_precision=nf4_decompression_precision=f16_scale_precision=undefined_transpose_weights=0_decompression_subtract=full_reshape_on_decompression=1_config=\(\).*)"); + R"(.*smoke_MatMulCompressedWeights_corner_cases_basic/MatmulWeightsDecompression.CompareWithRefs/data_shape=\[\?.\?.\?\]_\(\[1,1,4096\]\)_weights_shape=\[4096,4096\]_group_size=128_weights_precision=nf4_decompression_precision=f16_scale_precision=dynamic_transpose_weights=0_decompression_subtract=full_reshape_on_decompression=1_config=\(\).*)"); retVector.emplace_back(R"(.*smoke_RDFT_CPU_1D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(126\)\)_constAxes=true_axes=\(\(0\)\)_isInverse=false.*)"); retVector.emplace_back(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(16.38\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false.*)"); // Issue: MFDNN-12818 diff --git a/src/plugins/intel_cpu/tests/functional/utils/cpu_test_utils.cpp b/src/plugins/intel_cpu/tests/functional/utils/cpu_test_utils.cpp index 0c6c6cae2373b5..6e3ef21c38b430 100644 --- a/src/plugins/intel_cpu/tests/functional/utils/cpu_test_utils.cpp +++ b/src/plugins/intel_cpu/tests/functional/utils/cpu_test_utils.cpp @@ -473,10 +473,9 @@ CPUTestsBase::deduce_expected_precision(const ov::element::Type& opPrecision, auto inferencePrecisionConfig = it->second.as(); inferencePrecisionSetExplicitly = true; // TODO also need to check (dnnl::impl::cpu::x64::avx2_vnni_2) - if ((inferencePrecisionConfig == ov::element::bf16 && ov::with_cpu_x86_avx512_core()) - || (inferencePrecisionConfig == ov::element::f16 && ov::with_cpu_x86_avx512_core_fp16()) - || (inferencePrecisionConfig == ov::element::f32) - || (inferencePrecisionConfig == ov::element::undefined)) { + if ((inferencePrecisionConfig == ov::element::bf16 && 
ov::with_cpu_x86_avx512_core()) ||
+            (inferencePrecisionConfig == ov::element::f16 && ov::with_cpu_x86_avx512_core_fp16()) ||
+            (inferencePrecisionConfig == ov::element::f32) || (inferencePrecisionConfig == ov::element::dynamic)) {
             inferencePrecision = inferencePrecisionConfig;
         }
     }
@@ -489,7 +488,7 @@ CPUTestsBase::deduce_expected_precision(const ov::element::Type& opPrecision,
                 inferencePrecision = ov::element::bf16;
             }
         } else {
-            inferencePrecision = ov::element::undefined;
+            inferencePrecision = ov::element::dynamic;
         }
     }
@@ -508,7 +507,7 @@ CPUTestsBase::deduce_expected_precision(const ov::element::Type& opPrecision,
     if (deducedType == ov::element::bf16) {
         deducedType = ov::with_cpu_x86_avx512_core() ? ov::element::bf16 : ov::element::f32;
     } else if (deducedType == ov::element::f16) {
-        if (inferencePrecision != ov::element::f16 && inferencePrecision != ov::element::undefined) {
+        if (inferencePrecision != ov::element::f16 && inferencePrecision != ov::element::dynamic) {
             deducedType = ov::element::f32;
         }
     } else {
diff --git a/src/plugins/intel_cpu/tests/unit/memory_desc/empty_memory_desc.cpp b/src/plugins/intel_cpu/tests/unit/memory_desc/empty_memory_desc.cpp
index e9dc2b60e15ccb..e0405cf36c0c8d 100644
--- a/src/plugins/intel_cpu/tests/unit/memory_desc/empty_memory_desc.cpp
+++ b/src/plugins/intel_cpu/tests/unit/memory_desc/empty_memory_desc.cpp
@@ -22,7 +22,7 @@ TEST(MemoryTest, EmptyMemoryDescVerifyPublicInterface) {
     ASSERT_TRUE(emptyDesc->clone()->empty());
-    ASSERT_EQ(emptyDesc->getPrecision(), ov::element::undefined);
+    ASSERT_EQ(emptyDesc->getPrecision(), ov::element::dynamic);
     ASSERT_EQ(emptyDesc->getOffsetPadding(), 0);
@@ -41,5 +41,5 @@ TEST(MemoryTest, EmptyMemoryDescVerifyPublicInterface) {
     // not compatible with any other memory desc
     ASSERT_FALSE(emptyDesc->isCompatible(CpuBlockedMemoryDesc{ov::element::f32, Shape{1, 2, 3}}));
     ASSERT_FALSE(emptyDesc->isCompatible(DnnlBlockedMemoryDesc{ov::element::u8, Shape{1}}));
-    ASSERT_FALSE(emptyDesc->isCompatible(CpuBlockedMemoryDesc{ov::element::undefined, Shape{0}}));
+    ASSERT_FALSE(emptyDesc->isCompatible(CpuBlockedMemoryDesc{ov::element::dynamic, Shape{0}}));
 }
diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp
index 34db1ea52d37d2..ef05bddcad5bb8 100644
--- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp
+++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/custom_shape_infer/fullconnect.cpp
@@ -23,7 +23,7 @@ TEST(CpuShapeInfer, FC_InputSize_2) {
     auto op = std::make_shared(
             activate,
             weight,
-            std::make_shared(ov::element::undefined, ov::Shape{0}));
+            std::make_shared(ov::element::dynamic, ov::Shape{0}));
     std::vector static_input_shapes = {StaticShape{720, 640}, {5, 6}};
     std::vector static_output_shapes = {StaticShape{720, 5}};
     unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes);
@@ -35,7 +35,7 @@ TEST(CpuShapeInfer, FC_broadcastWeights1) {
     auto op = std::make_shared(
             activate,
weight, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); std::vector static_input_shapes = {StaticShape{2, 3, 720, 6}, {5, 6}}; std::vector static_output_shapes = {StaticShape{2, 3, 720, 5}}; unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes); @@ -59,7 +59,7 @@ TEST(CpuShapeInfer, FC_broadcastActivations1) { auto op = std::make_shared( activate, weight, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); std::vector static_input_shapes = {StaticShape{720, 6}, {1, 5, 6}}; std::vector static_output_shapes = {StaticShape{1, 720, 5}}; unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes); @@ -71,7 +71,7 @@ TEST(CpuShapeInfer, FC_broadcastActivations2) { auto op = std::make_shared( activate, weight, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); std::vector static_input_shapes = {StaticShape{720, 6}, {1, 1, 5, 6}}; std::vector static_output_shapes = {StaticShape{1, 1, 720, 5}}; unit_test::cpu_test_shape_infer(op.get(), static_input_shapes, static_output_shapes); diff --git a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/enforce_precision.cpp b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/enforce_precision.cpp index b41808c59ee3fd..0d4a28c6c73d2a 100644 --- a/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/enforce_precision.cpp +++ b/src/plugins/intel_cpu/tests/unit/snippets_transformations/x64/enforce_precision.cpp @@ -192,203 +192,90 @@ std::vector> shapes { {{1, 3, 16, 16}, {1, 3, 16, 16}} }; -std::vector test_values { - { - {element::bf16, element::bf16, element::f32}, - element::f32, - element::bf16, - { - {element::f32, element::f32}, - {}, - {}, - {element::bf16}, - { - { - {element::bf16, element::bf16, element::bf16}, - {element::bf16, element::bf16} - }, - }, - { - { - {element::bf16, element::bf16, element::bf16} - } - } - }, - { - {}, - {}, - {element::f32, element::undefined}, - {}, - {element::bf16} - } - }, - - { - {element::bf16, element::bf16, element::f32}, - element::f32, - element::bf16, - { - {element::f32, element::f32}, - {}, - {}, - {element::bf16}, - { - { - {element::bf16, element::bf16} - }, - }, - { - { - {element::bf16, element::bf16} - } - } - }, - { - {}, - {}, - {element::undefined, element::bf16}, - {element::f32}, - {element::bf16} - } - }, - - { - {element::bf16, element::bf16, element::f32}, - element::f32, - element::bf16, - { - {element::f32, element::f32}, - {}, - {}, - {element::bf16}, - { - { - {element::bf16, element::bf16} - }, - }, - { - { - {element::bf16, element::f32} - } - } - }, - { - {}, - {}, - {}, - {element::f32}, - {element::bf16} - } - }, - - { - {element::bf16, element::bf16, element::i32}, - element::f32, - element::bf16, - { - {element::f32, element::f32}, - {}, - {}, - {element::bf16}, - { - { - {element::bf16, element::bf16} - }, - }, - { - { - {element::bf16, element::bf16} - } - } - }, - { - {}, - {}, - {element::f32, element::undefined}, - {}, - {element::bf16} - } - }, - - { - {element::bf16, element::bf16, element::i32}, - element::f32, - element::bf16, - { - {element::f32, element::f32}, - {}, - {}, - {element::bf16}, - { - { - {element::bf16, element::bf16} - }, - }, - { - { - {element::bf16, element::i32} - } - } - }, - { - {}, - {}, - {}, - {element::f32}, - {element::bf16} - } - }, - - { - {element::f16, element::f16, 
element::f32}, - element::f32, - element::f16, - { - {element::f32, element::f32}, - {}, - {}, - {element::f16}, - { - { - {element::f16, element::f16} - }, - }, - { - { - {element::f16, element::f32} - } - } - }, - { - {}, - {}, - {}, - {element::f32}, - {element::f16} - } - }, - - { - {element::f16, element::f16, element::f32}, - element::f32, - element::f16, - { - {element::f32, element::f32}, - {}, - {}, - {element::f16}, - {}, - {} - }, - { - {element::f32, element::f32}, - {}, - {}, - {}, - {element::f16} - } - } -}; +std::vector test_values{ + {{element::bf16, element::bf16, element::f32}, + element::f32, + element::bf16, + {{element::f32, element::f32}, + {}, + {}, + {element::bf16}, + { + {{element::bf16, element::bf16, element::bf16}, {element::bf16, element::bf16}}, + }, + {{{element::bf16, element::bf16, element::bf16}}}}, + {{}, {}, {element::f32, element::dynamic}, {}, {element::bf16}}}, + + {{element::bf16, element::bf16, element::f32}, + element::f32, + element::bf16, + {{element::f32, element::f32}, + {}, + {}, + {element::bf16}, + { + {{element::bf16, element::bf16}}, + }, + {{{element::bf16, element::bf16}}}}, + {{}, {}, {element::dynamic, element::bf16}, {element::f32}, {element::bf16}}}, + + {{element::bf16, element::bf16, element::f32}, + element::f32, + element::bf16, + {{element::f32, element::f32}, + {}, + {}, + {element::bf16}, + { + {{element::bf16, element::bf16}}, + }, + {{{element::bf16, element::f32}}}}, + {{}, {}, {}, {element::f32}, {element::bf16}}}, + + {{element::bf16, element::bf16, element::i32}, + element::f32, + element::bf16, + {{element::f32, element::f32}, + {}, + {}, + {element::bf16}, + { + {{element::bf16, element::bf16}}, + }, + {{{element::bf16, element::bf16}}}}, + {{}, {}, {element::f32, element::dynamic}, {}, {element::bf16}}}, + + {{element::bf16, element::bf16, element::i32}, + element::f32, + element::bf16, + {{element::f32, element::f32}, + {}, + {}, + {element::bf16}, + { + {{element::bf16, element::bf16}}, + }, + {{{element::bf16, element::i32}}}}, + {{}, {}, {}, {element::f32}, {element::bf16}}}, + + {{element::f16, element::f16, element::f32}, + element::f32, + element::f16, + {{element::f32, element::f32}, + {}, + {}, + {element::f16}, + { + {{element::f16, element::f16}}, + }, + {{{element::f16, element::f32}}}}, + {{}, {}, {}, {element::f32}, {element::f16}}}, + + {{element::f16, element::f16, element::f32}, + element::f32, + element::f16, + {{element::f32, element::f32}, {}, {}, {element::f16}, {}, {}}, + {{element::f32, element::f32}, {}, {}, {}, {element::f16}}}}; INSTANTIATE_TEST_SUITE_P( smoke_Snippets_EnforcePrecisionTest, diff --git a/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp b/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp index a7ed7296281c8f..37794839cf19ea 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/convert_matmul_test.cpp @@ -44,7 +44,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest1) { auto matmul = std::make_shared( transpose1, transpose2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -83,7 +83,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest3) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, 
ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -104,7 +104,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest4) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -143,7 +143,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest7) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); } @@ -165,7 +165,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest8) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); auto a_shape = std::make_shared(input1); auto I = ov::op::util::node_to_get_shape_value_of_indices_from_shape_node(a_shape, {0, 1}); @@ -191,7 +191,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest9) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -239,7 +239,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest13) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -267,7 +267,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest14) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0}), + std::make_shared(ov::element::dynamic, ov::Shape{0}), ov::element::f32); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); @@ -290,7 +290,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_1) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0}), + std::make_shared(ov::element::dynamic, ov::Shape{0}), ov::element::f32); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); @@ -312,7 +312,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_2) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); } @@ -333,7 +333,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_3) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0}), + std::make_shared(ov::element::dynamic, ov::Shape{0}), ov::element::f32); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); @@ -355,7 +355,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_4) { auto fc = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0}), + std::make_shared(ov::element::dynamic, ov::Shape{0}), ov::element::f32); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); @@ -377,7 +377,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_4d_5) { auto fc = std::make_shared( input1, input2, - 
std::make_shared(ov::element::undefined, ov::Shape{0}), + std::make_shared(ov::element::dynamic, ov::Shape{0}), ov::element::f32); model_ref = std::make_shared(ov::NodeVector{fc}, ov::ParameterVector{input1}); @@ -399,7 +399,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_second_input_rank_adj_1) { auto matmul = std::make_shared( input1, input2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } } @@ -419,7 +419,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_second_input_rank_adj_2) { auto matmul = std::make_shared( input1, weights, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -441,7 +441,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_second_input_rank_adj_3) { auto matmul = std::make_shared( input1, weights, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } } @@ -468,7 +468,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_decompress_convert_0) { auto matmul = std::make_shared( input1, transpose, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -498,7 +498,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_decompress_convert_1) { auto matmul = std::make_shared( transpose1, transpose2, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{input1}); } @@ -532,7 +532,7 @@ TEST_F(TransformationTestsF, ConvertMatMulToFCTest_compressed_u8_weights) { auto matmul = std::make_shared( data, transpose, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); model_ref = std::make_shared(ov::NodeVector{matmul}, ov::ParameterVector{data}); } diff --git a/src/plugins/intel_cpu/tests/unit/transformations/move_fc_reshape_to_weights.cpp b/src/plugins/intel_cpu/tests/unit/transformations/move_fc_reshape_to_weights.cpp index b669d04128c1cd..1eccf0844004c4 100644 --- a/src/plugins/intel_cpu/tests/unit/transformations/move_fc_reshape_to_weights.cpp +++ b/src/plugins/intel_cpu/tests/unit/transformations/move_fc_reshape_to_weights.cpp @@ -119,7 +119,7 @@ class MoveFCReshapeToWeightsTests : public TransformationTestsF, public WithPara auto fully_connected = std::make_shared( data, weights_path, - std::make_shared(ov::element::undefined, ov::Shape{0})); + std::make_shared(ov::element::dynamic, ov::Shape{0})); return std::make_shared(ov::NodeVector{fully_connected}, ov::ParameterVector{data}); } diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/convolution.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/convolution.hpp index fedd482e8d762b..b4ecd237cdc8de 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/convolution.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/convolution.hpp @@ -66,7 +66,7 @@ class Convolution : public ov::op::util::ConvolutionFwdPropBase { protected: int64_t m_groups = -1; // negative value means no groups bool m_asymmetric = false; - 
ov::element::Type m_output_type = ov::element::undefined; + ov::element::Type m_output_type = ov::element::dynamic; }; std::vector shape_infer(const Convolution* op, diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected.hpp index 14dd564302715d..2f28927283fcdf 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected.hpp @@ -18,7 +18,7 @@ class FullyConnected : public ov::op::Op { FullyConnected(const ov::Output& A, const ov::Output& B, const ov::Output& bias, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor &visitor) override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected_compressed.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected_compressed.hpp index 4b741715c3de7b..2f855448eb2157 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected_compressed.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/fully_connected_compressed.hpp @@ -14,28 +14,27 @@ class FullyConnectedCompressed : public FullyConnected { FullyConnectedCompressed() = default; - FullyConnectedCompressed(const ov::Output &A, - const ov::Output &B, - const ov::Output &bias, - const ov::Output &w_decompression_scale, - const ov::Output &w_decompression_zero_point, - const ov::Output &a_decompression_scale, - const ov::Output &a_decompression_zero_point, - const ov::element::Type output_type = ov::element::undefined); - - - FullyConnectedCompressed(const ov::Output &A, - const ov::Output &B, - const ov::Output &bias, - const ov::Output &w_decompression_scale, - const ov::Output &w_decompression_zero_point, - const ov::element::Type output_type = ov::element::undefined); - - FullyConnectedCompressed(const ov::Output &A, - const ov::Output &B, - const ov::Output &bias, - const ov::Output &w_decompression_scale, - const ov::element::Type output_type = ov::element::undefined); + FullyConnectedCompressed(const ov::Output& A, + const ov::Output& B, + const ov::Output& bias, + const ov::Output& w_decompression_scale, + const ov::Output& w_decompression_zero_point, + const ov::Output& a_decompression_scale, + const ov::Output& a_decompression_zero_point, + const ov::element::Type output_type = ov::element::dynamic); + + FullyConnectedCompressed(const ov::Output& A, + const ov::Output& B, + const ov::Output& bias, + const ov::Output& w_decompression_scale, + const ov::Output& w_decompression_zero_point, + const ov::element::Type output_type = ov::element::dynamic); + + FullyConnectedCompressed(const ov::Output& A, + const ov::Output& B, + const ov::Output& bias, + const ov::Output& w_decompression_scale, + const ov::element::Type output_type = ov::element::dynamic); std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override; }; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/gemm.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/gemm.hpp index 6d45da459bdec8..3b692ead8c810c 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/gemm.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/gemm.hpp @@ -22,7 +22,7 @@ class Gemm : public ov::op::v0::MatMul { const std::vector& order_a, const std::vector& order_b, const std::vector& order_c, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); 
bool visit_attributes(ov::AttributeVisitor &visitor) override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/indirect_gemm.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/indirect_gemm.hpp index 7ebefd1802ef3c..49567f118c89f1 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/indirect_gemm.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/indirect_gemm.hpp @@ -26,7 +26,7 @@ class IndirectGemm : public ov::intel_gpu::op::Gemm { const std::vector& order_a, const std::vector& order_b, const std::vector& order_c, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor &visitor) override; void validate_and_infer_types() override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/indirect_sdpa.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/indirect_sdpa.hpp index 5580906d6fe80d..96eb0211684f45 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/indirect_sdpa.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/indirect_sdpa.hpp @@ -25,7 +25,7 @@ class IndirectSDPA : public ov::intel_gpu::op::SDPA { const std::vector& order_k, const std::vector& order_v, const std::vector& order_out, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); IndirectSDPA(const OutputVector& data_inputs, const ov::Output& beam_table, @@ -36,7 +36,7 @@ class IndirectSDPA : public ov::intel_gpu::op::SDPA { const std::vector& order_v, const std::vector& order_out, const QuantizationAttribute& quantization_attribute, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor &visitor) override; void validate_and_infer_types() override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache.hpp index 74c67209cec655..7bbae015eed7cc 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache.hpp @@ -23,7 +23,7 @@ class KVCache : public ov::op::Op, public ov::op::util::VariableExtension { const Output& new_token_data, const std::shared_ptr& past_values, int64_t concat_axis, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); KVCache(const Output& past, const Output& new_token_data, @@ -31,7 +31,7 @@ class KVCache : public ov::op::Op, public ov::op::util::VariableExtension { const std::shared_ptr& past_values, int64_t concat_axis, int64_t gather_axis, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor& visitor) override; @@ -58,7 +58,7 @@ class KVCache : public ov::op::Op, public ov::op::util::VariableExtension { bool indirect, int64_t concat_axis, int64_t gather_axis, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); int64_t m_concat_axis = 0; int64_t m_gather_axis = 0; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache_compressed.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache_compressed.hpp index f45aef80cc292a..70cd2fdc9c91e3 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache_compressed.hpp +++ 
b/src/plugins/intel_gpu/include/intel_gpu/op/kv_cache_compressed.hpp @@ -24,7 +24,7 @@ class KVCacheCompressed : public ov::intel_gpu::op::KVCache { int64_t concat_axis, int64_t gather_axis, const QuantizationAttrs& quantization_attrs, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); void validate_and_infer_types() override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/op/sdpa.hpp b/src/plugins/intel_gpu/include/intel_gpu/op/sdpa.hpp index 80457e2203dafd..af0498ec4c3db4 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/op/sdpa.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/op/sdpa.hpp @@ -26,7 +26,7 @@ class SDPA : public ov::op::v13::ScaledDotProductAttention { const std::vector& order_k, const std::vector& order_v, const std::vector& order_out, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); SDPA(const OutputVector& inputs, const bool is_causal, @@ -35,7 +35,7 @@ class SDPA : public ov::op::v13::ScaledDotProductAttention { const std::vector& order_v, const std::vector& order_out, const QuantizationAttribute& quantization_attrs, - const ov::element::Type output_type = ov::element::undefined); + const ov::element::Type output_type = ov::element::dynamic); bool visit_attributes(ov::AttributeVisitor &visitor) override; diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp index 159e2c5aa28bd5..fa258afbc6c50d 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/common_utils.hpp @@ -114,10 +114,11 @@ inline void ForceExit() { std::_Exit(-1); } -void convert_and_copy(const ov::ITensor* src, - cldnn::memory::ptr dst, - cldnn::stream& stream, - const cldnn::layout& src_layout = cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding())); +void convert_and_copy( + const ov::ITensor* src, + cldnn::memory::ptr dst, + cldnn::stream& stream, + const cldnn::layout& src_layout = cldnn::layout({}, ov::element::dynamic, cldnn::format::bfyx, cldnn::padding())); void convert_and_copy(const cldnn::memory::ptr src, ov::ITensor const* dst, const cldnn::stream& stream); void convert_and_copy(const ov::ITensor* src, ov::ITensor* dst, const cldnn::stream& stream); void convert_and_copy(const cldnn::memory::ptr src, cldnn::memory::ptr dst, cldnn::stream& stream); diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp index 7cdc5e39ac9583..221b863c328541 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/variable_state.hpp @@ -15,11 +15,13 @@ namespace ov::intel_gpu { class RemoteContextImpl; struct VariableStateInfo { - VariableStateInfo(const std::string& id, const cldnn::layout& layout, ov::element::Type_t user_specified_type = ov::element::undefined) - : m_id(id) - , m_layout(layout) - , m_user_specified_type(user_specified_type) - , m_primitives() {} + VariableStateInfo(const std::string& id, + const cldnn::layout& layout, + ov::element::Type_t user_specified_type = ov::element::dynamic) + : m_id(id), + m_layout(layout), + m_user_specified_type(user_specified_type), + m_primitives() {} std::string m_id; cldnn::layout m_layout; diff --git 
a/src/plugins/intel_gpu/include/intel_gpu/primitives/assign.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/assign.hpp index cb16fd4a96d629..458cd47ac472e7 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/assign.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/assign.hpp @@ -22,15 +22,15 @@ struct assign : public primitive_base { /// @param inputs Input parameters ids /// @param variable_id Variable id /// @param output_layout Memory layout - assign(const primitive_id &id, + assign(const primitive_id& id, const std::vector& inputs, const std::string& variable_id, const layout& output_layout, - const ov::element::Type& user_specified_type = ov::element::undefined) - : primitive_base(id, inputs, 1, {optional_data_type{output_layout.data_type}}), - variable_id{variable_id}, - output_layout{output_layout}, - user_specified_type(user_specified_type) {} + const ov::element::Type& user_specified_type = ov::element::dynamic) + : primitive_base(id, inputs, 1, {optional_data_type{output_layout.data_type}}), + variable_id{variable_id}, + output_layout{output_layout}, + user_specified_type(user_specified_type) {} std::string variable_id; layout output_layout; @@ -56,7 +56,7 @@ struct assign : public primitive_base { void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); - ov::element::Type_t data_type = ov::element::Type_t::undefined; + ov::element::Type_t data_type = ov::element::Type_t::dynamic; ib >> variable_id; ib >> output_layout; ib >> make_data(&data_type, sizeof(ov::element::Type_t)); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/data.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/data.hpp index a09401af24d043..5ce4974793f30a 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/data.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/data.hpp @@ -122,8 +122,8 @@ struct weightless_cache_manager { size_t bin_offset = SIZE_MAX; size_t original_size = SIZE_MAX; - ov::element::Type original_dtype = ov::element::Type_t::undefined; - ov::element::Type curr_dtype = ov::element::Type_t::undefined; + ov::element::Type original_dtype = ov::element::Type_t::dynamic; + ov::element::Type curr_dtype = ov::element::Type_t::dynamic; ov::Shape shape; bool should_run_transformations() { diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp index 2c056c7c21e274..f6c03fdde9bcf1 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/generate_proposals.hpp @@ -62,7 +62,7 @@ struct generate_proposals primitive_id output_rois_scores; primitive_id output_rois_num; - data_types roi_num_type = data_types::undefined; + data_types roi_num_type = data_types::dynamic; size_t hash() const override { size_t seed = primitive::hash(); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/kv_cache.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/kv_cache.hpp index 1c8f095752aca2..c57fa62f000b1c 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/kv_cache.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/kv_cache.hpp @@ -101,7 +101,7 @@ struct kv_cache : public primitive_base { void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); ov::PartialShape data_shape; - ov::element::Type_t data_type = ov::element::Type_t::undefined; + ov::element::Type_t data_type = 
ov::element::Type_t::dynamic; std::string variable_id; ib >> variable_id; ib >> data_shape; diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp index 1f92d44ac0eab0..8370456d1698ab 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/primitive.hpp @@ -181,7 +181,12 @@ struct primitive { return false; for (size_t i = 0; i < output_data_types.size(); ++i) { - if (output_data_types[i].value_or(data_types::undefined) != rhs.output_data_types[i].value_or(data_types::undefined)) + if (output_data_types[i].value_or(data_types::dynamic) != + rhs.output_data_types[i].value_or(data_types::dynamic)) + return false; + + if (output_data_types[i].value_or(data_types::dynamic) != + rhs.output_data_types[i].value_or(data_types::dynamic)) return false; } diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/read_value.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/read_value.hpp index 26465692ef6352..6f5db5e776c6d4 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/read_value.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/read_value.hpp @@ -27,11 +27,11 @@ struct read_value : public primitive_base { const std::vector& inputs, const std::string& variable_id, const std::vector& output_layouts, - const ov::element::Type& user_specified_type = ov::element::undefined) - : primitive_base(id, inputs, output_layouts.size()), - variable_id{variable_id}, - output_layouts{output_layouts}, - user_specified_type(user_specified_type) { + const ov::element::Type& user_specified_type = ov::element::dynamic) + : primitive_base(id, inputs, output_layouts.size()), + variable_id{variable_id}, + output_layouts{output_layouts}, + user_specified_type(user_specified_type) { for (size_t output_idx = 0; output_idx < output_layouts.size(); output_idx++) { output_data_types[output_idx] = optional_data_type(output_layouts[output_idx].data_type); } @@ -63,7 +63,7 @@ struct read_value : public primitive_base { void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); - ov::element::Type_t data_type = ov::element::Type_t::undefined; + ov::element::Type_t data_type = ov::element::Type_t::dynamic; ib >> variable_id; size_t output_layouts_size; ib >> output_layouts_size; diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp index 2a92b8ece37077..c3a4cf1eeae20e 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/layout.hpp @@ -269,10 +269,10 @@ struct layout { layout(const layout& other) = default; layout() - : data_type(cldnn::data_types::undefined) - , format(cldnn::format::any) - , data_padding(padding()) - , size(ov::PartialShape()) { } + : data_type(cldnn::data_types::dynamic), + format(cldnn::format::any), + data_padding(padding()), + size(ov::PartialShape()) {} layout& operator=(const layout& other) { if (this == &other) diff --git a/src/plugins/intel_gpu/src/graph/impls/sycl/impl_example.cpp b/src/plugins/intel_gpu/src/graph/impls/sycl/impl_example.cpp index aace9ea7cae998..d6fc7bd3eeac17 100644 --- a/src/plugins/intel_gpu/src/graph/impls/sycl/impl_example.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/sycl/impl_example.cpp @@ -167,7 +167,8 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl ov::element::Type_t wei_t = 
params->weights_layout.value().data_type; ov::element::Type_t out_t = params->output_layouts[0].data_type; ov::element::Type_t ds_t = params->input_layouts[2].data_type; - ov::element::Type_t dzp_t = inputs.size() == 3 ? params->input_layouts[3].data_type : ov::element::Type_t::undefined; + ov::element::Type_t dzp_t = + inputs.size() == 3 ? params->input_layouts[3].data_type : ov::element::Type_t::dynamic; OPENVINO_ASSERT(out_shape.size() == 3); size_t M = out_shape[1]; @@ -189,7 +190,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl ds_t == ov::element::ScaleType && \ dzp_t == ov::element::ZPType - if ((CASE(f32, u4, f32, f32, f32)) || (CASE(f32, u4, undefined, f32, f32))) { + if ((CASE(f32, u4, f32, f32, f32)) || (CASE(f32, u4, dynamic, f32, f32))) { const float* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); float* out = static_cast(output->buffer_ptr()); @@ -197,7 +198,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl const float* dzp = inputs.size() == 3 ? static_cast(inputs[2]->buffer_ptr()) : nullptr; return to_ocl_event(stream, run_fc_int4_woq(sycl_queue, barrier, in, wei, dzp, ds, out, M, N, K, group_size, groups_num, out_shape, dzp_scalar)); - } else if ((CASE(f16, u4, f16, f16, f16)) || (CASE(f16, u4, undefined, f16, f16))) { + } else if ((CASE(f16, u4, f16, f16, f16)) || (CASE(f16, u4, dynamic, f16, f16))) { const ::sycl::half* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); ::sycl::half* out = static_cast<::sycl::half*>(output->buffer_ptr()); @@ -206,7 +207,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl return to_ocl_event(stream, run_fc_int4_woq(sycl_queue, barrier, in, wei, dzp, ds, out, M, N, K, group_size, groups_num, out_shape, dzp_scalar)); - } else if ((CASE(f16, u4, f16, f16, f32)) || (CASE(f16, u4, undefined, f16, f32))) { + } else if ((CASE(f16, u4, f16, f16, f32)) || (CASE(f16, u4, dynamic, f16, f32))) { const ::sycl::half* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); float* out = static_cast(output->buffer_ptr()); @@ -215,7 +216,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl return to_ocl_event(stream, run_fc_int4_woq(sycl_queue, barrier, in, wei, dzp, ds, out, M, N, K, group_size, groups_num, out_shape, dzp_scalar)); - } else if ((CASE(f32, u8, f32, f32, f32)) || (CASE(f32, u8, undefined, f32, f32))) { + } else if ((CASE(f32, u8, f32, f32, f32)) || (CASE(f32, u8, dynamic, f32, f32))) { const float* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); float* out = static_cast(output->buffer_ptr()); @@ -223,7 +224,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl const float* dzp = inputs.size() == 3 ? 
static_cast(inputs[2]->buffer_ptr()) : nullptr; return to_ocl_event(stream, run_fc_int8_woq(sycl_queue, barrier, in, wei, dzp, ds, out, M, N, K, group_size, groups_num, out_shape, dzp_scalar)); - } else if ((CASE(f16, u8, f16, f16, f16)) || (CASE(f16, u8, undefined, f16, f16))) { + } else if ((CASE(f16, u8, f16, f16, f16)) || (CASE(f16, u8, dynamic, f16, f16))) { const ::sycl::half* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); ::sycl::half* out = static_cast<::sycl::half*>(output->buffer_ptr()); @@ -231,7 +232,7 @@ struct fully_connected_sycl_example : typed_primitive_sycl_impl const ::sycl::half* dzp = inputs.size() == 3 ? static_cast(inputs[2]->buffer_ptr()) : nullptr; return to_ocl_event(stream, run_fc_int8_woq(sycl_queue, barrier, in, wei, dzp, ds, out, M, N, K, group_size, groups_num, out_shape, dzp_scalar)); - } else if ((CASE(f16, u8, f16, f16, f32)) || (CASE(f16, u8, undefined, f16, f32))) { + } else if ((CASE(f16, u8, f16, f16, f32)) || (CASE(f16, u8, dynamic, f16, f32))) { const ::sycl::half* in = static_cast(inputs[0]->buffer_ptr()); const uint8_t* wei = static_cast(weights->buffer_ptr()); float* out = static_cast(output->buffer_ptr()); diff --git a/src/plugins/intel_gpu/src/graph/include/variable.hpp b/src/plugins/intel_gpu/src/graph/include/variable.hpp index a9ae867dee67a1..c2ab9753db75a6 100644 --- a/src/plugins/intel_gpu/src/graph/include/variable.hpp +++ b/src/plugins/intel_gpu/src/graph/include/variable.hpp @@ -12,9 +12,9 @@ namespace memory_state { class variable { public: - explicit variable(const std::string& variable_id, ov::element::Type user_specified_type = ov::element::undefined) - : m_variable_id {variable_id} - , m_user_specified_type(user_specified_type) {} + explicit variable(const std::string& variable_id, ov::element::Type user_specified_type = ov::element::dynamic) + : m_variable_id{variable_id}, + m_user_specified_type(user_specified_type) {} const std::string& variable_id() const { return m_variable_id; } ov::element::Type get_user_specified_type() const { return m_user_specified_type; } diff --git a/src/plugins/intel_gpu/src/plugin/common_utils.cpp b/src/plugins/intel_gpu/src/plugin/common_utils.cpp index f4304514ab1c5a..7a747525dd0785 100644 --- a/src/plugins/intel_gpu/src/plugin/common_utils.cpp +++ b/src/plugins/intel_gpu/src/plugin/common_utils.cpp @@ -99,32 +99,56 @@ namespace ov::intel_gpu { bool is_supported(ov::element::Type_t et) { switch (et) { - case ov::element::Type_t::undefined: return true; - case ov::element::Type_t::dynamic: return false; - case ov::element::Type_t::boolean: return true; // converted to u8 - case ov::element::Type_t::bf16: return false; - case ov::element::Type_t::f16: return true; - case ov::element::Type_t::f32: return true; - case ov::element::Type_t::f64: return true; // converted to inference precision - case ov::element::Type_t::i4: return true; - case ov::element::Type_t::i8: return true; - case ov::element::Type_t::i16: return false; - case ov::element::Type_t::i32: return true; - case ov::element::Type_t::i64: return true; // converted to i32 - case ov::element::Type_t::u1: return true; - case ov::element::Type_t::u2: return false; - case ov::element::Type_t::u3: return false; - case ov::element::Type_t::u4: return true; - case ov::element::Type_t::u6: return true; - case ov::element::Type_t::u8: return true; - case ov::element::Type_t::u16: return true; // converted to i32 - case ov::element::Type_t::u32: return true; // converted to i32 - case 
ov::element::Type_t::u64: return true; // converted to i32 - case ov::element::Type_t::nf4: return false; - case ov::element::Type_t::f8e4m3: return false; - case ov::element::Type_t::f8e5m2: return false; - case ov::element::Type_t::string: return false; - default: return false; + case ov::element::Type_t::dynamic: + return true; + case ov::element::Type_t::boolean: + return true; // converted to u8 + case ov::element::Type_t::bf16: + return false; + case ov::element::Type_t::f16: + return true; + case ov::element::Type_t::f32: + return true; + case ov::element::Type_t::f64: + return true; // converted to inference precision + case ov::element::Type_t::i4: + return true; + case ov::element::Type_t::i8: + return true; + case ov::element::Type_t::i16: + return false; + case ov::element::Type_t::i32: + return true; + case ov::element::Type_t::i64: + return true; // converted to i32 + case ov::element::Type_t::u1: + return true; + case ov::element::Type_t::u2: + return false; + case ov::element::Type_t::u3: + return false; + case ov::element::Type_t::u4: + return true; + case ov::element::Type_t::u6: + return true; + case ov::element::Type_t::u8: + return true; + case ov::element::Type_t::u16: + return true; // converted to i32 + case ov::element::Type_t::u32: + return true; // converted to i32 + case ov::element::Type_t::u64: + return true; // converted to i32 + case ov::element::Type_t::nf4: + return false; + case ov::element::Type_t::f8e4m3: + return false; + case ov::element::Type_t::f8e5m2: + return false; + case ov::element::Type_t::string: + return false; + default: + return false; } return false; @@ -233,14 +257,24 @@ void convert_and_copy(const ov::ITensor* src, ov::ITensor* dst, const cldnn::str dst_ptr = dst_lock->data(); } else if (auto remote = dynamic_cast(dst)) { tmp_tensor = ov::Tensor(dst_et, src->get_shape()); - ::convert_and_copy(src_ptr, src_et, tmp_tensor.data(), dst_et, size, cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding())); + ::convert_and_copy(src_ptr, + src_et, + tmp_tensor.data(), + dst_et, + size, + cldnn::layout({}, ov::element::dynamic, cldnn::format::bfyx, cldnn::padding())); remote->copy_from(get_tensor_impl(tmp_tensor)._ptr); return; } else { dst_ptr = dst->data(); } - return ::convert_and_copy(src_ptr, src_et, dst_ptr, dst_et, size, cldnn::layout({}, ov::element::undefined, cldnn::format::bfyx, cldnn::padding())); + return ::convert_and_copy(src_ptr, + src_et, + dst_ptr, + dst_et, + size, + cldnn::layout({}, ov::element::dynamic, cldnn::format::bfyx, cldnn::padding())); } std::vector get_output_data_types(const ov::Node* op, PrecisionMap precision_map) { diff --git a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp index 6c0d50be96e7ae..1afe4565c434ba 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp @@ -69,7 +69,7 @@ DynamicQuantizeFullyConnected::DynamicQuantizeFullyConnected(uint64_t group_size std::make_shared() : dyn_quan->output(2); auto output_type = m_fc->get_output_type(); - if (output_type == ov::element::undefined) + if (output_type.is_dynamic()) output_type = m_fc->get_input_element_type(0); auto new_fc = std::make_shared(dyn_quan->output(0), diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/convolution.cpp 
b/src/plugins/intel_gpu/src/plugin/transformations/op/convolution.cpp index b7e61cd8cb8ad7..7ad894828b39e4 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/convolution.cpp @@ -64,8 +64,7 @@ void Convolution::validate_and_infer_types() { const auto& filters_et = get_input_element_type(1); element::Type result_et; - - if (m_output_type != ov::element::undefined) { + if (m_output_type != ov::element::dynamic) { result_et = m_output_type; } else if (data_batch_et.compatible(filters_et)) { NODE_VALIDATION_CHECK(this, diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/fully_connected.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/fully_connected.cpp index 412f48e56ba533..00066612c31fb9 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/fully_connected.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/fully_connected.cpp @@ -35,7 +35,7 @@ void FullyConnected::validate_and_infer_types() { auto out_shapes = ov::op::v0::shape_infer(&op, std::vector{get_input_partial_shape(0), get_input_partial_shape(1)}); - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/gemm.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/gemm.cpp index 382e2a0525d2d6..ea7ceb96a1fe67 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/gemm.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/gemm.cpp @@ -50,8 +50,7 @@ void Gemm::validate_and_infer_types() { m_order_a, m_order_b, m_order_c); - - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_gemm.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_gemm.cpp index 8572c9c2418132..e9d718db712418 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_gemm.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_gemm.cpp @@ -55,7 +55,7 @@ void IndirectGemm::validate_and_infer_types() { m_order_b, m_order_c); - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_sdpa.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_sdpa.cpp index 8d8180b5c10b1b..0a60acd0091b5d 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_sdpa.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/indirect_sdpa.cpp @@ -93,7 +93,7 @@ void IndirectSDPA::validate_and_infer_types() { m_order_v, m_order_out); - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? 
get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp index b30afb1dc03356..b37449a4e39b76 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp @@ -59,7 +59,7 @@ bool KVCache::visit_attributes(ov::AttributeVisitor& visitor) { void KVCache::validate_and_infer_types() { auto output_type = m_output_type; - if (m_output_type == ov::element::undefined) { + if (m_output_type == ov::element::dynamic) { output_type = get_input_element_type(0); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/placeholder.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/placeholder.cpp index b3c8f1707beda0..968c0f1f2b16df 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/placeholder.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/placeholder.cpp @@ -19,7 +19,7 @@ bool Placeholder::visit_attributes(ov::AttributeVisitor& visitor) { } void Placeholder::validate_and_infer_types() { - set_output_type(0, ov::element::undefined, ov::PartialShape{}); + set_output_type(0, ov::element::dynamic, ov::PartialShape{}); } std::shared_ptr Placeholder::clone_with_new_inputs(const ov::OutputVector& new_args) const { diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp index 487c5146dd3186..797402e740e569 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/sdpa.cpp @@ -86,7 +86,7 @@ void SDPA::validate_and_infer_types() { m_order_v, m_order_out); - auto output_type = m_output_type == ov::element::undefined ? get_input_element_type(0) : m_output_type; + auto output_type = m_output_type == ov::element::dynamic ? 
get_input_element_type(0) : m_output_type; set_output_type(0, output_type, out_shapes[0]); } diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 579331aa149d92..74e3888a8b180c 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -332,7 +332,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { const auto& defaultPrecisions = ov::pass::low_precision::precision_set::get_int8_support(); const ov::element::TypeVector supported_woq_types = {ov::element::u8, ov::element::i8, ov::element::u4, ov::element::i4}; bool enableInt8; - ov::element::Type infer_precision = ov::element::undefined; + ov::element::Type infer_precision = ov::element::dynamic; bool unroll_loop = config.get_property(ov::intel_gpu::enable_loop_unrolling); { ov::pass::Manager manager("Plugin:GPU"); @@ -380,7 +380,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { // Add conversion from FP data types to infer precision if it's specified infer_precision = config.get_property(ov::hint::inference_precision); - if (infer_precision != ov::element::undefined) { + if (infer_precision != ov::element::dynamic) { if (!fp_precision_supported(infer_precision)) infer_precision = fallback_precision; diff --git a/src/plugins/intel_gpu/src/plugin/variable_state.cpp b/src/plugins/intel_gpu/src/plugin/variable_state.cpp index d6558ab1cadf08..b747a5a28ca8b3 100644 --- a/src/plugins/intel_gpu/src/plugin/variable_state.cpp +++ b/src/plugins/intel_gpu/src/plugin/variable_state.cpp @@ -118,7 +118,7 @@ void VariableState::update_device_buffer() { } ov::element::Type VariableState::get_user_specified_type() const { - return m_user_specified_type != ov::element::undefined ? m_user_specified_type : ov::element::Type(m_layout.data_type); + return m_user_specified_type != ov::element::dynamic ? 
m_user_specified_type : ov::element::Type(m_layout.data_type); } ov::SoPtr VariableState::get_state() const { diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 7d2a9d5f90fc8b..23686d65e8b7e1 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -19,7 +19,7 @@ class InferencePrecisionValidator : public BaseValidator { public: bool is_valid(const ov::Any& v) const override { auto precision = v.as(); - return precision == ov::element::f16 || precision == ov::element::f32 || precision == ov::element::undefined; + return precision == ov::element::f16 || precision == ov::element::f32 || precision == ov::element::dynamic; } }; @@ -139,7 +139,7 @@ void ExecutionConfig::apply_execution_hints(const cldnn::device_info& info) { const auto mode = get_property(ov::hint::execution_mode); if (!is_set_by_user(ov::hint::inference_precision)) { if (mode == ov::hint::ExecutionMode::ACCURACY) { - set_property(ov::hint::inference_precision(ov::element::undefined)); + set_property(ov::hint::inference_precision(ov::element::dynamic)); } else if (mode == ov::hint::ExecutionMode::PERFORMANCE) { if (info.supports_fp16) set_property(ov::hint::inference_precision(ov::element::f16)); @@ -218,7 +218,7 @@ void ExecutionConfig::apply_debug_options(const cldnn::device_info& info) { GPU_DEBUG_IF(debug_config->use_kv_cache_compression == 1) { set_property(ov::hint::kv_cache_precision(ov::element::i8)); } else { - set_property(ov::hint::kv_cache_precision(ov::element::undefined)); + set_property(ov::hint::kv_cache_precision(ov::element::dynamic)); } } } @@ -262,8 +262,8 @@ void ExecutionConfig::apply_user_properties(const cldnn::device_info& info) { set_property(ov::hint::enable_cpu_pinning(true)); } } - - if (!is_set_by_user(ov::hint::kv_cache_precision) || get_property(ov::hint::kv_cache_precision) == ov::element::undefined) { + if (!is_set_by_user(ov::hint::kv_cache_precision) || + get_property(ov::hint::kv_cache_precision) == ov::element::dynamic) { if (info.supports_immad) { // MFDNN-11755 set_property(ov::hint::kv_cache_precision(get_property(ov::hint::inference_precision))); } else { diff --git a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp index 59dd465294123d..29f2bef31e7fac 100644 --- a/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp +++ b/src/plugins/intel_gpu/tests/functional/behavior/inference_precision.cpp @@ -37,10 +37,10 @@ TEST_P(InferencePrecisionTests, smoke_canSetInferencePrecisionAndInfer) { static const std::vector test_params = { {ov::element::f16, ov::element::f32}, {ov::element::f16, ov::element::f16}, - {ov::element::f16, ov::element::undefined}, + {ov::element::f16, ov::element::dynamic}, {ov::element::f32, ov::element::f32}, {ov::element::f32, ov::element::f16}, - {ov::element::f32, ov::element::undefined}, + {ov::element::f32, ov::element::dynamic}, }; INSTANTIATE_TEST_SUITE_P(smoke_GPU_BehaviorTests, InferencePrecisionTests, ::testing::ValuesIn(test_params), InferencePrecisionTests::getTestCaseName); @@ -73,16 +73,18 @@ TEST(ExecutionModeTest, SetCompileGetInferPrecisionAndExecMode) { { /* ov::hint::inference_precision has higher priority than ov::hint::execution_mode */ - auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::inference_precision(ov::element::undefined), - 
ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)); + auto compiled_model = core.compile_model(model, + ov::test::utils::DEVICE_GPU, + ov::hint::inference_precision(ov::element::dynamic), + ov::hint::execution_mode(ov::hint::ExecutionMode::PERFORMANCE)); ASSERT_EQ(ov::hint::ExecutionMode::PERFORMANCE, compiled_model.get_property(ov::hint::execution_mode)); - ASSERT_EQ(ov::element::undefined, compiled_model.get_property(ov::hint::inference_precision)); + ASSERT_EQ(ov::element::dynamic, compiled_model.get_property(ov::hint::inference_precision)); } { auto compiled_model = core.compile_model(model, ov::test::utils::DEVICE_GPU, ov::hint::execution_mode(ov::hint::ExecutionMode::ACCURACY)); ASSERT_EQ(ov::hint::ExecutionMode::ACCURACY, compiled_model.get_property(ov::hint::execution_mode)); - ASSERT_EQ(ov::element::undefined, compiled_model.get_property(ov::hint::inference_precision)); + ASSERT_EQ(ov::element::dynamic, compiled_model.get_property(ov::hint::inference_precision)); } { @@ -91,4 +93,4 @@ TEST(ExecutionModeTest, SetCompileGetInferPrecisionAndExecMode) { ASSERT_EQ(ov::element::f16, compiled_model.get_property(ov::hint::inference_precision)); } } -} // namespace \ No newline at end of file +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index e026a44fa74ce0..4c674dda24e083 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -311,7 +311,7 @@ TEST_P(OVClassGetPropertyTest_GPU, GetAndSetEnableProfilingNoThrow) { TEST_P(OVClassGetPropertyTest_GPU, GetAndSetInferencePrecisionNoThrow) { ov::Core ie; - auto value = ov::element::undefined; + auto value = ov::element::dynamic; const auto expected_default_precision = ov::element::f16; OV_ASSERT_NO_THROW(value = ie.get_property(target_device, ov::hint::inference_precision)); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp index b122d486e81c2d..02c48fc2c6cea2 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/multiply_transformation.cpp @@ -16,71 +16,57 @@ const std::vector netPrecisions = { }; const std::vector params = { - { - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ov::element::i8 - false - }, - { - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // ov::element::u8 - false - }, - { - true, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f 
}, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, //ov::element::u8 - false - }, - { - true, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ov::element::i8 - false - }, - { - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - true, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -1.28f }, { 1.27f } }, - ov::element::undefined, // ov::element::i8 - false - }, - { - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.28f }, { 1.27f }, { -128.f }, { 1.27f } }, - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // ov::element::u8 - false - }, - { - false, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - true, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { -1.27f }, { 1.28f }, { -1.27f }, { 1.28f } }, - { 256ul, ov::Shape { 1, 1, 1, 1 }, { 0.f }, { 2.55f }, { 0.f }, { 2.55f } }, - ov::element::undefined, // ov::element::u8 - false - }, - { false, {}, false, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, - { true, {}, true, {}, {}, ov::element::undefined /* ov::element::f32 */, false }, + {false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + false, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + ov::element::dynamic, // ov::element::i8 + false}, + {false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + ov::element::dynamic, // ov::element::u8 + false}, + {true, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + ov::element::dynamic, // ov::element::u8 + false}, + {true, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + false, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + ov::element::dynamic, // ov::element::i8 + false}, + {false, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + true, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}}, + ov::element::dynamic, // ov::element::i8 + false}, + {false, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.28f}, {1.27f}, {-128.f}, {1.27f}}, + false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + ov::element::dynamic, // ov::element::u8 + false}, + {false, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + true, + {256ul, ov::Shape{1, 1, 1, 1}, {-1.27f}, {1.28f}, {-1.27f}, {1.28f}}, + {256ul, ov::Shape{1, 1, 1, 1}, {0.f}, {2.55f}, {0.f}, {2.55f}}, + ov::element::dynamic, 
// ov::element::u8 + false}, + {false, {}, false, {}, {}, ov::element::dynamic /* ov::element::f32 */, false}, + {true, {}, true, {}, {}, ov::element::dynamic /* ov::element::f32 */, false}, }; INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation, @@ -91,6 +77,3 @@ INSTANTIATE_TEST_SUITE_P(smoke_LPT, MultiplyTransformation, ::testing::ValuesIn(params)), MultiplyTransformation::getTestCaseName); } // namespace - - - diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp index 11dbaceeb24ad9..214a0f1100a464 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -87,8 +87,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(intOnly_netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), EltwiseLayerTest::getTestCaseName); @@ -101,8 +101,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), EltwiseLayerTest::getTestCaseName); @@ -115,8 +115,8 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::Values(additional_config)), EltwiseLayerTest::getTestCaseName); diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp index 8f222cc2f36c46..4c7c2dff9c2ede 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/group_normalization.cpp @@ -29,13 +29,13 @@ INSTANTIATE_TEST_SUITE_P( smoke_GroupNormalization, GroupNormalizationTest, testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes)), testing::ValuesIn(numGroups), testing::ValuesIn(epsilon), testing::Values(ov::test::utils::DEVICE_GPU), testing::Values(ov::AnyMap())), - GroupNormalizationTest::getTestCaseName); + GroupNormalizationTest::getTestCaseName); } // anonymous namespace diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp 
b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp index 85e595f86e1e61..5a04f37f8493a6 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp @@ -34,8 +34,8 @@ INSTANTIATE_TEST_SUITE_P( smoke_SoftMax2D, SoftMax8LayerTest, testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes2D)), testing::ValuesIn(axis2D), testing::Values(ov::test::utils::DEVICE_GPU), @@ -51,15 +51,13 @@ const std::vector axis3D = { -1, 1 }; -const auto params3D = testing::Combine( - testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes3D)), - testing::ValuesIn(axis3D), - testing::Values(ov::test::utils::DEVICE_GPU), - testing::Values(ov::AnyMap()) -); +const auto params3D = testing::Combine(testing::ValuesIn(netPrecisions), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes3D)), + testing::ValuesIn(axis3D), + testing::Values(ov::test::utils::DEVICE_GPU), + testing::Values(ov::AnyMap())); INSTANTIATE_TEST_SUITE_P( smoke_SoftMax3D, @@ -81,8 +79,8 @@ INSTANTIATE_TEST_SUITE_P( smoke_SoftMax4D, SoftMax8LayerTest, testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes4D)), testing::ValuesIn(axis4D), testing::Values(ov::test::utils::DEVICE_GPU), @@ -98,8 +96,8 @@ INSTANTIATE_TEST_SUITE_P( smoke_SoftMaxStableDiffusion, SoftMax8LayerTest, testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(stableDiffusionShapes)), testing::Values(-1), testing::Values(ov::test::utils::DEVICE_GPU), @@ -118,8 +116,8 @@ INSTANTIATE_TEST_SUITE_P( smoke_SoftMax5D, SoftMax8LayerTest, testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputShapes5D)), testing::ValuesIn(axis5D), testing::Values(ov::test::utils::DEVICE_GPU), diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 1b914f20d75d94..44d67c8a60166a 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -85,10 +85,10 @@ std::vector disabledTestPatterns() { 
R"(.*smoke_LPT/ConcatWithNeighborsGraphTransformation.CompareWithRefImpl/f16_\[1,3,16,16\]_GPU_f32.*)", R"(.*smoke_LPT/ConcatWithIntermediateTransformation.CompareWithRefImpl/f16_\[1,3,16,16\]_GPU_f32.*)", R"(.*smoke_LPT/ConcatWithSplitTransformation.CompareWithRefImpl/f16_\[1,6,10,10\]_GPU_f32level=256_shape=\[\]_input_low=\{ 0 \}_input_high=\{ 2.55 \}_output_low=\{ 0 \}_output_high\{ 2.55 \}_precision=_level=256_shape=\[\]_input_low=\{ 0 \}_input_high=\{ 2.55 \}_output_low=\{ 0 \}_output_high\{ 1.275 \}_precision=.*)", - R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f32_\[1,32,16,16\]_.*_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_undefined.*)", + R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f32_\[1,32,16,16\]_.*_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_dynamic.*)", R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f16_\[1,(8|32),16,16\]_.*_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__255_\[1,1,1,1\]_\{ 0 \}_\{ 25.4 \}_\{\}.*)", - R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f16_\[1,(8|32),16,16\]_.*_input_low.*0.*input_high=.*255.*_output_low=.*0.*_output_high.*25.5.*_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_undefined.*)", - R"(.*smoke_LPT_3D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/(f32|f16)_\[1,32,16,16\]_GPU_f32_\[16\]_level=256_shape=\[1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_undefined.*)", + R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f16_\[1,(8|32),16,16\]_.*_input_low.*0.*input_high=.*255.*_output_low=.*0.*_output_high.*25.5.*_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_dynamic.*)", + R"(.*smoke_LPT_3D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/(f32|f16)_\[1,32,16,16\]_GPU_f32_\[16\]_level=256_shape=\[1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__0_\[\]_\{ \}_\{ \}___f32_\{\}__\{ 4 \}_f32_\[\]_1_1_dynamic.*)", R"(.*smoke_LPT/FakeQuantizeAndMaxPoolTransformation.CompareWithRefImpl/f16_\[1,32,72,48\]_GPU_f32.*)", R"(.*smoke_LPT/FakeQuantizeAndAvgPoolTransformation.CompareWithRefImpl/f16_\[1,32,72,48\]_GPU_f32.*)", R"(.*smoke_LPT/FuseConvertTransformation.CompareWithRefImpl/f32_\[1,4,16,16\]_GPU_f32_.*)", diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp index be89ed9226a154..d93b0fe718f284 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/convolution.cpp @@ -72,7 +72,7 @@ class ConvolutionLayerGPUTest : public testing::WithParamInterfaceGetParam(); @@ -110,44 +110,42 @@ TEST_P(ConvolutionLayerGPUTest, Inference) { } // Check 3D input tensor for convolution is handled properly and its output is correct comparing with ov runtime. 
-INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_3D_tensor_basic, ConvolutionLayerGPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(std::vector{3}), - ::testing::Values(std::vector{1}), - ::testing::Values(std::vector{0}), - ::testing::Values(std::vector{0}), - ::testing::Values(std::vector{1}), - ::testing::Values(13), - ::testing::Values(ov::op::PadType::SAME_UPPER)), - ::testing::Values(ov::element::f16), - ::testing::Values(ov::element::f16), - ::testing::Values(ov::element::undefined), - ::testing::Values(InputShape{{}, {{1, 13, 30}}}), - ::testing::Values(false), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ConvolutionLayerGPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_3D_tensor_basic, + ConvolutionLayerGPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(std::vector{3}), + ::testing::Values(std::vector{1}), + ::testing::Values(std::vector{0}), + ::testing::Values(std::vector{0}), + ::testing::Values(std::vector{1}), + ::testing::Values(13), + ::testing::Values(ov::op::PadType::SAME_UPPER)), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::dynamic), + ::testing::Values(InputShape{{}, {{1, 13, 30}}}), + ::testing::Values(false), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + ConvolutionLayerGPUTest::getTestCaseName); // Customer model input/filter shape std::vector input_shape_reducesum_test = { {{}, {{1, 1, 36, 64}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_4D_tensor_ReduceSum, ConvolutionLayerGPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(std::vector{36, 64}), - ::testing::Values(std::vector{1, 1}), - ::testing::Values(std::vector{0, 0}), - ::testing::Values(std::vector{0, 0}), - ::testing::Values(std::vector{1, 1}), - ::testing::Values(1), - ::testing::Values(ov::op::PadType::EXPLICIT)), - ::testing::Values(ov::element::f16), - ::testing::Values(ov::element::f16), - ::testing::Values(ov::element::undefined), - ::testing::ValuesIn(input_shape_reducesum_test), - ::testing::Values(true), - ::testing::Values(ov::test::utils::DEVICE_GPU)), - ConvolutionLayerGPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionLayerGPUTest_4D_tensor_ReduceSum, + ConvolutionLayerGPUTest, + ::testing::Combine(::testing::Combine(::testing::Values(std::vector{36, 64}), + ::testing::Values(std::vector{1, 1}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(std::vector{0, 0}), + ::testing::Values(std::vector{1, 1}), + ::testing::Values(1), + ::testing::Values(ov::op::PadType::EXPLICIT)), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::f16), + ::testing::Values(ov::element::dynamic), + ::testing::ValuesIn(input_shape_reducesum_test), + ::testing::Values(true), + ::testing::Values(ov::test::utils::DEVICE_GPU)), + ConvolutionLayerGPUTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp index 0f1bb3a933d24a..1fc74b07c71537 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/convolution.cpp @@ -69,7 +69,7 @@ class ConvolutionLayerGPUTestDynamic : public testing::WithParamInterfaceGetParam(); @@ -372,7 +372,7 @@ class ConvolutionLayerGPUTestDynamicEltwiseFusing : public testing::WithParamInt 
void SetUp() override { convSpecificParams convParams; std::vector inputShapes; - auto model_type = ov::element::undefined; + auto model_type = ov::element::dynamic; bool activationFusing; std::tie(convParams, model_type, inputShapes, targetDevice, activationFusing) = this->GetParam(); diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp index 65a1f1c20c13f6..8ddbf63feb7f8b 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/group_convolution_backprop_data.cpp @@ -143,7 +143,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterface 0) { continue; } - if (inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -151,7 +151,7 @@ class GroupDeconvolutionLayerGPUTest : public testing::WithParamInterfaceget_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp index 9771dc67d38310..2b3a4e5684e8a7 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/groupconvolution.cpp @@ -65,7 +65,7 @@ class GroupConvolutionLayerGPUTestDynamic : public testing::WithParamInterfaceGetParam(); init_input_shapes({inputShape}); diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp index 133a515fe58104..16e25549d17293 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/interpolate.cpp @@ -139,7 +139,7 @@ class InterpolateLayerGPUTest : public testing::WithParamInterface 0) { continue; } - if (inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -147,7 +147,7 @@ class InterpolateLayerGPUTest : public testing::WithParamInterfaceget_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp index 36b6370a85c2f4..cb22c17506ff24 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/matmul.cpp @@ -231,22 +231,22 @@ const std::vector IS2D_nightly = { }; const auto testParams2D_smoke = ::testing::Combine(::testing::ValuesIn(IS2D_smoke), - ::testing::Values(ov::element::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - 
::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_2D, MatMulLayerGPUTest, testParams2D_smoke, MatMulLayerGPUTest::getTestCaseName); const auto testParams2D_nightly = ::testing::Combine(::testing::ValuesIn(IS2D_nightly), - ::testing::Values(ov::element::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(nightly_FC_2D, MatMulLayerGPUTest, testParams2D_nightly, MatMulLayerGPUTest::getTestCaseName); @@ -314,23 +314,25 @@ const std::vector IS3D_nightly = { } }; -const auto fullyConnectedParams3D_smoke = ::testing::Combine(::testing::ValuesIn(IS3D_smoke), - ::testing::ValuesIn(netPRCs_f32_i32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); +const auto fullyConnectedParams3D_smoke = + ::testing::Combine(::testing::ValuesIn(IS3D_smoke), + ::testing::ValuesIn(netPRCs_f32_i32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_3D, MatMulLayerGPUTest, fullyConnectedParams3D_smoke, MatMulLayerGPUTest::getTestCaseName); -const auto fullyConnectedParams3D_nightly = ::testing::Combine(::testing::ValuesIn(IS3D_nightly), - ::testing::Values(ov::element::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); +const auto fullyConnectedParams3D_nightly = + ::testing::Combine(::testing::ValuesIn(IS3D_nightly), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(nightly_FC_3D, MatMulLayerGPUTest, fullyConnectedParams3D_nightly, MatMulLayerGPUTest::getTestCaseName); @@ -365,13 +367,14 @@ const std::vector IS4D_smoke = { } }; -const auto fullyConnectedParams4D_smoke = ::testing::Combine(::testing::ValuesIn(IS4D_smoke), - ::testing::Values(ov::element::f32), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), - 
::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(emptyAdditionalConfig)); +const auto fullyConnectedParams4D_smoke = + ::testing::Combine(::testing::ValuesIn(IS4D_smoke), + ::testing::Values(ov::element::f32), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(emptyAdditionalConfig)); INSTANTIATE_TEST_SUITE_P(smoke_FC_4D, MatMulLayerGPUTest, fullyConnectedParams4D_smoke, MatMulLayerGPUTest::getTestCaseName); @@ -675,8 +678,8 @@ const std::vector IS_Dynamic_nightly = { const auto testParams = ::testing::Combine(::testing::ValuesIn(IS), ::testing::ValuesIn(netPRCs), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(additional_config)); @@ -685,8 +688,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_MM_Static, MatMulLayerGPUTest, testParams, MatMul const auto testParamsOneDNN = ::testing::Combine(::testing::ValuesIn(IS_OneDNN), ::testing::Values(ov::element::f16), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(additional_config)); @@ -695,8 +698,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_MM_Static_OneDNN, MatMulLayerGPUTest, testParamsO const auto testParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic), ::testing::ValuesIn(netPRCs), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(ov::test::utils::DEVICE_GPU), ::testing::ValuesIn(additional_config)); @@ -704,12 +707,12 @@ const auto testParamsDynamic = ::testing::Combine(::testing::ValuesIn(IS_Dynamic INSTANTIATE_TEST_SUITE_P(smoke_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic, MatMulLayerGPUTest::getTestCaseName); const auto testParamsDynamic_nightly = ::testing::Combine(::testing::ValuesIn(IS_Dynamic_nightly), - ::testing::ValuesIn(netPRCs), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::ValuesIn(additional_config)); + ::testing::ValuesIn(netPRCs), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::ValuesIn(additional_config)); INSTANTIATE_TEST_SUITE_P(nightly_MM_Dynamic, MatMulLayerGPUTest, testParamsDynamic_nightly, MatMulLayerGPUTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp index f8e73f9f058001..27c33f1a117dd9 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp +++ 
b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/prior_box.cpp @@ -73,7 +73,7 @@ class PriorBoxLayerGPUTest : public testing::WithParamInterface max_size; @@ -226,4 +226,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_prior_box_full_dynamic, ::testing::ValuesIn(max_size), ::testing::ValuesIn(mode)), PriorBoxLayerGPUTest::getTestCaseName); -} // namespace \ No newline at end of file +} // namespace diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp index bb50df13a3ebc8..55983556212618 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/range.cpp @@ -113,7 +113,7 @@ class RangeDynamicGPUTest : public testing::WithParamInterface types = { ov::element::f32, ov::element::i32, ov::element::f32 }; for (size_t i = 0; i < types.size(); i++) { auto paramNode = std::make_shared(types[i], inputDynamicShapes[i]); @@ -230,9 +230,7 @@ const std::vector> inputMixedValues = { const std::vector netMixedPrecisions = { // Mixed type test(start/step:fp32, end:i32) - ov::element::undefined -}; - + ov::element::dynamic}; const auto testMixedParams_smoke = ::testing::Combine(::testing::ValuesIn(dynInputShapes), ::testing::ValuesIn(inputMixedValues), diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp index dcdff1f7eda3d9..b8c0cfdcca59fc 100644 --- a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/top_k.cpp @@ -178,32 +178,32 @@ std::vector input_shapesDynamic = { } }; -INSTANTIATE_TEST_SUITE_P(smoke_TopK_constant_dynamic, TopKLayerGPUTest, - ::testing::Combine( - ::testing::ValuesIn(k), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypes), - ::testing::ValuesIn(model_types), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::ValuesIn(input_shapesDynamic), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(ov::test::utils::InputLayerType::CONSTANT)), - TopKLayerGPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_TopK_parameter_dynamic, TopKLayerGPUTest, - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypes), - ::testing::ValuesIn(model_types), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), - ::testing::ValuesIn(input_shapesDynamic), - ::testing::Values(ov::test::utils::DEVICE_GPU), - ::testing::Values(ov::test::utils::InputLayerType::PARAMETER)), - TopKLayerGPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_TopK_constant_dynamic, + TopKLayerGPUTest, + ::testing::Combine(::testing::ValuesIn(k), + ::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypes), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::ValuesIn(input_shapesDynamic), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT)), + TopKLayerGPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_TopK_parameter_dynamic, + TopKLayerGPUTest, + ::testing::Combine(::testing::Values(1), + 
::testing::ValuesIn(axes), + ::testing::ValuesIn(modes), + ::testing::ValuesIn(sortTypes), + ::testing::ValuesIn(model_types), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), + ::testing::ValuesIn(input_shapesDynamic), + ::testing::Values(ov::test::utils::DEVICE_GPU), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER)), + TopKLayerGPUTest::getTestCaseName); } // namespace diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp index b079017d5c12e0..3092a7967a4a01 100644 --- a/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/range_si_test.cpp @@ -60,7 +60,7 @@ TEST_P(range_si_test, shape_infer) { auto in_layout = input_layouts[idx]; if (in_layout.is_static() && (idx < p.vals.size())) { auto prim_mem = engine.allocate_memory(in_layout); - ASSERT_NE(p.out_data_type, data_types::undefined); + ASSERT_NE(p.out_data_type, data_types::dynamic); switch (p.out_data_type) { case data_types::f16: set_values(prim_mem, {ov::float16(p.vals[idx]).to_bits()}); diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp index bcd5cf739b8e6b..13a9d96c872e58 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/border_gpu_test.cpp @@ -1830,7 +1830,7 @@ TEST(border_gpu, basic_zero_input_dynamic) { auto& engine = get_test_engine(); // WA to avoid crash due to attempt to allocate 0 bytes for USM memory - layout fake_input_layout = {{1}, data_types::undefined, format::bfyx}; + layout fake_input_layout = {{1}, data_types::dynamic, format::bfyx}; auto input = engine.allocate_memory(fake_input_layout); layout zero_input_layout = {{0, 1}, data_types::f32, format::bfyx}; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp index 3bf1a9e937c37c..4114d9b9d22d3f 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp @@ -27,14 +27,13 @@ using OutputStorageType = ov::op::internal::DynamicQuantize::OutputStorageType; class dynamic_quantization_gpu_tests: public ::testing::Test { public: - void test_dynamic_quantization(bool is_caching_test, const ov::PartialShape& input_shape, const ov::Shape& data_shape, const QuantizationType quantization_type = QuantizationType::Symmetric, uint64_t group_size = UINT64_MAX, data_types quant_dt = data_types::i8, - data_types zp_dt = data_types::undefined, + data_types zp_dt = data_types::dynamic, OutputStorageType storage_type = OutputStorageType::Planar, const std::string& impl_name = "", bool set_inner_most_dim_values_zero = false) { @@ -216,23 +215,51 @@ TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_unaligned_dynamic) { } TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache) { - this->test_dynamic_quantization(false, {-1, 8, -1, 96}, {1, 8, 1, 96}, QuantizationType::Symmetric, UINT64_MAX, - data_types::i8, data_types::undefined, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); + this->test_dynamic_quantization(false, + {-1, 8, -1, 96}, + {1, 8, 1, 96}, + QuantizationType::Symmetric, + UINT64_MAX, + data_types::i8, + data_types::dynamic, + OutputStorageType::Planar, + 
"dynamic_quantize_gpu_kv_cache"); } TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched) { - this->test_dynamic_quantization(false, {-1, 4, -1, 64}, {1, 4, 35, 64}, QuantizationType::Symmetric, UINT64_MAX, - data_types::i8, data_types::undefined, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); + this->test_dynamic_quantization(false, + {-1, 4, -1, 64}, + {1, 4, 35, 64}, + QuantizationType::Symmetric, + UINT64_MAX, + data_types::i8, + data_types::dynamic, + OutputStorageType::Planar, + "dynamic_quantize_gpu_kv_cache"); } TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_reordered) { - this->test_dynamic_quantization(false, {-1, -1, 8, 96}, {1, 1, 8, 96}, QuantizationType::Symmetric, UINT64_MAX, - data_types::i8, data_types::undefined, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); + this->test_dynamic_quantization(false, + {-1, -1, 8, 96}, + {1, 1, 8, 96}, + QuantizationType::Symmetric, + UINT64_MAX, + data_types::i8, + data_types::dynamic, + OutputStorageType::Planar, + "dynamic_quantize_gpu_kv_cache"); } TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_reordered) { - this->test_dynamic_quantization(false, {-1, -1, 4, 64}, {1, 35, 4, 64}, QuantizationType::Symmetric, UINT64_MAX, - data_types::i8, data_types::undefined, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); + this->test_dynamic_quantization(false, + {-1, -1, 4, 64}, + {1, 35, 4, 64}, + QuantizationType::Symmetric, + UINT64_MAX, + data_types::i8, + data_types::dynamic, + OutputStorageType::Planar, + "dynamic_quantize_gpu_kv_cache"); } TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_asym_planar) { diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp index 3384fb1ed514f6..dc519ab325d357 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/hash_key_gpu_test.cpp @@ -44,7 +44,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4145865612957978777UL); - ASSERT_EQ(params_hash, 5324604476707368882UL); + ASSERT_EQ(params_hash, 1717643793116242977UL); } void test_fc_basic(bool is_caching_test) { @@ -72,10 +72,10 @@ class check_hash_value: public ::testing::Test { const auto params_hash = primitve->type->get_fake_aligned_params(*prim_inst->get_impl_params()).hash(); if (!engine.get_device_info().supports_immad) { ASSERT_EQ(primitive_hash, 9510988594087947885UL); - ASSERT_EQ(params_hash, 7833603199176871790UL); + ASSERT_EQ(params_hash, 1095272671134235967UL); } else { ASSERT_EQ(primitive_hash, 9510988594087947885UL); - ASSERT_EQ(params_hash, 16259702189938020305UL); + ASSERT_EQ(params_hash, 12994953567935633205UL); } } @@ -104,8 +104,8 @@ class check_hash_value: public ::testing::Test { const auto primitive_hash = primitve->hash(); const auto params_hash = prim_inst->get_impl_params()->hash(); - ASSERT_EQ(primitive_hash, 8439414674502129643UL); - ASSERT_EQ(params_hash, 17825246500238118561UL); + ASSERT_EQ(primitive_hash, 7823853951962111674UL); + ASSERT_EQ(params_hash, 5049423120420866837UL); } void test_gemm_basic(bool is_caching_test) { @@ -128,7 +128,7 @@ class check_hash_value: public ::testing::Test { const auto primitive_hash = primitve->hash(); const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 13388149315122571178UL); - 
ASSERT_EQ(params_hash, 17826282051937408484UL); + ASSERT_EQ(params_hash, 17362657208739837157UL); } void test_permute_basic(bool is_caching_test) { @@ -149,7 +149,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4658575237077439700UL); - ASSERT_EQ(params_hash, 13514742184580394157UL); + ASSERT_EQ(params_hash, 15976735712435632434UL); } void test_reorder_basic(bool is_caching_test) { @@ -176,7 +176,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 16293979194373117693UL); - ASSERT_EQ(params_hash, 17794336956091484480UL); + ASSERT_EQ(params_hash, 3897060862531064919UL); } void test_reshape_basic(bool is_caching_test) { @@ -202,7 +202,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 1534749073560581535UL); - ASSERT_EQ(params_hash, 6798482801217293235UL); + ASSERT_EQ(params_hash, 6426521365118381035UL); } void test_conv_basic(bool is_caching_test) { @@ -227,7 +227,7 @@ class check_hash_value: public ::testing::Test { const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 13549661972131371304UL); - ASSERT_EQ(params_hash, 8974374790280144289UL); + ASSERT_EQ(params_hash, 4514788296955089688UL); } void test_quantize_basic(bool is_caching_test) { @@ -257,7 +257,7 @@ class check_hash_value: public ::testing::Test { const auto primitive_hash = primitve->hash(); const auto params_hash = prim_inst->get_impl_params()->hash(); ASSERT_EQ(primitive_hash, 4135863035456568493UL); - ASSERT_EQ(params_hash, 2000365791052292402UL); + ASSERT_EQ(params_hash, 9610563181439837451UL); } }; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp index e25903df916995..7322fda2675a4b 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/lru_caches_gpu_test.cpp @@ -133,8 +133,8 @@ struct ImplHasher { } // namespace TEST(lru_cache, collisions) { - auto l1 = layout{{1, 3, 27, 85}, data_types::f32, format::bfyx}; - auto l2 = layout{{1, 3, 26, 24}, data_types::f32, format::bfyx}; + auto l1 = layout{{1, 3, 27, 92}, data_types::f32, format::bfyx}; + auto l2 = layout{{1, 3, 28, 29}, data_types::f32, format::bfyx}; auto input1_prim = std::make_shared("input1", l1); auto input2_prim = std::make_shared("input2", l2); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/transpose_matmul_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/transpose_matmul_fusion_test.cpp index aa64cd8e85539c..4823c2296173b4 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/transpose_matmul_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/transpose_matmul_fusion_test.cpp @@ -39,7 +39,12 @@ TEST_F(TransformationTestsF, TranposeMatmulFusion1) { std::vector order_c = {0, 1, 2, 3}; auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto gemm = std::make_shared(input_a, input_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + input_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ 
input_a, input_b }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -63,7 +68,12 @@ TEST_F(TransformationTestsF, TranposeMatmulFusion2) { std::vector order_c = {0, 1, 2, 3}; auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto gemm = std::make_shared(input_a, input_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + input_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -89,7 +99,12 @@ TEST_F(TransformationTestsF, TranposeMatmulFusion3) { std::vector order_c = {0, 1, 2, 3}; auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto gemm = std::make_shared(input_a, input_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + input_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -117,7 +132,12 @@ TEST_F(TransformationTestsF, TranposeMatmulFusion4) { std::vector order_c = {0, 2, 1, 3}; auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto gemm = std::make_shared(input_a, input_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + input_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -143,7 +163,12 @@ TEST_F(TransformationTestsF, TranposeMatmulFusion5) { std::vector order_c = {0, 2, 1}; auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(3)); - auto gemm = std::make_shared(input_a, input_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + input_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); comparator.enable(FunctionsComparator::ATTRIBUTES); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/transpose_sdpa_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/transpose_sdpa_fusion_test.cpp index f6d21681767f05..b08d05c621d928 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/transpose_sdpa_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/transpose_sdpa_fusion_test.cpp @@ -43,7 +43,13 @@ TEST_F(TransformationTestsF, TranposeSDPAFusion1) { auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_c = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, is_causal, order_a, order_b, order_c, order_output, ov::element::undefined ); + auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, + is_causal, + order_a, + 
order_b, + order_c, + order_output, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ sdpa }, ov::ParameterVector{ input_a, input_b, input_c }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -71,7 +77,13 @@ TEST_F(TransformationTestsF, TransformationTestsF) { auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_c = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, is_causal, order_a, order_b, order_c, order_output, ov::element::undefined); + auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, + is_causal, + order_a, + order_b, + order_c, + order_output, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ sdpa }, ov::ParameterVector{ input_a, input_b, input_c }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -102,7 +114,13 @@ TEST_F(TransformationTestsF, TranposeSDPAFusion3) { auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_c = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, is_causal, order_a, order_b, order_c, order_output, ov::element::undefined); + auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, + is_causal, + order_a, + order_b, + order_c, + order_output, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ sdpa }, ov::ParameterVector{ input_a, input_b, input_c }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -135,7 +153,13 @@ TEST_F(TransformationTestsF, TranposeSDPAFusion4) { auto input_a = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_b = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); auto input_c = std::make_shared(ov::element::f32, ov::PartialShape::dynamic(4)); - auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, is_causal, order_a, order_b, order_c, order_output, ov::element::undefined); + auto sdpa = std::make_shared(ov::OutputVector{input_a, input_b, input_c}, + is_causal, + order_a, + order_b, + order_c, + order_output, + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ sdpa }, ov::ParameterVector{ input_a, input_b, input_c }); comparator.enable(FunctionsComparator::ATTRIBUTES); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/unsqueeze_broadcast_reshape_matmul_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/transformations/unsqueeze_broadcast_reshape_matmul_fusion_test.cpp index d76a4809a950e0..5ab64ea86ad3f6 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/unsqueeze_broadcast_reshape_matmul_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/unsqueeze_broadcast_reshape_matmul_fusion_test.cpp @@ -50,7 +50,12 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion1) { auto broadcast = std::make_shared(unsqueeze, target_shape, ov::op::BroadcastType::BIDIRECTIONAL); auto pattern = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, pattern_b); auto reshape = std::make_shared(broadcast, pattern, true); - auto gemm = std::make_shared(input_a, reshape, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + reshape, + order_a, + 
order_b, + order_c, + ov::element::dynamic); model = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, new_token_param, beam_idx }); manager.register_pass(); @@ -69,7 +74,7 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion1) { order_a, order_b, order_c, - ov::element::undefined); + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, new_token_param, beam_idx }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -98,7 +103,12 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion2) { auto broadcast = std::make_shared(unsqueeze, abs, ov::op::BroadcastType::BIDIRECTIONAL); auto pattern = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, pattern_b); auto reshape = std::make_shared(broadcast, pattern, true); - auto gemm = std::make_shared(input_a, reshape, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + reshape, + order_a, + order_b, + order_c, + ov::element::dynamic); model = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, new_token_param, beam_idx, abs_param }); manager.register_pass(); @@ -117,7 +127,7 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion2) { order_a, order_b, order_c, - ov::element::undefined); + ov::element::dynamic); model_ref = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, new_token_param, beam_idx }); comparator.enable(FunctionsComparator::ATTRIBUTES); @@ -140,7 +150,12 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion3) { auto broadcast_b = std::make_shared(unsqueeze_b, broadcast_b_const, ov::op::BroadcastType::BIDIRECTIONAL); auto reshape_b_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, pattern_b); auto reshape_b = std::make_shared(broadcast_b, reshape_b_const, true); - auto gemm = std::make_shared(input_a, reshape_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + reshape_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); manager.register_pass(); @@ -167,7 +182,12 @@ TEST_F(TransformationTestsF, UnsqueezeBroadReshapeMatmulFusion4) { auto broadcast_b = std::make_shared(unsqueeze_b, broadcast_b_const, ov::op::BroadcastType::BIDIRECTIONAL); auto reshape_b_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{4}, pattern_b); auto reshape_b = std::make_shared(broadcast_b, reshape_b_const, true); - auto gemm = std::make_shared(input_a, reshape_b, order_a, order_b, order_c, ov::element::undefined); + auto gemm = std::make_shared(input_a, + reshape_b, + order_a, + order_b, + order_c, + ov::element::dynamic); model = std::make_shared(ov::NodeVector{ gemm }, ov::ParameterVector{ input_a, input_b }); manager.register_pass(); diff --git a/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp b/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp index 6d34186b22b3fa..5a2e3c5cb93dac 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_tensor.cpp @@ -21,7 +21,7 @@ ZeroTensor::ZeroTensor(const std::shared_ptr& init_struct _strides{}, _strides_once{}, _allocator{allocator} { - OPENVINO_ASSERT(_element_type != ov::element::undefined && _element_type.is_static()); + OPENVINO_ASSERT(_element_type.is_static()); OPENVINO_ASSERT(allocator, "Allocator was not initialized"); const auto byte_size = 
ov::element::get_memory_size(_element_type, shape_size(_shape)); auto data = const_cast(_allocator).allocate(byte_size); @@ -31,7 +31,7 @@ ZeroTensor::ZeroTensor(const std::shared_ptr& init_struct } void* ZeroTensor::data(const ov::element::Type& element_type) const { - if (element_type != ov::element::undefined && element_type != ov::element::dynamic && + if (element_type != ov::element::dynamic && (element_type.bitwidth() != get_element_type().bitwidth() || element_type.is_real() != get_element_type().is_real() || (element_type == ov::element::string && get_element_type() != ov::element::string) || diff --git a/src/plugins/intel_npu/src/common/src/remote_tensor.cpp b/src/plugins/intel_npu/src/common/src/remote_tensor.cpp index 0cb327b785fd21..ce82a4fa106073 100644 --- a/src/plugins/intel_npu/src/common/src/remote_tensor.cpp +++ b/src/plugins/intel_npu/src/common/src/remote_tensor.cpp @@ -18,7 +18,7 @@ RemoteTensor::RemoteTensor(const std::shared_ptr& context, _shape(shape), _capacity(shape) { OPENVINO_ASSERT(shape_size(_shape) != 0); - OPENVINO_ASSERT(_element_type != ov::element::undefined && _element_type.is_static()); + OPENVINO_ASSERT(_element_type.is_static()); } RemoteTensor::~RemoteTensor() = default; diff --git a/src/plugins/intel_npu/src/compiler_adapter/src/driver_compiler_adapter.cpp b/src/plugins/intel_npu/src/compiler_adapter/src/driver_compiler_adapter.cpp index 624ba448fed44f..bedf2513f0f3a5 100644 --- a/src/plugins/intel_npu/src/compiler_adapter/src/driver_compiler_adapter.cpp +++ b/src/plugins/intel_npu/src/compiler_adapter/src/driver_compiler_adapter.cpp @@ -68,8 +68,6 @@ void checkedMemcpy(void* destination, size_t destinationSize, void const* source */ std::string ovPrecisionToLegacyPrecisionString(const ov::element::Type& precision) { switch (precision) { - case ov::element::Type_t::undefined: - return "UNSPECIFIED"; case ov::element::Type_t::f16: return "FP16"; case ov::element::Type_t::f32: diff --git a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp index d5e793d4fff9fe..66c928e2a86a5b 100644 --- a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp +++ b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp @@ -40,7 +40,7 @@ namespace { ov::element::Type_t toOVElementType(const ze_graph_argument_precision_t zeElementType) { switch (zeElementType) { case ZE_GRAPH_ARGUMENT_PRECISION_UNKNOWN: - return ov::element::Type_t::undefined; + return ov::element::Type_t::dynamic; case ZE_GRAPH_ARGUMENT_PRECISION_DYNAMIC: return ov::element::Type_t::dynamic; case ZE_GRAPH_ARGUMENT_PRECISION_BOOLEAN: @@ -76,7 +76,7 @@ ov::element::Type_t toOVElementType(const ze_graph_argument_precision_t zeElemen case ZE_GRAPH_ARGUMENT_PRECISION_UINT64: return ov::element::Type_t::u64; default: - return ov::element::Type_t::undefined; + return ov::element::Type_t::dynamic; } } diff --git a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp index 24e70aa7125e52..9e6c838c9eeccd 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp @@ -864,7 +864,7 @@ std::string ov::npuw::CompiledModel::funcall_mem_device(const std::size_t idx) c void ov::npuw::CompiledModel::remove_long_output_names(const std::shared_ptr& model) { NPUW_ASSERT(model.get() != nullptr); for (auto node : model->get_ordered_ops()) { - for (auto &&output 
: node->outputs()) { + for (auto&& output : node->outputs()) { const auto& tensor_names = output.get_tensor().get_names(); if (tensor_names.size() > 32) { LOG_VERB(model->get_friendly_name() << " output " << output << " exceeds the name limit, removing..."); diff --git a/src/plugins/intel_npu/tests/unit/npuw/unpack.cpp b/src/plugins/intel_npu/tests/unit/npuw/unpack.cpp index 1049832f6ead7c..1404931502e661 100644 --- a/src/plugins/intel_npu/tests/unit/npuw/unpack.cpp +++ b/src/plugins/intel_npu/tests/unit/npuw/unpack.cpp @@ -10,13 +10,21 @@ namespace { const auto TestCases = ::testing::Combine( ::testing::ValuesIn({ov::element::Type_t::i4}), ::testing::ValuesIn({ov::element::Type_t::i8, ov::element::Type_t::f16}), - ::testing::ValuesIn({ov::element::Type_t::undefined}), // no used in this test - ::testing::ValuesIn({ov::element::Type_t::undefined}), // no used in this test + ::testing::ValuesIn({ov::element::Type_t::dynamic}), // not used in this test + ::testing::ValuesIn({ov::element::Type_t::dynamic}), // not used in this test ::testing::ValuesIn({3lu, 0lu}), - ::details::ShapesIn({Tensors{input={1, 1, 1, 32};}, - Tensors{input={1,1,1, 128};}, - Tensors{input={1,1,1, 390};}, - Tensors{input={1,1,1, 82};}}), + ::details::ShapesIn({Tensors{input={1, 1, 1, 32}; +} +, Tensors { + input = {1, 1, 1, 128}; +} +, Tensors { + input = {1, 1, 1, 390}; +} +, Tensors { + input = {1, 1, 1, 82}; +} +}), ::testing::ValuesIn({true, false}), ::testing::ValuesIn({true, false}) ); @@ -29,13 +37,27 @@ const auto TestCasesScale = ::testing::Combine( ::testing::ValuesIn({ov::element::Type_t::i4}), // TODO: add i8 as input for test ::testing::ValuesIn({ov::element::Type_t::f16, ov::element::Type_t::f32}), ::testing::ValuesIn({ov::element::Type_t::f16, ov::element::Type_t::f32}), - ::testing::ValuesIn({ov::element::Type_t::undefined}), // no used in this test + ::testing::ValuesIn({ov::element::Type_t::dynamic}), // not used in this test ::testing::ValuesIn({3lu, 0lu}), - ::details::ShapesIn({Tensors{input={1,32, 128}; scale = {1, 32, 1};}, - Tensors{input={32, 128}; scale = {32, 1};}, - Tensors{input={64, 160}; scale = {64, 1};}, - Tensors{input={1024, 4}; scale = {64, 1};}, - Tensors{input={1, 1, 1024, 4}; scale = {1, 1, 64, 1};}}), + ::details::ShapesIn({Tensors{input={1,32, 128}; scale = {1, 32, 1}; +} +, Tensors { + input = {32, 128}; + scale = {32, 1}; +} +, Tensors { + input = {64, 160}; + scale = {64, 1}; +} +, Tensors { + input = {1024, 4}; + scale = {64, 1}; +} +, Tensors { + input = {1, 1, 1024, 4}; + scale = {1, 1, 64, 1}; +} +}), ::testing::ValuesIn({true, false}), ::testing::ValuesIn({true, false}) ); diff --git a/src/plugins/intel_npu/tests/unit/npuw/unpack.hpp b/src/plugins/intel_npu/tests/unit/npuw/unpack.hpp index da5bb4e4720f3e..8bac0ff1706ffa 100644 --- a/src/plugins/intel_npu/tests/unit/npuw/unpack.hpp +++ b/src/plugins/intel_npu/tests/unit/npuw/unpack.hpp @@ -186,7 +186,7 @@ class UnpackTestsBase { bool strictPartitions = false; void make_zeropoints() { - if (zeropType == ov::element::undefined) { + if (zeropType == ov::element::dynamic) { return; } @@ -230,7 +230,7 @@ class UnpackTestsBase { } void make_scales() { - if (scaleType == ov::element::undefined) { + if (scaleType == ov::element::dynamic) { return; } ASSERT_TRUE(scaleType == ov::element::f16 || scaleType == ov::element::f32); @@ -334,9 +334,9 @@ class UnpackTestsBase { << (useParallelFor ? 
"_parallel" : "_serial") << "_from_" << fromType << "_to_" << toType; - if (scaleType != ov::element::Type_t::undefined) + if (scaleType != ov::element::Type_t::dynamic) result << "_scale_" << scaleType; - if (zeropType != ov::element::Type_t::undefined) + if (zeropType != ov::element::Type_t::dynamic) result << "_zerop_" << zeropType; return result.str(); diff --git a/src/plugins/intel_npu/tools/single-image-test/main.cpp b/src/plugins/intel_npu/tools/single-image-test/main.cpp index 3188075fc58148..fa9c6dfeb1be60 100644 --- a/src/plugins/intel_npu/tools/single-image-test/main.cpp +++ b/src/plugins/intel_npu/tools/single-image-test/main.cpp @@ -755,7 +755,7 @@ void loadBinary(const std::string& filePath, const BatchIndexer &fileSourceInBat const size_t fileBytes = static_cast(fileSize); const size_t reqTensorBytes = static_cast(requestedTensor.get_byte_size()); - if (dataPrecision != modelPrecision && dataPrecision != ov::element::Type_t::undefined) { + if (dataPrecision != modelPrecision && dataPrecision != ov::element::Type_t::dynamic) { std::cout << "Converting " << filePath << " input from " << dataPrecision << " to " << modelPrecision << std::endl; const ov::Tensor inputTensor(dataPrecision, shape); @@ -855,9 +855,12 @@ ov::Tensor loadBinaries(const ov::element::Type& modelPrecision, const ov::Shape * @param dataPrecision Indicates the precision used by the data found within the binary file. * @return The tensor containing the loaded data. */ -ov::Tensor loadInput(const ov::element::Type& modelPrecision, const ov::Shape& shape, const ov::Layout& layout, - const std::vector& filePaths, const std::string& colorFormat, - const ov::element::Type& dataPrecision = ov::element::Type_t::undefined) { +ov::Tensor loadInput(const ov::element::Type& modelPrecision, + const ov::Shape& shape, + const ov::Layout& layout, + const std::vector& filePaths, + const std::string& colorFormat, + const ov::element::Type& dataPrecision = ov::element::Type_t::dynamic) { if (isImage(shape, layout) && !FLAGS_img_as_bin) { return loadImages(modelPrecision, shape, layout, filePaths, colorFormat); } else { @@ -1866,7 +1869,7 @@ static int runSingleImageTest() { if (FLAGS_img_as_bin) { for (std::size_t i = 0; i < inputFilesForOneInfer.size(); ++i) { inputBinPrecisionForOneInfer[i] = - std::vector(inputFilesForOneInfer[i].size(), ov::element::undefined); + std::vector(inputFilesForOneInfer[i].size(), ov::element::dynamic); } inputBinPrecisionStrPerCase = splitStringList(FLAGS_img_bin_precision, ';'); std::size_t inferIdx = 0; diff --git a/src/plugins/template/src/config.hpp b/src/plugins/template/src/config.hpp index ed7a25d3f0ca49..ecdd56b1a71974 100644 --- a/src/plugins/template/src/config.hpp +++ b/src/plugins/template/src/config.hpp @@ -42,7 +42,7 @@ struct Configuration { bool exclusive_async_requests = false; // unused - ov::element::Type inference_precision = ov::element::undefined; + ov::element::Type inference_precision = ov::element::dynamic; ov::hint::ExecutionMode execution_mode = ov::hint::ExecutionMode::ACCURACY; ov::log::Level log_level = ov::log::Level::NO; diff --git a/src/plugins/template/tests/functional/op_reference/rms_internal.cpp b/src/plugins/template/tests/functional/op_reference/rms_internal.cpp index ddda093ed86388..e7b90b6a2a11f9 100644 --- a/src/plugins/template/tests/functional/op_reference/rms_internal.cpp +++ b/src/plugins/template/tests/functional/op_reference/rms_internal.cpp @@ -39,7 +39,7 @@ class ReferenceRMSLayerTest : public testing::TestWithParam, public C void SetUp() 
override { auto params = GetParam(); const auto output_type = - params.expected.type == params.input.type ? ov::element::undefined : params.expected.type; + params.expected.type == params.input.type ? ov::element::dynamic : params.expected.type; function = CreateFunction(params.input, params.eps, params.scale, output_type); if (!params.scale.data) { inputData = {params.input.data}; diff --git a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp index 356e2ffa013aae..6394877d2ea32a 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/eltwise.cpp @@ -78,8 +78,8 @@ const auto multiply_params = ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_TEMPLATE), ::testing::Values(additional_config)); @@ -88,8 +88,8 @@ const auto multiply_params_dynamic = ::testing::Combine(::testing::ValuesIn(inSh ::testing::ValuesIn(secondaryInputTypesDynamic), ::testing::ValuesIn(opTypesDynamic), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_TEMPLATE), ::testing::Values(additional_config)); @@ -121,8 +121,8 @@ const auto single_thread_params = ::testing::ValuesIn(secondaryInputTypes), ::testing::ValuesIn(opTypes), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), ::testing::Values(ov::test::utils::DEVICE_TEMPLATE), ::testing::Values(additional_config_single_thread)); diff --git a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp index 0c76f5dae72a43..71b1da88da7cdc 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/single_layer_tests/softmax.cpp @@ -33,16 +33,16 @@ const std::vector axis2D = {-2, -1, 0, 1}; const auto params2D_static = testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape2D)), testing::ValuesIn(axis2D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), testing::Values(ov::AnyMap())); const auto params2D_dynamic = testing::Combine(testing::ValuesIn(netPrecisions), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(inputDynamicShape2D), testing::ValuesIn(axis2D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), @@ -78,16 +78,16 @@ const std::vector axis4D 
= {0, 1, 2, 3, -1, -2, -3, -4}; const auto params4Dstatic = testing::Combine(testing::ValuesIn(netPrecisions4D), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape4D)), testing::ValuesIn(axis4D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), testing::Values(ov::AnyMap())); const auto params4Ddynamic = testing::Combine(testing::ValuesIn(netPrecisions4D), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(inputDynamicShape4D), testing::ValuesIn(axis4D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), @@ -124,16 +124,16 @@ const std::vector axis5D = {0, 1, 2, 3, 4, -1, -2, -3, -4, -5}; const auto params5Dstatic = testing::Combine(testing::ValuesIn(netPrecisions5D), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(ov::test::static_shapes_to_test_representation(inputStaticShape5D)), testing::ValuesIn(axis5D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), testing::Values(ov::AnyMap())); const auto params5Ddynamic = testing::Combine(testing::ValuesIn(netPrecisions5D), - ::testing::Values(ov::element::undefined), - ::testing::Values(ov::element::undefined), + ::testing::Values(ov::element::dynamic), + ::testing::Values(ov::element::dynamic), testing::ValuesIn(inputDynamicShape5D), testing::ValuesIn(axis5D), testing::Values(ov::test::utils::DEVICE_TEMPLATE), diff --git a/src/plugins/template/tests/functional/skip_tests_config.cpp b/src/plugins/template/tests/functional/skip_tests_config.cpp index e3dc1d81d53159..95cb3ac061120f 100644 --- a/src/plugins/template/tests/functional/skip_tests_config.cpp +++ b/src/plugins/template/tests/functional/skip_tests_config.cpp @@ -100,10 +100,10 @@ std::vector disabledTestPatterns() { // Precision not high enough to get exact result for the complex test cases // (both tiny values and very high values necessary) R"(.*ReferenceInverse.*bf16.*[4,4].*)", - R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=\(\[\]_\)_TS=.*(4.4.200|1.10.200|10.200|2.200|1.10.100|4.4.16).*_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=undefined_OutType=undefined.*)", - R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=\(\(2.17.5.1\)_\(1.17.1.4\)_\)_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f16_InType=undefined_OutType=undefined_.*)", - R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=.*(2.200|10.200|1.10.100|4.4.16|1.2.4|1.4.4|1.4.4.1).*eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f16_InType=undefined_OutType=undefined.*)", - R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=.*2.*eltwise_op_type=Pow_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=undefined_OutType=undefined.*)", + R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=\(\[\]_\)_TS=.*(4.4.200|1.10.200|10.200|2.200|1.10.100|4.4.16).*_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic.*)", + 
R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=\(\(2.17.5.1\)_\(1.17.1.4\)_\)_eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f16_InType=dynamic_OutType=dynamic_.*)", + R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=.*(2.200|10.200|1.10.100|4.4.16|1.2.4|1.4.4|1.4.4.1).*eltwise_op_type=Mod_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f16_InType=dynamic_OutType=dynamic.*)", + R"(.*smoke_CompareWithRefs_static/EltwiseLayerTest.Inference/IS=.*_TS=.*2.*eltwise_op_type=Pow_secondary_input_type=PARAMETER_opType=VECTOR_model_type=f32_InType=dynamic_OutType=dynamic.*)", }; #ifdef _WIN32 // CVS-63989 diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/types.hpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/types.hpp index 883f697db0a7a4..1739049bd09578 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/types.hpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/include/utils/types.hpp @@ -12,26 +12,27 @@ namespace test { namespace op_conformance { static std::set get_element_type_names() { - std::vector element_types = { ov::element::Type_t::f64, - ov::element::Type_t::f32, - ov::element::Type_t::f16, - ov::element::Type_t::bf16, - ov::element::Type_t::nf4, - ov::element::Type_t::i64, - ov::element::Type_t::i32, - ov::element::Type_t::i16, - ov::element::Type_t::i8, - ov::element::Type_t::i4, - ov::element::Type_t::u64, - ov::element::Type_t::u32, - ov::element::Type_t::u16, - ov::element::Type_t::u8, - ov::element::Type_t::u4, - ov::element::Type_t::u1, - ov::element::Type_t::boolean, - ov::element::Type_t::dynamic, - ov::element::Type_t::undefined, - }; + std::vector element_types = {ov::element::Type_t::f64, + ov::element::Type_t::f32, + ov::element::Type_t::f16, + ov::element::Type_t::bf16, + ov::element::Type_t::nf4, + ov::element::Type_t::i64, + ov::element::Type_t::i32, + ov::element::Type_t::i16, + ov::element::Type_t::i8, + ov::element::Type_t::i4, + ov::element::Type_t::u64, + ov::element::Type_t::u32, + ov::element::Type_t::u16, + ov::element::Type_t::u8, + ov::element::Type_t::u4, + ov::element::Type_t::u1, + ov::element::Type_t::boolean, + ov::element::Type_t::dynamic}; + OPENVINO_SUPPRESS_DEPRECATED_START + element_types.emplace_back(element::undefined); + OPENVINO_SUPPRESS_DEPRECATED_END std::set result; for (const auto& element_type : element_types) { std::string element_name = element_type.get_type_name(); @@ -47,4 +48,4 @@ static auto element_type_names = get_element_type_names(); } // namespace op_conformance } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp index ca57c5dc93ee2d..85fb0ccf4d51a0 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_plugin/properties_tests.cpp @@ -511,12 +511,23 @@ TEST_P(OVCheckChangePropComplieModleGetPropTests_InferencePrecision, ChangeCorre OV_ASSERT_NO_THROW(default_property = core->get_property(target_device, ov::hint::inference_precision)); ASSERT_FALSE(default_property.empty()); - const std::vector ovElemTypes = { - ov::element::f64, ov::element::f32, ov::element::f16, ov::element::bf16, - 
ov::element::i64, ov::element::i32, ov::element::i16, ov::element::i8, ov::element::i4, - ov::element::u64, ov::element::u32, ov::element::u16, ov::element::u8, ov::element::u4, ov::element::u1, - ov::element::boolean, ov::element::undefined, ov::element::dynamic - }; + const std::vector ovElemTypes = {ov::element::f64, + ov::element::f32, + ov::element::f16, + ov::element::bf16, + ov::element::i64, + ov::element::i32, + ov::element::i16, + ov::element::i8, + ov::element::i4, + ov::element::u64, + ov::element::u32, + ov::element::u16, + ov::element::u8, + ov::element::u4, + ov::element::u1, + ov::element::boolean, + ov::element::dynamic}; bool any_supported = false; for (ov::element::Type type : ovElemTypes) { diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp index 1b4eb35689bd58..59ee374bec14ea 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/ov_subgraph.hpp @@ -65,9 +65,8 @@ class SubgraphBaseTest : public ov::test::TestsCommon { std::map, ov::Tensor> inputs; std::vector inputDynamicShapes; std::vector> targetStaticShapes; - ElementType inType = ov::element::undefined, - outType = ov::element::undefined, - inference_precision = ov::element::undefined; + ElementType inType = ov::element::dynamic, outType = ov::element::dynamic, + inference_precision = ov::element::dynamic; ov::CompiledModel compiledModel; ov::InferRequest inferRequest; diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp index f57d8f4caf89ac..8c38537a80f33b 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp @@ -34,10 +34,10 @@ static std::map custom_op_thresholds = { // { ov::op::v0::Add::get_type_info_static(), { 1e-7, 1e-4 }}, }; -std::pair -calculate_thresholds_by_model(const std::shared_ptr& model, - const std::shared_ptr& ref_model = nullptr, - const ov::element::Type& inference_precision = ov::element::undefined); +std::pair calculate_thresholds_by_model( + const std::shared_ptr& model, + const std::shared_ptr& ref_model = nullptr, + const ov::element::Type& inference_precision = ov::element::dynamic); } // namespace utils } // namespace test diff --git a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp index 00b5b10189b8e5..5dd5029f27976c 100644 --- a/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp +++ b/src/tests/functional/shared_test_classes/src/base/ov_subgraph.cpp @@ -273,7 +273,7 @@ void SubgraphBaseTest::configure_model() { { auto& params = function->get_parameters(); for (size_t i = 0; i < params.size(); i++) { - if (inType != ov::element::Type_t::undefined) { + if (inType != ov::element::Type_t::dynamic) { p.input(i).tensor().set_element_type(inType); } } @@ -283,7 +283,7 @@ void SubgraphBaseTest::configure_model() { { auto results = function->get_results(); for (size_t i = 0; i < results.size(); i++) { - if (outType != ov::element::Type_t::undefined) { + if (outType != 
ov::element::Type_t::dynamic) { p.output(i).tensor().set_element_type(outType); } } @@ -377,7 +377,7 @@ void SubgraphBaseTest::update_ref_model() { } const auto& outputs = functionRefs->outputs(); for (size_t i = 0; i < outputs.size(); ++i) { - if (outType != ElementType::undefined && outType != outputs[i].get_element_type()) { + if (outType != ElementType::dynamic && outType != outputs[i].get_element_type()) { p.output(i).tensor().set_element_type(outType); } } diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp index 3208ab16ec4ac1..561e751a69a485 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_convolution_backprop_data.cpp @@ -43,7 +43,7 @@ std::string QuantConvBackpropDataLayerTest::getTestCaseName(const testing::TestP void QuantConvBackpropDataLayerTest::SetUp() { quantConvBackpropDataSpecificParams groupConvBackpropDataParams; ov::Shape inputShape; - ov::element::Type element_type = ov::element::undefined; + ov::element::Type element_type = ov::element::dynamic; std::tie(groupConvBackpropDataParams, element_type, inputShape, targetDevice) = this->GetParam(); ov::op::PadType padType; std::vector kernel, stride, dilation; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp index 0e64399cd69494..a27908ea51a5c2 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution.cpp @@ -46,7 +46,7 @@ std::string QuantGroupConvLayerTest::getTestCaseName(const testing::TestParamInf void QuantGroupConvLayerTest::SetUp() { quantGroupConvSpecificParams groupConvParams; ov::Shape inputShape; - ov::element::Type element_type = ov::element::undefined; + ov::element::Type element_type = ov::element::dynamic; std::tie(groupConvParams, element_type, inputShape, targetDevice) = this->GetParam(); ov::op::PadType padType = ov::op::PadType::AUTO; ov::Shape kernel, stride, dilation; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp index 86ffe543a0bd60..035570402a4dbe 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/quantized_group_convolution_backprop_data.cpp @@ -45,7 +45,7 @@ std::string QuantGroupConvBackpropDataLayerTest::getTestCaseName(const testing:: void QuantGroupConvBackpropDataLayerTest::SetUp() { quantGroupConvBackpropDataSpecificParams groupConvBackpropDataParams; ov::Shape inputShape; - ov::element::Type element_type = ov::element::undefined; + ov::element::Type element_type = ov::element::dynamic; std::tie(groupConvBackpropDataParams, element_type, inputShape, targetDevice) = this->GetParam(); ov::op::PadType padType; ov::Shape kernel, stride, dilation; diff --git a/src/tests/functional/shared_test_classes/src/subgraph/shared_matmul_weights_decompression.cpp b/src/tests/functional/shared_test_classes/src/subgraph/shared_matmul_weights_decompression.cpp index 
e43d7bfffc4611..dc81b0a3d56ca4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/shared_matmul_weights_decompression.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/shared_matmul_weights_decompression.cpp @@ -52,7 +52,7 @@ std::shared_ptr SharedMatmulWeightsDecompression::initSubgraph( data_precision, weights_precision, decompression_precision, - ov::element::undefined, + ov::element::dynamic, transpose_weights, DecompressionType::full, decompression_subtract_type, @@ -127,4 +127,4 @@ void SharedMatmulWeightsDecompression::check_results() { } } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/tests/functional/shared_test_classes/src/subgraph/weights_decompression_builders.cpp b/src/tests/functional/shared_test_classes/src/subgraph/weights_decompression_builders.cpp index 818bf7740a4abf..dca5b315b687e4 100644 --- a/src/tests/functional/shared_test_classes/src/subgraph/weights_decompression_builders.cpp +++ b/src/tests/functional/shared_test_classes/src/subgraph/weights_decompression_builders.cpp @@ -132,7 +132,7 @@ std::shared_ptr initMatMulDecompressionSubgraph( } std::shared_ptr last_node = mul_parent; - const auto& scale_prc = scale_precision == ov::element::undefined ? decompression_precision : scale_precision; + const auto& scale_prc = scale_precision == ov::element::dynamic ? decompression_precision : scale_precision; if (decompression_multiply_type != DecompressionType::empty) { auto multiply_shape = decompression_multiply_type == DecompressionType::full ? scaleshift_const_shape : ov::Shape({}); diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp index 39bbf23c9ecfde..2a78bf5f97b99b 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/builders.hpp @@ -44,8 +44,10 @@ std::shared_ptr makeElementwise(const std::shared_ptr data, cons description.values); std::shared_ptr operation; - if ((description.outPrecision == ov::element::undefined) || + OPENVINO_SUPPRESS_DEPRECATED_START + if ((description.outPrecision == ov::element::undefined) || (description.outPrecision == ov::element::dynamic) || (description.outPrecision == data->get_output_element_type(0))) { + OPENVINO_SUPPRESS_DEPRECATED_END operation = std::make_shared(data, operationConst); } else { operation = std::make_shared>( diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp index e2965ef0d8f425..b52ce6a28d8119 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/dequantization_operations.hpp @@ -23,7 +23,7 @@ class DequantizationOperations { return equal(value); } - ov::element::Type outPrecision = ov::element::undefined; + ov::element::Type outPrecision = ov::element::dynamic; bool addDequantizationAttribute = true; private: bool isEmpty; @@ -35,16 +35,15 @@ class DequantizationOperations { Subtract(const float value, const bool toRemove = true); Subtract(const std::vector& values); Subtract(const std::vector& values, const ov::element::Type outPrecision); - Subtract( - const std::vector& values, - const ov::element::Type outPrecision, - const ov::Shape& 
constantShape, - const bool toRemove = false, - const size_t constantIndex = 1ul, - const ov::element::Type constantPrecision = ov::element::undefined, - const bool addConvert = false, - const ov::Node::RTMap& attributes = {}, - const ov::Node::RTMap& convertAttributes = {}); + Subtract(const std::vector& values, + const ov::element::Type outPrecision, + const ov::Shape& constantShape, + const bool toRemove = false, + const size_t constantIndex = 1ul, + const ov::element::Type constantPrecision = ov::element::dynamic, + const bool addConvert = false, + const ov::Node::RTMap& attributes = {}, + const ov::Node::RTMap& convertAttributes = {}); bool empty() const noexcept; bool equal(const DequantizationOperations::Subtract& value) const noexcept; bool operator==(const Subtract& value) const noexcept { @@ -57,11 +56,11 @@ class DequantizationOperations { Subtract& setAddConvert(bool value); std::vector values; - ov::element::Type outPrecision = ov::element::undefined; + ov::element::Type outPrecision = ov::element::dynamic; ov::Shape constantShape; bool constantShapeIsDefined = false; size_t constantIndex = 1ul; - ov::element::Type constantPrecision = ov::element::undefined; + ov::element::Type constantPrecision = ov::element::dynamic; bool addConvert = false; ov::Node::RTMap attributes; ov::Node::RTMap convertAttributes; @@ -76,14 +75,13 @@ class DequantizationOperations { Multiply(const float value); Multiply(const std::vector& values); Multiply(const std::vector& values, const ov::element::Type outPrecision); - Multiply( - const std::vector& values, - const ov::element::Type outPrecision, - const ov::Shape& constantShape, - const bool toRemove = false, - const size_t constantIndex = 1ul, - const ov::element::Type constantPrecision = ov::element::undefined, - const bool addConvert = false); + Multiply(const std::vector& values, + const ov::element::Type outPrecision, + const ov::Shape& constantShape, + const bool toRemove = false, + const size_t constantIndex = 1ul, + const ov::element::Type constantPrecision = ov::element::dynamic, + const bool addConvert = false); bool empty() const noexcept; bool equal(const DequantizationOperations::Multiply& value) const noexcept; bool operator==(const Multiply& value) const noexcept { @@ -93,11 +91,11 @@ class DequantizationOperations { Multiply& setAddConvert(bool value); std::vector values; - ov::element::Type outPrecision = ov::element::undefined; + ov::element::Type outPrecision = ov::element::dynamic; ov::Shape constantShape; bool constantShapeIsDefined = false; size_t constantIndex = 1ul; - ov::element::Type constantPrecision = ov::element::undefined; + ov::element::Type constantPrecision = ov::element::dynamic; bool addConvert = false; private: @@ -124,7 +122,7 @@ inline std::ostream& operator<<(std::ostream& out, const DequantizationOperation if (convert.empty()) { return out << "{}"; } - return out << "_" << (convert.outPrecision != ov::element::undefined ? convert.outPrecision.get_type_name() : ""); + return out << "_" << (convert.outPrecision != ov::element::dynamic ? 
convert.outPrecision.get_type_name() : ""); } inline std::ostream& operator<<(std::ostream& out, const DequantizationOperations::Subtract& subtract) { diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp index 095f77c5b756bc..4380f5f8fde916 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_data.hpp @@ -19,15 +19,14 @@ class FakeQuantizeOnData { public: FakeQuantizeOnData(); - FakeQuantizeOnData( - const uint64_t quantizationLevel, - const ov::Shape& constantShape, - const std::vector& inputLowValues, - const std::vector& inputHighValues, - const std::vector& outputLowValues, - const std::vector& outputHighValues, - const ov::element::Type outputPrecision = ov::element::undefined, - const std::vector& attributes = {}); + FakeQuantizeOnData(const uint64_t quantizationLevel, + const ov::Shape& constantShape, + const std::vector& inputLowValues, + const std::vector& inputHighValues, + const std::vector& outputLowValues, + const std::vector& outputHighValues, + const ov::element::Type outputPrecision = ov::element::dynamic, + const std::vector& attributes = {}); virtual ~FakeQuantizeOnData(); @@ -60,29 +59,25 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeOnData& dat if (data.empty()) { return out << "{}"; } - return out << "level=" << data.quantizationLevel << - "_shape=" << data.constantShape << - "_input_low=" << data.inputLowValues << - "_input_high=" << data.inputHighValues << - "_output_low=" << data.outputLowValues << - "_output_high" << data.outputHighValues << - "_precision=" << (data.outputPrecision == ov::element::undefined ? "" : data.outputPrecision.get_type_name()); + return out << "level=" << data.quantizationLevel << "_shape=" << data.constantShape + << "_input_low=" << data.inputLowValues << "_input_high=" << data.inputHighValues + << "_output_low=" << data.outputLowValues << "_output_high" << data.outputHighValues << "_precision=" + << (data.outputPrecision == ov::element::dynamic ? "" : data.outputPrecision.get_type_name()); } class FakeQuantizeOnDataWithConstant { public: FakeQuantizeOnDataWithConstant(); - FakeQuantizeOnDataWithConstant( - const uint64_t quantizationLevel, - const std::vector& constantShapes, - const std::vector& inputLowValues, - const std::vector& inputHighValues, - const std::vector& outputLowValues, - const std::vector& outputHighValues, - const ov::element::Type outputPrecision = ov::element::undefined, - const std::vector& attributes = {}, - const bool addConverts = false); + FakeQuantizeOnDataWithConstant(const uint64_t quantizationLevel, + const std::vector& constantShapes, + const std::vector& inputLowValues, + const std::vector& inputHighValues, + const std::vector& outputLowValues, + const std::vector& outputHighValues, + const ov::element::Type outputPrecision = ov::element::dynamic, + const std::vector& attributes = {}, + const bool addConverts = false); virtual ~FakeQuantizeOnDataWithConstant(); virtual bool empty() const; @@ -102,13 +97,11 @@ inline std::ostream& operator<<(std::ostream& out, const FakeQuantizeOnDataWithC if (data.empty()) { return out << "{}"; } - return out << "level=" << data.quantizationLevel << - "_shape=" <<(data.constantShapes.empty() ? 
ov::Shape{} : data.constantShapes[0]) <<
-        "_input_low=" << data.inputLowValues <<
-        "_input_high=" << data.inputHighValues <<
-        "_output_low=" << data.outputLowValues <<
-        "_output_high=" << data.outputHighValues <<
-        "_precision=" << (data.outputPrecision == ov::element::undefined ? "" : data.outputPrecision.get_type_name());
+    return out << "level=" << data.quantizationLevel
+               << "_shape=" << (data.constantShapes.empty() ? ov::Shape{} : data.constantShapes[0])
+               << "_input_low=" << data.inputLowValues << "_input_high=" << data.inputHighValues
+               << "_output_low=" << data.outputLowValues << "_output_high=" << data.outputHighValues << "_precision="
+               << (data.outputPrecision == ov::element::dynamic ? "" : data.outputPrecision.get_type_name());
 }
 } // namespace subgraph
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp
index e0fd1890b7dc20..511ec7fdbb0823 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/common/fake_quantize_on_weights.hpp
@@ -17,14 +17,13 @@ class FakeQuantizeOnWeights: public FakeQuantizeOnData {
 public:
     FakeQuantizeOnWeights();
 
-    FakeQuantizeOnWeights(
-        const uint64_t quantizationLevel,
-        const ov::Shape& constantShape,
-        const std::vector& inputLowValues,
-        const std::vector& inputHighValues,
-        const std::vector& outputLowValues,
-        const std::vector& outputHighValues,
-        const ov::element::Type outputPrecision = ov::element::undefined);
+    FakeQuantizeOnWeights(const uint64_t quantizationLevel,
+                          const ov::Shape& constantShape,
+                          const std::vector& inputLowValues,
+                          const std::vector& inputHighValues,
+                          const std::vector& outputLowValues,
+                          const std::vector& outputHighValues,
+                          const ov::element::Type outputPrecision = ov::element::dynamic);
 
     virtual ~FakeQuantizeOnWeights();
 
diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp
index 22394945641231..5e41e0ea570c5a 100644
--- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp
+++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/concat.hpp
@@ -16,14 +16,13 @@ namespace subgraph {
 
 class ConcatFunction {
 public:
-    static std::shared_ptr get(
-        const ov::element::Type inputPrecision,
-        const ov::element::Type deqPrecision,
-        const std::vector& inputShapes,
-        const std::vector& dequantizationsBefore,
-        const std::int64_t concatAxis,
-        const ov::element::Type precisionAfter = ov::element::undefined,
-        const DequantizationOperations& dequantizationAfter = {});
+    static std::shared_ptr get(const ov::element::Type inputPrecision,
+                               const ov::element::Type deqPrecision,
+                               const std::vector& inputShapes,
+                               const std::vector& dequantizationsBefore,
+                               const std::int64_t concatAxis,
+                               const ov::element::Type precisionAfter = ov::element::dynamic,
+                               const DequantizationOperations& dequantizationAfter = {});
 
     static std::shared_ptr getOriginal(
         const ov::element::Type precision,
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp
index bb93c77aad1b39..0a675a52499c0e 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/add.cpp
@@ -8,25 +8,19 @@ namespace ov {
 namespace builder {
 namespace subgraph {
 
-Add::Add() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false)
-{}
-
-Add::Add(const float value) :
-    isEmpty(false),
-    values({ value }),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
-
-Add::Add(const std::vector& values) :
-    isEmpty(values.empty()),
-    values(values),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+Add::Add() : isEmpty(true), outPrecision(ov::element::dynamic), constantShapeIsDefined(false) {}
+
+Add::Add(const float value)
+    : isEmpty(false),
+      values({value}),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
+
+Add::Add(const std::vector& values)
+    : isEmpty(values.empty()),
+      values(values),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
 Add::Add(const std::vector& values, const ov::element::Type outPrecision) :
     isEmpty(false),
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp
index 0c69a91226cd00..b1dc78667368c5 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/builders.cpp
@@ -52,7 +52,7 @@ std::shared_ptr makeDequantization(
     }
 
     std::shared_ptr subtractConst = std::make_shared(
-        dequantizationOperations.subtract.constantPrecision != ov::element::undefined
+        dequantizationOperations.subtract.constantPrecision != ov::element::dynamic
             ? dequantizationOperations.subtract.constantPrecision
             : parent.get_element_type(),
         shape,
@@ -61,7 +61,7 @@ std::shared_ptr makeDequantization(
     if (dequantizationOperations.subtract.addConvert) {
         std::shared_ptr subtractConstConvert = std::make_shared(
             subtractConst,
-            dequantizationOperations.subtract.outPrecision == ov::element::undefined
+            dequantizationOperations.subtract.outPrecision == ov::element::dynamic
                 ? parent.get_element_type()
                 : dequantizationOperations.subtract.outPrecision);
 
@@ -76,9 +76,9 @@ std::shared_ptr makeDequantization(
 
         ov::Output leftBranchParent = dequantizationOperations.subtract.constantIndex == 1 ? parent : subtractConst;
         ov::Output rightBranchParent = dequantizationOperations.subtract.constantIndex == 1 ? subtractConst : parent;
-        if (((dequantizationOperations.subtract.outPrecision == ov::element::undefined) ||
+        if (((dequantizationOperations.subtract.outPrecision == ov::element::dynamic) ||
             (dequantizationOperations.subtract.outPrecision == parent.get_element_type())) &&
-            (((dequantizationOperations.subtract.constantPrecision == ov::element::undefined) ||
+            (((dequantizationOperations.subtract.constantPrecision == ov::element::dynamic) ||
             (dequantizationOperations.subtract.constantPrecision == parent.get_element_type())) ||
             dequantizationOperations.subtract.addConvert)) {
             subtract = std::make_shared(leftBranchParent, rightBranchParent);
@@ -133,23 +133,23 @@ std::shared_ptr makeMultiply(const ov::Output& parent, const Dequant
     }
 
     std::shared_ptr constant = std::make_shared(
-        multiply.constantPrecision != ov::element::undefined ? multiply.constantPrecision : parent.get_element_type(),
+        multiply.constantPrecision != ov::element::dynamic ? multiply.constantPrecision : parent.get_element_type(),
         shape,
         values);
 
     if (multiply.addConvert) {
         constant = std::make_shared(
             constant,
-            multiply.outPrecision == ov::element::undefined ? parent.get_element_type() : multiply.outPrecision);
+            multiply.outPrecision == ov::element::dynamic ? parent.get_element_type() : multiply.outPrecision);
     }
 
     ov::Output leftBranchParent = multiply.constantIndex == 1 ? parent : constant;
     ov::Output rightBranchParent = multiply.constantIndex == 1 ? constant : parent;
 
     std::shared_ptr newMultiply;
-    if (((multiply.outPrecision == ov::element::undefined) || (multiply.outPrecision == parent.get_element_type())) &&
-        ((multiply.constantPrecision == ov::element::undefined) ||
-        (multiply.constantPrecision == parent.get_element_type())) ||
-        multiply.addConvert) {
+    if (((multiply.outPrecision == ov::element::dynamic) || (multiply.outPrecision == parent.get_element_type())) &&
+            ((multiply.constantPrecision == ov::element::dynamic) ||
+             (multiply.constantPrecision == parent.get_element_type())) ||
+        multiply.addConvert) {
         newMultiply = std::make_shared(leftBranchParent, rightBranchParent);
     } else {
         // TODO: use templates
@@ -225,7 +225,7 @@ std::shared_ptr makeFakeQuantizeTypeRelaxed(
     const std::shared_ptr fq = makeFakeQuantize(output, precision, fqOnData);
     return std::make_shared>(
         *fq,
-        fqOnData.outputPrecision == ov::element::undefined ? precision : fqOnData.outputPrecision);
+        fqOnData.outputPrecision == ov::element::dynamic ? precision : fqOnData.outputPrecision);
 }
 
 std::shared_ptr makeFakeQuantize(
@@ -318,7 +318,7 @@ std::shared_ptr makeFakeQuantizeTypeRelaxed(
     const std::shared_ptr fq = makeFakeQuantize(input, constantPrecision, fqOnData);
     return std::make_shared>(
         *fq,
-        fqOnData.outputPrecision == ov::element::undefined ? constantPrecision : fqOnData.outputPrecision);
+        fqOnData.outputPrecision == ov::element::dynamic ? constantPrecision : fqOnData.outputPrecision);
 }
 
 void addAttributes(std::vector> nodes, std::vector attributes) {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp
index 73414b6a8c8c20..aa87b2ed294c1a 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/constant.cpp
@@ -8,25 +8,19 @@ namespace ov {
 namespace builder {
 namespace subgraph {
 
-Constant::Constant() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined),
-    shapeIsDefined(false)
-{}
-
-Constant::Constant(const float value) :
-    isEmpty(false),
-    values({ value }),
-    outPrecision(ov::element::undefined),
-    shapeIsDefined(false) {
-}
-
-Constant::Constant(const std::vector& values) :
-    isEmpty(values.empty()),
-    values(values),
-    outPrecision(ov::element::undefined),
-    shapeIsDefined(false) {
-}
+Constant::Constant() : isEmpty(true), outPrecision(ov::element::dynamic), shapeIsDefined(false) {}
+
+Constant::Constant(const float value)
+    : isEmpty(false),
+      values({value}),
+      outPrecision(ov::element::dynamic),
+      shapeIsDefined(false) {}
+
+Constant::Constant(const std::vector& values)
+    : isEmpty(values.empty()),
+      values(values),
+      outPrecision(ov::element::dynamic),
+      shapeIsDefined(false) {}
 
 Constant::Constant(const std::vector& values, const ov::element::Type outPrecision) :
     isEmpty(false),
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp
index 8ef6bb03792c7e..025ad5e564d544 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/dequantization_operations.cpp
@@ -9,10 +9,7 @@ namespace ov {
 namespace builder {
 namespace subgraph {
 
-DequantizationOperations::Convert::Convert() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined)
-{}
+DequantizationOperations::Convert::Convert() : isEmpty(true), outPrecision(ov::element::dynamic) {}
 
 DequantizationOperations::Convert::Convert(const ov::element::Type outPrecision, const bool toRemove) :
     isEmpty(false),
@@ -27,25 +24,22 @@ bool DequantizationOperations::Convert::equal(const DequantizationOperations::Co
     return (this->outPrecision == value.outPrecision) && (this->addDequantizationAttribute == value.addDequantizationAttribute);
 }
 
-DequantizationOperations::Subtract::Subtract() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false)
-{}
+DequantizationOperations::Subtract::Subtract()
+    : isEmpty(true),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
-DequantizationOperations::Subtract::Subtract(const float value, const bool toRemove) :
-    isEmpty(false),
-    values({ value }),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+DequantizationOperations::Subtract::Subtract(const float value, const bool toRemove)
+    : isEmpty(false),
+      values({value}),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
-DequantizationOperations::Subtract::Subtract(const std::vector& values) :
-    isEmpty(values.empty()),
-    values(values),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+DequantizationOperations::Subtract::Subtract(const std::vector& values)
+    : isEmpty(values.empty()),
+      values(values),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
 DequantizationOperations::Subtract::Subtract(
     const std::vector& values,
@@ -101,25 +95,22 @@ DequantizationOperations::Subtract& DequantizationOperations::Subtract::setAddCo
     return *this;
 }
 
-DequantizationOperations::Multiply::Multiply() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+DequantizationOperations::Multiply::Multiply()
+    : isEmpty(true),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
-DequantizationOperations::Multiply::Multiply(const float value) :
-    isEmpty(false),
-    values({ value }),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+DequantizationOperations::Multiply::Multiply(const float value)
+    : isEmpty(false),
+      values({value}),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
-DequantizationOperations::Multiply::Multiply(const std::vector& values) :
-    isEmpty(values.empty()),
-    values(values),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+DequantizationOperations::Multiply::Multiply(const std::vector& values)
+    : isEmpty(values.empty()),
+      values(values),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
 DequantizationOperations::Multiply::Multiply(const std::vector& values, const ov::element::Type outPrecision) :
     isEmpty(false),
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp
index c5263814933a1b..2639194f63588c 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/fake_quantize_on_data.cpp
@@ -46,9 +46,9 @@ bool FakeQuantizeOnData::empty() const {
         outputHighValues.empty();
 }
 
-FakeQuantizeOnDataWithConstant::FakeQuantizeOnDataWithConstant() :
-    quantizationLevel(0),
-    outputPrecision(ov::element::undefined) {}
+FakeQuantizeOnDataWithConstant::FakeQuantizeOnDataWithConstant()
+    : quantizationLevel(0),
+      outputPrecision(ov::element::dynamic) {}
 
 FakeQuantizeOnDataWithConstant::FakeQuantizeOnDataWithConstant(
     const uint64_t quantizationLevel,
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp
index e6f7dbeef5bc5c..9930f458f693d5 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/common/multiply.cpp
@@ -9,25 +9,19 @@ namespace ov {
 namespace builder {
 namespace subgraph {
 
-Multiply::Multiply() :
-    isEmpty(true),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
-
-Multiply::Multiply(const float value) :
-    isEmpty(false),
-    values({ value }),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
-
-Multiply::Multiply(const std::vector& values) :
-    isEmpty(values.empty()),
-    values(values),
-    outPrecision(ov::element::undefined),
-    constantShapeIsDefined(false) {
-}
+Multiply::Multiply() : isEmpty(true), outPrecision(ov::element::dynamic), constantShapeIsDefined(false) {}
+
+Multiply::Multiply(const float value)
+    : isEmpty(false),
+      values({value}),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
+
+Multiply::Multiply(const std::vector& values)
+    : isEmpty(values.empty()),
+      values(values),
+      outPrecision(ov::element::dynamic),
+      constantShapeIsDefined(false) {}
 
 Multiply::Multiply(const std::vector& values, const ov::element::Type outPrecision) :
     isEmpty(false),
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp
index 9f4c6e956383e5..a3e4f0b42b412f 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/concat.cpp
@@ -51,7 +51,8 @@ std::shared_ptr ConcatFunction::get(
     }
 
     const auto concat = std::make_shared(concatInputs, concatAxis);
-    if (precisionAfter != ov::element::undefined && (concat->get_output_element_type(0).is_real() ^ precisionAfter.is_real())) {
+    if (precisionAfter != ov::element::dynamic &&
+        (concat->get_output_element_type(0).is_real() ^ precisionAfter.is_real())) {
         throw std::runtime_error("Concat builder: requested precision after operation could't be set");
     }
 
@@ -925,7 +926,7 @@ std::shared_ptr ConcatFunction::getReference(
         throw std::runtime_error("FakeQuantize expected precisions are different");
     }
     const ov::element::Type fqOnDataPrecision = fqOnData1.outputPrecision;
-    if (fqOnDataPrecision != ov::element::undefined) {
+    if (fqOnDataPrecision != ov::element::dynamic) {
         if (fakeQuantize1->get_output_element_type(0) != fakeQuantize2->get_output_element_type(0)) {
             throw std::runtime_error("FakeQuantize operation precisions are different");
         }
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp
index ad438b0e453b29..2f4c457a36b5ec 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fake_quantize_and_convolution.cpp
@@ -117,9 +117,9 @@ std::shared_ptr FakeQuantizeAndConvolutionFunction::get(
     std::shared_ptr parentOnActivation = input;
     {
         if (!fqOnData.empty()) {
-            parentOnActivation = fqOnData.outputPrecision == element::undefined ?
-                ov::builder::subgraph::makeFakeQuantize(input, precision, fqOnData) :
-                ov::builder::subgraph::makeFakeQuantizeTypeRelaxed(input, precision, fqOnData);
+            parentOnActivation = fqOnData.outputPrecision == element::dynamic
+                                     ? ov::builder::subgraph::makeFakeQuantize(input, precision, fqOnData)
+                                     : ov::builder::subgraph::makeFakeQuantizeTypeRelaxed(input, precision, fqOnData);
         }
 
         if (!convertOnData.empty()) {
@@ -152,9 +152,14 @@ std::shared_ptr FakeQuantizeAndConvolutionFunction::get(
             constantOnWeights.values);
 
         if (!fqOnWeights.empty()) {
-            parentOnWeights = fqOnWeights.outputPrecision == element::undefined ?
-                ov::builder::subgraph::makeFakeQuantize(parentOnWeights, parentOnWeights->output(0).get_element_type(), fqOnWeights) :
-                ov::builder::subgraph::makeFakeQuantizeTypeRelaxed(parentOnWeights, parentOnWeights->output(0).get_element_type(), fqOnWeights);
+            parentOnWeights =
+                fqOnWeights.outputPrecision == element::dynamic
+                    ? ov::builder::subgraph::makeFakeQuantize(parentOnWeights,
+                                                              parentOnWeights->output(0).get_element_type(),
+                                                              fqOnWeights)
+                    : ov::builder::subgraph::makeFakeQuantizeTypeRelaxed(parentOnWeights,
+                                                                         parentOnWeights->output(0).get_element_type(),
+                                                                         fqOnWeights);
         }
 
         if (!convertOnWeights.empty()) {
diff --git a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
index 50b1e33aeb6009..6f5df17ea5d5d4 100644
--- a/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
+++ b/src/tests/ov_helpers/ov_lpt_models/src/fuse_fake_quantize.cpp
@@ -102,7 +102,7 @@ std::shared_ptr make_convolution(
     auto fqOnDataCopy = fqOnData;
     fqOnDataCopy.outputHighValues = {255.f};
     fqOnDataCopy.outputPrecision =
-        fqOnData.outputPrecision == ov::element::undefined ? ov::element::u8 : fqOnData.outputPrecision;
+        fqOnData.outputPrecision == ov::element::dynamic ? ov::element::u8 : fqOnData.outputPrecision;
     std::shared_ptr lastNode =
         makeFakeQuantizeTypeRelaxed(lastDequantization, precisionFqOnData, fqOnDataCopy);
     lastNode = makeDequantization(lastNode,
diff --git a/src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp b/src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp
index d469b3740542c4..76a5344f67edaa 100644
--- a/src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp
+++ b/src/tests/ov_helpers/ov_snippets_models/include/two_binary_ops.hpp
@@ -19,18 +19,18 @@ namespace snippets {
  */
 class BaseDummyOperation : public ov::op::Op {
 public:
-    BaseDummyOperation(
-        const Output& arg0,
-        const Output& arg1,
-        const element::Type& output_type = element::undefined) : Op({ arg0, arg1 }), output_type(output_type) {
+    BaseDummyOperation(const Output& arg0,
+                       const Output& arg1,
+                       const element::Type& output_type = element::dynamic)
+        : Op({arg0, arg1}),
+          output_type(output_type) {
         constructor_validate_and_infer_types();
     }
 
     void validate_and_infer_types() override {
-        set_output_type(
-            0,
-            output_type == element::undefined ? get_input_element_type(0) : output_type,
-            get_input_partial_shape(0));
+        set_output_type(0,
+                        output_type == element::dynamic ? get_input_element_type(0) : output_type,
+                        get_input_partial_shape(0));
     }
 
     element::Type get_output_type() const { return output_type; }
@@ -47,10 +47,10 @@ class DummyOperation1 : public BaseDummyOperation {
 public:
     OPENVINO_OP("DummyOperation1", "test::snippets");
 
-    DummyOperation1(
-        const Output& arg0,
-        const Output& arg1,
-        const element::Type& output_type = element::undefined) : BaseDummyOperation(arg0, arg1, output_type) {
+    DummyOperation1(const Output& arg0,
+                    const Output& arg1,
+                    const element::Type& output_type = element::dynamic)
+        : BaseDummyOperation(arg0, arg1, output_type) {
         constructor_validate_and_infer_types();
     }
 
@@ -71,10 +71,10 @@ class DummyOperation2 : public BaseDummyOperation {
 public:
     OPENVINO_OP("DummyOperation2", "test::snippets");
 
-    DummyOperation2(
-        const Output& arg0,
-        const Output& arg1,
-        const element::Type& output_type = element::undefined) : BaseDummyOperation(arg0, arg1, output_type) {
+    DummyOperation2(const Output& arg0,
+                    const Output& arg1,
+                    const element::Type& output_type = element::dynamic)
+        : BaseDummyOperation(arg0, arg1, output_type) {
         constructor_validate_and_infer_types();
     }
 
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
index be736a1c1fd66c..ecc19c92931129 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/precision_propagation.cpp
@@ -21,10 +21,11 @@ std::shared_ptr PrecisionPropagationAddFunction::get(
     const std::pair& convertion_before_op2_2,
     const element::Type& convertion_after_op2,
     const element::Type& convertion_before_result) {
-    const auto create_convert = [](std::shared_ptr parent, const element::Type convertion_type) -> std::shared_ptr {
-        return convertion_type == element::undefined
-            ? std::dynamic_pointer_cast(parent)
-            : std::make_shared(parent, convertion_type);
+    const auto create_convert = [](std::shared_ptr parent,
+                                   const element::Type convertion_type) -> std::shared_ptr {
+        return convertion_type == element::dynamic
+                   ? std::dynamic_pointer_cast(parent)
+                   : std::make_shared(parent, convertion_type);
     };
 
     const auto make_branch = [&create_convert](
@@ -48,10 +49,9 @@ std::shared_ptr PrecisionPropagationAddFunction::get(
 
         parent = create_convert(parent, convertion_before_op2_1);
 
-        const auto maximum_in2_type = convertion_before_op2_2.second == element::undefined ?
-            constant_precision :
-            convertion_before_op2_2.second;
-        if ((convertion_before_op2_2.first == element::undefined) &&
+        const auto maximum_in2_type =
+            convertion_before_op2_2.second == element::dynamic ? constant_precision : convertion_before_op2_2.second;
+        if ((convertion_before_op2_2.first == element::dynamic) &&
             (parent->get_output_element_type(0) != maximum_in2_type)) {
             parent = std::make_shared(parent, maximum_in2_type);
         }
diff --git a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
index 942a4a5e3b1dc8..753af6747c1a20 100644
--- a/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
+++ b/src/tests/ov_helpers/ov_snippets_models/src/two_binary_ops.cpp
@@ -21,10 +21,11 @@ std::shared_ptr TwoBinaryOpsFunction::get(
     const std::pair& convertion_before_op2_2,
     const element::Type& convertion_after_op2,
    const element::Type& convertion_before_result) {
-    const auto create_convert = [](std::shared_ptr parent, const element::Type convertion_type) -> std::shared_ptr {
-        return convertion_type == element::undefined
-            ? std::dynamic_pointer_cast(parent)
-            : std::make_shared(parent, convertion_type);
+    const auto create_convert = [](std::shared_ptr parent,
+                                   const element::Type convertion_type) -> std::shared_ptr {
+        return convertion_type == element::dynamic
+                   ? std::dynamic_pointer_cast(parent)
+                   : std::make_shared(parent, convertion_type);
     };
 
     const auto make_branch = [&create_convert](
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
index 8021b0cc2a0de9..64c9b65c9bcf3b 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/ov_tensor_utils.hpp
@@ -188,12 +188,12 @@ namespace tensor_comparation {
double calculate_threshold(const double abs_threshold, const double rel_threshold, const double ref_value);
 
 double calculate_default_abs_threshold(const ov::element::Type& expected_type,
-                                       const ov::element::Type& actual_type = ov::element::undefined,
-                                       const ov::element::Type& inference_precision = ov::element::undefined);
+                                       const ov::element::Type& actual_type = ov::element::dynamic,
+                                       const ov::element::Type& inference_precision = ov::element::dynamic);
 
 double calculate_default_rel_threshold(const ov::element::Type& expected_type,
-                                       const ov::element::Type& actual_type = ov::element::undefined,
-                                       const ov::element::Type& inference_precision = ov::element::undefined);
+                                       const ov::element::Type& actual_type = ov::element::dynamic,
+                                       const ov::element::Type& inference_precision = ov::element::dynamic);
 } // namespace tensor_comparation
 
 // function to compare tensors using different metrics:
@@ -220,7 +220,7 @@ inline void compare(const ov::Tensor& expected,
                     const double rel_threshold = -1,
                     const double topk_threshold = 1.f,
                     const double mvn_threshold = 1.f) {
-    compare(expected, actual, ov::element::undefined, abs_threshold, rel_threshold, topk_threshold, mvn_threshold);
+    compare(expected, actual, ov::element::dynamic, abs_threshold, rel_threshold, topk_threshold, mvn_threshold);
 }
 
 // todo: replace this function by `compare(expected, actual)`
diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/type_ranges.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/type_ranges.hpp
index 7dc5841869a493..e9ff844de75133 100644
--- a/src/tests/test_utils/common_test_utils/include/common_test_utils/type_ranges.hpp
+++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/type_ranges.hpp
@@ -68,11 +68,6 @@ static ov::test::utils::InputGenerateData get_range_by_type(
     }
 
     switch (elemType) {
-    case (ov::element::Type_t::undefined): {
-        inData.start_from = min_start;
-        inData.range = max_range_limit;
-        break;
-    }
     case (ov::element::Type_t::dynamic): {
         inData.start_from = min_start;
         inData.range = max_range_limit;