From ed52ac5b39f573ce9cb25d5c652252aef0fa6d91 Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Thu, 16 Jan 2025 09:29:30 +0000 Subject: [PATCH 01/97] NPUW: Fix BF16 tensor collision in weight bank in new LLM pipeline (#28473) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp index fb31f7ed0770bb..6e3cb68e2f1ad2 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/llm_compiled_model.cpp @@ -15,6 +15,7 @@ #include "openvino/pass/validate.hpp" #include "openvino/runtime/iasync_infer_request.hpp" #include "serialization.hpp" +#include "transformations/convert_precision.hpp" namespace opp = ov::pass::pattern; class TransposeValueTensors : public ov::pass::MatcherPass { @@ -457,6 +458,8 @@ ov::npuw::LLMCompiledModel::LLMCompiledModel(const std::shared_ptr& m auto kvcache_model = model->clone(); LOG_DEBUG("2. Transform kvcache model from stateful to stateless."); ov::pass::StatefulToStateless().run_on_model(kvcache_model); + LOG_DEBUG(" ...also convert BF16 to FP16"); + ov::pass::ConvertPrecision(ov::element::bf16, ov::element::f16).run_on_model(kvcache_model); LOG_DEBUG("3. 
Creating prefill model as clone of transformed kvcache one."); auto prefill_model = kvcache_model->clone(); prefill_model->set_friendly_name(kvcache_model->get_friendly_name() + "_prefill"); From 657be955c93a362dce2dacf88785d85b9f41dea2 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 16 Jan 2025 13:47:24 +0400 Subject: [PATCH 02/97] Added missed space in error message (#28478) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- src/core/src/pass/sdpa_to_paged_attention.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/src/pass/sdpa_to_paged_attention.cpp b/src/core/src/pass/sdpa_to_paged_attention.cpp index c239ce5cc27a2c..2ccd19ca3e1fc3 100644 --- a/src/core/src/pass/sdpa_to_paged_attention.cpp +++ b/src/core/src/pass/sdpa_to_paged_attention.cpp @@ -40,7 +40,7 @@ bool ov::pass::SDPAToPagedAttention::run_on_model(const std::shared_ptr(model), - "No ScaledDotProductAttention operation observed in the graph, cannot perform" + "No ScaledDotProductAttention operation observed in the graph, cannot perform " "the SDPAToPagedAttention transformation."); auto max_context_len = setName(std::make_shared(element::i32, PartialShape{}), "max_context_len"); From e13b4c8e042b41d3115d00db20996c5f8082d772 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 16 Jan 2025 10:48:12 +0100 Subject: [PATCH 03/97] [DOCS] add images to testdrive article (#28451) --- .../assets/images/TestDrive_geti_download.gif | 3 +++ .../assets/images/TestDrive_llm_import.gif | 3 +++ .../assets/images/TestDrive_llm_metrics.gif | 3 +++ .../images/TestDrive_llm_model_chat.gif | 3 +++ .../openvino-test-drive.rst | 19 ++++++++++++++++--- 5 files changed, 28 insertions(+), 3 deletions(-) create mode 100644 docs/articles_en/assets/images/TestDrive_geti_download.gif create mode 100644 docs/articles_en/assets/images/TestDrive_llm_import.gif create mode 100644 docs/articles_en/assets/images/TestDrive_llm_metrics.gif create mode 100644 
docs/articles_en/assets/images/TestDrive_llm_model_chat.gif diff --git a/docs/articles_en/assets/images/TestDrive_geti_download.gif b/docs/articles_en/assets/images/TestDrive_geti_download.gif new file mode 100644 index 00000000000000..09ea66897aeb52 --- /dev/null +++ b/docs/articles_en/assets/images/TestDrive_geti_download.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aaa98d9bbc6e3452b0563787b1a493f1cd6319ad726d7778dd663958b2c6d30 +size 3244257 diff --git a/docs/articles_en/assets/images/TestDrive_llm_import.gif b/docs/articles_en/assets/images/TestDrive_llm_import.gif new file mode 100644 index 00000000000000..87b89893c9ab2b --- /dev/null +++ b/docs/articles_en/assets/images/TestDrive_llm_import.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a435731a47634f32c954fbef89c051a4f4204579bd8b9ebe834c7b95e751c9f3 +size 1920154 diff --git a/docs/articles_en/assets/images/TestDrive_llm_metrics.gif b/docs/articles_en/assets/images/TestDrive_llm_metrics.gif new file mode 100644 index 00000000000000..325f32493cc7e4 --- /dev/null +++ b/docs/articles_en/assets/images/TestDrive_llm_metrics.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3441b7a18da12e3b00a787699862511b332cd803a2a81a6b5e436a718d18a53c +size 318025 diff --git a/docs/articles_en/assets/images/TestDrive_llm_model_chat.gif b/docs/articles_en/assets/images/TestDrive_llm_model_chat.gif new file mode 100644 index 00000000000000..3620c55f94578a --- /dev/null +++ b/docs/articles_en/assets/images/TestDrive_llm_model_chat.gif @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f34b14de1e92b72e87290e3cee09b56a25cb0a03c1db8710ef993440183d69eb +size 387028 diff --git a/docs/articles_en/documentation/openvino-ecosystem/openvino-test-drive.rst b/docs/articles_en/documentation/openvino-ecosystem/openvino-test-drive.rst index 527a01bf38a6cf..602a2b8ec24eb2 100644 --- 
a/docs/articles_en/documentation/openvino-ecosystem/openvino-test-drive.rst +++ b/docs/articles_en/documentation/openvino-ecosystem/openvino-test-drive.rst @@ -9,7 +9,7 @@ OpenVINO™ Test Drive -OpenVINO™ Test Drive is a cross-platform graphic user interface application for running and +OpenVINO™ Test Drive is a cross-platform **graphic user interface** application for running and testing AI models, both generative and vision based. It can run directly on your computer or on edge devices using `OpenVINO™ Runtime `__. @@ -26,7 +26,6 @@ Use OpenVINO™ Test Drive to: * **Run inference of models** trained by Intel® Geti™ and **visualize the results**. - Installation (Windows) ############################################################################################### @@ -51,12 +50,22 @@ Inference of models from Hugging Face 1. Find a model on `Hugging Face `__ and import it. + .. image:: ../../assets/images/TestDrive_llm_import.gif + :align: center + :alt: how to import a model to test drive + 2. Chat with LLMs via the `Playground` tab. + .. image:: ../../assets/images/TestDrive_llm_model_chat.gif + :align: center + :alt: chatting with llm models in test drive + 3. Use the `Performance metrics` tab to get model performance metrics on your computer or an edge device. - + .. image:: ../../assets/images/TestDrive_llm_metrics.gif + :align: center + :alt: verifying llm performance in test drive Inference of models trained with Intel® Geti™ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -65,6 +74,10 @@ Inference of models trained with Intel® Geti™ by Intel® Geti™ (refer to the `Intel® Geti™ documentation `__ for more details). + .. image:: ../../assets/images/TestDrive_geti_download.gif + :align: center + :alt: verifying llm performance in test drive + 2. Import the deployment code into OpenVINO™ Test Drive, using the *Import model* and then *Local disk* buttons. 
From 4faa82da12af40e2249c38e019e75d10c3fff9ff Mon Sep 17 00:00:00 2001 From: Eddy Kim Date: Thu, 16 Jan 2025 19:06:41 +0900 Subject: [PATCH 04/97] [GPU] allow to read activations scale factor from rt_info for non-LLMs (#28449) ### Details: - allows to read `ACTIVATIONS_SCALE_FACTOR` from rt_info for non-LLMs, such as FLUX.1 and SDXL. - assumes that LLMs have `ReadValue` layers and non-LLMs does not. --- .../intel_gpu/runtime/execution_config.hpp | 2 +- src/plugins/intel_gpu/src/plugin/plugin.cpp | 34 +++++++++++++++++-- .../src/runtime/execution_config.cpp | 5 +-- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/include/intel_gpu/runtime/execution_config.hpp b/src/plugins/intel_gpu/include/intel_gpu/runtime/execution_config.hpp index 70a04f0b0c3a99..113fa73e979b1b 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/runtime/execution_config.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/runtime/execution_config.hpp @@ -140,7 +140,7 @@ class ExecutionConfig { // Note that RT info property value has lower priority than values set by user via core.set_property or passed to compile_model call // So this method should be called after setting all user properties, but before apply_user_properties() call. 
- void apply_rt_info(const cldnn::device_info& info, const ov::RTMap& rt_info); + void apply_rt_info(const cldnn::device_info& info, const ov::RTMap& rt_info, const bool is_llm); std::string to_string() const; diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index 2931e55eb51d0d..4058b38dd78584 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -26,7 +26,11 @@ #include "intel_gpu/runtime/execution_config.hpp" #include "intel_gpu/runtime/itt.hpp" #include "openvino/core/deprecated.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/concat.hpp" #include "openvino/pass/manager.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "openvino/pass/pattern/op/or.hpp" #include "openvino/pass/visualize_tree.hpp" #include "openvino/runtime/device_id_parser.hpp" #include "openvino/runtime/intel_gpu/properties.hpp" @@ -62,6 +66,32 @@ namespace intel_gpu { #include "intel_gpu/plugin/primitives_list.hpp" #undef REGISTER_FACTORY +const auto is_llm = [](const std::shared_ptr& model) -> bool { + using namespace ov::pass::pattern; + + auto past = wrap_type(); + auto convert_past = wrap_type({past}); + auto gather_input = std::make_shared(OutputVector{past, convert_past}); + auto beam_idx = wrap_type(); + auto gather_past = wrap_type({gather_input, beam_idx, wrap_type()}); + auto gather_convert = wrap_type({gather_past}); + auto concat_past_input = std::make_shared(OutputVector{past, convert_past, gather_past, gather_convert}); + auto concat = wrap_type({concat_past_input, any_input()}); + auto convert_present = wrap_type({concat}); + auto present_input = std::make_shared(OutputVector{concat, convert_present}); + auto present = wrap_type({present_input}); + + auto kvcache_matcher = std::make_shared(present, "KVCacheMatcher"); + + for (auto& op : model->get_ordered_ops()) { + if (kvcache_matcher->match(op)) { + return true; + } + } + + return false; 
+}; + void Plugin::register_primitives() const { #define REGISTER_FACTORY(op_version, op_name) FACTORY_CALL(op_version, op_name) #include "intel_gpu/plugin/primitives_list.hpp" @@ -190,7 +220,7 @@ std::shared_ptr Plugin::compile_model(const std::shared_ptr< ExecutionConfig config = m_configs_map.at(device_id); config.set_user_property(orig_config); if (model->has_rt_info("runtime_options")) - config.apply_rt_info(context->get_engine().get_device_info(), model->get_rt_info("runtime_options")); + config.apply_rt_info(context->get_engine().get_device_info(), model->get_rt_info("runtime_options"), is_llm(model)); config.apply_user_properties(context->get_engine().get_device_info()); set_cache_info(model, config); @@ -281,7 +311,7 @@ ov::SupportedOpsMap Plugin::query_model(const std::shared_ptr& ExecutionConfig config = m_configs_map.at(device_id); config.set_user_property(orig_config); if (model->has_rt_info("runtime_options")) - config.apply_rt_info(ctx->get_engine().get_device_info(), model->get_rt_info("runtime_options")); + config.apply_rt_info(ctx->get_engine().get_device_info(), model->get_rt_info("runtime_options"), is_llm(model)); config.apply_user_properties(ctx->get_engine().get_device_info()); ProgramBuilder prog(ctx->get_engine(), config); diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 0372050657f018..16c47b7116853b 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -272,11 +272,12 @@ void ExecutionConfig::apply_user_properties(const cldnn::device_info& info) { user_properties.clear(); } -void ExecutionConfig::apply_rt_info(const cldnn::device_info& info, const ov::RTMap& rt_info) { +void ExecutionConfig::apply_rt_info(const cldnn::device_info& info, const ov::RTMap& rt_info, const bool is_llm) { if (!info.supports_immad) { apply_rt_info_property(ov::hint::kv_cache_precision, rt_info); - 
apply_rt_info_property(ov::hint::activations_scale_factor, rt_info); } + if (!info.supports_immad || !is_llm) + apply_rt_info_property(ov::hint::activations_scale_factor, rt_info); apply_rt_info_property(ov::hint::dynamic_quantization_group_size, rt_info); } From 25cd6b0905aeff7068c5dd8650cc8df289dfd589 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 16 Jan 2025 14:21:35 +0400 Subject: [PATCH 05/97] Migrate to pugixml v1.15 (#28389) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- thirdparty/pugixml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/thirdparty/pugixml b/thirdparty/pugixml index 2e357d19a3228c..ee86beb30e4973 160000 --- a/thirdparty/pugixml +++ b/thirdparty/pugixml @@ -1 +1 @@ -Subproject commit 2e357d19a3228c0a301727aac6bea6fecd982d21 +Subproject commit ee86beb30e4973f5feffe3ce63bfa4fbadf72f38 From ed50d5161684f22e56965390cfcba006b3ba5c7c Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Thu, 16 Jan 2025 14:35:09 +0400 Subject: [PATCH 06/97] Fix RopeFusion transformation after applying SDPA to PagedAttention conversion (#28447) ### Details: After internal discussion, we decided to use the changes from https://github.com/openvinotoolkit/openvino/pull/27718 as a base line Fixed Rope pattern detection for ChatGLM. 
### Tickets: - *CVS-158393* --- .../fuse_rotary_positional_embeddings.cpp | 22 +++-- .../fuse_rotary_positional_embeddings.cpp | 85 +++++++++++++++++++ 2 files changed, 101 insertions(+), 6 deletions(-) diff --git a/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp b/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp index c82853ec56e9ed..6b12f56215ca83 100644 --- a/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp +++ b/src/common/transformations/src/transformations/common_optimizations/fuse_rotary_positional_embeddings.cpp @@ -557,9 +557,11 @@ ov::pass::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id, const bool s } else { auto ListConstruct_452_Concat = makePattern({seq_length, {-1}, {head_cnt}, {ndims / 2}, {2}}, {{"axis", 0}}); + auto const_target_shape_0 = makeConst({0, 0, head_cnt, ndims / 2, 2}); auto const_target_shape_1 = makeConst({seq_len, batch, head_cnt, ndims / 2, 2}); - reshape_Reshape_453 = makePattern( - {slice_Slice_437 | var_split_1->output(0), ListConstruct_452_Concat | const_target_shape_1}); + reshape_Reshape_453 = + makePattern({slice_Slice_437 | var_split_1->output(0), + ListConstruct_452_Concat | const_target_shape_1 | const_target_shape_0}); } auto x_even = makePattern({reshape_Reshape_453, 0, -1}, {{"batch_dims", 0}}); @@ -588,6 +590,7 @@ ov::pass::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id, const bool s } else { auto ListConstruct_379_Concat = makePattern({seq_length, {-1}, {1}, {ndims / 2}, {2}}, {{"axis", 0}}); + auto const_target_shape_0 = makeConst({1, -1, 1, ndims / 2, 2}); auto const_target_shape_2 = makeConst({seq_len, batch, 1, ndims / 2, 2}); auto slice_Slice_449 = makePattern({cos_sin_cache, {0}, seq_length, {1}, {0}}); @@ -596,7 +599,7 @@ ov::pass::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id, const bool s // [seq_length, 1, 
batch, half_rotary_dims, 2] view_Reshape_460 = makePattern({slice_StridedSlice_449 | slice_Slice_449 | var_split_2->output(0), - ListConstruct_379_Concat | const_target_shape_2}, + ListConstruct_379_Concat | const_target_shape_0 | const_target_shape_2}, {{"special_zero", false}}); } @@ -609,12 +612,17 @@ ov::pass::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id, const bool s auto sub_Subtract_469 = makePattern({x_even_cos, neg_x_odd_sin}, {{"auto_broadcast", "numpy"}}); auto y_even = makePattern({sub_Subtract_469, -1}); + auto const_y_even_reshape = makeConst({1, -1, head_cnt, ndims / 2, 1}); + auto y_even_reshape = + makePattern({sub_Subtract_469, const_y_even_reshape}, {{"special_zero", false}}); auto x_odd_cos = makePattern({x_odd, cos_tab}, {{"auto_broadcast", "numpy"}}); auto x_even_sin = makePattern({x_even, sin_tab}, {{"auto_broadcast", "numpy"}}); auto add_Add_476 = makePattern({x_odd_cos, x_even_sin}, {{"auto_broadcast", "numpy"}}); auto y_odd = makePattern({add_Add_476, -1}); + auto const_y_odd_reshape = makeConst({1, -1, head_cnt, ndims / 2, 1}); + auto y_odd_reshape = makePattern({add_Add_476, const_y_odd_reshape}, {{"special_zero", false}}); - auto stack_481 = makePattern({y_even, y_odd}, {{"axis", -1}}); + auto stack_481 = makePattern({y_even | y_even_reshape, y_odd | y_odd_reshape}, {{"axis", -1}}); auto ShapeOf_135133 = makePattern({stack_481}); auto flatten_Slice_497 = GenSlice(ShapeOf_135133, 0, 3, 1, 0); @@ -629,9 +637,11 @@ ov::pass::RoPEFusionChatGLM::RoPEFusionChatGLM(int split_output_id, const bool s {{"special_zero", true}}); } else { // [length, batch, head_cnt, half_rotary_dims, 2] + auto const_target_shape_0 = makeConst({0, 0, head_cnt, ndims}); const_target_shape_3 = makeConst({seq_len, batch, head_cnt, ndims}); - flatten_Reshape_501 = makePattern({stack_481, flatten_Concat_500 | const_target_shape_3}, - {{"special_zero", true}}); + flatten_Reshape_501 = + makePattern({stack_481, flatten_Concat_500 | const_target_shape_3 | 
const_target_shape_0}, + {{"special_zero", true}}); } auto slice_Slice_443 = GenSlice(input_key, ndims, INT_MAX, 1, 3); diff --git a/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp b/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp index 0328831ff1a69c..1b34e0c4423d3d 100644 --- a/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp +++ b/src/common/transformations/tests/common_optimizations/fuse_rotary_positional_embeddings.cpp @@ -1131,3 +1131,88 @@ TEST_F(TransformationTestsF, ConvertToROPE_Flux_mul_squeeze_unsqueeze) { } comparator.enable(FunctionsComparator::ATTRIBUTES); } + +TEST_F(TransformationTestsF, ConvertToROPE_chatGLM3_PagedAttention) { + disable_rt_info_check(); + const int batch = -1; + const int seq_len = 1; + const int num_heads = 32; + const int num_heads_kv = 2; + const int ndims = 128; + const int rotary_ndims = 64; + const int hidden_size = ndims * (num_heads + 2 * num_heads_kv); + const int hidden_size_q = ndims * num_heads; + const int hidden_size_kv = ndims * num_heads_kv; + using namespace ov; + { + auto input = + std::make_shared(ov::element::f32, ov::PartialShape{seq_len, batch, hidden_size}); + auto cos_sin = std::make_shared(ov::element::f32, + ov::PartialShape{seq_len, batch, rotary_ndims / 2, 2}); + auto aten_slice_Slice_1 = makeOP({cos_sin, {0}, {1}, {1}, {0}}); + auto aten_view_Reshape = makeOP({aten_slice_Slice_1, {seq_len, batch, 1, rotary_ndims / 2, 2}}, + {{"special_zero", false}}); + auto aten_select_Gather_1 = makeOP({aten_view_Reshape, 0, -1}, {{"batch_dims", 0}}); + auto aten_select_Gather_3 = makeOP({aten_view_Reshape, 1, -1}, {{"batch_dims", 0}}); + + auto attn_prim_ListUnpack = + makeOP({input, -1, {hidden_size_q, hidden_size_kv, hidden_size_kv}}); + auto attn_aten_view_Reshape_2 = + makeOP({attn_prim_ListUnpack->output(0), {0, 0, num_heads, ndims}}, + {{"special_zero", true}}); + auto 
VariadicSplit_29663 = + makeOP({attn_aten_view_Reshape_2, 3, {rotary_ndims, ndims - rotary_ndims}}); + auto aten_reshape_Reshape_55 = + makeOP({VariadicSplit_29663->output(0), {0, 0, num_heads, rotary_ndims / 2, 2}}, + {{"special_zero", true}}); + auto aten_select_Gather_440 = makeOP({aten_reshape_Reshape_55, 0, -1}, {{"batch_dims", 0}}); + auto aten_mul_Multiply_276 = + makeOP({aten_select_Gather_440, aten_select_Gather_1}, {{"auto_broadcast", "numpy"}}); + auto aten_select_Gather_442 = makeOP({aten_reshape_Reshape_55, 1, -1}, {{"batch_dims", 0}}); + auto aten_mul_Multiply_277 = + makeOP({aten_select_Gather_442, aten_select_Gather_3}, {{"auto_broadcast", "numpy"}}); + auto Multiply_34833 = + makeOP({aten_mul_Multiply_277, -1.000000f}, {{"auto_broadcast", "numpy"}}); + auto aten_sub_Subtract_55 = + makeOP({aten_mul_Multiply_276, Multiply_34833}, {{"auto_broadcast", "numpy"}}); + auto Unsqueeze_62197 = makeOP({aten_sub_Subtract_55, {1, -1, num_heads, rotary_ndims / 2, 1}}, + {{"special_zero", false}}); + auto aten_mul_Multiply_278 = + makeOP({aten_select_Gather_442, aten_select_Gather_1}, {{"auto_broadcast", "numpy"}}); + auto aten_mul_Multiply_279 = + makeOP({aten_select_Gather_440, aten_select_Gather_3}, {{"auto_broadcast", "numpy"}}); + auto aten_add_Add_55 = + makeOP({aten_mul_Multiply_278, aten_mul_Multiply_279}, {{"auto_broadcast", "numpy"}}); + auto Unsqueeze_62198 = makeOP({aten_add_Add_55, {1, -1, num_heads, rotary_ndims / 2, 1}}, + {{"special_zero", false}}); + auto aten_stack_55 = makeOP({Unsqueeze_62197, Unsqueeze_62198}, {{"axis", -1}}); + auto aten_flatten_Reshape_55 = + makeOP({aten_stack_55, {0, 0, num_heads, rotary_ndims}}, {{"special_zero", true}}); + auto aten_cat_Concat_55 = + makeOP({aten_flatten_Reshape_55, VariadicSplit_29663->output(1)}, {{"axis", -1}}); + + model = std::make_shared(ov::NodeVector{aten_cat_Concat_55}, ov::ParameterVector{input, cos_sin}); + } + manager.register_pass(false); + { + auto input = + 
std::make_shared(ov::element::f32, ov::PartialShape{seq_len, batch, hidden_size}); + auto gather_cos_sin = + std::make_shared(ov::element::f32, + ov::PartialShape{seq_len, batch, rotary_ndims / 2, 2}); + auto rope = makeOP({input, gather_cos_sin, gather_cos_sin}, + {{"config.slice_start", 0}, + {"config.slice_stop", 4096}, + {"config.input_trans0213", false}, + {"config.output_trans0213", false}, + {"config.is_interleaved", false}, + {"config.rotary_ndims", rotary_ndims}, + {"config.is_chatglm", true}, + {"config.support_2d_rope", false}, + {"config.is_qwen", false}, + {"config.head_cnt", num_heads}, + {"config.head_size", ndims}, + {"config.gather_position_arg_id", 0}}); + model_ref = std::make_shared(ov::NodeVector{rope}, ov::ParameterVector{input, gather_cos_sin}); + } +} \ No newline at end of file From eb17cb36b0e116b09b1dd7a9eb830a0ce88da63a Mon Sep 17 00:00:00 2001 From: Alicja Miloszewska Date: Thu, 16 Jan 2025 12:25:06 +0100 Subject: [PATCH 07/97] [Py OV] Update .runtime in strings (#28479) ### Details: - Update docstrings - Update tests that rely on them - Update `py::module_::import("openvino.runtime").attr("Model")` e.g. 
[here](https://github.com/openvinotoolkit/openvino/blob/9d77a3da11f0dbdb72cbd412c06a1da5e0e47ef0/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp#L38) ### Tickets: - *ticket-id* --------- Signed-off-by: Alicja Miloszewska --- src/bindings/python/src/openvino/_ov_api.py | 56 ++++---- .../python/src/openvino/helpers/packing.py | 6 +- .../openvino/utils/data_helpers/wrappers.py | 2 +- .../src/pyopenvino/core/async_infer_queue.cpp | 12 +- .../python/src/pyopenvino/core/common.cpp | 2 +- .../src/pyopenvino/core/compiled_model.cpp | 22 +-- .../python/src/pyopenvino/core/core.cpp | 36 ++--- .../python/src/pyopenvino/core/extension.cpp | 2 +- .../src/pyopenvino/core/infer_request.cpp | 84 ++++++------ .../src/pyopenvino/core/profiling_info.cpp | 2 +- .../src/pyopenvino/core/remote_tensor.cpp | 4 +- .../python/src/pyopenvino/core/tensor.cpp | 38 +++--- .../src/pyopenvino/core/variable_state.cpp | 2 +- .../python/src/pyopenvino/core/version.cpp | 3 +- .../pyopenvino/experimental/experimental.cpp | 4 +- .../src/pyopenvino/frontend/frontend.cpp | 10 +- .../src/pyopenvino/frontend/input_model.cpp | 8 +- .../python/src/pyopenvino/graph/any.cpp | 2 +- .../pyopenvino/graph/attribute_visitor.cpp | 2 +- .../python/src/pyopenvino/graph/axis_set.cpp | 2 +- .../src/pyopenvino/graph/axis_vector.cpp | 2 +- .../src/pyopenvino/graph/coordinate.cpp | 2 +- .../src/pyopenvino/graph/coordinate_diff.cpp | 2 +- .../pyopenvino/graph/descriptors/tensor.cpp | 20 +-- .../python/src/pyopenvino/graph/dimension.cpp | 2 +- .../pyopenvino/graph/discrete_type_info.cpp | 2 +- .../python/src/pyopenvino/graph/layout.cpp | 2 +- .../src/pyopenvino/graph/layout_helpers.cpp | 2 +- .../python/src/pyopenvino/graph/model.cpp | 125 +++++++++--------- .../python/src/pyopenvino/graph/node.cpp | 40 +++--- .../src/pyopenvino/graph/node_input.cpp | 16 +-- .../src/pyopenvino/graph/node_output.cpp | 4 +- .../src/pyopenvino/graph/node_output.hpp | 16 +-- .../src/pyopenvino/graph/ops/assign.cpp | 2 +- 
.../src/pyopenvino/graph/ops/constant.cpp | 2 +- .../python/src/pyopenvino/graph/ops/if.cpp | 38 +++--- .../python/src/pyopenvino/graph/ops/loop.cpp | 2 +- .../src/pyopenvino/graph/ops/parameter.cpp | 2 +- .../src/pyopenvino/graph/ops/read_value.cpp | 2 +- .../src/pyopenvino/graph/ops/result.cpp | 2 +- .../pyopenvino/graph/ops/tensor_iterator.cpp | 4 +- .../pyopenvino/graph/ops/util/variable.cpp | 4 +- .../src/pyopenvino/graph/partial_shape.cpp | 20 +-- .../pyopenvino/graph/passes/graph_rewrite.cpp | 20 +-- .../src/pyopenvino/graph/passes/manager.cpp | 8 +- .../pyopenvino/graph/passes/matcher_pass.cpp | 36 ++--- .../pyopenvino/graph/passes/model_pass.cpp | 6 +- .../src/pyopenvino/graph/passes/pass_base.cpp | 2 +- .../pyopenvino/graph/passes/pattern_ops.cpp | 60 ++++----- .../graph/passes/regmodule_graph_passes.cpp | 2 +- .../graph/passes/transformations.cpp | 14 +- .../graph/preprocess/pre_post_process.cpp | 84 ++++++------ .../python/src/pyopenvino/graph/rt_map.cpp | 2 +- .../python/src/pyopenvino/graph/shape.cpp | 2 +- .../python/src/pyopenvino/graph/strides.cpp | 2 +- .../python/src/pyopenvino/graph/symbol.cpp | 2 +- .../pyopenvino/graph/types/element_type.cpp | 8 +- .../python/src/pyopenvino/graph/util.cpp | 2 +- .../python/src/pyopenvino/pyopenvino.cpp | 6 +- .../python/tests/test_graph/test_core.py | 2 +- .../tests/test_runtime/test_compiled_model.py | 2 +- .../tests/test_runtime/test_input_node.py | 2 +- .../python/tests/test_runtime/test_model.py | 8 +- .../test_runtime/test_output_const_node.py | 4 +- .../test_runtime/test_sync_infer_request.py | 2 +- 65 files changed, 443 insertions(+), 445 deletions(-) diff --git a/src/bindings/python/src/openvino/_ov_api.py b/src/bindings/python/src/openvino/_ov_api.py index f1b69fb86fcbc0..6b1d99cbb96e29 100644 --- a/src/bindings/python/src/openvino/_ov_api.py +++ b/src/bindings/python/src/openvino/_ov_api.py @@ -63,13 +63,13 @@ def clone(self) -> "Model": return Model(self.__model.clone()) def __copy__(self) -> 
"Model": - raise TypeError("Cannot copy 'openvino.runtime.Model'. Please, use deepcopy instead.") + raise TypeError("Cannot copy 'openvino.Model'. Please, use deepcopy instead.") def __deepcopy__(self, memo: Dict) -> "Model": """Returns a deepcopy of Model. :return: A copy of Model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model """ return Model(self.__model.clone()) @@ -108,14 +108,14 @@ def infer( (1) `int` (2) `str` - (3) `openvino.runtime.ConstOutput` + (3) `openvino.ConstOutput` The allowed types of values in the `inputs` are: (1) `numpy.ndarray` and all the types that are castable to it, e.g. `torch.Tensor` - (2) `openvino.runtime.Tensor` + (2) `openvino.Tensor` - Can be called with only one `openvino.runtime.Tensor` or `numpy.ndarray`, + Can be called with only one `openvino.Tensor` or `numpy.ndarray`, it will work only with one-input models. When model has more inputs, function throws error. @@ -190,14 +190,14 @@ def start_async( (1) `int` (2) `str` - (3) `openvino.runtime.ConstOutput` + (3) `openvino.ConstOutput` The allowed types of values in the `inputs` are: (1) `numpy.ndarray` and all the types that are castable to it, e.g. `torch.Tensor` - (2) `openvino.runtime.Tensor` + (2) `openvino.Tensor` - Can be called with only one `openvino.runtime.Tensor` or `numpy.ndarray`, + Can be called with only one `openvino.Tensor` or `numpy.ndarray`, it will work only with one-input models. When model has more inputs, function throws error. @@ -241,7 +241,7 @@ def get_compiled_model(self) -> "CompiledModel": """Gets the compiled model this InferRequest is using. :return: a CompiledModel object - :rtype: openvino.runtime.ie_api.CompiledModel + :rtype: openvino.CompiledModel """ return CompiledModel(super().get_compiled_model()) @@ -250,7 +250,7 @@ def results(self) -> OVDict: """Gets all outputs tensors of this InferRequest. :return: Dictionary of results from output tensors with ports as keys. 
- :rtype: Dict[openvino.runtime.ConstOutput, numpy.array] + :rtype: Dict[openvino.ConstOutput, numpy.array] """ return OVDict(super().results) @@ -277,7 +277,7 @@ def create_infer_request(self) -> InferRequest: The created request has allocated input and output tensors. :return: New InferRequest object. - :rtype: openvino.runtime.InferRequest + :rtype: openvino.InferRequest """ return InferRequest(super().create_infer_request()) @@ -285,7 +285,7 @@ def query_state(self) -> None: """Gets state control interface for the underlaying infer request. :return: List of VariableState objects. - :rtype: List[openvino.runtime.VariableState] + :rtype: List[openvino.VariableState] """ if self._infer_request is None: self._infer_request = self.create_infer_request() @@ -316,14 +316,14 @@ def infer_new_request(self, inputs: Any = None) -> OVDict: (1) `int` (2) `str` - (3) `openvino.runtime.ConstOutput` + (3) `openvino.ConstOutput` The allowed types of values in the `inputs` are: (1) `numpy.ndarray` and all the types that are castable to it, e.g. `torch.Tensor` - (2) `openvino.runtime.Tensor` + (2) `openvino.Tensor` - Can be called with only one `openvino.runtime.Tensor` or `numpy.ndarray`, + Can be called with only one `openvino.Tensor` or `numpy.ndarray`, it will work only with one-input models. When model has more inputs, function throws error. @@ -361,14 +361,14 @@ def __call__( (1) `int` (2) `str` - (3) `openvino.runtime.ConstOutput` + (3) `openvino.ConstOutput` The allowed types of values in the `inputs` are: (1) `numpy.ndarray` and all the types that are castable to it, e.g. `torch.Tensor` - (2) `openvino.runtime.Tensor` + (2) `openvino.Tensor` - Can be called with only one `openvino.runtime.Tensor` or `numpy.ndarray`, + Can be called with only one `openvino.Tensor` or `numpy.ndarray`, it will work only with one-input models. When model has more inputs, function throws error. 
@@ -448,7 +448,7 @@ def __iter__(self) -> Iterable[InferRequest]: will put the parent AsyncInferQueue object in an invalid state. :return: a generator that yields InferRequests. - :rtype: Iterable[openvino.runtime.InferRequest] + :rtype: Iterable[openvino.InferRequest] """ return (InferRequest(x) for x in super().__iter__()) @@ -462,7 +462,7 @@ def __getitem__(self, i: int) -> InferRequest: :param i: InferRequest id. :type i: int :return: InferRequests from the pool with given id. - :rtype: openvino.runtime.InferRequest + :rtype: openvino.InferRequest """ return InferRequest(super().__getitem__(i)) @@ -478,14 +478,14 @@ def start_async( (1) `int` (2) `str` - (3) `openvino.runtime.ConstOutput` + (3) `openvino.ConstOutput` The allowed types of values in the `inputs` are: (1) `numpy.ndarray` and all the types that are castable to it, e.g. `torch.Tensor` - (2) `openvino.runtime.Tensor` + (2) `openvino.Tensor` - Can be called with only one `openvino.runtime.Tensor` or `numpy.ndarray`, + Can be called with only one `openvino.Tensor` or `numpy.ndarray`, it will work only with one-input models. When model has more inputs, function throws error. @@ -574,7 +574,7 @@ def compile_model( :param model: Model acquired from read_model function or a path to a model in IR / ONNX / PDPD / TF and TFLite format. - :type model: Union[openvino.runtime.Model, str, pathlib.Path] + :type model: Union[openvino.Model, str, pathlib.Path] :param device_name: Optional. Name of the device to load the model to. If not specified, the default OpenVINO device will be selected by AUTO plugin. :type device_name: str @@ -584,7 +584,7 @@ def compile_model( :param weights: Optional. Weights of model in memory to be loaded to the model. :type weights: bytes, optional, keyword-only :return: A compiled model. 
- :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel """ if isinstance(model, Model): model = model._Model__model @@ -635,7 +635,7 @@ def import_model( (property name, property value) relevant only for this load operation. :type config: dict, optional :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel :Example: @@ -680,7 +680,7 @@ def compile_model( :param model: Model acquired from read_model function or a path to a model in IR / ONNX / PDPD / TF and TFLite format. - :type model: Union[openvino.runtime.Model, str, pathlib.Path] + :type model: Union[openvino.Model, str, pathlib.Path] :param device_name: Optional. Name of the device to load the model to. If not specified, the default OpenVINO device will be selected by AUTO plugin. :type device_name: str @@ -688,7 +688,7 @@ def compile_model( (property name, property value) relevant only for this load operation. :type config: dict, optional :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel """ core = Core() diff --git a/src/bindings/python/src/openvino/helpers/packing.py b/src/bindings/python/src/openvino/helpers/packing.py index 0d72f47b5f1746..cf085e3eca2a5b 100644 --- a/src/bindings/python/src/openvino/helpers/packing.py +++ b/src/bindings/python/src/openvino/helpers/packing.py @@ -21,7 +21,7 @@ def pack_data(array: np.ndarray, type: Type) -> np.ndarray: :param array: numpy array with values to pack. :type array: numpy array :param type: Type to interpret the array values. Type must be u1, u4, i4, nf4 or f4e2m1. - :type type: openvino.runtime.Type + :type type: openvino.Type """ assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], "Packing algorithm for the" "data types stored in 1, 2 or 4 bits" @@ -58,9 +58,9 @@ def unpack_data(array: np.ndarray, type: Type, shape: Union[list, Shape]) -> np. :param array: numpy array to unpack. 
:type array: numpy array :param type: Type to extract from array values. Type must be u1, u4, i4, nf4 or f4e2m1. - :type type: openvino.runtime.Type + :type type: openvino.Type :param shape: the new shape for the unpacked array. - :type shape: Union[list, openvino.runtime.Shape] + :type shape: Union[list, openvino.Shape] """ assert type in [Type.u1, Type.u4, Type.i4, Type.nf4, Type.f4e2m1], "Unpacking algorithm for the" "data types stored in 1, 2 or 4 bits" unpacked = np.unpackbits(array.view(np.uint8)) diff --git a/src/bindings/python/src/openvino/utils/data_helpers/wrappers.py b/src/bindings/python/src/openvino/utils/data_helpers/wrappers.py index 903ae2c6ad3888..02c6cfa473d21d 100644 --- a/src/bindings/python/src/openvino/utils/data_helpers/wrappers.py +++ b/src/bindings/python/src/openvino/utils/data_helpers/wrappers.py @@ -36,7 +36,7 @@ class OVDict(Mapping): This class is a dict-like object. It provides possibility to address data tensors with three key types: - * `openvino.runtime.ConstOutput` - port of the output + * `openvino.ConstOutput` - port of the output * `int` - index of the output * `str` - names of the output diff --git a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp index 0a579a557d6b9d..2894e07f1d5d77 100644 --- a/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp +++ b/src/bindings/python/src/pyopenvino/core/async_infer_queue.cpp @@ -166,7 +166,7 @@ class AsyncInferQueue { void regclass_AsyncInferQueue(py::module m) { py::class_> cls(m, "AsyncInferQueue"); - cls.doc() = "openvino.runtime.AsyncInferQueue represents a helper that creates a pool of asynchronous" + cls.doc() = "openvino.AsyncInferQueue represents a helper that creates a pool of asynchronous " "InferRequests and provides synchronization functions to control flow of a simple pipeline."; cls.def(py::init(), @@ -176,11 +176,11 @@ void regclass_AsyncInferQueue(py::module m) { Creates
AsyncInferQueue. :param model: Model to be used to create InferRequests in a pool. - :type model: openvino.runtime.CompiledModel + :type model: openvino.CompiledModel :param jobs: Number of InferRequests objects in a pool. If 0, jobs number will be set automatically to the optimal number. Default: 0 :type jobs: int - :rtype: openvino.runtime.AsyncInferQueue + :rtype: openvino.AsyncInferQueue )"); // Overload for single input, it will throw error if a model has more than one input. @@ -216,7 +216,7 @@ void regclass_AsyncInferQueue(py::module m) { :param inputs: Data to set on single input tensor of next available InferRequest from AsyncInferQueue's pool. - :type inputs: openvino.runtime.Tensor + :type inputs: openvino.Tensor :param userdata: Any data that will be passed to a callback :type userdata: Any :rtype: None @@ -262,7 +262,7 @@ void regclass_AsyncInferQueue(py::module m) { :param inputs: Data to set on input tensors of next available InferRequest from AsyncInferQueue's pool. - :type inputs: dict[Union[int, str, openvino.runtime.ConstOutput] : openvino.runtime.Tensor] + :type inputs: dict[Union[int, str, openvino.ConstOutput] : openvino.Tensor] :param userdata: Any data that will be passed to a callback :rtype: None @@ -348,7 +348,7 @@ void regclass_AsyncInferQueue(py::module m) { :param i: InferRequest id :type i: int :return: InferRequests from the pool with given id. - :rtype: openvino.runtime.InferRequest + :rtype: openvino.InferRequest )"); cls.def_property_readonly( diff --git a/src/bindings/python/src/pyopenvino/core/common.cpp b/src/bindings/python/src/pyopenvino/core/common.cpp index e98d4398cf2b8c..bf730f3ae89eb6 100644 --- a/src/bindings/python/src/pyopenvino/core/common.cpp +++ b/src/bindings/python/src/pyopenvino/core/common.cpp @@ -578,7 +578,7 @@ ov::PartialShape partial_shape_from_list(const py::list& shape) { } else { throw py::type_error("Incorrect type " + std::string(py::str(dim.get_type())) + " for dimension. 
Expected types are: " - "int, str, openvino.runtime.Dimension, list/tuple with lower and upper values for " + "int, str, openvino.Dimension, list/tuple with lower and upper values for " "dynamic dimension."); } } diff --git a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp index 3395f628b1b303..b7c12c4310f1f2 100644 --- a/src/bindings/python/src/pyopenvino/core/compiled_model.cpp +++ b/src/bindings/python/src/pyopenvino/core/compiled_model.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_CompiledModel(py::module m) { py::class_> cls(m, "CompiledModel"); - cls.doc() = "openvino.runtime.CompiledModel represents Model that is compiled for a specific device by applying " + cls.doc() = "openvino.CompiledModel represents Model that is compiled for a specific device by applying " "multiple optimization transformations, then mapping to compute kernels."; cls.def(py::init([](ov::CompiledModel& other) { @@ -40,7 +40,7 @@ void regclass_CompiledModel(py::module m) { The created request has allocated input and output tensors. :return: New InferRequest object. - :rtype: openvino.runtime.InferRequest + :rtype: openvino.InferRequest )"); cls.def( @@ -174,7 +174,7 @@ void regclass_CompiledModel(py::module m) { is optimized and which kernels, element types, and layouts are selected. :return: Model, containing Executable Graph information. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); cls.def("release_memory", @@ -193,7 +193,7 @@ void regclass_CompiledModel(py::module m) { Gets all inputs of a compiled model. :return: Inputs of a compiled model. - :rtype: List[openvino.runtime.ConstOutput] + :rtype: List[openvino.ConstOutput] )"); cls.def("input", @@ -203,7 +203,7 @@ void regclass_CompiledModel(py::module m) { If a model has more than one input, this method throws an exception. :return: A compiled model input. 
- :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def("input", @@ -216,7 +216,7 @@ void regclass_CompiledModel(py::module m) { :param index: An input index. :type index: int :return: A compiled model input. - :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def( @@ -230,7 +230,7 @@ void regclass_CompiledModel(py::module m) { :param tensor_name: An input tensor name. :type tensor_name: str :return: A compiled model input. - :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def_property_readonly("outputs", @@ -239,7 +239,7 @@ void regclass_CompiledModel(py::module m) { Gets all outputs of a compiled model. :return: Outputs of a compiled model. - :rtype: List[openvino.runtime.ConstOutput] + :rtype: List[openvino.ConstOutput] )"); cls.def("output", @@ -249,7 +249,7 @@ void regclass_CompiledModel(py::module m) { If the model has more than one output, this method throws an exception. :return: A compiled model output. - :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def("output", @@ -262,7 +262,7 @@ void regclass_CompiledModel(py::module m) { :param index: An output index. :type index: int :return: A compiled model output. - :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def("output", @@ -276,7 +276,7 @@ void regclass_CompiledModel(py::module m) { :param tensor_name: An output tensor name. :type tensor_name: str :return: A compiled model output. 
- :rtype: openvino.runtime.ConstOutput + :rtype: openvino.ConstOutput )"); cls.def("__repr__", [](const ov::CompiledModel& self) { diff --git a/src/bindings/python/src/pyopenvino/core/core.cpp b/src/bindings/python/src/pyopenvino/core/core.cpp index 526ebb02952782..778ca6a9874de2 100644 --- a/src/bindings/python/src/pyopenvino/core/core.cpp +++ b/src/bindings/python/src/pyopenvino/core/core.cpp @@ -22,7 +22,7 @@ namespace py = pybind11; void regclass_Core(py::module m) { py::class_> cls(m, "Core"); cls.doc() = - "openvino.runtime.Core class represents OpenVINO runtime Core entity. User applications can create several " + "openvino.Core class represents OpenVINO runtime Core entity. User applications can create several " "Core class instances, but in this case, the underlying plugins are created multiple times and not shared " "between several Core instances. The recommended way is to have a single Core instance per application."; @@ -144,13 +144,13 @@ void regclass_Core(py::module m) { GIL is released while running this function. :param model: Model acquired from read_model function. - :type model: openvino.runtime.Model + :type model: openvino.Model :param device_name: Name of the device which will load the model. :type device_name: str :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :type properties: dict :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def( @@ -172,11 +172,11 @@ void regclass_Core(py::module m) { GIL is released while running this function. :param model: Model acquired from read_model function. - :type model: openvino.runtime.Model + :type model: openvino.Model :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :type properties: dict :return: A compiled model. 
- :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def( @@ -207,7 +207,7 @@ void regclass_Core(py::module m) { :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :type properties: dict :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def( @@ -258,7 +258,7 @@ void regclass_Core(py::module m) { :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :type properties: dict :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def( @@ -283,7 +283,7 @@ void regclass_Core(py::module m) { :param properties: Optional dict of pairs: (property name, property value) relevant only for this load operation. :type properties: dict :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def( @@ -358,7 +358,7 @@ void regclass_Core(py::module m) { :param device_name: Device name to identify a plugin. :type device_name: str :return: Plugin version information. - :rtype: Dict[str, openvino.runtime.Version] + :rtype: Dict[str, openvino.Version] )"); cls.def( @@ -389,7 +389,7 @@ void regclass_Core(py::module m) { :param weights: Bytes with tensor's data. :type weights: bytes :return: A model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); cls.def( @@ -423,7 +423,7 @@ void regclass_Core(py::module m) { :param config: Optional map of pairs: (property name, property value) relevant only for this read operation. :type config: dict, optional :return: A model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); cls.def( @@ -441,9 +441,9 @@ void regclass_Core(py::module m) { :type model: str :param weights: Tensor with weights. Reading ONNX / PDPD / TF and TFLite models doesn't support loading weights from weights tensors. 
- :type weights: openvino.runtime.Tensor + :type weights: openvino.Tensor :return: A model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); cls.def( @@ -510,7 +510,7 @@ void regclass_Core(py::module m) { :param config: Optional map of pairs: (property name, property value) relevant only for this read operation. :type config: dict, optional :return: A model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); cls.def( @@ -561,7 +561,7 @@ void regclass_Core(py::module m) { :param properties: Optional map of pairs: (property name, property value) relevant only for this load operation. :type properties: dict, optional :return: A compiled model. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel :Example: .. code-block:: python @@ -664,7 +664,7 @@ void regclass_Core(py::module m) { GIL is released while running this function. :param model: Model object to query. - :type model: openvino.runtime.Model + :type model: openvino.Model :param device_name: A name of a device to query. :type device_name: str :param properties: Optional dict of pairs: (property name, property value) @@ -690,7 +690,7 @@ void regclass_Core(py::module m) { Registers an extension to a Core object. :param extension: Extension object. - :type extension: openvino.runtime.Extension + :type extension: openvino.Extension )"); cls.def( @@ -701,7 +701,7 @@ void regclass_Core(py::module m) { Registers extensions to a Core object. :param extensions: List of Extension objects. 
- :type extensions: list[openvino.runtime.Extension] + :type extensions: list[openvino.Extension] )"); cls.def( diff --git a/src/bindings/python/src/pyopenvino/core/extension.cpp b/src/bindings/python/src/pyopenvino/core/extension.cpp index ac8869fca1f47a..d9c1f4a2a11554 100644 --- a/src/bindings/python/src/pyopenvino/core/extension.cpp +++ b/src/bindings/python/src/pyopenvino/core/extension.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_Extension(py::module m) { py::class_> ext(m, "Extension", py::dynamic_attr()); - ext.doc() = "openvino.runtime.Extension provides the base interface for OpenVINO extensions."; + ext.doc() = "openvino.Extension provides the base interface for OpenVINO extensions."; ext.def("__repr__", [](const ov::Extension& self) { return Common::get_simple_repr(self); diff --git a/src/bindings/python/src/pyopenvino/core/infer_request.cpp b/src/bindings/python/src/pyopenvino/core/infer_request.cpp index 23ae154473e45f..4405fbd62c9b75 100644 --- a/src/bindings/python/src/pyopenvino/core/infer_request.cpp +++ b/src/bindings/python/src/pyopenvino/core/infer_request.cpp @@ -27,7 +27,7 @@ inline py::object run_sync_infer(InferRequestWrapper& self, bool share_outputs, void regclass_InferRequest(py::module m) { py::class_> cls(m, "InferRequest"); - cls.doc() = "openvino.runtime.InferRequest represents infer request which can be run in asynchronous or " + cls.doc() = "openvino.InferRequest represents infer request which can be run in asynchronous or " "synchronous manners."; cls.def(py::init([](InferRequestWrapper& other) { @@ -46,7 +46,7 @@ void regclass_InferRequest(py::module m) { Set tensors using given keys. :param inputs: Data to set on tensors. 
- :type inputs: Dict[Union[int, str, openvino.runtime.ConstOutput], openvino.runtime.Tensor] + :type inputs: Dict[Union[int, str, openvino.ConstOutput], openvino.Tensor] )"); cls.def( @@ -68,7 +68,7 @@ void regclass_InferRequest(py::module m) { :param tensors: Input tensors for batched infer request. The type of each tensor must match the model input element type and shape (except batch dimension). Total size of tensors needs to match with input's size. - :type tensors: List[openvino.runtime.Tensor] + :type tensors: List[openvino.Tensor] )"); cls.def( @@ -87,11 +87,11 @@ void regclass_InferRequest(py::module m) { :param port: Port of input tensor. - :type port: openvino.runtime.ConstOutput + :type port: openvino.ConstOutput :param tensors: Input tensors for batched infer request. The type of each tensor must match the model input element type and shape (except batch dimension). Total size of tensors needs to match with input's size. - :type tensors: List[openvino.runtime.Tensor] + :type tensors: List[openvino.Tensor] :rtype: None )"); @@ -109,7 +109,7 @@ void regclass_InferRequest(py::module m) { Set output tensors using given indexes. :param inputs: Data to set on output tensors. - :type inputs: Dict[int, openvino.runtime.Tensor] + :type inputs: Dict[int, openvino.Tensor] )"); // Python API exclusive function @@ -126,7 +126,7 @@ void regclass_InferRequest(py::module m) { Set input tensors using given indexes. :param inputs: Data to set on output tensors. - :type inputs: Dict[int, openvino.runtime.Tensor] + :type inputs: Dict[int, openvino.Tensor] )"); cls.def( @@ -143,7 +143,7 @@ void regclass_InferRequest(py::module m) { :param tensors: Input tensors for batched infer request. The type of each tensor must match the model input element type and shape (except batch dimension). Total size of tensors needs to match with input's size. 
- :type tensors: List[openvino.runtime.Tensor] + :type tensors: List[openvino.Tensor] )"); cls.def( @@ -183,9 +183,9 @@ void regclass_InferRequest(py::module m) { GIL is released while running the inference. :param inputs: Data to set on single input tensor. - :type inputs: openvino.runtime.Tensor + :type inputs: openvino.Tensor :return: Dictionary of results from output tensors with ports as keys. - :rtype: Dict[openvino.runtime.ConstOutput, numpy.array] + :rtype: Dict[openvino.ConstOutput, numpy.array] )"); // Overload for general case, it accepts dict of inputs that are pairs of (key, value). @@ -213,9 +213,9 @@ void regclass_InferRequest(py::module m) { GIL is released while running the inference. :param inputs: Data to set on input tensors. - :type inputs: Dict[Union[int, str, openvino.runtime.ConstOutput], openvino.runtime.Tensor] + :type inputs: Dict[Union[int, str, openvino.ConstOutput], openvino.Tensor] :return: Dictionary of results from output tensors with ports as keys. - :rtype: Dict[openvino.runtime.ConstOutput, numpy.array] + :rtype: Dict[openvino.ConstOutput, numpy.array] )"); // Overload for single input, it will throw error if a model has more than one input. @@ -247,7 +247,7 @@ void regclass_InferRequest(py::module m) { running will lead to throwing exceptions. :param inputs: Data to set on single input tensors. - :type inputs: openvino.runtime.Tensor + :type inputs: openvino.Tensor :param userdata: Any data that will be passed inside callback call. :type userdata: Any )"); @@ -286,7 +286,7 @@ void regclass_InferRequest(py::module m) { running will lead to throwing exceptions. :param inputs: Data to set on input tensors. - :type inputs: Dict[Union[int, str, openvino.runtime.ConstOutput], openvino.runtime.Tensor] + :type inputs: Dict[Union[int, str, openvino.ConstOutput], openvino.Tensor] :param userdata: Any data that will be passed inside callback call. 
:type userdata: Any )"); @@ -379,7 +379,7 @@ void regclass_InferRequest(py::module m) { :param name: Name of tensor to get. :type name: str :return: A Tensor object with given name. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -392,9 +392,9 @@ void regclass_InferRequest(py::module m) { Gets input/output tensor of InferRequest. :param port: Port of tensor to get. - :type port: openvino.runtime.ConstOutput + :type port: openvino.ConstOutput :return: A Tensor object for the port. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -407,9 +407,9 @@ void regclass_InferRequest(py::module m) { Gets input/output tensor of InferRequest. :param port: Port of tensor to get. - :type port: openvino.runtime.Output + :type port: openvino.Output :return: A Tensor object for the port. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -426,7 +426,7 @@ void regclass_InferRequest(py::module m) { :return: An input Tensor with index idx for the model. If a tensor with specified idx is not found, an exception is thrown. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -439,7 +439,7 @@ void regclass_InferRequest(py::module m) { :return: An input Tensor for the model. If model has several inputs, an exception is thrown. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -455,7 +455,7 @@ void regclass_InferRequest(py::module m) { :type idx: int :return: An output Tensor with index idx for the model. If a tensor with specified idx is not found, an exception is thrown. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -468,7 +468,7 @@ void regclass_InferRequest(py::module m) { :return: An output Tensor for the model. If model has several outputs, an exception is thrown. 
- :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); cls.def( @@ -485,7 +485,7 @@ void regclass_InferRequest(py::module m) { :type name: str :param tensor: RemoteTensor object. The element_type and shape of a tensor must match the model's input/output element_type and shape. - :type tensor: openvino.runtime.RemoteTensor + :type tensor: openvino.RemoteTensor )"); cls.def( @@ -502,7 +502,7 @@ void regclass_InferRequest(py::module m) { :type name: str :param tensor: Tensor object. The element_type and shape of a tensor must match the model's input/output element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -516,10 +516,10 @@ void regclass_InferRequest(py::module m) { Sets input/output tensor of InferRequest. :param port: Port of input/output tensor. - :type port: openvino.runtime.ConstOutput + :type port: openvino.ConstOutput :param tensor: Tensor object. The element_type and shape of a tensor must match the model's input/output element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -533,10 +533,10 @@ void regclass_InferRequest(py::module m) { Sets input/output tensor of InferRequest. :param port: Port of input/output tensor. - :type port: openvino.runtime.Output + :type port: openvino.Output :param tensor: Tensor object. The element_type and shape of a tensor must match the model's input/output element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -554,7 +554,7 @@ void regclass_InferRequest(py::module m) { :type idx: int :param tensor: Tensor object. The element_type and shape of a tensor must match the model's input element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -569,7 +569,7 @@ void regclass_InferRequest(py::module m) { :param tensor: Tensor object. 
The element_type and shape of a tensor must match the model's input element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -586,7 +586,7 @@ void regclass_InferRequest(py::module m) { :type idx: int :param tensor: Tensor object. The element_type and shape of a tensor must match the model's output element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -601,7 +601,7 @@ void regclass_InferRequest(py::module m) { :param tensor: Tensor object. The element_type and shape of a tensor must match the model's output element_type and shape. - :type tensor: openvino.runtime.Tensor + :type tensor: openvino.Tensor )"); cls.def( @@ -618,7 +618,7 @@ void regclass_InferRequest(py::module m) { GIL is released while running this function. :return: List of profiling information for operations in model. - :rtype: List[openvino.runtime.ProfilingInfo] + :rtype: List[openvino.ProfilingInfo] )"); cls.def( @@ -633,7 +633,7 @@ void regclass_InferRequest(py::module m) { GIL is released while running this function. :return: List of VariableState objects. - :rtype: List[openvino.runtime.VariableState] + :rtype: List[openvino.VariableState] )"); cls.def( @@ -655,7 +655,7 @@ void regclass_InferRequest(py::module m) { Returns the compiled model. :return: Compiled model object. - :rtype: openvino.runtime.CompiledModel + :rtype: openvino.CompiledModel )"); cls.def_property_readonly( @@ -677,7 +677,7 @@ void regclass_InferRequest(py::module m) { R"( Gets all inputs of a compiled model which was used to create this InferRequest. - :rtype: List[openvino.runtime.ConstOutput] + :rtype: List[openvino.ConstOutput] )"); cls.def_property_readonly( @@ -688,7 +688,7 @@ void regclass_InferRequest(py::module m) { R"( Gets all outputs of a compiled model which was used to create this InferRequest. 
- :rtype: List[openvino.runtime.ConstOutput] + :rtype: List[openvino.ConstOutput] )"); cls.def_property_readonly("input_tensors", @@ -696,7 +696,7 @@ void regclass_InferRequest(py::module m) { R"( Gets all input tensors of this InferRequest. - :rtype: List[openvino.runtime.Tensor] + :rtype: List[openvino.Tensor] )"); cls.def_property_readonly("output_tensors", @@ -705,7 +705,7 @@ void regclass_InferRequest(py::module m) { Gets all output tensors of this InferRequest. - :rtype: List[openvino.runtime.Tensor] + :rtype: List[openvino.Tensor] )"); cls.def_property_readonly( @@ -732,7 +732,7 @@ void regclass_InferRequest(py::module m) { GIL is released while running this function. :return: Inference time. - :rtype: List[openvino.runtime.ProfilingInfo] + :rtype: List[openvino.ProfilingInfo] )"); cls.def_property_readonly( @@ -746,7 +746,7 @@ void regclass_InferRequest(py::module m) { Note: All string-based data is decoded by default. :return: Dictionary of results from output tensors with ports as keys. 
- :rtype: Dict[openvino.runtime.ConstOutput, numpy.array] + :rtype: Dict[openvino.ConstOutput, numpy.array] )"); cls.def("__repr__", [](const InferRequestWrapper& self) { diff --git a/src/bindings/python/src/pyopenvino/core/profiling_info.cpp b/src/bindings/python/src/pyopenvino/core/profiling_info.cpp index 8912559ee0da80..2f8f3fb44a3493 100644 --- a/src/bindings/python/src/pyopenvino/core/profiling_info.cpp +++ b/src/bindings/python/src/pyopenvino/core/profiling_info.cpp @@ -13,7 +13,7 @@ namespace py = pybind11; void regclass_ProfilingInfo(py::module m) { py::class_> cls(m, "ProfilingInfo"); - cls.doc() = "openvino.runtime.ProfilingInfo contains performance metrics for single node."; + cls.doc() = "openvino.ProfilingInfo contains performance metrics for single node."; cls.def("__repr__", [](const ov::ProfilingInfo& self) { return Common::get_simple_repr(self); diff --git a/src/bindings/python/src/pyopenvino/core/remote_tensor.cpp b/src/bindings/python/src/pyopenvino/core/remote_tensor.cpp index c0e2a16c5ecbf6..249e5bd604982d 100644 --- a/src/bindings/python/src/pyopenvino/core/remote_tensor.cpp +++ b/src/bindings/python/src/pyopenvino/core/remote_tensor.cpp @@ -26,9 +26,9 @@ void regclass_RemoteTensor(py::module m) { :param remote_tensor: The RemoteTensor object on which the RoiRemoteTensor will be based. :type remote_tensor: openvino.RemoteTensor :param begin: The starting coordinates for the tensor bound. - :type begin: openvino.runtime.Coordinate + :type begin: openvino.Coordinate :param end: The ending coordinates for the tensor bound. 
- :type end: openvino.runtime.Coordinate + :type end: openvino.Coordinate )"); cls.def( diff --git a/src/bindings/python/src/pyopenvino/core/tensor.cpp b/src/bindings/python/src/pyopenvino/core/tensor.cpp index 0e8873e412958e..0ed8e753469e7a 100644 --- a/src/bindings/python/src/pyopenvino/core/tensor.cpp +++ b/src/bindings/python/src/pyopenvino/core/tensor.cpp @@ -16,7 +16,7 @@ namespace py = pybind11; void regclass_Tensor(py::module m) { py::class_> cls(m, "Tensor"); - cls.doc() = "openvino.runtime.Tensor holding either copy of memory or shared host memory."; + cls.doc() = "openvino.Tensor holding either copy of memory or shared host memory."; cls.def(py::init([](py::array& array, bool shared_memory) { return Common::object_from_data(array, shared_memory); @@ -52,19 +52,19 @@ void regclass_Tensor(py::module m) { memory with the specific openvino element type parameter. :param array: C_CONTIGUOUS numpy array which will be wrapped in - openvino.runtime.Tensor with given parameters (shape + openvino.Tensor with given parameters (shape and element_type). Array's memory is being shared with a host. Any action performed on the host memory will be reflected on this Tensor's memory! :type array: numpy.array :param shape: Shape of the new tensor. - :type shape: openvino.runtime.Shape + :type shape: openvino.Shape :param type: Element type - :type type: openvino.runtime.Type + :type type: openvino.Type :Example: .. code-block:: python - import openvino.runtime as ov + import openvino as ov import numpy as np arr = np.array(shape=(100), dtype=np.uint8) @@ -86,19 +86,19 @@ void regclass_Tensor(py::module m) { memory with the specific openvino element type parameter. :param array: C_CONTIGUOUS numpy array which will be wrapped in - openvino.runtime.Tensor with given parameters (shape + openvino.Tensor with given parameters (shape and element_type). Array's memory is being shared with a host. Any action performed on the host memory will be reflected on this Tensor's memory! 
:type array: numpy.array :param shape: Shape of the new tensor. :type shape: list or tuple :param type: Element type. - :type type: openvino.runtime.Type + :type type: openvino.Type :Example: .. code-block:: python - import openvino.runtime as ov + import openvino as ov import numpy as np arr = np.array(shape=(100), dtype=np.uint8) @@ -158,7 +158,7 @@ void regclass_Tensor(py::module m) { Type and shape will be taken from the port. :param port: Output port from a node. - :type param: openvino.runtime.Output + :type param: openvino.Output )"); cls.def(py::init([](ov::Output& port, py::array& array) { @@ -172,9 +172,9 @@ void regclass_Tensor(py::module m) { Type and shape will be taken from the port. :param port: Output port from a node. - :type param: openvino.runtime.Output + :type param: openvino.Output :param array: C_CONTIGUOUS numpy array which will be wrapped in - openvino.runtime.Tensor. Array's memory is being shared wi a host. + openvino.Tensor. Array's memory is being shared with a host. Any action performed on the host memory will be reflected on this Tensor's memory! :type array: numpy.array )"); @@ -186,7 +186,7 @@ void regclass_Tensor(py::module m) { Type and shape will be taken from the port. :param port: Output port from a node. - :type param: openvino.runtime.ConstOutput + :type param: openvino.ConstOutput )"); cls.def(py::init([](const ov::Output& port, py::array& array) { @@ -200,9 +200,9 @@ void regclass_Tensor(py::module m) { Type and shape will be taken from the port. :param port: Output port from a node. - :type param: openvino.runtime.ConstOutput + :type param: openvino.ConstOutput :param array: C_CONTIGUOUS numpy array which will be wrapped in - openvino.runtime.Tensor. Array's memory is being shared with a host. + openvino.Tensor. Array's memory is being shared with a host. Any action performed on the host memory will be reflected on this Tensor's memory! 
:type array: numpy.array )"); @@ -219,7 +219,7 @@ void regclass_Tensor(py::module m) { R"( Gets Tensor's element type. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); cls.def_property_readonly("element_type", @@ -227,7 +227,7 @@ void regclass_Tensor(py::module m) { R"( Tensor's element type. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); cls.def("get_size", @@ -267,7 +267,7 @@ void regclass_Tensor(py::module m) { R"( Gets Tensor's strides in bytes. - :rtype: openvino.runtime.Strides + :rtype: openvino.Strides )"); cls.def_property_readonly("strides", @@ -275,7 +275,7 @@ void regclass_Tensor(py::module m) { R"( Tensor's strides in bytes. - :rtype: openvino.runtime.Strides + :rtype: openvino.Strides )"); cls.def_property_readonly( @@ -361,7 +361,7 @@ void regclass_Tensor(py::module m) { R"( Gets Tensor's shape. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); cls.def("set_shape", diff --git a/src/bindings/python/src/pyopenvino/core/variable_state.cpp b/src/bindings/python/src/pyopenvino/core/variable_state.cpp index 7e4f96f0fcc999..008222f2fe8855 100644 --- a/src/bindings/python/src/pyopenvino/core/variable_state.cpp +++ b/src/bindings/python/src/pyopenvino/core/variable_state.cpp @@ -13,7 +13,7 @@ namespace py = pybind11; void regclass_VariableState(py::module m) { py::class_> variable_st(m, "VariableState"); - variable_st.doc() = "openvino.runtime.VariableState class."; + variable_st.doc() = "openvino.VariableState class."; variable_st.def("__repr__", [](const ov::VariableState& self) { return Common::get_simple_repr(self); diff --git a/src/bindings/python/src/pyopenvino/core/version.cpp b/src/bindings/python/src/pyopenvino/core/version.cpp index 2e17d7fd85e525..a60b4f1bdffc41 100644 --- a/src/bindings/python/src/pyopenvino/core/version.cpp +++ b/src/bindings/python/src/pyopenvino/core/version.cpp @@ -13,8 +13,7 @@ namespace py = pybind11; void regclass_Version(py::module m) { py::class_ cls(m, "Version"); - cls.doc() = - 
"openvino.runtime.Version represents version information that describes plugins and the OpenVINO library."; + cls.doc() = "openvino.Version represents version information that describes plugins and the OpenVINO library."; cls.def("__repr__", [](const ov::Version& self) { return "<" + Common::get_class_name(self) + ": " + std::string(self.buildNumber) + " " + self.description + ">"; diff --git a/src/bindings/python/src/pyopenvino/experimental/experimental.cpp b/src/bindings/python/src/pyopenvino/experimental/experimental.cpp index fffc1f92fb908a..d3de09e3d2fbd2 100644 --- a/src/bindings/python/src/pyopenvino/experimental/experimental.cpp +++ b/src/bindings/python/src/pyopenvino/experimental/experimental.cpp @@ -23,7 +23,7 @@ void regmodule_experimental(py::module m) { using Dimension(min, max) for each element. :param output: Node output pointing to the tensor for estimation. - :type output: openvino.runtime.Output + :type output: openvino.Output :param partial_shape: The resulting estimation will be stored in this PartialShape. :type partial_shape: openvino.PartialShape :return: True if estimation evaluation was successful, false otherwise. @@ -37,7 +37,7 @@ void regmodule_experimental(py::module m) { It traverses the graph upwards to deduce the estimation. :param output: Node output pointing to the tensor for estimation. - :type output: openvino.runtime.Output + :type output: openvino.Output :return: Tensors representing the lower and upper bound value estimations. :rtype: Tuple[openvino.Tensor, openvino.Tensor] )"); diff --git a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp index 19f5287a590bbe..daede38a5579e1 100644 --- a/src/bindings/python/src/pyopenvino/frontend/frontend.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/frontend.cpp @@ -110,7 +110,7 @@ void regclass_frontend_FrontEnd(py::module m) { :param model: Input model. 
:type model: openvino.frontend.InputModel :return: Fully converted OpenVINO Model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); fem.def( @@ -125,7 +125,7 @@ void regclass_frontend_FrontEnd(py::module m) { :param model: Partially converted OpenVINO model. :type model: openvino.frontend.Model :return: Fully converted OpenVINO Model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); fem.def("convert_partially", @@ -139,7 +139,7 @@ void regclass_frontend_FrontEnd(py::module m) { :param model : Input model. :type model: openvino.frontend.InputModel :return: Partially converted OpenVINO Model. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); fem.def("decode", @@ -153,7 +153,7 @@ void regclass_frontend_FrontEnd(py::module m) { :param model : Input model. :type model: openvino.frontend.InputModel :return: OpenVINO Model after decoding. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); fem.def( @@ -166,7 +166,7 @@ void regclass_frontend_FrontEnd(py::module m) { Runs normalization passes on function that was loaded with partial conversion. :param model : Partially converted OpenVINO model. - :type model: openvino.runtime.Model + :type model: openvino.Model )"); fem.def("get_name", diff --git a/src/bindings/python/src/pyopenvino/frontend/input_model.cpp b/src/bindings/python/src/pyopenvino/frontend/input_model.cpp index 455f03a2b2bcb9..4140a097a28ae9 100644 --- a/src/bindings/python/src/pyopenvino/frontend/input_model.cpp +++ b/src/bindings/python/src/pyopenvino/frontend/input_model.cpp @@ -225,7 +225,7 @@ void regclass_frontend_InputModel(py::module m) { :param place: Model place. :type place: openvino.frontend.Place :param shape: Partial shape for this place. - :type shape: openvino.runtime.PartialShape + :type shape: openvino.PartialShape )"); im.def("get_partial_shape", @@ -237,7 +237,7 @@ void regclass_frontend_InputModel(py::module m) { :param place: Model place. 
:type place: openvino.frontend.Place :return: Partial shape for this place. - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); im.def("get_inputs", @@ -303,7 +303,7 @@ void regclass_frontend_InputModel(py::module m) { :param place: Model place. :type place: openvino.frontend.Place :param type: New element type. - :type type: openvino.runtime.Type + :type type: openvino.Type )"); im.def("get_element_type", @@ -315,7 +315,7 @@ void regclass_frontend_InputModel(py::module m) { :param place: Model place. :type place: openvino.frontend.Place :return: Element type for this place. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); im.def( diff --git a/src/bindings/python/src/pyopenvino/graph/any.cpp b/src/bindings/python/src/pyopenvino/graph/any.cpp index 7e2c2ba3b2681e..5e1689ddffe279 100644 --- a/src/bindings/python/src/pyopenvino/graph/any.cpp +++ b/src/bindings/python/src/pyopenvino/graph/any.cpp @@ -22,7 +22,7 @@ bool check_key(py::object key, py::object obj) { void regclass_graph_Any(py::module m) { py::class_> ov_any(m, "OVAny"); - ov_any.doc() = "openvino.runtime.OVAny provides object wrapper for OpenVINO" + ov_any.doc() = "openvino.OVAny provides object wrapper for OpenVINO" "ov::Any class. 
It allows to pass different types of objects" "into C++ based core of the project."; diff --git a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp index d07dcd235f738d..82699434d80633 100644 --- a/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/attribute_visitor.cpp @@ -35,7 +35,7 @@ void regclass_graph_AttributeVisitor(py::module m) { "on_attributes", [](ov::AttributeVisitor* self, py::dict& attributes) { py::object float_32_type = py::module_::import("numpy").attr("float32"); - py::object model = py::module_::import("openvino.runtime").attr("Model"); + py::object model = py::module_::import("openvino").attr("Model"); for (const auto& attribute : attributes) { if (py::isinstance(attribute.second)) { visit_attribute(attributes, attribute, self); diff --git a/src/bindings/python/src/pyopenvino/graph/axis_set.cpp b/src/bindings/python/src/pyopenvino/graph/axis_set.cpp index 2ec3cd565588b6..c5e527bec45add 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_set.cpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_set.cpp @@ -18,7 +18,7 @@ namespace py = pybind11; void regclass_graph_AxisSet(py::module m) { py::class_> axis_set(m, "AxisSet"); - axis_set.doc() = "openvino.runtime.AxisSet wraps ov::AxisSet"; + axis_set.doc() = "openvino.AxisSet wraps ov::AxisSet"; axis_set.def(py::init&>(), py::arg("axes")); axis_set.def(py::init&>(), py::arg("axes")); axis_set.def(py::init&>(), py::arg("axes")); diff --git a/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp b/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp index a42978400f5874..325e8583fa3147 100644 --- a/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp +++ b/src/bindings/python/src/pyopenvino/graph/axis_vector.cpp @@ -14,7 +14,7 @@ namespace py = pybind11; void regclass_graph_AxisVector(py::module m) { py::class_> axis_vector(m, "AxisVector"); - 
axis_vector.doc() = "openvino.runtime.AxisVector wraps ov::AxisVector"; + axis_vector.doc() = "openvino.AxisVector wraps ov::AxisVector"; axis_vector.def(py::init&>(), py::arg("axes")); axis_vector.def(py::init&>(), py::arg("axes")); axis_vector.def(py::init(), py::arg("axes")); diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate.cpp b/src/bindings/python/src/pyopenvino/graph/coordinate.cpp index ad28454db8d7c0..fa86259b2e497b 100644 --- a/src/bindings/python/src/pyopenvino/graph/coordinate.cpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate.cpp @@ -14,7 +14,7 @@ namespace py = pybind11; void regclass_graph_Coordinate(py::module m) { py::class_> coordinate(m, "Coordinate"); - coordinate.doc() = "openvino.runtime.Coordinate wraps ov::Coordinate"; + coordinate.doc() = "openvino.Coordinate wraps ov::Coordinate"; coordinate.def(py::init&>()); coordinate.def(py::init()); coordinate.def(py::init&>()); diff --git a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp index 0309144a29c4f2..d251ca82874672 100644 --- a/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp +++ b/src/bindings/python/src/pyopenvino/graph/coordinate_diff.cpp @@ -18,7 +18,7 @@ namespace py = pybind11; void regclass_graph_CoordinateDiff(py::module m) { py::class_> coordinate_diff(m, "CoordinateDiff"); - coordinate_diff.doc() = "openvino.runtime.CoordinateDiff wraps ov::CoordinateDiff"; + coordinate_diff.doc() = "openvino.CoordinateDiff wraps ov::CoordinateDiff"; coordinate_diff.def(py::init&>()); coordinate_diff.def(py::init&>()); coordinate_diff.def(py::init()); diff --git a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp index 903f10c3e96656..4ad656baa481fc 100644 --- a/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp +++ b/src/bindings/python/src/pyopenvino/graph/descriptors/tensor.cpp @@ -29,7 +29,7 
@@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the shape description. :return: The shape description. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); tensor.def("get_rt_info", @@ -39,7 +39,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns PyRTMap which is a dictionary of user defined runtime info. :return: A dictionary of user defined data. - :rtype: openvino.runtime.RTMap + :rtype: openvino.RTMap )"); tensor.def("size", @@ -57,7 +57,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the partial shape description. :return: PartialShape description. - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); tensor.def("get_element_type", @@ -66,7 +66,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the element type description. :return: Type description. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); tensor.def("get_names", @@ -113,7 +113,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the lower bound of the tensor. :return: Lower bound. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); tensor.def("set_lower_value", @@ -123,7 +123,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Sets the lower bound of the tensor. :param lower_bound: The lower bound value. - :type lower_bound: openvino.runtime.Tensor + :type lower_bound: openvino.Tensor )"); tensor.def("get_upper_value", @@ -132,7 +132,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the upper bound of the tensor. :return: Upper bound. - :rtype: openvino.runtime.Tensor + :rtype: openvino.Tensor )"); tensor.def("set_upper_value", @@ -142,7 +142,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Sets the upper bound of the tensor. :param upper_bound: Sets the upper bound value. 
- :type upper_bound: openvino.runtime.Tensor + :type upper_bound: openvino.Tensor )"); tensor.def("get_value_symbol", @@ -151,7 +151,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Returns the List of symbols. :return: List of Symbols. - :rtype: List[openvino.runtime.Symbol] + :rtype: List[openvino.Symbol] )"); tensor.def("set_value_symbol", @@ -161,7 +161,7 @@ void regclass_graph_descriptor_Tensor(py::module m) { Sets the value symbol of the tensor. :param value_symbol: List of Symbols - :type value_symbol: List[openvino.runtime.Symbol] + :type value_symbol: List[openvino.Symbol] )"); tensor.def_property_readonly("shape", &ov::descriptor::Tensor::get_shape); diff --git a/src/bindings/python/src/pyopenvino/graph/dimension.cpp b/src/bindings/python/src/pyopenvino/graph/dimension.cpp index 4c2a67c60108fd..e74dc6878ac92b 100644 --- a/src/bindings/python/src/pyopenvino/graph/dimension.cpp +++ b/src/bindings/python/src/pyopenvino/graph/dimension.cpp @@ -20,7 +20,7 @@ void regclass_graph_Dimension(py::module m) { using value_type = ov::Dimension::value_type; py::class_> dim(m, "Dimension"); - dim.doc() = "openvino.runtime.Dimension wraps ov::Dimension"; + dim.doc() = "openvino.Dimension wraps ov::Dimension"; dim.def(py::init<>()); dim.def(py::init(), py::arg("dimension"), diff --git a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp index 6cd4586fa4a2ae..60f5d21c26a73a 100644 --- a/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp +++ b/src/bindings/python/src/pyopenvino/graph/discrete_type_info.cpp @@ -15,7 +15,7 @@ namespace py = pybind11; void regclass_graph_DiscreteTypeInfo(py::module m) { py::class_> discrete_type_info(m, "DiscreteTypeInfo"); - discrete_type_info.doc() = "openvino.runtime.DiscreteTypeInfo wraps ov::DiscreteTypeInfo"; + discrete_type_info.doc() = "openvino.DiscreteTypeInfo wraps ov::DiscreteTypeInfo"; discrete_type_info.def(py::init([](const 
std::string& name, const std::string& version_id) { return std::make_shared(name, version_id); diff --git a/src/bindings/python/src/pyopenvino/graph/layout.cpp b/src/bindings/python/src/pyopenvino/graph/layout.cpp index 0c35df12caa2b9..18f6ffcbcc70ea 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout.cpp +++ b/src/bindings/python/src/pyopenvino/graph/layout.cpp @@ -14,7 +14,7 @@ namespace py = pybind11; void regclass_graph_Layout(py::module m) { py::class_> layout(m, "Layout"); - layout.doc() = "openvino.runtime.Layout wraps ov::Layout"; + layout.doc() = "openvino.Layout wraps ov::Layout"; layout.def(py::init<>()); layout.def(py::init(), py::arg("layout_str")); diff --git a/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp b/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp index 2622ee7e8d2c2e..c3c00f4a0c852f 100644 --- a/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp +++ b/src/bindings/python/src/pyopenvino/graph/layout_helpers.cpp @@ -11,7 +11,7 @@ namespace py = pybind11; void regmodule_graph_layout_helpers(py::module m) { - py::module mod = m.def_submodule("layout_helpers", "openvino.runtime.layout_helpers"); + py::module mod = m.def_submodule("layout_helpers", "openvino.layout_helpers"); mod.def("has_batch", &ov::layout::has_batch, py::arg("layout")); mod.def("batch_idx", &ov::layout::batch_idx, py::arg("layout")); diff --git a/src/bindings/python/src/pyopenvino/graph/model.cpp b/src/bindings/python/src/pyopenvino/graph/model.cpp index 30be517281d671..337eca6ee9aadd 100644 --- a/src/bindings/python/src/pyopenvino/graph/model.cpp +++ b/src/bindings/python/src/pyopenvino/graph/model.cpp @@ -104,7 +104,7 @@ static ov::Output output_from_handle(ov::Model& model, const py::handl return handle.cast>(); } else { throw py::type_error("Incorrect key type " + std::string(py::str(handle.get_type())) + - " to reshape a model, expected keys as openvino.runtime.Output, int or str."); + " to reshape a model, expected keys as 
openvino.Output, int or str."); } } @@ -116,9 +116,8 @@ static ov::PartialShape partial_shape_from_handle(const py::handle& handle) { } else if (py::isinstance(handle)) { return ov::PartialShape(handle.cast()); } else { - throw py::type_error( - "Incorrect value type " + std::string(py::str(handle.get_type())) + - " to reshape a model, expected values as openvino.runtime.PartialShape, str, list or tuple."); + throw py::type_error("Incorrect value type " + std::string(py::str(handle.get_type())) + + " to reshape a model, expected values as openvino.PartialShape, str, list or tuple."); } } @@ -153,7 +152,7 @@ static int64_t find_sink_position(const ov::SinkVector& sinks, const std::shared void regclass_graph_Model(py::module m) { py::class_> model(m, "Model", py::module_local()); - model.doc() = "openvino.runtime.Model wraps ov::Model"; + model.doc() = "openvino.Model wraps ov::Model"; model.def(py::init([](const std::shared_ptr& other) { return other; @@ -180,7 +179,7 @@ void regclass_graph_Model(py::module m) { :param results: List of results. :type results: List[op.Result] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). - :type sinks: List[openvino.runtime.Node] + :type sinks: List[openvino.Node] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -200,7 +199,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model. :param results: List of Nodes to be used as results. - :type results: List[openvino.runtime.Node] + :type results: List[openvino.Node] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -220,7 +219,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model. :param result: Node to be used as result. 
- :type result: openvino.runtime.Node + :type result: openvino.Node :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -239,7 +238,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of outputs. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -264,9 +263,9 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of outputs. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). - :type sinks: List[openvino.runtime.Node] + :type sinks: List[openvino.Node] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -291,9 +290,9 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of outputs. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param sinks: List of Output sink node handles. - :type sinks: List[openvino.runtime.Output] + :type sinks: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -318,9 +317,9 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of outputs. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param sinks: List of Output sink node handles. 
- :type sinks: List[openvino.runtime.Output] + :type sinks: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param variables: List of variables. @@ -349,7 +348,7 @@ void regclass_graph_Model(py::module m) { :param results: List of results. :type results: List[op.Result] :param sinks: List of Output sink node handles. - :type sinks: List[openvino.runtime.Output] + :type sinks: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -376,7 +375,7 @@ void regclass_graph_Model(py::module m) { :param results: List of results. :type results: List[op.Result] :param sinks: List of Output sink node handles. - :type sinks: List[openvino.runtime.Output] + :type sinks: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param variables: List of variables. @@ -405,7 +404,7 @@ void regclass_graph_Model(py::module m) { :param results: List of results. :type results: List[op.Result] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). - :type sinks: List[openvino.runtime.Node] + :type sinks: List[openvino.Node] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param variables: List of variables. @@ -432,9 +431,9 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of results. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param sinks: List of Nodes to be used as Sinks (e.g. Assign ops). - :type sinks: List[openvino.runtime.Node] + :type sinks: List[openvino.Node] :param variables: List of variables. :type variables: List[op.util.Variable] :param name: String to set as model's friendly name. 
@@ -480,7 +479,7 @@ void regclass_graph_Model(py::module m) { Create user-defined Model which is a representation of a model :param results: List of results. - :type results: List[openvino.runtime.Output] + :type results: List[openvino.Output] :param parameters: List of parameters. :type parameters: List[op.Parameter] :param name: String to set as model's friendly name. @@ -504,10 +503,10 @@ void regclass_graph_Model(py::module m) { The allowed types of keys in the `variables_shapes` dictionary is `str`. The allowed types of values in the `variables_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -515,7 +514,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds (4) - `openvino.runtime.Dimension` (5) `str` using next syntax: + `openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 '1..10' - to define bounded dimension @@ -524,7 +523,7 @@ void regclass_graph_Model(py::module m) { GIL is released while running this function. :param partial_shape: New shape. - :type partial_shape: openvino.runtime.PartialShape + :type partial_shape: openvino.PartialShape :param variables_shapes: New shapes for variables :type variables_shapes: Dict[keys, values] :return : void @@ -546,10 +545,10 @@ void regclass_graph_Model(py::module m) { The allowed types of keys in the `variables_shapes` dictionary is `str`. 
The allowed types of values in the `variables_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -557,7 +556,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds (4) - `openvino.runtime.Dimension` (5) `str` using next syntax: + `openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 '1..10' - to define bounded dimension @@ -588,10 +587,10 @@ void regclass_graph_Model(py::module m) { The allowed types of keys in the `variables_shapes` dictionary is `str`. 
The allowed types of values in the `variables_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -599,7 +598,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds (4) - `openvino.runtime.Dimension` (5) `str` using next syntax: + `openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 '1..10' - to define bounded dimension @@ -629,10 +628,10 @@ void regclass_graph_Model(py::module m) { The allowed types of keys in the `variables_shapes` dictionary is `str`. 
The allowed types of values in the `variables_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -640,7 +639,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds (4) - `openvino.runtime.Dimension` (5) `str` using next syntax: + `openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 '1..10' - to define bounded dimension @@ -677,14 +676,14 @@ void regclass_graph_Model(py::module m) { (1) `int`, input index (2) `str`, input tensor name - (3) `openvino.runtime.Output` + (3) `openvino.Output` The allowed types of values in the `partial_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -692,7 +691,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds - (4) `openvino.runtime.Dimension` + (4) 
`openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 @@ -702,10 +701,10 @@ void regclass_graph_Model(py::module m) { The allowed types of keys in the `variables_shapes` dictionary is `str`. The allowed types of values in the `variables_shapes` are: - (1) `openvino.runtime.PartialShape` + (1) `openvino.PartialShape` (2) `list` consisting of dimensions (3) `tuple` consisting of dimensions - (4) `str`, string representation of `openvino.runtime.PartialShape` + (4) `str`, string representation of `openvino.PartialShape` When list or tuple are used to describe dimensions, each dimension can be written in form: @@ -713,7 +712,7 @@ void regclass_graph_Model(py::module m) { (2) `[min, max]`, dynamic dimension where `min` specifies lower bound and `max` specifies upper bound; the range includes both `min` and `max`; using `-1` for `min` or `max` means no known bound (3) `(min, max)`, the same as above (4) `-1` is a dynamic dimension without known bounds (4) - `openvino.runtime.Dimension` (5) `str` using next syntax: + `openvino.Dimension` (5) `str` using next syntax: '?' - to define fully dynamic dimension '1' - to define dimension which length is 1 '1..10' - to define bounded dimension @@ -743,7 +742,7 @@ void regclass_graph_Model(py::module m) { Return ops used in the model. :return: List of Nodes representing ops used in model. - :rtype: List[openvino.runtime.Node] + :rtype: List[openvino.Node] )"); model.def("get_ordered_ops", &ov::Model::get_ordered_ops, @@ -751,7 +750,7 @@ void regclass_graph_Model(py::module m) { Return ops used in the model in topological order. :return: List of sorted Nodes representing ops used in model. 
- :rtype: List[openvino.runtime.Node] + :rtype: List[openvino.Node] )"); model.def("get_output_op", &ov::Model::get_output_op, @@ -762,7 +761,7 @@ void regclass_graph_Model(py::module m) { :param index: output index :type index: output index :return: Node object that generates output i - :rtype: openvino.runtime.Node + :rtype: openvino.Node )"); model.def("get_output_element_type", &ov::Model::get_output_element_type, @@ -773,7 +772,7 @@ void regclass_graph_Model(py::module m) { :param index: output index :type index: int :return: Type object of output i - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); model.def("get_output_shape", &ov::Model::get_output_shape, @@ -784,7 +783,7 @@ void regclass_graph_Model(py::module m) { :param index: element index :type index: int :return: Shape object of element i - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); model.def("get_output_partial_shape", &ov::Model::get_output_partial_shape, @@ -795,7 +794,7 @@ void regclass_graph_Model(py::module m) { :param index: element index :type index: int :return: PartialShape object of element i - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); model.def("get_parameters", &ov::Model::get_parameters, @@ -854,7 +853,7 @@ void regclass_graph_Model(py::module m) { Return -1 if `value` not matched. :param value: Output containing Node - :type value: openvino.runtime.Output + :type value: openvino.Output :return: Index for value referencing it. :rtype: int )"); @@ -867,7 +866,7 @@ void regclass_graph_Model(py::module m) { Return -1 if `value` not matched. :param value: Output containing Node - :type value: openvino.runtime.Output + :type value: openvino.Output :return: Index for value referencing it. :rtype: int )"); @@ -905,7 +904,7 @@ void regclass_graph_Model(py::module m) { Return -1 if `value` not matched. 
:param value: Output sink node handle - :type value: openvino.runtime.Output + :type value: openvino.Output :return: Index of sink node referenced by output handle. :rtype: int )"); @@ -927,7 +926,7 @@ void regclass_graph_Model(py::module m) { Return -1 if `value` not matched. :param value: Output sink node handle - :type value: openvino.runtime.Output + :type value: openvino.Output :return: Index of sink node referenced by output handle. :rtype: int )"); @@ -952,7 +951,7 @@ void regclass_graph_Model(py::module m) { Return -1 if `sink` not matched. :param sink: Sink node. - :type sink: openvino.runtime.Node + :type sink: openvino.Node :return: Index of sink node. :rtype: int )"); @@ -1153,7 +1152,7 @@ void regclass_graph_Model(py::module m) { Delete sink node from the list of sinks. Method doesn't delete node from graph. :param sink: Sink to delete. - :type sink: openvino.runtime.Node + :type sink: openvino.Node )"); model.def("remove_variable", @@ -1215,7 +1214,7 @@ void regclass_graph_Model(py::module m) { Method doesn't validate graph, it should be done manually after all changes. :param sinks: new sink nodes. - :type sinks: List[openvino.runtime.Node] + :type sinks: List[openvino.Node] )"); model.def("add_variables", @@ -1269,7 +1268,7 @@ void regclass_graph_Model(py::module m) { Return a list of model's sinks. :return: a list of model's sinks. - :rtype: List[openvino.runtime.Node] + :rtype: List[openvino.Node] )"); model.def_property_readonly( @@ -1282,7 +1281,7 @@ void regclass_graph_Model(py::module m) { Return a list of model's sinks. :return: a list of model's sinks. - :rtype: List[openvino.runtime.Node] + :rtype: List[openvino.Node] )"); model.def( @@ -1300,13 +1299,13 @@ void regclass_graph_Model(py::module m) { Evaluate the model on inputs, putting results in outputs :param output_tensors: Tensors for the outputs to compute. 
One for each result - :type output_tensors: List[openvino.runtime.Tensor] + :type output_tensors: List[openvino.Tensor] :param input_tensors: Tensors for the inputs. One for each inputs. - :type input_tensors: List[openvino.runtime.Tensor] + :type input_tensors: List[openvino.Tensor] :param evaluation_context: Storage of additional settings and attributes that can be used when evaluating the model. This additional information can be shared across nodes. - :type evaluation_context: openvino.runtime.RTMap + :type evaluation_context: openvino.RTMap :rtype: bool )"); @@ -1315,7 +1314,7 @@ void regclass_graph_Model(py::module m) { R"( Return a copy of self. :return: A copy of self. - :rtype: openvino.runtime.Model + :rtype: openvino.Model )"); model.def("__repr__", [](const ov::Model& self) { @@ -1335,7 +1334,7 @@ void regclass_graph_Model(py::module m) { Returns PyRTMap which is a dictionary of user defined runtime info. :return: A dictionary of user defined data. - :rtype: openvino.runtime.RTMap + :rtype: openvino.RTMap )"); model.def( "get_rt_info", @@ -1354,7 +1353,7 @@ void regclass_graph_Model(py::module m) { :type path: List[str] :return: A runtime attribute. - :rtype: openvino.runtime.OVAny + :rtype: openvino.OVAny )"); model.def( "get_rt_info", @@ -1369,7 +1368,7 @@ void regclass_graph_Model(py::module m) { :type path: str :return: A runtime attribute. 
- :rtype: openvino.runtime.OVAny + :rtype: openvino.OVAny )"); model.def( "has_rt_info", diff --git a/src/bindings/python/src/pyopenvino/graph/node.cpp b/src/bindings/python/src/pyopenvino/graph/node.cpp index 146acf0ff651eb..0f10f00fa7de8e 100644 --- a/src/bindings/python/src/pyopenvino/graph/node.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node.cpp @@ -41,7 +41,7 @@ PYBIND11_MAKE_OPAQUE(PyRTMap); void regclass_graph_Node(py::module m) { py::class_, PyNode> node(m, "Node", py::dynamic_attr()); - node.doc() = "openvino.runtime.Node wraps ov::Node"; + node.doc() = "openvino.Node wraps ov::Node"; node.def( "__add__", [](const std::shared_ptr& a, const std::shared_ptr b) { @@ -110,7 +110,7 @@ void regclass_graph_Node(py::module m) { }); } // Throw error with original name if stripped set_attribute was not found: - throw py::attribute_error("'openvino.runtime.Node' object has no attribute '" + name + "'"); + throw py::attribute_error("'openvino.Node' object has no attribute '" + name + "'"); } else { // regex_match is equal to "set_" if (dict_serializer.contains_attribute(stripped_name)) { return py::cpp_function([self, stripped_name](py::object& value) { @@ -123,12 +123,12 @@ void regclass_graph_Node(py::module m) { }); } // Throw error with original name if stripped set_attribute was not found: - throw py::attribute_error("'openvino.runtime.Node' object has no attribute '" + name + "'"); + throw py::attribute_error("'openvino.Node' object has no attribute '" + name + "'"); } } // If nothing was found raise AttributeError: - throw py::attribute_error("'openvino.runtime.Node' object has no attribute '" + name + "'"); + throw py::attribute_error("'openvino.Node' object has no attribute '" + name + "'"); }); node.def( @@ -146,12 +146,12 @@ void regclass_graph_Node(py::module m) { Evaluate the node on inputs, putting results in outputs :param output_tensors: Tensors for the outputs to compute. One for each result. 
- :type output_tensors: List[openvino.runtime.Tensor] + :type output_tensors: List[openvino.Tensor] :param input_tensors: Tensors for the inputs. One for each inputs. - :type input_tensors: List[openvino.runtime.Tensor] + :type input_tensors: List[openvino.Tensor] :param evaluation_context: Storage of additional settings and attributes that can be used when evaluating the function. This additional information can be shared across nodes. - :type evaluation_context: openvino.runtime.RTMap + :type evaluation_context: openvino.RTMap :rtype: bool )"); node.def( @@ -165,9 +165,9 @@ void regclass_graph_Node(py::module m) { Evaluate the function on inputs, putting results in outputs :param output_tensors: Tensors for the outputs to compute. One for each result. - :type output_tensors: List[openvino.runtime.Tensor] + :type output_tensors: List[openvino.Tensor] :param input_tensors: Tensors for the inputs. One for each inputs. - :type input_tensors: List[openvino.runtime.Tensor] + :type input_tensors: List[openvino.Tensor] :rtype: bool )"); node.def("get_instance_id", @@ -198,7 +198,7 @@ void regclass_graph_Node(py::module m) { Checks that there is exactly one output and returns it's element type. :return: Type of the output. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); node.def("input_values", &ov::Node::input_values, @@ -206,7 +206,7 @@ void regclass_graph_Node(py::module m) { Returns list of node's inputs, in order. :return: List of node's inputs - :rtype: List[openvino.runtime.Input] + :rtype: List[openvino.Input] )"); node.def("input_value", &ov::Node::input_value, @@ -217,7 +217,7 @@ void regclass_graph_Node(py::module m) { :param index: Index of Input. :type index: int :return: Input of this node. - :rtype: openvino.runtime.Input + :rtype: openvino.Input )"); node.def("get_input_size", &ov::Node::get_input_size, @@ -301,7 +301,7 @@ void regclass_graph_Node(py::module m) { :param index: Index of the output. 
:type index: int :return: Type of the output index - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); node.def("get_output_shape", &ov::Node::get_output_shape, @@ -312,7 +312,7 @@ void regclass_graph_Node(py::module m) { :param index: Index of the output. :type index: int :return: Shape of the output index - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); node.def("get_output_partial_shape", &ov::Node::get_output_partial_shape, @@ -323,7 +323,7 @@ void regclass_graph_Node(py::module m) { :param index: Index of the output. :type index: int :return: PartialShape of the output index - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); node.def("get_output_tensor", &ov::Node::get_output_tensor, @@ -384,7 +384,7 @@ void regclass_graph_Node(py::module m) { :param input_index: Index of Input. :type input_index: int :return: Input of this node. - :rtype: openvino.runtime.Input + :rtype: openvino.Input )"); node.def("inputs", (std::vector>(ov::Node::*)()) & ov::Node::inputs, @@ -392,7 +392,7 @@ void regclass_graph_Node(py::module m) { A list containing a handle for each of this node's inputs, in order. :return: List of node's inputs. - :rtype: List[openvino.runtime.Input] + :rtype: List[openvino.Input] )"); node.def("output", (ov::Output(ov::Node::*)(size_t)) & ov::Node::output, @@ -403,7 +403,7 @@ void regclass_graph_Node(py::module m) { :param output_index: Index of Output. :type output_index: int :return: Output of this node. - :rtype: openvino.runtime.Output + :rtype: openvino.Output )"); node.def("outputs", (std::vector>(ov::Node::*)()) & ov::Node::outputs, @@ -411,7 +411,7 @@ void regclass_graph_Node(py::module m) { A list containing a handle for each of this node's outputs, in order. :return: List of node's outputs. 
- :rtype: List[openvino.runtime.Output] + :rtype: List[openvino.Output] )"); node.def("get_rt_info", (PyRTMap & (ov::Node::*)()) & ov::Node::get_rt_info, @@ -420,7 +420,7 @@ void regclass_graph_Node(py::module m) { Returns PyRTMap which is a dictionary of user defined runtime info. :return: A dictionary of user defined data. - :rtype: openvino.runtime.RTMap + :rtype: openvino.RTMap )"); node.def("set_argument", &ov::Node::set_argument); diff --git a/src/bindings/python/src/pyopenvino/graph/node_input.cpp b/src/bindings/python/src/pyopenvino/graph/node_input.cpp index 1ce0aed9b2209d..21ca2cc7181169 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_input.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_input.cpp @@ -18,7 +18,7 @@ PYBIND11_MAKE_OPAQUE(PyRTMap); void regclass_graph_Input(py::module m) { py::class_, std::shared_ptr>> input(m, "Input", py::dynamic_attr()); - input.doc() = "openvino.runtime.Input wraps ov::Input"; + input.doc() = "openvino.Input wraps ov::Input"; input.def("get_node", &ov::Input::get_node, @@ -26,7 +26,7 @@ void regclass_graph_Input(py::module m) { Get node referenced by this input handle. :return: Node object referenced by this input handle. - :rtype: openvino.runtime.Node + :rtype: openvino.Node )"); input.def("get_index", &ov::Input::get_index, @@ -42,7 +42,7 @@ void regclass_graph_Input(py::module m) { The element type of the input referred to by this input handle. :return: Type of the input. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); input.def("get_shape", &ov::Input::get_shape, @@ -50,7 +50,7 @@ void regclass_graph_Input(py::module m) { The shape of the input referred to by this input handle. :return: Shape of the input. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); input.def("get_partial_shape", &ov::Input::get_partial_shape, @@ -58,7 +58,7 @@ void regclass_graph_Input(py::module m) { The partial shape of the input referred to by this input handle. 
:return: PartialShape of the input. - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); input.def("get_source_output", &ov::Input::get_source_output, @@ -66,7 +66,7 @@ void regclass_graph_Input(py::module m) { A handle to the output that is connected to this input. :return: Output that is connected to the input. - :rtype: openvino.runtime.Output + :rtype: openvino.Output )"); input.def("get_tensor", &ov::Input::get_tensor, @@ -84,7 +84,7 @@ void regclass_graph_Input(py::module m) { Returns RTMap which is a dictionary of user defined runtime info. :return: A dictionary of user defined data. - :rtype: openvino.runtime.RTMap + :rtype: openvino.RTMap )"); input.def("replace_source_output", &ov::Input::replace_source_output, @@ -93,7 +93,7 @@ void regclass_graph_Input(py::module m) { Replaces the source output of this input. :param new_source_output: A handle for the output that will replace this input's source. - :type new_source_output: openvino.runtime.Input + :type new_source_output: openvino.Input )"); input.def_property_readonly("rt_info", (ov::RTMap & (ov::Input::*)()) & ov::Input::get_rt_info); input.def_property_readonly("rt_info", diff --git a/src/bindings/python/src/pyopenvino/graph/node_output.cpp b/src/bindings/python/src/pyopenvino/graph/node_output.cpp index aab89f9acc9fd5..b2bd472b484a1b 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_output.cpp +++ b/src/bindings/python/src/pyopenvino/graph/node_output.cpp @@ -49,7 +49,7 @@ void def_type_dependent_functions( Removes a target input from the output referenced by this output handle. :param target_input: The target input to remove. - :type target_input: openvino.runtime.Output + :type target_input: openvino.Output )"); output.def("replace", &ov::Output::replace, @@ -58,6 +58,6 @@ void def_type_dependent_functions( Replace all users of this value with replacement. :param replacement: The node that is a replacement. 
- :type replacement: openvino.runtime.Output + :type replacement: openvino.Output )"); } diff --git a/src/bindings/python/src/pyopenvino/graph/node_output.hpp b/src/bindings/python/src/pyopenvino/graph/node_output.hpp index c8af3582026147..f67c821093e84a 100644 --- a/src/bindings/python/src/pyopenvino/graph/node_output.hpp +++ b/src/bindings/python/src/pyopenvino/graph/node_output.hpp @@ -36,7 +36,7 @@ template void regclass_graph_Output(py::module m, std::string typestring) { auto pyclass_name = py::detail::c_str((typestring + std::string("Output"))); - auto docs = py::detail::c_str(std::string("openvino.runtime.") + typestring + std::string("Output represents port/node output.")); + auto docs = py::detail::c_str(std::string("openvino.") + typestring + std::string("Output represents port/node output.")); py::class_, std::shared_ptr>> output(m, pyclass_name, py::dynamic_attr()); @@ -59,7 +59,7 @@ void regclass_graph_Output(py::module m, std::string typestring) }); output.def("__deepcopy__", [typestring](ov::Output& self, py::dict& memo) { - throw py::type_error("Cannot deepcopy 'openvino.runtime." + typestring + "Output' object."); + throw py::type_error("Cannot deepcopy 'openvino." + typestring + "Output' object."); }); output.def("get_node", @@ -68,7 +68,7 @@ void regclass_graph_Output(py::module m, std::string typestring) Get node referenced by this output handle. :return: Node object referenced by this output handle. - :rtype: openvino.runtime.Node + :rtype: openvino.Node )"); output.def("get_index", &ov::Output::get_index, @@ -101,7 +101,7 @@ void regclass_graph_Output(py::module m, std::string typestring) The element type of the output referred to by this output handle. :return: Type of the output. - :rtype: openvino.runtime.Type + :rtype: openvino.Type )"); output.def("get_shape", &ov::Output::get_shape, @@ -110,7 +110,7 @@ void regclass_graph_Output(py::module m, std::string typestring) The shape of the output referred to by this output handle. 
:return: Copy of Shape of the output. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); output.def("get_partial_shape", &ov::Output::get_partial_shape, @@ -119,7 +119,7 @@ void regclass_graph_Output(py::module m, std::string typestring) The partial shape of the output referred to by this output handle. :return: Copy of PartialShape of the output. - :rtype: openvino.runtime.PartialShape + :rtype: openvino.PartialShape )"); output.def("get_target_inputs", &ov::Output::get_target_inputs, @@ -128,7 +128,7 @@ void regclass_graph_Output(py::module m, std::string typestring) referenced by this output handle. :return: Set of Inputs. - :rtype: Set[openvino.runtime.Input] + :rtype: Set[openvino.Input] )"); output.def("_from_node", [](const std::shared_ptr& node) { return ov::Output(node); @@ -149,7 +149,7 @@ void regclass_graph_Output(py::module m, std::string typestring) Returns RTMap which is a dictionary of user defined runtime info. :return: A dictionary of user defined data. - :rtype: openvino.runtime.RTMap + :rtype: openvino.RTMap )"); output.def("__repr__", [](const ov::Output& self) { std::stringstream shape_type_ss; diff --git a/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp b/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp index 52320ddb439ee9..132ec1119d29c6 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/assign.cpp @@ -15,7 +15,7 @@ namespace py = pybind11; void regclass_graph_op_Assign(py::module m) { py::class_, ov::Node> assign(m, "assign"); - assign.doc() = "openvino.runtime.op.assign wraps ov::op::v6::Assign"; + assign.doc() = "openvino.op.assign wraps ov::op::v6::Assign"; assign.def(py::init<>()); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp index 047b8c5d3e9510..c5f704cf8bce96 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp +++ 
b/src/bindings/python/src/pyopenvino/graph/ops/constant.cpp @@ -60,7 +60,7 @@ void regclass_graph_op_Constant(py::module m) { py::class_, ov::Node> constant(m, "Constant", py::buffer_protocol()); - constant.doc() = "openvino.runtime.op.Constant wraps ov::op::v0::Constant"; + constant.doc() = "openvino.op.Constant wraps ov::op::v0::Constant"; // Numpy-based constructor constant.def(py::init([](py::array& array, bool shared_memory) { return Common::object_from_data(array, shared_memory); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp index 72f7bcd8dab164..1a6cc25ff570c0 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/if.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/if.cpp @@ -26,7 +26,7 @@ void regclass_graph_op_If(py::module m) { Constructs If with condition. :param execution_condition: condition node. - :type execution_condition: openvino.runtime.Output + :type execution_condition: openvino.Output :rtype: openvino.impl.op.If )"); @@ -45,7 +45,7 @@ void regclass_graph_op_If(py::module m) { Constructs If with condition. :param execution_condition: condition node. 
- :type execution_condition: openvino.runtime.Node + :type execution_condition: openvino.Node :rtype: openvino.impl.op.If )"); @@ -54,7 +54,7 @@ void regclass_graph_op_If(py::module m) { "get_then_body", [](ov::op::v8::If& self) { auto model = self.get_then_body(); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }, R"( @@ -68,7 +68,7 @@ void regclass_graph_op_If(py::module m) { "get_else_body", [](ov::op::v8::If& self) { auto model = self.get_else_body(); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }, R"( @@ -119,13 +119,13 @@ void regclass_graph_op_If(py::module m) { Sets new input to the operation associated with parameters of each sub-graphs. :param value: input to operation. - :type value: openvino.runtime.Output + :type value: openvino.Output :param then_result: parameter for then_body or nullptr. - :type then_result: openvino.runtime.Node + :type then_result: openvino.Node :param else_result: parameter for else_body or nullptr. - :type else_result: openvino.runtime.Node + :type else_result: openvino.Node :rtype: None )"); @@ -144,14 +144,14 @@ void regclass_graph_op_If(py::module m) { :type else_result: op.Result :return: output from operation. - :rtype: openvino.runtime.Output + :rtype: openvino.Output )"); cls.def( "get_function", [](ov::op::v8::If& self, size_t index) { auto model = self.get_function(index); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }, py::arg("index"), @@ -199,9 +199,9 @@ void regclass_graph_op_If(py::module m) { :type index: int :param inputs: list of input descriptions. 
- :type inputs: list[Union[openvino.runtime.op.util.MergedInputDescription, - openvino.runtime.op.util.InvariantInputDescription, - openvino.runtime.op.util.SliceInputDescription]] + :type inputs: list[Union[openvino.op.util.MergedInputDescription, + openvino.op.util.InvariantInputDescription, + openvino.op.util.SliceInputDescription]] :rtype: None )"); @@ -220,8 +220,8 @@ void regclass_graph_op_If(py::module m) { :type index: int :param outputs: list of output descriptions. - :type outputs: list[Union[openvino.runtime.op.util.BodyOutputDescription, - openvino.runtime.op.util.ConcatOutputDescription]] + :type outputs: list[Union[openvino.op.util.BodyOutputDescription, + openvino.op.util.ConcatOutputDescription]] :rtype: None )"); @@ -245,8 +245,8 @@ void regclass_graph_op_If(py::module m) { :type index: int :return: list of output descriptions. - :rtype: list[Union[openvino.runtime.op.util.BodyOutputDescription, - openvino.runtime.op.util.ConcatOutputDescription]] + :rtype: list[Union[openvino.op.util.BodyOutputDescription, + openvino.op.util.ConcatOutputDescription]] )"); cls.def( @@ -268,9 +268,9 @@ void regclass_graph_op_If(py::module m) { :type index: int :return: list of input descriptions. 
- :rtype: list[Union[openvino.runtime.op.util.MergedInputDescription, - openvino.runtime.op.util.InvariantInputDescription, - openvino.runtime.op.util.SliceInputDescription]] + :rtype: list[Union[openvino.op.util.MergedInputDescription, + openvino.op.util.InvariantInputDescription, + openvino.op.util.SliceInputDescription]] )"); cls.def("__repr__", [](const ov::op::v8::If& self) { diff --git a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp index 2cb7334653e9f8..1d815bc7b37eee 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/loop.cpp @@ -86,7 +86,7 @@ void regclass_graph_op_Loop(py::module m) { cls.def("get_function", [](const std::shared_ptr& self) { auto model = self->get_function(); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp b/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp index 45473339b0e882..fe84bc31c20597 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/parameter.cpp @@ -18,7 +18,7 @@ namespace py = pybind11; void regclass_graph_op_Parameter(py::module m) { py::class_, ov::Node> parameter(m, "Parameter"); - parameter.doc() = "openvino.runtime.op.Parameter wraps ov::op::v0::Parameter"; + parameter.doc() = "openvino.op.Parameter wraps ov::op::v0::Parameter"; parameter.def("__repr__", [](const ov::Node& self) { std::string class_name = py::cast(self).get_type().attr("__name__").cast(); std::string shape = py::cast(self.get_output_partial_shape(0)).attr("__str__")().cast(); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/read_value.cpp b/src/bindings/python/src/pyopenvino/graph/ops/read_value.cpp index 
53f665c512ff19..99b96e376321d1 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/read_value.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/read_value.cpp @@ -19,7 +19,7 @@ namespace py = pybind11; void regclass_graph_op_ReadValue(py::module m) { py::class_, ov::Node> read_value(m, "read_value"); - read_value.doc() = "openvino.runtime.op.read_value wraps ov::op::v6::ReadValue"; + read_value.doc() = "openvino.op.read_value wraps ov::op::v6::ReadValue"; read_value.def(py::init<>()); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp index 113d464ccad898..79f419006473d7 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/result.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/result.cpp @@ -18,7 +18,7 @@ namespace py = pybind11; void regclass_graph_op_Result(py::module m) { py::class_, ov::Node> result(m, "Result"); - result.doc() = "openvino.runtime.op.Result wraps ov::op::v0::Result"; + result.doc() = "openvino.op.Result wraps ov::op::v0::Result"; result.def(py::init&>()); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp index de4db8da979d7a..902f4edbb1b726 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/tensor_iterator.cpp @@ -63,13 +63,13 @@ void regclass_graph_op_TensorIterator(py::module m) { cls.def("get_body", [](const std::shared_ptr& self) { auto model = self->get_body(); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }); cls.def("get_function", [](const std::shared_ptr& self) { auto model = self->get_function(); - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = 
py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }); diff --git a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp index 456d36730aba51..7adf90d18eb5cb 100644 --- a/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp +++ b/src/bindings/python/src/pyopenvino/graph/ops/util/variable.cpp @@ -15,7 +15,7 @@ namespace py = pybind11; void regclass_graph_op_util_Variable(py::module m) { py::class_ variable_info(m, "VariableInfo"); - variable_info.doc() = "openvino.runtime.op.util.VariableInfo wraps ov::op::util::VariableInfo"; + variable_info.doc() = "openvino.op.util.VariableInfo wraps ov::op::util::VariableInfo"; variable_info.def(py::init<>()); variable_info.def_readwrite("data_shape", &ov::op::util::VariableInfo::data_shape); variable_info.def_readwrite("data_type", &ov::op::util::VariableInfo::data_type); @@ -25,7 +25,7 @@ void regclass_graph_op_util_Variable(py::module m) { }); py::class_> variable(m, "Variable"); - variable.doc() = "openvino.runtime.op.util.Variable wraps ov::op::util::Variable"; + variable.doc() = "openvino.op.util.Variable wraps ov::op::util::Variable"; variable.def(py::init([](const ov::op::util::VariableInfo& info) { return ov::op::util::Variable{info}; }), diff --git a/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp b/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp index 73fc52f51f976a..9e3410600215bb 100644 --- a/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp +++ b/src/bindings/python/src/pyopenvino/graph/partial_shape.cpp @@ -31,7 +31,7 @@ bool compare_shape(const ov::PartialShape& a, const T& b) { void regclass_graph_PartialShape(py::module m) { py::class_> shape(m, "PartialShape"); - shape.doc() = "openvino.runtime.PartialShape wraps ov::PartialShape"; + shape.doc() = "openvino.PartialShape wraps ov::PartialShape"; shape.def(py::init()); shape.def(py::init()); @@ -102,7 +102,7 @@ void 
regclass_graph_PartialShape(py::module m) { whether it is possible to merge them. :param shape: The shape to be checked for compatibility with this shape. - :type shape: openvino.runtime.PartialShape + :type shape: openvino.PartialShape :return: True if this shape is compatible with s, else False. :rtype: bool )"); @@ -113,7 +113,7 @@ void regclass_graph_PartialShape(py::module m) { Check whether this shape is a refinement of the argument. :param shape: The shape which is being compared against this shape. - :type shape: openvino.runtime.PartialShape + :type shape: openvino.PartialShape :return: True if this shape refines s, else False. :rtype: bool )"); @@ -124,7 +124,7 @@ void regclass_graph_PartialShape(py::module m) { Check whether this shape is a relaxation of the argument. :param shape: The shape which is being compared against this shape. - :type shape: openvino.runtime.PartialShape + :type shape: openvino.PartialShape :return: True if this shape relaxes s, else False. :rtype: bool )"); @@ -135,7 +135,7 @@ void regclass_graph_PartialShape(py::module m) { Check whether this shape represents the same scheme as the argument. :param shape: The shape which is being compared against this shape. - :type shape: openvino.runtime.PartialShape + :type shape: openvino.PartialShape :return: True if shape represents the same scheme as s, else False. :rtype: bool )"); @@ -143,25 +143,25 @@ void regclass_graph_PartialShape(py::module m) { &ov::PartialShape::get_max_shape, R"( :return: Get the max bounding shape. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); shape.def("get_min_shape", &ov::PartialShape::get_min_shape, R"( :return: Get the min bounding shape. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); shape.def("get_shape", &ov::PartialShape::get_shape, R"( :return: Get the unique shape. - :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); shape.def("to_shape", &ov::PartialShape::to_shape, R"( :return: Get the unique shape. 
- :rtype: openvino.runtime.Shape + :rtype: openvino.Shape )"); shape.def( "get_dimension", @@ -175,7 +175,7 @@ void regclass_graph_PartialShape(py::module m) { :param index: The index of dimension. :type index: int :return: Get the particular dimension of a partial shape. - :rtype: openvino.runtime.Dimension + :rtype: openvino.Dimension )"); shape.def( diff --git a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp index 6f40f17828673c..d1a92cb3c1911d 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/graph_rewrite.cpp @@ -16,7 +16,7 @@ void regclass_passes_GraphRewrite(py::module m) { py::class_, ov::pass::ModelPass, ov::pass::PassBase> graph_rewrite(m, "GraphRewrite"); graph_rewrite.doc() = - "openvino.runtime.passes.GraphRewrite executes sequence of MatcherPass transformations in topological order"; + "openvino.passes.GraphRewrite executes sequence of MatcherPass transformations in topological order"; graph_rewrite.def(py::init<>()); graph_rewrite.def(py::init([](const std::shared_ptr& pass) { @@ -26,8 +26,8 @@ void regclass_passes_GraphRewrite(py::module m) { R"( Register single MatcherPass pass inside GraphRewrite. - :param pass: openvino.runtime.passes.MatcherPass instance - :type pass: openvino.runtime.passes.MatcherPass + :param pass: openvino.passes.MatcherPass instance + :type pass: openvino.passes.MatcherPass )"); graph_rewrite.def("add_matcher", @@ -37,8 +37,8 @@ void regclass_passes_GraphRewrite(py::module m) { R"( Register single MatcherPass pass inside GraphRewrite. 
- :param pass: openvino.runtime.passes.MatcherPass instance - :type pass: openvino.runtime.passes.MatcherPass + :param pass: openvino.passes.MatcherPass instance + :type pass: openvino.passes.MatcherPass )"); py::class_ back_graph_rewrite(m, "BackwardGraphRewrite"); - back_graph_rewrite.doc() = "openvino.runtime.passes.BackwardGraphRewrite executes sequence of MatcherPass " + back_graph_rewrite.doc() = "openvino.passes.BackwardGraphRewrite executes sequence of MatcherPass " "transformations in reversed topological order"; back_graph_rewrite.def(py::init<>()); @@ -58,8 +58,8 @@ void regclass_passes_GraphRewrite(py::module m) { R"( Register single MatcherPass pass inside BackwardGraphRewrite. - :param pass: openvino.runtime.passes.MatcherPass instance - :type pass: openvino.runtime.passes.MatcherPass + :param pass: openvino.passes.MatcherPass instance + :type pass: openvino.passes.MatcherPass )"); back_graph_rewrite.def( @@ -70,8 +70,8 @@ void regclass_passes_GraphRewrite(py::module m) { R"( Register single MatcherPass pass inside BackwardGraphRewrite. 
- :param pass: openvino.runtime.passes.MatcherPass instance - :type pass: openvino.runtime.passes.MatcherPass + :param pass: openvino.passes.MatcherPass instance + :type pass: openvino.passes.MatcherPass )"); back_graph_rewrite.def("__repr__", [](const ov::pass::BackwardGraphRewrite& self) { diff --git a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp index 6e3ff2a201f8f5..8b07a2ba814860 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/manager.cpp @@ -22,7 +22,7 @@ using FilePaths = std::pair; void regclass_passes_Manager(py::module m) { py::class_ manager(m, "Manager"); - manager.doc() = "openvino.runtime.passes.Manager executes sequence of transformation on a given Model"; + manager.doc() = "openvino.passes.Manager executes sequence of transformation on a given Model"; manager.def(py::init<>()); manager.def("set_per_pass_validation", @@ -45,8 +45,8 @@ void regclass_passes_Manager(py::module m) { R"( Executes sequence of transformations on given Model. - :param model: openvino.runtime.Model to be transformed. - :type model: openvino.runtime.Model + :param model: openvino.Model to be transformed. + :type model: openvino.Model )"); manager.def("register_pass", @@ -56,7 +56,7 @@ void regclass_passes_Manager(py::module m) { Register pass instance for execution. Execution order matches the registration order. :param transformation: transformation instance. 
- :type transformation: openvino.runtime.passes.PassBase + :type transformation: openvino.passes.PassBase )"); manager.def("__repr__", [](const ov::pass::Manager& self) { diff --git a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp index 88c0304fd04ea0..afa4292d3261ab 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/matcher_pass.cpp @@ -19,7 +19,7 @@ namespace py = pybind11; void regclass_passes_Matcher(py::module m) { py::class_> matcher(m, "Matcher"); - matcher.doc() = "openvino.runtime.passes.Matcher wraps ov::pass::pattern::Matcher"; + matcher.doc() = "openvino.passes.Matcher wraps ov::pass::pattern::Matcher"; matcher.def(py::init([](const std::shared_ptr& node, const std::string& name) { return std::make_shared(node, name); }), @@ -30,7 +30,7 @@ void regclass_passes_Matcher(py::module m) { Matcher object is used for pattern matching on Model. :param node: pattern root node. - :type node: openvino.runtime.Node + :type node: openvino.Node :param name: pattern name. Usually matches the MatcherPass class name. :type name: str @@ -46,7 +46,7 @@ void regclass_passes_Matcher(py::module m) { Matcher object is used for pattern matching on Model. :param node: pattern root node output. - :type node: openvino.runtime.Output + :type node: openvino.Output :param name: pattern name. Usually matches the MatcherPass class name. :type name: str @@ -57,7 +57,7 @@ void regclass_passes_Matcher(py::module m) { R"( Get Matcher name. - :return: openvino.runtime.passes.Matcher name. + :return: openvino.passes.Matcher name. :rtype: str )"); @@ -67,7 +67,7 @@ void regclass_passes_Matcher(py::module m) { Get matched root node inside Model. Should be used after match() method is called. :return: matched node. 
- :rtype: openvino.runtime.Node + :rtype: openvino.Node )"); matcher.def("get_match_value", @@ -76,7 +76,7 @@ void regclass_passes_Matcher(py::module m) { Get matched node output inside Model. Should be used after match() method is called. :return: matched node output. - :rtype: openvino.runtime.Output + :rtype: openvino.Output )"); matcher.def("get_match_nodes", @@ -85,7 +85,7 @@ void regclass_passes_Matcher(py::module m) { Get NodeVector of matched nodes. Should be used after match() method is called. :return: matched nodes vector. - :rtype: List[openvino.runtime.Node] + :rtype: List[openvino.Node] )"); matcher.def("get_match_values", @@ -95,7 +95,7 @@ void regclass_passes_Matcher(py::module m) { Get OutputVector of matched outputs. Should be used after match() method is called. :return: matched outputs vector. - :rtype: List[openvino.runtime.Output] + :rtype: List[openvino.Output] )"); matcher.def("get_pattern_value_map", @@ -140,7 +140,7 @@ class PyMatcherPass : public ov::pass::MatcherPass { void regclass_passes_MatcherPass(py::module m) { py::class_, ov::pass::PassBase, PyMatcherPass> matcher_pass(m, "MatcherPass"); - matcher_pass.doc() = "openvino.runtime.passes.MatcherPass wraps ov::pass::MatcherPass"; + matcher_pass.doc() = "openvino.passes.MatcherPass wraps ov::pass::MatcherPass"; matcher_pass.def(py::init<>()); matcher_pass.def( py::init([](const std::shared_ptr& m, ov::matcher_pass_callback callback) { @@ -151,14 +151,14 @@ void regclass_passes_MatcherPass(py::module m) { R"( Create MatcherPass from existing Matcher and callback objects. - :param matcher: openvino.runtime.passes.Matcher with registered pattern. - :type matcher: openvino.runtime.passes.Matcher + :param matcher: openvino.passes.Matcher with registered pattern. + :type matcher: openvino.passes.Matcher :param callback: Function that performs transformation on the matched nodes. :type callback: function - :return: created openvino.runtime.passes.MatcherPass instance. 
- :rtype: openvino.runtime.passes.MatcherPass + :return: created openvino.passes.MatcherPass instance. + :rtype: openvino.passes.MatcherPass )"); matcher_pass.def("apply", @@ -177,11 +177,11 @@ void regclass_passes_MatcherPass(py::module m) { R"( Register node for additional pattern matching. - :param node: openvino.runtime.Node for matching. - :type node: openvino.runtime.Node + :param node: openvino.Node for matching. + :type node: openvino.Node :return: registered node instance - :rtype: openvino.runtime.Node + :rtype: openvino.Node )"); matcher_pass.def("register_matcher", @@ -193,8 +193,8 @@ void regclass_passes_MatcherPass(py::module m) { R"( Initialize matcher and callback for further execution. - :param matcher: openvino.runtime.passes.Matcher with registered pattern. - :type matcher: openvino.runtime.passes.Matcher + :param matcher: openvino.passes.Matcher with registered pattern. + :type matcher: openvino.passes.Matcher :param callback: Function that performs transformation on the matched nodes. :type callback: function diff --git a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp index bbb985f888bd40..8947a4871e59dc 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/model_pass.cpp @@ -32,7 +32,7 @@ void regclass_passes_ModelPass(py::module m) { py::class_, ov::pass::PassBase, PyModelPass> model_pass( m, "ModelPass"); - model_pass.doc() = "openvino.runtime.passes.ModelPass wraps ov::pass::ModelPass"; + model_pass.doc() = "openvino.passes.ModelPass wraps ov::pass::ModelPass"; model_pass.def(py::init<>()); model_pass.def("run_on_model", &ov::pass::ModelPass::run_on_model, @@ -40,8 +40,8 @@ void regclass_passes_ModelPass(py::module m) { R"( run_on_model must be defined in inherited class. This method is used to work with Model directly. - :param model: openvino.runtime.Model to be transformed. 
- :type model: openvino.runtime.Model + :param model: openvino.Model to be transformed. + :type model: openvino.Model :return: True in case if Model was changed and False otherwise. :rtype: bool diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp index 0262254839be75..1823f570c9e18a 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pass_base.cpp @@ -15,7 +15,7 @@ namespace py = pybind11; void regclass_passes_PassBase(py::module m) { py::class_> pass_base(m, "PassBase"); - pass_base.doc() = "openvino.runtime.passes.PassBase wraps ov::pass::PassBase"; + pass_base.doc() = "openvino.passes.PassBase wraps ov::pass::PassBase"; pass_base.def("set_name", &ov::pass::PassBase::set_name, py::arg("name"), diff --git a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp index 59fa656432e7ea..fa9dbab63e7b89 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/pattern_ops.cpp @@ -59,7 +59,7 @@ static void reg_pattern_wrap_type(py::module m) { py::class_, ov::Node> wrap_type( m, "WrapType"); - wrap_type.doc() = "openvino.runtime.passes.WrapType wraps ov::pass::pattern::op::WrapType"; + wrap_type.doc() = "openvino.passes.WrapType wraps ov::pass::pattern::op::WrapType"; wrap_type.def(py::init([](const std::string& type_name) { return std::make_shared(get_type(type_name)); @@ -101,7 +101,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param input: Node output. - :type input: openvino.runtime.Output + :type input: openvino.Output )"); wrap_type.def(py::init([](const std::string& type_name, const std::shared_ptr& input) { @@ -118,7 +118,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param input: Input node. 
- :type input: openvino.runtime.Node + :type input: openvino.Node )"); wrap_type.def(py::init([](const std::string& type_name, const ov::Output& input, const Predicate& pred) { @@ -136,7 +136,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param input: Node output. - :type input: openvino.runtime.Output + :type input: openvino.Output :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -158,7 +158,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param input: Input node. - :type input: openvino.runtime.Node + :type input: openvino.Node :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -176,7 +176,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param inputs: Node outputs. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] )"); wrap_type.def(py::init([](const std::string& type_name, const ov::NodeVector& inputs) { @@ -193,7 +193,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param inputs: Input nodes. - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] )"); wrap_type.def(py::init([](const std::string& type_name, const ov::OutputVector& inputs, const Predicate& pred) { @@ -209,7 +209,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param inputs: Node outputs. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -230,7 +230,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: str :param inputs: Input nodes. - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] :param predicate: Function that performs additional checks for matching. 
:type predicate: function @@ -276,7 +276,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param input: Node output. - :type input: openvino.runtime.Output + :type input: openvino.Output )"); wrap_type.def(py::init([](const std::vector& type_names, const std::shared_ptr& input) { @@ -293,7 +293,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_name: List[str] :param input: Input node. - :type input: openvino.runtime.Node + :type input: openvino.Node )"); wrap_type.def( @@ -313,7 +313,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param input: Node output. - :type input: openvino.runtime.Output + :type input: openvino.Output :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -336,7 +336,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param input: Input node. - :type input: openvino.runtime.Node + :type input: openvino.Node :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -354,7 +354,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param inputs: Nodes outputs. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] )"); wrap_type.def(py::init([](const std::vector& type_names, const ov::NodeVector& inputs) { @@ -371,7 +371,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param inputs: Input nodes. - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] )"); wrap_type.def( @@ -388,7 +388,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param inputs: Nodes outputs. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] :param predicate: Function that performs additional checks for matching. 
:type predicate: function @@ -410,7 +410,7 @@ static void reg_pattern_wrap_type(py::module m) { :type type_names: List[str] :param inputs: Input nodes. - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -423,7 +423,7 @@ static void reg_pattern_wrap_type(py::module m) { static void reg_pattern_or(py::module m) { py::class_, ov::Node> or_type(m, "Or"); - or_type.doc() = "openvino.runtime.passes.Or wraps ov::pass::pattern::op::Or"; + or_type.doc() = "openvino.passes.Or wraps ov::pass::pattern::op::Or"; or_type.def(py::init([](const ov::OutputVector& inputs) { return std::make_shared(inputs); @@ -433,7 +433,7 @@ static void reg_pattern_or(py::module m) { Create pattern Or operation which is used to match any of given inputs. :param inputs: Operation inputs. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] )"); or_type.def(py::init([](const ov::NodeVector& inputs) { @@ -444,7 +444,7 @@ static void reg_pattern_or(py::module m) { Create pattern Or operation which is used to match any of given inputs. :param inputs: Operation inputs. 
- :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] )"); or_type.def("__repr__", [](const ov::pass::pattern::op::Or& self) { @@ -456,7 +456,7 @@ static void reg_pattern_any_input(py::module m) { py::class_, ov::Node> any_input( m, "AnyInput"); - any_input.doc() = "openvino.runtime.passes.AnyInput wraps ov::pass::pattern::op::Label"; + any_input.doc() = "openvino.passes.AnyInput wraps ov::pass::pattern::op::Label"; any_input.def(py::init([]() { return std::make_shared(); @@ -486,7 +486,7 @@ static void reg_pattern_any_input(py::module m) { static void reg_pattern_optional(py::module m) { py::class_, ov::Node> optional_type(m, "Optional"); - optional_type.doc() = "openvino.runtime.passes.Optional wraps ov::pass::pattern::op::Optional"; + optional_type.doc() = "openvino.passes.Optional wraps ov::pass::pattern::op::Optional"; optional_type.def(py::init([](const std::vector& type_names) { return std::make_shared(get_types(type_names)); @@ -513,7 +513,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param input: input node's output. - :type input: openvino.runtime.Output + :type input: openvino.Output )"); optional_type.def(py::init([](const std::vector& type_names, const std::shared_ptr& input) { @@ -530,7 +530,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param input: input node. - :type input: openvino.runtime.Node + :type input: openvino.Node )"); optional_type.def( @@ -546,7 +546,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param inputs: input node's output list. 
- :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] )"); optional_type.def(py::init([](const std::vector& type_names, const ov::NodeVector& inputs) { @@ -563,7 +563,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param inputs: input node list - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] )"); optional_type.def(py::init([](const std::vector& type_names, const Predicate& predicate) { @@ -600,7 +600,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param input: input node's output. - :type input: openvino.runtime.Output + :type input: openvino.Output :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -623,7 +623,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param input: input node - :type input: openvino.runtime.Node + :type input: openvino.Node :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -644,7 +644,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param inputs: input node's output list. - :type inputs: List[openvino.runtime.Output] + :type inputs: List[openvino.Output] :param predicate: Function that performs additional checks for matching. :type predicate: function @@ -666,7 +666,7 @@ static void reg_pattern_optional(py::module m) { :type type_names: List[str] :param inputs: input node list - :type inputs: List[openvino.runtime.Node] + :type inputs: List[openvino.Node] :param predicate: Function that performs additional checks for matching. 
:type predicate: function diff --git a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp index 511144931c37ec..39a47bda2af389 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/regmodule_graph_passes.cpp @@ -17,7 +17,7 @@ namespace py = pybind11; void regmodule_graph_passes(py::module m) { - py::module m_passes = m.def_submodule("passes", "Package openvino.runtime.passes wraps ov::passes"); + py::module m_passes = m.def_submodule("passes", "Package openvino.passes wraps ov::passes"); regclass_passes_PassBase(m_passes); regclass_passes_ModelPass(m_passes); regclass_passes_GraphRewrite(m_passes); diff --git a/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp b/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp index 7c8797fc4ecc9c..574ab29ebd3db0 100644 --- a/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp +++ b/src/bindings/python/src/pyopenvino/graph/passes/transformations.cpp @@ -31,7 +31,7 @@ void regclass_transformations(py::module m) { py::class_, ov::pass::ModelPass, ov::pass::PassBase> serialize(m, "Serialize"); - serialize.doc() = "openvino.runtime.passes.Serialize transformation"; + serialize.doc() = "openvino.passes.Serialize transformation"; serialize.def( py::init([](const py::object& path_to_xml, const py::object& path_to_bin, const py::object& version) { @@ -62,7 +62,7 @@ void regclass_transformations(py::module m) { :type path_to_xml: Union[str, bytes, pathlib.Path] :param version: Optional serialized IR version. 
- :type version: Union[str, openvino.runtime.passes.Version] + :type version: Union[str, openvino.passes.Version] )"); serialize.def("__repr__", [](const ov::pass::Serialize& self) { @@ -74,7 +74,7 @@ void regclass_transformations(py::module m) { ov::pass::ModelPass, ov::pass::PassBase> cf(m, "ConstantFolding"); - cf.doc() = "openvino.runtime.passes.ConstantFolding transformation"; + cf.doc() = "openvino.passes.ConstantFolding transformation"; cf.def(py::init<>()); cf.def("__repr__", [](const ov::pass::ConstantFolding& self) { return Common::get_simple_repr(self); @@ -85,7 +85,7 @@ void regclass_transformations(py::module m) { ov::pass::ModelPass, ov::pass::PassBase> visualize(m, "VisualizeTree"); - visualize.doc() = "openvino.runtime.passes.VisualizeTree transformation"; + visualize.doc() = "openvino.passes.VisualizeTree transformation"; visualize.def(py::init(), py::arg("file_name"), py::arg("nm") = nullptr, @@ -108,7 +108,7 @@ void regclass_transformations(py::module m) { py::class_, ov::pass::ModelPass, ov::pass::PassBase> make_stateful(m, "MakeStateful"); - make_stateful.doc() = "openvino.runtime.passes.MakeStateful transformation"; + make_stateful.doc() = "openvino.passes.MakeStateful transformation"; make_stateful.def( py::init(), py::arg("pairs_to_replace"), @@ -131,7 +131,7 @@ void regclass_transformations(py::module m) { py::class_, ov::pass::ModelPass, ov::pass::PassBase> low_latency(m, "LowLatency2"); - low_latency.doc() = "openvino.runtime.passes.LowLatency2 transformation"; + low_latency.doc() = "openvino.passes.LowLatency2 transformation"; low_latency.def(py::init(), py::arg("use_const_initializer") = true, @@ -156,7 +156,7 @@ void regclass_transformations(py::module m) { ov::pass::ModelPass, ov::pass::PassBase> convert(m, "ConvertFP32ToFP16"); - convert.doc() = "openvino.runtime.passes.ConvertFP32ToFP16 transformation"; + convert.doc() = "openvino.passes.ConvertFP32ToFP16 transformation"; convert.def(py::init<>()); convert.def("__repr__", [](const 
ov::pass::ConvertFP32ToFP16& self) { return Common::get_simple_repr(self); diff --git a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp index 8daa1ff55ffce8..eda38c43759339 100644 --- a/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp +++ b/src/bindings/python/src/pyopenvino/graph/preprocess/pre_post_process.cpp @@ -22,7 +22,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { py::class_> steps( m, "PreProcessSteps"); - steps.doc() = "openvino.runtime.preprocess.PreProcessSteps wraps ov::preprocess::PreProcessSteps"; + steps.doc() = "openvino.preprocess.PreProcessSteps wraps ov::preprocess::PreProcessSteps"; steps.def( "mean", @@ -37,7 +37,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param value: Value to subtract. :type value: float :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -53,7 +53,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param values: Values to subtract. :type values: List[float] :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -69,7 +69,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param value: Value used in division. :type value: float :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -85,7 +85,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param values: Values which are used in division. 
:type values: List[float] :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -93,15 +93,15 @@ static void regclass_graph_PreProcessSteps(py::module m) { [](ov::preprocess::PreProcessSteps& self, ov::element::Type type = {}) { return &self.convert_element_type(type); }, - py::arg_v("type", ov::element::undefined, "openvino.runtime.Type.undefined"), + py::arg_v("type", ov::element::undefined, "openvino.Type.undefined"), R"( Converts input tensor element type to specified type. Input tensor must have openvino.Type data type. :param type: Destination type. If not specified, type will be taken from model input's element type - :type type: openvino.runtime.Type + :type type: openvino.Type :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -113,10 +113,10 @@ static void regclass_graph_PreProcessSteps(py::module m) { R"( Adds custom preprocessing operation. - :param operation: Python's function which takes `openvino.runtime.Output` as input argument and returns`openvino.runtime.Output`. + :param operation: Python's function which takes `openvino.Output` as input argument and returns`openvino.Output`. :type operation: function :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); steps.def( @@ -224,7 +224,7 @@ static void regclass_graph_PreProcessSteps(py::module m) { :param mode: pad_mode specifies the method used to generate new element values. :type mode: string :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. 
- :rtype: openvino.runtime.PreProcessSteps + :rtype: openvino.PreProcessSteps )"); } @@ -232,22 +232,22 @@ static void regclass_graph_PostProcessSteps(py::module m) { py::class_> steps( m, "PostProcessSteps"); - steps.doc() = "openvino.runtime.preprocess.PostprocessSteps wraps ov::preprocess::PostProcessSteps"; + steps.doc() = "openvino.preprocess.PostprocessSteps wraps ov::preprocess::PostProcessSteps"; steps.def( "convert_element_type", [](ov::preprocess::PostProcessSteps& self, ov::element::Type type = {}) { return &self.convert_element_type(type); }, - py::arg_v("type", ov::element::undefined, "openvino.runtime.Type.undefined"), + py::arg_v("type", ov::element::undefined, "openvino.Type.undefined"), R"( Converts tensor element type to specified type. Tensor must have openvino.Type data type. :param type: Destination type. If not specified, type will be taken from model output's element type. - :type type: openvino.runtime.Type + :type type: openvino.Type :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.PostProcessSteps + :rtype: openvino.preprocess.PostProcessSteps )"); steps.def( @@ -273,10 +273,10 @@ static void regclass_graph_PostProcessSteps(py::module m) { R"( Adds custom postprocessing operation. - :param operation: Python's function which takes `openvino.runtime.Output` as input argument and returns`openvino.runtime.Output`. + :param operation: Python's function which takes `openvino.Output` as input argument and returns`openvino.Output`. :type operation: function :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. 
- :rtype: openvino.runtime.preprocess.PreProcessSteps + :rtype: openvino.preprocess.PreProcessSteps )"); } @@ -284,7 +284,7 @@ static void regclass_graph_InputTensorInfo(py::module m) { py::class_> info( m, "InputTensorInfo"); - info.doc() = "openvino.runtime.preprocess.InputTensorInfo wraps ov::preprocess::InputTensorInfo"; + info.doc() = "openvino.preprocess.InputTensorInfo wraps ov::preprocess::InputTensorInfo"; info.def( "set_element_type", @@ -297,9 +297,9 @@ static void regclass_graph_InputTensorInfo(py::module m) { conversion of element type will be done automatically. :param type: Client's input tensor element type. - :type type: openvino.runtime.Type + :type type: openvino.Type :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.InputTensorInfo + :rtype: openvino.preprocess.InputTensorInfo )"); info.def( @@ -312,7 +312,7 @@ static void regclass_graph_InputTensorInfo(py::module m) { Set layout for input tensor info :param layout: layout to be set - :type layout: Union[str, openvino.runtime.Layout] + :type layout: Union[str, openvino.Layout] )"); info.def("set_spatial_dynamic_shape", [](ov::preprocess::InputTensorInfo& self) { @@ -364,16 +364,16 @@ static void regclass_graph_InputTensorInfo(py::module m) { [](ov::preprocess::InputTensorInfo& self, const ov::Tensor& tensor) { return &self.set_from(tensor); }, - py::arg("runtime_tensor"), + py::arg("tensor"), R"( Helper function to reuse element type and shape from user's created tensor. Overwrites previously set shape and element type via `set_shape` and `set_element_type' methods. This method should be - used only in case if runtime tensor is already known and avaiable before. + used only in case if tensor is already known and avaiable before. 
- :param runtime_tensor: User's created tensor - :type type: openvino.runtime.Tensor + :param tensor: User's created tensor + :type type: openvino.Tensor :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.InputTensorInfo + :rtype: openvino.preprocess.InputTensorInfo )"); info.def( @@ -382,16 +382,16 @@ static void regclass_graph_InputTensorInfo(py::module m) { // Convert to contiguous array if not already C-style. return &self.set_from(Common::object_from_data(numpy_array, false)); }, - py::arg("runtime_tensor"), + py::arg("tensor"), R"( Helper function to reuse element type and shape from user's created tensor. Overwrites previously set shape and element type via `set_shape` and `set_element_type' methods. This method should be - used only in case if runtime tensor is already known and avaiable before. + used only in case if tensor is already known and avaiable before. - :param runtime_tensor: User's created numpy array + :param tensor: User's created numpy array :type type: numpy.ndarray :return: Reference to itself, allows chaining of calls in client's code in a builder-like manner. - :rtype: openvino.runtime.preprocess.InputTensorInfo + :rtype: openvino.preprocess.InputTensorInfo )"); } @@ -399,7 +399,7 @@ static void regclass_graph_OutputTensorInfo(py::module m) { py::class_> info( m, "OutputTensorInfo"); - info.doc() = "openvino.runtime.preprocess.OutputTensorInfo wraps ov::preprocess::OutputTensorInfo"; + info.doc() = "openvino.preprocess.OutputTensorInfo wraps ov::preprocess::OutputTensorInfo"; info.def( "set_element_type", @@ -412,9 +412,9 @@ static void regclass_graph_OutputTensorInfo(py::module m) { conversion of element type will be done automatically. :param type: Client's output tensor element type. - :type type: openvino.runtime.Type + :type type: openvino.Type :return: Reference to itself to allow chaining of calls in client's code in a builder-like manner. 
- :rtype: openvino.runtime.preprocess.OutputTensorInfo + :rtype: openvino.preprocess.OutputTensorInfo )"); info.def( @@ -427,13 +427,13 @@ static void regclass_graph_OutputTensorInfo(py::module m) { Set layout for output tensor info :param layout: layout to be set - :type layout: Union[str, openvino.runtime.Layout] + :type layout: Union[str, openvino.Layout] )"); } static void regclass_graph_InputInfo(py::module m) { py::class_> inp(m, "InputInfo"); - inp.doc() = "openvino.runtime.preprocess.InputInfo wraps ov::preprocess::InputInfo"; + inp.doc() = "openvino.preprocess.InputInfo wraps ov::preprocess::InputInfo"; inp.def("tensor", [](ov::preprocess::InputInfo& self) { return &self.tensor(); @@ -450,7 +450,7 @@ static void regclass_graph_InputInfo(py::module m) { static void regclass_graph_OutputInfo(py::module m) { py::class_> out(m, "OutputInfo"); - out.doc() = "openvino.runtime.preprocess.OutputInfo wraps ov::preprocess::OutputInfo"; + out.doc() = "openvino.preprocess.OutputInfo wraps ov::preprocess::OutputInfo"; out.def("tensor", [](ov::preprocess::OutputInfo& self) { return &self.tensor(); @@ -469,7 +469,7 @@ static void regclass_graph_OutputModelInfo(py::module m) { py::class_> info( m, "OutputModelInfo"); - info.doc() = "openvino.runtime.preprocess.OutputModelInfo wraps ov::preprocess::OutputModelInfo"; + info.doc() = "openvino.preprocess.OutputModelInfo wraps ov::preprocess::OutputModelInfo"; info.def( "set_layout", @@ -481,7 +481,7 @@ static void regclass_graph_OutputModelInfo(py::module m) { Set layout for output model info :param layout: layout to be set - :type layout: Union[str, openvino.runtime.Layout] + :type layout: Union[str, openvino.Layout] )"); } @@ -489,7 +489,7 @@ static void regclass_graph_InputModelInfo(py::module m) { py::class_> info( m, "InputModelInfo"); - info.doc() = "openvino.runtime.preprocess.InputModelInfo wraps ov::preprocess::InputModelInfo"; + info.doc() = "openvino.preprocess.InputModelInfo wraps ov::preprocess::InputModelInfo"; 
info.def( "set_layout", @@ -500,7 +500,7 @@ static void regclass_graph_InputModelInfo(py::module m) { R"( Set layout for input model :param layout: layout to be set - :type layout: Union[str, openvino.runtime.Layout] + :type layout: Union[str, openvino.Layout] )"); } @@ -552,7 +552,7 @@ void regclass_graph_PrePostProcessor(py::module m) { py::class_> proc( m, "PrePostProcessor"); - proc.doc() = "openvino.runtime.preprocess.PrePostProcessor wraps ov::preprocess::PrePostProcessor"; + proc.doc() = "openvino.preprocess.PrePostProcessor wraps ov::preprocess::PrePostProcessor"; proc.def(py::init([](const py::object& ie_api_model) { const auto model = Common::utils::convert_to_model(ie_api_model); @@ -605,7 +605,7 @@ void regclass_graph_PrePostProcessor(py::module m) { py::gil_scoped_release release; model = self.build(); } - py::type model_class = py::module_::import("openvino.runtime").attr("Model"); + py::type model_class = py::module_::import("openvino").attr("Model"); return model_class(py::cast(model)); }); diff --git a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp index a453c672964b74..07efb1877cc355 100644 --- a/src/bindings/python/src/pyopenvino/graph/rt_map.cpp +++ b/src/bindings/python/src/pyopenvino/graph/rt_map.cpp @@ -60,7 +60,7 @@ class PyRTMapIterator { void regclass_graph_PyRTMap(py::module m) { auto py_map = py::class_(m, "RTMap"); - py_map.doc() = "openvino.runtime.RTMap makes bindings for std::map, which can later be used as ov::Node::RTMap"; py::class_(m, "Iterator") diff --git a/src/bindings/python/src/pyopenvino/graph/shape.cpp b/src/bindings/python/src/pyopenvino/graph/shape.cpp index 32fad8118ec9bb..4842a6b63ffbc6 100644 --- a/src/bindings/python/src/pyopenvino/graph/shape.cpp +++ b/src/bindings/python/src/pyopenvino/graph/shape.cpp @@ -27,7 +27,7 @@ bool compare_shape(const ov::Shape& a, const T& b) { void regclass_graph_Shape(py::module m) { py::class_> shape(m, "Shape"); - shape.doc() 
= "openvino.runtime.Shape wraps ov::Shape"; + shape.doc() = "openvino.Shape wraps ov::Shape"; shape.def(py::init<>()); shape.def(py::init&>(), py::arg("axis_lengths")); shape.def(py::init&>(), py::arg("axis_lengths")); diff --git a/src/bindings/python/src/pyopenvino/graph/strides.cpp b/src/bindings/python/src/pyopenvino/graph/strides.cpp index 31a5c18e3a2a18..507e00ddd9b924 100644 --- a/src/bindings/python/src/pyopenvino/graph/strides.cpp +++ b/src/bindings/python/src/pyopenvino/graph/strides.cpp @@ -26,7 +26,7 @@ bool compare_strides(const ov::Strides& a, const T& b) { void regclass_graph_Strides(py::module m) { py::class_> strides(m, "Strides"); - strides.doc() = "openvino.runtime.Strides wraps ov::Strides"; + strides.doc() = "openvino.Strides wraps ov::Strides"; strides.def(py::init&>(), py::arg("axis_strides")); strides.def(py::init&>(), py::arg("axis_strides")); strides.def(py::init(), py::arg("axis_strides")); diff --git a/src/bindings/python/src/pyopenvino/graph/symbol.cpp b/src/bindings/python/src/pyopenvino/graph/symbol.cpp index 7dfaa4cd736272..d24ab813b96064 100644 --- a/src/bindings/python/src/pyopenvino/graph/symbol.cpp +++ b/src/bindings/python/src/pyopenvino/graph/symbol.cpp @@ -12,7 +12,7 @@ namespace py = pybind11; void regclass_graph_Symbol(py::module m) { py::class_> symbol(m, "Symbol"); - symbol.doc() = "openvino.runtime.Symbol wraps ov::Symbol"; + symbol.doc() = "openvino.Symbol wraps ov::Symbol"; symbol.def(py::init([]() { return std::make_shared(); diff --git a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp index 2a807ce5cb54a6..66e0eda890e309 100644 --- a/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp +++ b/src/bindings/python/src/pyopenvino/graph/types/element_type.cpp @@ -15,7 +15,7 @@ namespace py = pybind11; void regclass_graph_Type(py::module m) { py::class_> type(m, "Type"); - type.doc() = "openvino.runtime.Type wraps ov::element::Type"; 
+ type.doc() = "openvino.Type wraps ov::element::Type"; type.def(py::init([](py::object& np_literal) { auto dtype = py::dtype::from_args(np_literal); @@ -96,7 +96,7 @@ void regclass_graph_Type(py::module m) { `other`. :param other: The element type to compare this element type to. - :type other: openvino.runtime.Type + :type other: openvino.Type :return: `True` if element types are compatible, otherwise `False`. :rtype: bool )"); @@ -117,10 +117,10 @@ void regclass_graph_Type(py::module m) { otherwise return None. :param other: The element type to merge with this element type. - :type other: openvino.runtime.Type + :type other: openvino.Type :return: If element types are compatible return the least restrictive Type, otherwise `None`. - :rtype: Union[openvino.runtime.Type|None] + :rtype: Union[openvino.Type|None] )"); type.def( diff --git a/src/bindings/python/src/pyopenvino/graph/util.cpp b/src/bindings/python/src/pyopenvino/graph/util.cpp index 4bf3a69ba9052a..3fd297adc34a13 100644 --- a/src/bindings/python/src/pyopenvino/graph/util.cpp +++ b/src/bindings/python/src/pyopenvino/graph/util.cpp @@ -24,7 +24,7 @@ inline void* numpy_to_c(py::array a) { } void regmodule_graph_util(py::module m) { - py::module mod = m.def_submodule("util", "openvino.runtime.utils"); + py::module mod = m.def_submodule("util", "openvino.utils"); mod.def("numpy_to_c", &numpy_to_c); mod.def("replace_output_update_name", &ov::replace_output_update_name, py::arg("output"), py::arg("target_output")); diff --git a/src/bindings/python/src/pyopenvino/pyopenvino.cpp b/src/bindings/python/src/pyopenvino/pyopenvino.cpp index 00d43223c1a6c9..5d6f73c538599d 100644 --- a/src/bindings/python/src/pyopenvino/pyopenvino.cpp +++ b/src/bindings/python/src/pyopenvino/pyopenvino.cpp @@ -147,7 +147,7 @@ PYBIND11_MODULE(_pyopenvino, m) { when it is not related to debugging. 
:param model: model which will be converted to IR representation - :type model: openvino.runtime.Model + :type model: openvino.Model :param xml_path: path where .xml file will be saved :type xml_path: Union[str, bytes, pathlib.Path] :param bin_path: path where .bin file will be saved (optional), @@ -204,7 +204,7 @@ PYBIND11_MODULE(_pyopenvino, m) { compressed to FP16, debug information in model nodes are cleaned up, etc. :param model: model which will be converted to IR representation - :type model: openvino.runtime.Model + :type model: openvino.Model :param output_model: path to output model file :type output_model: Union[str, bytes, pathlib.Path] :param compress_to_fp16: whether to compress floating point weights to FP16 (default: True). The parameter is ignored for pre-optimized models. @@ -267,7 +267,7 @@ PYBIND11_MODULE(_pyopenvino, m) { regmodule_graph_op_util(m_op); regmodule_experimental(m); py::module m_preprocess = - m.def_submodule("preprocess", "Package openvino.runtime.preprocess that wraps ov::preprocess"); + m.def_submodule("preprocess", "Package openvino.preprocess that wraps ov::preprocess"); regclass_graph_PrePostProcessor(m_preprocess); regclass_graph_Model(m); regmodule_graph_passes(m); diff --git a/src/bindings/python/tests/test_graph/test_core.py b/src/bindings/python/tests/test_graph/test_core.py index 697d212e0b3eaf..a75d6aa4059ac9 100644 --- a/src/bindings/python/tests/test_graph/test_core.py +++ b/src/bindings/python/tests/test_graph/test_core.py @@ -203,7 +203,7 @@ def test_partial_shape(): PartialShape([range(10)]) assert ( "Incorrect type for dimension. Expected types are: " - "int, str, openvino.runtime.Dimension, list/tuple with lower " + "int, str, openvino.Dimension, list/tuple with lower " "and upper values for dynamic dimension." 
in str(e.value) ) diff --git a/src/bindings/python/tests/test_runtime/test_compiled_model.py b/src/bindings/python/tests/test_runtime/test_compiled_model.py index 3f885bf98b53ee..9a914b803652c4 100644 --- a/src/bindings/python/tests/test_runtime/test_compiled_model.py +++ b/src/bindings/python/tests/test_runtime/test_compiled_model.py @@ -225,7 +225,7 @@ def test_inputs_docs(device): compiled_model = generate_relu_compiled_model(device) input_0 = compiled_model.inputs[0] - assert input_0.__doc__ == "openvino.runtime.ConstOutput represents port/node output." + assert input_0.__doc__ == "openvino.ConstOutput represents port/node output." def test_infer_new_request_numpy(device): diff --git a/src/bindings/python/tests/test_runtime/test_input_node.py b/src/bindings/python/tests/test_runtime/test_input_node.py index e791a9c20c0c04..ffc3aebc3b76b6 100644 --- a/src/bindings/python/tests/test_runtime/test_input_node.py +++ b/src/bindings/python/tests/test_runtime/test_input_node.py @@ -27,7 +27,7 @@ def test_const_output_docs(device): compiled_model = core.compile_model(model, device) net_input = compiled_model.output(0) input_node = net_input.get_node().inputs()[0] - exptected_string = "openvino.runtime.Input wraps ov::Input" + exptected_string = "openvino.Input wraps ov::Input" assert input_node.__doc__ == exptected_string diff --git a/src/bindings/python/tests/test_runtime/test_model.py b/src/bindings/python/tests/test_runtime/test_model.py index 265c504efa36ff..c247c066b27551 100644 --- a/src/bindings/python/tests/test_runtime/test_model.py +++ b/src/bindings/python/tests/test_runtime/test_model.py @@ -507,14 +507,14 @@ def check_shape(new_shape): model.reshape({model.input().node: shape10}) assert ( "Incorrect key type to reshape a model, " - "expected keys as openvino.runtime.Output, int or str." in str(e.value) + "expected keys as openvino.Output, int or str." 
in str(e.value) ) with pytest.raises(TypeError) as e: model.reshape({0: range(1, 9)}) assert ( "Incorrect value type to reshape a model, " - "expected values as openvino.runtime.PartialShape, str, list or tuple." + "expected values as openvino.PartialShape, str, list or tuple." in str(e.value) ) @@ -593,7 +593,7 @@ def check_shape(new_shape): model.reshape({0: shape10}, {var_id: range(1, 9)}) assert ( "Incorrect value type to reshape a model, " - "expected values as openvino.runtime.PartialShape, str, list or tuple." + "expected values as openvino.PartialShape, str, list or tuple." in str(e.value) ) @@ -812,7 +812,7 @@ def test_copy_failed(): model = generate_add_model() with pytest.raises(TypeError) as e: copy(model) - assert "Cannot copy 'openvino.runtime.Model'. Please, use deepcopy instead." in str(e.value) + assert "Cannot copy 'openvino.Model'. Please, use deepcopy instead." in str(e.value) def test_model_attr_not_found(): diff --git a/src/bindings/python/tests/test_runtime/test_output_const_node.py b/src/bindings/python/tests/test_runtime/test_output_const_node.py index 718a6e3f164061..5c33647c1d7c14 100644 --- a/src/bindings/python/tests/test_runtime/test_output_const_node.py +++ b/src/bindings/python/tests/test_runtime/test_output_const_node.py @@ -34,7 +34,7 @@ def test_const_output_docs(device): model = get_relu_model() compiled_model = core.compile_model(model, device) node = compiled_model.input(0) - exptected_string = "openvino.runtime.ConstOutput represents port/node output." + exptected_string = "openvino.ConstOutput represents port/node output." assert node.__doc__ == exptected_string @@ -156,4 +156,4 @@ def test_deepcopy(): output_node = node.outputs()[0] with pytest.raises(TypeError) as e: deepcopy(output_node) - assert "Cannot deepcopy 'openvino.runtime.Output' object." in str(e) + assert "Cannot deepcopy 'openvino.Output' object." 
in str(e) diff --git a/src/bindings/python/tests/test_runtime/test_sync_infer_request.py b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py index 3f8947598261c3..192823086d236a 100644 --- a/src/bindings/python/tests/test_runtime/test_sync_infer_request.py +++ b/src/bindings/python/tests/test_runtime/test_sync_infer_request.py @@ -591,7 +591,7 @@ def test_convert_infer_request(device): res = request.infer(inputs) with pytest.raises(TypeError) as e: deepcopy(res) - assert "Cannot deepcopy 'openvino.runtime.ConstOutput' object." in str(e) + assert "Cannot deepcopy 'openvino.ConstOutput' object." in str(e) @pytest.mark.parametrize("share_inputs", [True, False]) From 3d60768ba34d87d581ee33141b61aaaa6cb2acfb Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Thu, 16 Jan 2025 16:46:38 +0400 Subject: [PATCH 08/97] [GPU] KV-cache compression micro_sdpa kernel (#28004) ### Details: - Added KV-cache compression support to the micro_sdpa kernel - Performance still needs to be adjusted ### Tickets: - [*CVS-144834*](https://jira.devtools.intel.com/browse/CVS-144834) --------- Co-authored-by: Pavel Durandin Co-authored-by: Vladimir Paramuzov --- .../graph_optimizer/prepare_buffer_fusing.cpp | 4 +- .../src/graph/impls/ocl/kv_cache.cpp | 42 +++- .../ocl/scaled_dot_product_attention.cpp | 2 +- .../src/graph/include/kv_cache_inst.h | 5 +- .../intel_gpu/src/graph/primitive_inst.cpp | 28 ++- .../dynamic_quantize_gpu_kv_cache.cl | 12 +- .../cl_kernels/dynamic_quantize_gpu_ref.cl | 12 +- .../include/batch_headers/sdpa_utils.cl | 2 + .../kernel_selector/cl_kernels/sdpa_micro.cl | 106 +++++++- .../kernels/sdpa/sdpa_kernel_micro.cpp | 230 ++++++++++++++++-- .../src/kernel_selector/micro_utils.hpp | 8 + .../transformations/kv_cache_compression.cpp | 13 +- .../transformations/kv_cache_compression.hpp | 3 +- .../plugin/transformations/op/kv_cache.cpp | 3 +- .../src/plugin/transformations_pipeline.cpp | 2 +- .../src/runtime/execution_config.cpp | 2 +- 
.../test_cases/dynamic_quantize_gpu_test.cpp | 112 +++++++-- .../transformations/kv_cache_compression.cpp | 4 +- 18 files changed, 489 insertions(+), 101 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 0e9f3e14eb3f2e..7db9c2c0d59419 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -926,6 +926,7 @@ void prepare_buffer_fusing::run(program& p) { if (kv_out_layout.is_dynamic()) { // set dynamic pad dims for shape agnostic kernel + const auto& desc = node.get_primitive(); padding::DynamicDimsMask info_dynamic_pad; info_dynamic_pad[concat_axis] = 1; kv_out_layout.data_padding._dynamic_dims_mask = info_dynamic_pad; @@ -942,7 +943,7 @@ void prepare_buffer_fusing::run(program& p) { auto update_scale_zp = [&](size_t kv_cache_output_idx, size_t read_value_output_idx) { auto scales_out_layout = node.get_output_layout(false, kv_cache_output_idx); - const size_t scales_zp_concat_axis = 2; + const auto scales_zp_concat_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); padding::DynamicDimsMask info_dynamic_pad_scales; info_dynamic_pad_scales[scales_zp_concat_axis] = 1; scales_out_layout.data_padding._dynamic_dims_mask = info_dynamic_pad_scales; @@ -958,7 +959,6 @@ void prepare_buffer_fusing::run(program& p) { update_dep(gather_prim, info_dynamic_pad, 0); } - const auto& desc = node.get_primitive(); if (desc->compressed) { update_scale_zp(2, 1); diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp index fef2a3c51ee821..1ffbfbbfbade37 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/kv_cache.cpp @@ -230,7 +230,7 @@ struct kv_cache_impl : 
multi_stage_primitive { if (desc->get_compression_zp_inputs_num() > 0) { // Copy zero points to the new buffer if needed - execute_stage(events, instance, res_events, scale_concat_stage, zp_concat_stage); + execute_stage(events, instance, res_events, zp_concat_stage, zp_concat_stage); } // Perform dynamic quantization of new token data and append result to the KV-cache @@ -417,15 +417,19 @@ struct kv_cache_impl : multi_stage_primitive { return params; } - static kernel_params_t get_compression_scale_update_kernel_params(const kernel_impl_params& impl_param, bool is_shape_agnostic = false) { + static kernel_params_t get_compression_scale_update_kernel_params(const kernel_impl_params& impl_param, + bool is_scale = true, + bool is_shape_agnostic = false) { auto params = get_default_params(impl_param, is_shape_agnostic); const auto concat_axis = 2; params.axis = convert_axis(concat_axis, impl_param.get_output_layout().get_rank()); - auto inputs_count = 1; - auto comp_scale_past_layout = impl_param.input_layouts[3]; - auto comp_scale_present_layout = impl_param.output_layouts[2]; + const auto inputs_count = 1; + const auto input_idx = is_scale ? 3 : 4; // scale or zp + const auto output_idx = is_scale ? 
2 : 3; // scale or zp + auto comp_scale_past_layout = impl_param.input_layouts[input_idx]; + auto comp_scale_present_layout = impl_param.output_layouts[output_idx]; params.inputs.resize(inputs_count); params.inputs[0] = convert_data_tensor(comp_scale_past_layout); @@ -435,10 +439,10 @@ struct kv_cache_impl : multi_stage_primitive { const auto& out_offsets_map = impl_param.out_port_to_shape_info_offset; std::map in_tensor_to_offset_map = { - {0, in_offsets_map.at(3)}, // compression_scale_past + {0, in_offsets_map.at(input_idx)}, // compression_[scale/zp]_past }; std::map out_tensor_to_offset_map = { - {0, out_offsets_map.at(2)}, // compression_scale_present + {0, out_offsets_map.at(output_idx)}, // compression_[scale/zp]_present }; params.set_dynamic_shape_offsets(in_tensor_to_offset_map, out_tensor_to_offset_map); @@ -451,8 +455,11 @@ struct kv_cache_impl : multi_stage_primitive { auto concat_kernel_params = get_concat_kernel_params(impl_param, impl_param.is_dynamic()); auto& concat_kernel_selector = kernel_selector_t::Instance(); kernels_data.push_back(concat_kernel_selector.get_best_kernel(concat_kernel_params)); - const bool indirect = impl_param.typed_desc()->indirect; - const bool compressed = impl_param.typed_desc()->compressed; + + const auto desc = impl_param.typed_desc(); + const bool indirect = desc->indirect; + const bool compressed = desc->compressed; + const bool has_zp_input = desc->get_compression_zp_inputs_num() > 0; if (indirect) { auto bt_update_kernel_params = get_bt_update_kernel_params(impl_param, false); auto& bt_update_kernel_selector = bt_kernel_selector_t::Instance(); @@ -464,9 +471,14 @@ struct kv_cache_impl : multi_stage_primitive { auto& dq_kernel_selector = dq_kernel_selector_t::Instance(); kernels_data.push_back(dq_kernel_selector.get_best_kernel(dq_kernel_params)); - auto concat_scale_zp_kernel_params = get_compression_scale_update_kernel_params(impl_param, impl_param.is_dynamic()); auto& concat_scale_zp_kernel_selector = 
kernel_selector_t::Instance(); - kernels_data.push_back(concat_scale_zp_kernel_selector.get_best_kernel(concat_scale_zp_kernel_params)); + auto concat_scale_kernel_params = get_compression_scale_update_kernel_params(impl_param, true, impl_param.is_dynamic()); + kernels_data.push_back(concat_scale_zp_kernel_selector.get_best_kernel(concat_scale_kernel_params)); + + if (has_zp_input) { + auto concat_zp_kernel_params = get_compression_scale_update_kernel_params(impl_param, false, impl_param.is_dynamic()); + kernels_data.push_back(concat_scale_zp_kernel_selector.get_best_kernel(concat_zp_kernel_params)); + } } return cldnn::make_unique(kernels_data); } @@ -494,9 +506,15 @@ struct kv_cache_impl : multi_stage_primitive { _kernels_data[concat_stage].kernels[1].skip_execution = true; // Update dynamic quantization parameters - auto comp_scale_kernel_params = get_compression_scale_update_kernel_params(impl_param, impl_param.is_dynamic()); + auto comp_scale_kernel_params = get_compression_scale_update_kernel_params(impl_param, true, impl_param.is_dynamic()); (_kernels_data[scale_concat_stage].update_dispatch_data_func)(comp_scale_kernel_params, _kernels_data[scale_concat_stage]); _kernels_data[scale_concat_stage].kernels[0].skip_execution = impl_param._can_be_optimized || impl_param.get_input_layout(3).count() == 0; + + if (impl_param.typed_desc()->get_compression_zp_inputs_num() > 0) { + auto comp_scale_kernel_params = get_compression_scale_update_kernel_params(impl_param, false, impl_param.is_dynamic()); + (_kernels_data[zp_concat_stage].update_dispatch_data_func)(comp_scale_kernel_params, _kernels_data[zp_concat_stage]); + _kernels_data[zp_concat_stage].kernels[0].skip_execution = impl_param._can_be_optimized || impl_param.get_input_layout(4).count() == 0; + } } } }; diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/scaled_dot_product_attention.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/scaled_dot_product_attention.cpp index dad93d94946490..86b49484282238 100644 
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/scaled_dot_product_attention.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/scaled_dot_product_attention.cpp @@ -287,7 +287,7 @@ struct scaled_dot_product_attention_impl : multi_stage_primitiveget_compression_zp_inputs_num() > 0; if (desc->is_kv_compressed) { data_inputs_num -= 2; // key and value compression scales are handled separately diff --git a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h index da0a9397433f89..e95e2e94ff4ab0 100644 --- a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h +++ b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h @@ -62,9 +62,8 @@ class typed_primitive_inst : public typed_primitive_inst_base= 0 ? sequence_axis : past_layout_rank + sequence_axis; } - static int64_t get_scale_zp_sequence_axis() { - // The order of scales and zero points is fixed, so use constant axis - const auto scale_zp_concat_axis = 2; + static int64_t get_scale_zp_sequence_axis(int64_t sequence_axis, const kv_cache::QuantizationAttributes& quantization_attrs) { + const auto scale_zp_concat_axis = quantization_attrs.scales_zp_output_order[sequence_axis]; return scale_zp_concat_axis; } diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 3712202f4926c8..6e1af3f5429283 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -338,13 +338,13 @@ void primitive_inst::update_shape() { _impl_params->state_layouts.resize(compressed_cache_variable->has_zp_state() ? 
3 : 2); auto scales_state = compressed_cache_variable->get_compression_scale_state(); - auto new_scales_layout = compressed_cache_variable->get_compression_scale_state()->get_layout(); + auto new_scales_layout = scales_state->get_layout(); update_state_layout(*scales_state, new_scales_layout, 1); if (compressed_cache_variable->has_zp_state()) { - auto scales_state = compressed_cache_variable->get_compression_zp_state(); - auto new_zp_layout = compressed_cache_variable->get_compression_zp_state()->get_layout(); - update_state_layout(*scales_state, new_zp_layout, 2); + auto zp_state = compressed_cache_variable->get_compression_zp_state(); + auto new_zp_layout = zp_state->get_layout(); + update_state_layout(*zp_state, new_zp_layout, 2); } } } @@ -851,7 +851,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { auto prealloc_shape = updated_layouts[i].get_shape(); const auto shape_rank = prealloc_shape.size(); const auto seq_axis = i == 0 ? kv_cache_inst::get_sequence_axis(desc->concat_axis, shape_rank) - : kv_cache_inst::get_scale_zp_sequence_axis(); + : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); prealloc_shape[seq_axis] += tmp_prealloc_count; required_buffer_size = std::accumulate(prealloc_shape.begin(), prealloc_shape.end(), size_t(1), std::multiplies()); @@ -883,7 +883,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { const auto& desc = _node->as().get_primitive(); const auto shape_rank = updated_layouts[i].get_shape().size(); const auto seq_axis = i == 0 ? 
kv_cache_inst::get_sequence_axis(desc->concat_axis, shape_rank) - : kv_cache_inst::get_scale_zp_sequence_axis(); + : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); prealloc_info = sp.predict_preallocation_shape(id(), updated_layouts[i], false, i, tmp_prealloc_count, seq_axis); } else { @@ -907,7 +907,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { auto& present_layout = _impl_params->output_layouts[i]; const auto present_layout_rank = present_layout.get_partial_shape().size(); const auto sequence_axis = i == 0 ? kv_cache_inst::get_sequence_axis(desc->concat_axis, present_layout_rank) - : kv_cache_inst::get_scale_zp_sequence_axis();; + : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); auto max_pad = kv_cache_inst::get_max_pad(present_layout, _max_output_layout_count[i], @@ -978,7 +978,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { if (max_pad > 0) { if (auto compressed_cache_variable = dynamic_cast(&variable)) { auto present_scales_layout = _impl_params->output_layouts[2]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis();; + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); // In case of compressed KV-cache, calling update_impl for each iteration // because of scales layout [batch, num_heads, seq_len, head_size], which requires proper @@ -990,8 +990,9 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { compressed_cache_variable->get_compression_scale_state()->set_memory(_outputs[2], present_scales_layout); if (compressed_cache_variable->has_zp_state()) { auto present_zp_layout = present_scales_layout; + present_zp_layout.data_type = _impl_params->output_layouts[3].data_type; - _impl_params->output_layouts[3] = present_scales_layout; + _impl_params->output_layouts[3] = present_zp_layout; 
compressed_cache_variable->get_compression_zp_state()->set_memory(_outputs[3], present_zp_layout); } } @@ -1373,7 +1374,7 @@ void primitive_inst::do_runtime_in_place_kv_cache() { if (desc->compressed) { auto compressed_cache_variable = dynamic_cast(&variable); auto& present_scales_layout = _impl_params->output_layouts[2]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(); + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); kv_cache_inst::update_pad(present_scales_layout, max_pad - new_seq_len, sequence_axis); GPU_DEBUG_TRACE_DETAIL << "[do runtime_in_place_kv_cache] " << id() << " Updated present_scale_layout's pad : " << present_scales_layout.to_string() << std::endl; @@ -1385,7 +1386,7 @@ void primitive_inst::do_runtime_in_place_kv_cache() { GPU_DEBUG_TRACE_DETAIL << "[do runtime_in_place_kv_cache] " << id() << " Updated present_zp_layout's pad : " << present_scales_layout.to_string() << std::endl; - compressed_cache_variable->get_compression_zp_state()->set_layout(present_scales_layout); + compressed_cache_variable->get_compression_zp_state()->set_layout(present_zp_layout); } } @@ -1397,7 +1398,7 @@ void primitive_inst::do_runtime_in_place_kv_cache() { if (desc->compressed) { auto& past_scale_layout = _impl_params->input_layouts[3]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(); + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); kv_cache_inst::update_pad(past_scale_layout, max_pad, sequence_axis); if (desc->get_compression_zp_inputs_num() > 0) { @@ -2104,6 +2105,9 @@ primitive_inst::primitive_inst(network & network, program_node const& node, bool _outputs = allocate_outputs(); } } + if (_node) { + GPU_DEBUG_TRACE_DETAIL << _node->type()->to_string(*_node) << "\n"; + } _impls_factory = std::make_shared(_node); _impl_params->strm = _network.get_stream_ptr(); for (size_t i = 0; 
i < get_node().get_output_layouts().size(); ++i) { diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl index 16add2e0397d32..169e7cc62635b8 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_kv_cache.cl @@ -87,12 +87,14 @@ KERNEL(dynamic_quantize_gpu_kv_cache)( #if ASYMMETRIC_QUANTIZATION min_value = work_group_reduce_min(min_value); max_value = work_group_reduce_max(max_value); + // If the range of input data is zero, it is adjusted to the minimum value(0.001). - half diff_value = max_value == min_value ? (grp_max) : (max_value - min_value); + ACCUMULATOR_TYPE diff_value = max_value == min_value ? (grp_max) : (max_value - min_value); ACCUMULATOR_TYPE scale_tmp = (ACCUMULATOR_TYPE)((CHAR_MAX - CHAR_MIN) / diff_value); - ACCUMULATOR_TYPE zp_tmp = (ACCUMULATOR_TYPE)(-min_value * scale_tmp) - CHAR_MAX; + ACCUMULATOR_TYPE zp_tmp = (ACCUMULATOR_TYPE)(-min_value * scale_tmp) + CHAR_MIN; OUTPUT1_TYPE scale = (OUTPUT1_TYPE)(scale_tmp); OUTPUT1_TYPE zp = (OUTPUT1_TYPE)(zp_tmp); + #else max_value = work_group_reduce_max(max_value); OUTPUT1_TYPE scale = 127.0h / max_value; @@ -120,7 +122,13 @@ KERNEL(dynamic_quantize_gpu_kv_cache)( #if GROUP_SCALES_WITH_ZP output_scale[scale_idx + 1] = zp; #else + + #if OUTPUT2_IS_FP + output_zp[scale_idx] = zp; + #else output_zp[scale_idx] = convert_char_rte(zp); + #endif + #endif #else output_scale[scale_idx] = 1.0h / scale; diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_ref.cl index 72dc057d44a040..236fe4c9dab684 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_ref.cl +++ 
b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/dynamic_quantize_gpu_ref.cl @@ -104,12 +104,12 @@ KERNEL(dynamic_quantize_gpu_ref)( #if ASYMMETRIC_QUANTIZATION // If the range of input data is zero, it is adjusted to the minimum value(0.001). - half diff_value = max_val == min_val ? (grp_max) : (max_val - min_val); + ACCUMULATOR_TYPE diff_value = max_val == min_val ? (grp_max) : (max_val - min_val); ACCUMULATOR_TYPE scale_tmp = (ACCUMULATOR_TYPE)((CHAR_MAX - CHAR_MIN) / diff_value); # if UNSIGNED_OUTPUT ACCUMULATOR_TYPE zp_tmp = (ACCUMULATOR_TYPE)(-min_val * scale_tmp); # else // !UNSIGNED_OUTPUT - ACCUMULATOR_TYPE zp_tmp = (ACCUMULATOR_TYPE)(-min_val * scale_tmp) - CHAR_MAX; + ACCUMULATOR_TYPE zp_tmp = (ACCUMULATOR_TYPE)(-min_val * scale_tmp) + CHAR_MIN; # endif OUTPUT1_TYPE scale = (OUTPUT1_TYPE)(scale_tmp); OUTPUT1_TYPE zp = (OUTPUT1_TYPE)(zp_tmp); @@ -161,6 +161,12 @@ KERNEL(dynamic_quantize_gpu_ref)( #if ASYMMETRIC_QUANTIZATION && GROUP_SCALES_WITH_ZP output_scale[scale_idx + 1] = zp; #elif ASYMMETRIC_QUANTIZATION - output_zp[scale_idx] = convert_uchar_rte(zp); + #if OUTPUT2_IS_FP + output_zp[scale_idx] = zp; + #elif UNSIGNED_OUTPUT + output_zp[scale_idx] = convert_uchar_rte(zp); + #else + output_zp[scale_idx] = convert_char_rte(zp); + #endif #endif } diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/sdpa_utils.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/sdpa_utils.cl index 5943f23251bb7a..36c9741f3f3c7a 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/sdpa_utils.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/include/batch_headers/sdpa_utils.cl @@ -27,6 +27,8 @@ #define KEY_OFF(x0, x1, x2, x3) _4D_OFF(KEY, x0, x1, x2, x3) #define VAL_OFF(x0, x1, x2, x3) _4D_OFF(VAL, x0, x1, x2, x3) #define MSK_OFF(x0, x1, x2, x3) _4D_OFF(MSK, x0, x1, x2, x3) +#define KEY_COMP_OFF(x0, x1, x2, x3) _4D_OFF(KEY_COMP, x0, x1, x2, x3) +#define 
VAL_COMP_OFF(x0, x1, x2, x3) _4D_OFF(VAL_COMP, x0, x1, x2, x3) #define DST_OFF(x0, x1, d, h, w) \ (((x0) % DST_B0) * DST_SB0 + ((x0) / DST_B0) * DST_S0 \ diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/sdpa_micro.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/sdpa_micro.cl index 1584dffe95a3c3..b50410a3cf761f 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/sdpa_micro.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/sdpa_micro.cl @@ -18,6 +18,12 @@ #include "include/batch_headers/sdpa_utils.cl" #include "include/batch_headers/tile_ops.cl" +/* The quantization parameter may be unique for each token/element */ +#define QUANTIZE_2D 2 + +/* The quantization parameter shares the same value across the work-group */ +#define QUANTIZE_COMMON 3 + #define MAX(a, b) ((a) > (b) ? (a) : (b)) #define DIV_UP(x, y) (((x) + (y)-1) / (y)) @@ -133,7 +139,9 @@ DECLARE_2D_TILE_RSELECT(a_scale_tile_type, SUBGROUP_SIZE, ugemm_vs_sg_tile_n, 1, __attribute__((intel_reqd_sub_group_size(SUBGROUP_SIZE))) KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG - const global half *K, const global half *Q, const global half *V, + const global KEY_DATA_T *K, + const global QRY_DATA_T *Q, + const global VAL_DATA_T *V, global half *A, #if WITH_ATTN_MASK const global half *msk, @@ -141,10 +149,18 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG #if WITH_SCALE global SCALE_DATA_T *scale_ptr, #endif - int d, int k, int q) { + int d, int k, int q +#ifdef KV_COMPRESSED + , const global KEY_ATTR_SCALES_DATA_T *K_scales + , const global KEY_ATTR_ZP_DATA_T *K_zp + , const global VAL_ATTR_SCALES_DATA_T *V_scales + , const global VAL_ATTR_ZP_DATA_T *V_zp +#endif + ) { uint sg_ij = sub_group_broadcast(get_local_id(1), 0); uint b0 = get_group_id(1); uint b1 = get_group_id(2); + uint b0_kv = b0 / KV_GROUP_SIZE; uint wg_j0 = get_group_id(0) * ugemm_kq_wg_tile_n; @@ -154,6 +170,13 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG uint ldv = VAL_S2; uint lda = DST_S2; +#if 
KEY_SCALES || KEY_ZERO_POINTS + uint ldkq = DIV_UP(d, KEY_GROUP_SIZE); +#endif +#if VAL_SCALES || VAL_ZERO_POINTS + uint ldvq = DIV_UP(d, VAL_GROUP_SIZE); +#endif + /* Subgroup IDs for each GEMM */ uint sg_i_kq = sg_ij % ugemm_kq_sg_per_wg_m; uint sg_j_kq = sg_ij / ugemm_kq_sg_per_wg_m; @@ -183,11 +206,30 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG const bool need_sum_barrier = (ugemm_vs_barrier_count == 0); /* Locate K/Q/V/A matrices within batch */ - K += KEY_OFF(b1, (b0 / KV_GROUP_SIZE), 0, 0) + INPUT1_OFFSET; - Q += QRY_OFF(b1, b0, 0, 0) + INPUT0_OFFSET; - V += VAL_OFF(b1, (b0 / KV_GROUP_SIZE), 0, 0) + INPUT2_OFFSET; + K += (KEY_OFF(b1, b0_kv, 0, 0) + INPUT1_OFFSET) / KEY_ELEMENTS_PER_BYTE; + Q += (QRY_OFF(b1, b0, 0, 0) + INPUT0_OFFSET); + V += (VAL_OFF(b1, b0_kv, 0, 0) + INPUT2_OFFSET) / VAL_ELEMENTS_PER_BYTE; A += DST_OFF(b1, b0, 0, 0, 0); +#if KEY_SCALES + K_scales += KEY_COMP_OFF(b1, b0_kv, 0, 0); +#endif +#if KEY_SCALES == QUANTIZE_COMMON + float k_scale = convert_float(*K_scales); +#endif +#if KEY_ZERO_POINTS + K_zp += KEY_COMP_OFF(b1, b0_kv, 0, 0) / KEY_ZP_ELEMENTS_PER_BYTE; +#endif +#if VAL_SCALES + V_scales += VAL_COMP_OFF(b1, b0_kv, 0, 0); +#endif +#if VAL_SCALES == QUANTIZE_COMMON + float v_scale = convert_float(*V_scales); +#endif +#if VAL_ZERO_POINTS + V_zp += VAL_COMP_OFF(b1, b0_kv, 0, 0) / VAL_ZP_ELEMENTS_PER_BYTE; +#endif + __builtin_assume_aligned(K, K_ALIGN); __builtin_assume_aligned(Q, Q_ALIGN); __builtin_assume_aligned(V, V_ALIGN); @@ -283,7 +325,25 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG /* Calculate S = (K^T) * Q */ s_tile_type S_tile = ugemm_kq(K, ldk, Q_slm, D_MAX, k, ugemm_kq_wg_tile_n, d, k0, - 0, 0, sg_i_kq, sg_j_kq, (local char *)ugemm_slm); + 0, 0, sg_i_kq, sg_j_kq, (local char *)ugemm_slm +#if KEY_SCALES == QUANTIZE_2D + , + K_scales +#endif +#if KEY_ZERO_POINTS + , + K_zp +#endif +#if (KEY_SCALES == QUANTIZE_2D) || KEY_ZERO_POINTS + , + ldkq +#endif + ); + +#if KEY_SCALES == QUANTIZE_COMMON +#define k_scale_op(x) 
((x)*k_scale) + tile_elementwise(S_tile, k_scale_op); +#endif /* Apply attention mask */ #if WITH_ATTN_MASK @@ -419,10 +479,31 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG /* Accumulate A += V * S */ int k_chunk = min(k - k0, ugemm_kq_wg_tile_m); - a_tile_type A_tile1 = ugemm_vs(V, ldv, S_slm, ugemm_kq_wg_tile_m, d, - ugemm_kq_wg_tile_n, k_chunk, 0, 0, 0, sg_i_vs, sg_j_vs, - (local char *)ugemm_slm); - V += ldv * ugemm_kq_wg_tile_m; + + a_tile_type A_tile1 = ugemm_vs( + V, ldv, S_slm, ugemm_kq_wg_tile_m, d, ugemm_kq_wg_tile_n, + k_chunk, 0, 0, 0, sg_i_vs, sg_j_vs, (local char *)ugemm_slm +#if VAL_SCALES == QUANTIZE_2D + , + V_scales +#endif +#if VAL_ZERO_POINTS + , + V_zp +#endif +#if (VAL_SCALES == QUANTIZE_2D) || VAL_ZERO_POINTS + , + ldvq +#endif + ); + + V += ldv * ugemm_kq_wg_tile_m / VAL_ELEMENTS_PER_BYTE; +#if VAL_SCALES == QUANTIZE_2D + V_scales += ldvq * ugemm_kq_wg_tile_m; +#endif +#if VAL_ZERO_POINTS == QUANTIZE_2D + V_zp += ldvq * ugemm_kq_wg_tile_m / VAL_ZP_ELEMENTS_PER_BYTE; +#endif tile_binary(A_tile, A_tile1, binary_add); } @@ -440,6 +521,11 @@ KERNEL(micro_sdpa)(OPTIONAL_SHAPE_INFO_ARG tile_binary(A_scale_tile, A_scale_tile_load, binary_add); } +#if VAL_SCALES == QUANTIZE_COMMON +#define v_scale_op(x) ((x)*v_scale) + tile_elementwise(A_tile, v_scale_op); +#endif + /* Rescale by 1 / (column sums) */ tile_elementwise(A_scale_tile, native_vrecip); tile_hbroadcast_mul(&A_tile, A_scale_tile); diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/sdpa/sdpa_kernel_micro.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/sdpa/sdpa_kernel_micro.cpp index 467dd71da37944..028c95b77c9b06 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/sdpa/sdpa_kernel_micro.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/sdpa/sdpa_kernel_micro.cpp @@ -47,6 +47,8 @@ micro::Type convert_type(Datatype t) { switch (t) { case Datatype::F32: return micro::Type::f32; case Datatype::F16: return micro::Type::f16; + case Datatype::INT8: return 
micro::Type::s8; + case Datatype::UINT8: return micro::Type::u8; default: break; } OPENVINO_THROW("Unsupported dt: ", toString(t)); @@ -83,16 +85,26 @@ sdpa_config_t xehpg_h32_s64 = {16, 16, 16, 8, 4, 4, 2, 8}; sdpa_config_t xehpg_h32_s32 = {8, 8, 8, 8, 4, 4, 4, 4}; sdpa_config_t xehpg_h32_2nd = {8, 32, 16, 8, 8, 1, 2, 4}; +sdpa_config_t xehpg_q_h32 = {32, 16, 16, 16, 2, 8, 2, 8}; +sdpa_config_t xehpg_q_h32_2nd = {32, 16, 8, 8, 8, 1, 4, 2}; + sdpa_config_t xehpg_h64 = {32, 16, 16, 16, 4, 8, 4, 8}; sdpa_config_t xehpg_h64_s128 = {16, 16, 16, 16, 4, 8, 4, 8}; sdpa_config_t xehpg_h64_s64 = {32, 16, 16, 8, 8, 4, 4, 8}; sdpa_config_t xehpg_h64_2nd = {8, 16, 16, 8, 8, 1, 4, 2}; +sdpa_config_t xehpg_q_h64 = {32, 16, 16, 16, 4, 4, 4, 4}; +sdpa_config_t xehpg_q_h64_2nd = {16, 16, 8, 8, 16, 1, 8, 2}; + sdpa_config_t xehpg_h128 = {16, 16, 32, 8, 8, 4, 4, 8}; sdpa_config_t xehpg_h128_s32 = {16, 16, 16, 8, 16, 2, 8, 4}; sdpa_config_t xehpg_h128_2nd = {8, 16, 16, 8, 16, 1, 8, 2}; sdpa_config_t xehpg_h128_s256_2nd = {8, 16, 32, 8, 8, 1, 4, 2}; +sdpa_config_t xehpg_q_h128 = {32, 16, 16, 16, 8, 4, 8, 4}; +sdpa_config_t xehpg_q_h128_2nd = {32, 16, 16, 8, 16, 1, 8, 2}; +sdpa_config_t xehpg_q_h128_s64_2nd = {16, 16, 16, 8, 16, 1, 8, 2}; + sdpa_config_t xehpg_h256 = {16, 16, 32, 8, 16, 2, 8, 4}; sdpa_config_t xehpg_h256_s128 = {8, 16, 32, 16, 8, 4, 8, 4}; sdpa_config_t xehpg_h256_s32 = {8, 16, 32, 8, 16, 2, 8, 4}; @@ -110,28 +122,52 @@ sdpa_config_t xehpc_h64_s32 = {16, 16, 16, 16, 4, 2, 4, 2}; sdpa_config_t xehpc_h64_2nd = {32, 32, 32, 16, 4, 1, 2, 2}; sdpa_config_t xehpc_h64_s64_2nd = {16, 16, 16, 16, 4, 1, 4, 1}; +sdpa_config_t xehpc_q_h64 = {16, 64, 32, 16, 8, 4, 2, 16}; + sdpa_config_t xehpc_h128 = {16, 64, 32, 16, 16, 2, 4, 8}; sdpa_config_t xehpc_h128_s64 = {16, 32, 32, 32, 4, 2, 4, 2}; sdpa_config_t xehpc_h128_s32 = {16, 16, 16, 16, 8, 2, 8, 2}; sdpa_config_t xehpc_h128_2nd = {32, 32, 32, 16, 8, 1, 4, 2}; +sdpa_config_t xehpc_q_h128 = {16, 64, 16, 32, 16, 2, 8, 4}; 
+sdpa_config_t xehpc_q_h128_s64 = {16, 16, 32, 16, 4, 4, 4, 4}; +sdpa_config_t xehpc_q_h128_s32 = {16, 16, 32, 16, 4, 2, 4, 2}; +sdpa_config_t xehpc_q_h128_2nd = {32, 32, 16, 32, 4, 1, 4, 1}; +sdpa_config_t xehpc_q_h128_s32_2nd = {16, 32, 16, 16, 8, 1, 4, 2}; + sdpa_config_t xehpc_h256 = {16, 32, 32, 32, 8, 4, 8, 4}; sdpa_config_t xehpc_h256_s64 = {16, 32, 32, 32, 8, 1, 8, 1}; sdpa_config_t xehpc_h256_2nd = {16, 16, 16, 16, 16, 1, 16, 1}; -sdpa_config_t *choose_config_xehpg(int head_size, int seq, bool thin_q) { +sdpa_config_t *choose_config_xehpg(int head_size, int seq, bool thin_q, bool quantized) { if (head_size <= 32) { + if (quantized && seq >= 128) { + if (thin_q) return &xehpg_q_h32_2nd; + return &xehpg_q_h32; + } if (thin_q) return &xehpg_h32_2nd; if (seq <= 32) return &xehpg_h32_s32; if (seq <= 64) return &xehpg_h32_s64; if (seq <= 256) return &xehpg_h32_s256; return &xehpg_h32; } else if (head_size <= 64) { + if (quantized) { + if (thin_q) return &xehpg_q_h64_2nd; + return &xehpg_q_h64; + } if (thin_q) return &xehpg_h64_2nd; if (seq <= 64) return &xehpg_h64_s64; if (seq <= 128) return &xehpg_h64_s128; return &xehpg_h64; } else if (head_size <= 128) { + if (quantized) { + if (thin_q) { + if (seq <= 64) return &xehpg_q_h128_s64_2nd; + return &xehpg_q_h128_2nd; + } + if (seq <= 32) return &xehpg_h128_s32; + return &xehpg_q_h128; + } if (thin_q) { if (seq <= 256) return &xehpg_h128_s256_2nd; return &xehpg_h128_2nd; @@ -151,7 +187,7 @@ sdpa_config_t *choose_config_xehpg(int head_size, int seq, bool thin_q) { return nullptr; } -sdpa_config_t *choose_config_xehpc(int head_size, int seq, bool thin_q) { +sdpa_config_t *choose_config_xehpc(int head_size, int seq, bool thin_q, bool quantized) { if (head_size <= 32) { if (thin_q) return &xehpc_h32_2nd; if (seq <= 32) return &xehpc_h32_s32; @@ -161,10 +197,20 @@ sdpa_config_t *choose_config_xehpc(int head_size, int seq, bool thin_q) { if (seq <= 64) return &xehpc_h64_s64_2nd; return &xehpc_h64_2nd; } + if (quantized 
&& seq >= 256) return &xehpc_q_h64; if (seq <= 32) return &xehpc_h64_s32; if (seq <= 64) return &xehpc_h64_s64; return &xehpc_h64; } else if (head_size <= 128) { + if (quantized) { + if (thin_q) { + if (seq <= 32) return &xehpc_q_h128_s32_2nd; + return &xehpc_q_h128_2nd; + } + if (seq <= 32) return &xehpc_q_h128_s32; + if (seq <= 64) return &xehpc_q_h128_s64; + return &xehpc_q_h128; + } if (thin_q) return &xehpc_h128_2nd; if (seq <= 32) return &xehpc_h128_s32; if (seq <= 64) return &xehpc_h128_s64; @@ -179,6 +225,11 @@ sdpa_config_t *choose_config_xehpc(int head_size, int seq, bool thin_q) { } // namespace +const bool kq_common_scales = false; +const bool kq_common_zp = false; +const bool vs_common_scales = false; +const bool vs_common_zp = false; + std::mutex SDPAKernelMicro::m; void SDPAKernelMicro::init_microkernels(const sdpa_params& params, micro::Package& gemm_kq, micro::Package& gemm_vs, bool is_prefill) const { @@ -200,15 +251,18 @@ void SDPAKernelMicro::init_microkernels(const sdpa_params& params, micro::Packag sdpa_config_t *config = nullptr; bool thin_q = (!n_queries.is_dynamic && (n_queries.v <= 16)) || !is_prefill; + bool is_quantized = (K.GetDType() == Datatype::UINT8 || K.GetDType() == Datatype::INT8) || + (V.GetDType() == Datatype::UINT8 || V.GetDType() == Datatype::INT8); + switch (params.engineInfo.arch) { case gpu_arch::xe_hpg: { - config = choose_config_xehpg(static_cast(head_size), static_cast(n_keys.v), thin_q); + config = choose_config_xehpg(static_cast(head_size), static_cast(n_keys.v), thin_q, is_quantized); break; } case gpu_arch::xe_hpc: case gpu_arch::xe2: case gpu_arch::xe3: { - config = choose_config_xehpc(static_cast(head_size), static_cast(n_keys.v), thin_q); + config = choose_config_xehpc(static_cast(head_size), static_cast(n_keys.v), thin_q, is_quantized); break; } default: break; @@ -224,13 +278,47 @@ void SDPAKernelMicro::init_microkernels(const sdpa_params& params, micro::Packag /* Set up GEMMProblem structure for first GEMM: 
K^T * Q */ micro::GEMMProblem problem; - problem.Ta = problem.Ta_ext = convert_type(K.GetDType()); - problem.Tb = problem.Tb_ext = convert_type(Q.GetDType()); + problem.Ta_ext = convert_type(K.GetDType()); + problem.Tb_ext = convert_type(Q.GetDType()); + + problem.Ta = problem.Tb = micro::Type::f16; problem.Tc = problem.Tc_ext = micro::Type::f32; problem.Ts = problem.Tc; auto problem_kq = problem; problem_kq.A.layout = micro::MatrixLayout::T; + + /* Set up microkernel options */ + micro::GEMMProtocol::Options opts_kq; + opts_kq.localB = true; + opts_kq.slmPtr = true; + + if (params.conf.is_kv_compressed && !kq_common_scales) { + const auto scale_dt = convert_type(params.key_cache_comp_scale.GetDType()); + problem_kq.Ta_scale = scale_dt; + problem_kq.A_scale.alignment = micro::data_type_size(scale_dt); + + problem_kq.A_scale.layout = micro::MatrixLayout::T; + problem_kq.aScale2D = true; + } + + if (params.conf.is_kv_compressed && params.conf.use_asymmetric_quantization) { + const auto zp_dt = convert_type(params.key_cache_comp_zp.GetDType()); + problem_kq.Tao = zp_dt; + problem_kq.AO.alignment = micro::data_type_size(zp_dt); + problem_kq.AO.layout = micro::MatrixLayout::T; + problem_kq.aoPtrDims = kq_common_zp ? 0 : 2; + problem_kq.aOffset = micro::ABOffset::Calc; + } + + if (params.conf.is_kv_compressed) { + problem_kq.aqGroupM = 1; + problem_kq.aqGroupK = (kq_common_scales || kq_common_zp) ? 
1 : params.conf.head_size; + } + + opts_kq.scaleA = params.conf.is_kv_compressed && !kq_common_scales; + opts_kq.offsetA = params.conf.is_kv_compressed && params.conf.use_asymmetric_quantization; + problem_kq.B.layout = micro::MatrixLayout::Pr; problem_kq.C.layout = micro::MatrixLayout::T; problem_kq.A.setAlignment(micro::alignment_for_ld(head_size * problem.Ta)); @@ -253,18 +341,49 @@ void SDPAKernelMicro::init_microkernels(const sdpa_params& params, micro::Packag reqs_kq.push_back(micro::StrategyRequirement::WGM == config->wg_m_kq); reqs_kq.push_back(micro::StrategyRequirement::WGN == config->wg_n_kq); - /* Set up microkernel options */ - micro::GEMMProtocol::Options opts_kq; - opts_kq.localB = true; - opts_kq.slmPtr = true; - /* Ask microkernel provider for microkernel */ - gemm_kq = micro::select_gemm_microkernel(opts_kq, hw_info, sizes, problem_kq, reqs_kq); + try { + gemm_kq = micro::select_gemm_microkernel(opts_kq, hw_info, sizes, problem_kq, reqs_kq); + } catch (const std::runtime_error &ex) { + GPU_DEBUG_TRACE_DETAIL << "Can't create KQ sdpa_micro kernel: " << ex.what() << "\n"; + throw; + } + + /* Set up microkernel options */ + micro::GEMMProtocol::Options opts_vs; + opts_vs.localB = true; + opts_vs.slmPtr = true; /* Update for second GEMM: V*S */ auto problem_vs = problem; - problem_vs.Ta = problem_vs.Ta_ext = convert_type(V.GetDType()); + problem_vs.Ta_ext = convert_type(V.GetDType()); problem_vs.A.layout = micro::MatrixLayout::N; + + if (params.conf.is_kv_compressed && !vs_common_scales) { + auto scale_dt = convert_type(params.value_cache_comp_scale.GetDType()); + problem_vs.Ta_scale = scale_dt; + problem_vs.A_scale.alignment = micro::data_type_size(scale_dt); + problem_vs.A_scale.layout = micro::MatrixLayout::N; + problem_vs.aScale2D = true; + } + + if (params.conf.is_kv_compressed && params.conf.use_asymmetric_quantization) { + auto zp_dt = convert_type(params.value_cache_comp_zp.GetDType()); + problem_vs.Tao = zp_dt; + problem_vs.AO.alignment = 
micro::data_type_size(zp_dt); + problem_vs.AO.layout = micro::MatrixLayout::N; + problem_vs.aoPtrDims = vs_common_zp ? 0 : 2; + problem_vs.aOffset = micro::ABOffset::Calc; + } + + if (params.conf.is_kv_compressed) { + problem_vs.aqGroupM = (vs_common_scales || vs_common_zp) ? 1 : micro::rnd_up_pow2(params.conf.head_size); + problem_vs.aqGroupK = 1; + } + + opts_vs.scaleA = params.conf.is_kv_compressed && !vs_common_scales; + opts_vs.offsetA = params.conf.is_kv_compressed && params.conf.use_asymmetric_quantization; + problem_vs.B.layout = micro::MatrixLayout::Pr; problem_vs.C.layout = micro::MatrixLayout::N; problem_vs.A.setAlignment(micro::alignment_for_ld(head_size * problem.Ta)); @@ -281,20 +400,23 @@ void SDPAKernelMicro::init_microkernels(const sdpa_params& params, micro::Packag reqs_vs.push_back(micro::StrategyRequirement::WGM == config->wg_m_vs); reqs_vs.push_back(micro::StrategyRequirement::WGN == config->wg_n_vs); - micro::GEMMProtocol::Options opts_vs; - opts_vs.localB = true; - opts_vs.slmPtr = true; - auto adjust_vs = [](micro::GEMMStrategy &strategy) { /* Enable dpasw */ strategy.dpasw |= strategy.fused; }; /* Ask microkernel provider for microkernel */ - gemm_vs = micro::select_gemm_microkernel(opts_vs, hw_info, sizes, problem_vs, reqs_vs, adjust_vs); + try { + gemm_vs = micro::select_gemm_microkernel(opts_vs, hw_info, sizes, problem_vs, reqs_vs, adjust_vs); + } catch (const std::runtime_error &ex) { + GPU_DEBUG_TRACE_DETAIL << "Can't create VS sdpa_micro kernel: " << ex.what() << "\n"; + throw; + } } ParamsKey SDPAKernelMicro::GetSupportedKey() const { ParamsKey k; + k.EnableInputDataType(Datatype::INT8); + k.EnableInputDataType(Datatype::UINT8); k.EnableInputDataType(Datatype::F16); k.EnableOutputDataType(Datatype::F16); @@ -344,9 +466,6 @@ bool SDPAKernelMicro::Validate(const Params& p) const { if (params.conf.head_size > 256) return false; - if (params.conf.is_kv_compressed) - return false; - // Do not use sdpa_micro kernel with a scalar-value mask 
if (params.inputs.size() > 3 && !params.inputs[3].is_dynamic() && params.inputs[3].LogicalSize() == 1) return false; @@ -388,6 +507,52 @@ JitConstants SDPAKernelMicro::GetJitConstants(const sdpa_params& params, const m jit.AddConstant(MakeJitConstant("TRANSPOSE_K", false)); + jit.AddConstant(MakeJitConstant("QRY_DATA_T", toCLType(Q.GetDType()))); + jit.AddConstant(MakeJitConstant("KEY_DATA_T", toCLType(K.GetDType()))); + jit.AddConstant(MakeJitConstant("VAL_DATA_T", toCLType(V.GetDType()))); + + if (params.conf.is_kv_compressed) { + jit.AddConstant(MakeJitConstant("KV_COMPRESSED", 1)); + jit.AddConstant(MakeJitConstant("KEY_ATTR_SCALES_DATA_T", toCLType(params.key_cache_comp_scale.GetDType()))); + jit.AddConstant(MakeJitConstant("VAL_ATTR_SCALES_DATA_T", toCLType(params.value_cache_comp_scale.GetDType()))); + + if (params.conf.use_asymmetric_quantization) { + jit.AddConstant(MakeJitConstant("KEY_ATTR_ZP_DATA_T", toCLType(params.key_cache_comp_zp.GetDType()))); + jit.AddConstant(MakeJitConstant("VAL_ATTR_ZP_DATA_T", toCLType(params.value_cache_comp_zp.GetDType()))); + } + } + + auto elems_per_byte = [](Datatype dt) { + switch (dt) { + case Datatype::UINT4: + case Datatype::INT4: + return 2; + default: + return 1; + } + }; + + jit.AddConstant(MakeJitConstant("KEY_ELEMENTS_PER_BYTE", elems_per_byte(params.inputs[1].GetDType()))); + jit.AddConstant(MakeJitConstant("VAL_ELEMENTS_PER_BYTE", elems_per_byte(params.inputs[2].GetDType()))); + + if (params.conf.is_kv_compressed) { + int kq_scale_mask = (static_cast(params.conf.is_kv_compressed) << 1) | static_cast(kq_common_scales); + int vs_scale_mask = (static_cast(params.conf.is_kv_compressed) << 1) | static_cast(vs_common_scales); + jit.AddConstant(MakeJitConstant("KEY_SCALES", kq_scale_mask)); + jit.AddConstant(MakeJitConstant("VAL_SCALES", vs_scale_mask)); + jit.AddConstant(MakeJitConstant("KEY_GROUP_SIZE", params.conf.head_size)); + jit.AddConstant(MakeJitConstant("VAL_GROUP_SIZE", params.conf.head_size)); + + if 
(params.conf.use_asymmetric_quantization) { + int kq_zp_mask = (static_cast(params.conf.use_asymmetric_quantization) << 1) | static_cast(kq_common_zp); + int vs_zp_mask = (static_cast(params.conf.use_asymmetric_quantization) << 1) | static_cast(vs_common_zp); + jit.AddConstant(MakeJitConstant("KEY_ZERO_POINTS", kq_zp_mask)); + jit.AddConstant(MakeJitConstant("VAL_ZERO_POINTS", vs_zp_mask)); + jit.AddConstant(MakeJitConstant("KEY_ZP_ELEMENTS_PER_BYTE", elems_per_byte(params.key_cache_comp_zp.GetDType()))); + jit.AddConstant(MakeJitConstant("VAL_ZP_ELEMENTS_PER_BYTE", elems_per_byte(params.value_cache_comp_zp.GetDType()))); + } + } + int tile_k = gemm_kq.getSetting("wg_tile_m"); int tile_q = gemm_kq.getSetting("wg_tile_n"); int tile_v = gemm_vs.getSetting("wg_tile_m"); @@ -470,6 +635,18 @@ JitConstants SDPAKernelMicro::GetJitConstants(const sdpa_params& params, const m jit.Merge(unit_parameters("VAL")); jit.Merge(unit_parameters("DST")); + if (params.conf.is_kv_compressed) { + jit.AddConstant(MakeJitConstant("KEY_SCALE", params.key_cache_comp_scale)); + jit.AddConstant(MakeJitConstant("VAL_SCALE", params.value_cache_comp_scale)); + + const std::vector default_order = { 0, 1, 2, 3 }; + jit.Merge(convert_strides("KEY_COMP", "KEY_SCALE", default_order)); + jit.Merge(convert_strides("VAL_COMP", "VAL_SCALE", default_order)); + + jit.Merge(unit_parameters("KEY_COMP")); + jit.Merge(unit_parameters("VAL_COMP")); + } + return jit; } @@ -521,6 +698,17 @@ clKernelData SDPAKernelMicro::get_kernel_data(const sdpa_params& params, bool is kernel.params.arguments.push_back({ArgumentDescriptor::Types::SCALAR, 1}); // K kernel.params.arguments.push_back({ArgumentDescriptor::Types::SCALAR, 2}); // Q + if (params.conf.is_kv_compressed) { + uint32_t input_idx = static_cast(params.inputs.size()); + kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, input_idx + 0}); // K scales + if (params.conf.use_asymmetric_quantization) + 
kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, input_idx + 2}); // K zp + + kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, input_idx + 1}); // V scales + if (params.conf.use_asymmetric_quantization) + kernel.params.arguments.push_back({ArgumentDescriptor::Types::INPUT, input_idx + 3}); // V zp + } + const auto& Q = params.inputs[0]; const auto& K = params.inputs[1]; diff --git a/src/plugins/intel_gpu/src/kernel_selector/micro_utils.hpp b/src/plugins/intel_gpu/src/kernel_selector/micro_utils.hpp index c6b0e031a027e8..2d28caec5694af 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/micro_utils.hpp +++ b/src/plugins/intel_gpu/src/kernel_selector/micro_utils.hpp @@ -20,12 +20,14 @@ #include "gpu/intel/microkernels/package.hpp" #include "gpu/intel/jit/gemm/include/microkernel_provider.hpp" #include "gpu/intel/microkernels/shim.hpp" +#include "common/utils.hpp" namespace micro { using Package = dnnl::impl::gpu::intel::micro::Package; using HWInformation = dnnl::impl::gpu::intel::jit::HWInformation; using GEMMProblem = dnnl::impl::gpu::intel::jit::GEMMProblem; +using ABOffset = dnnl::impl::gpu::intel::jit::ABOffset; using GEMMStrategy = dnnl::impl::gpu::intel::jit::GEMMStrategy; using GEMMProtocol = dnnl::impl::gpu::intel::micro::GEMMProtocol; using MatrixLayout = dnnl::impl::gpu::intel::jit::MatrixLayout; @@ -36,6 +38,8 @@ using ShimOptions = dnnl::impl::gpu::intel::micro::ShimOptions; using HostLanguage = dnnl::impl::gpu::intel::micro::HostLanguage; using Setting = dnnl::impl::gpu::intel::micro::Setting; +using dnnl::impl::utils::rnd_up_pow2; + // Wrapper for Package which is used in clKernelData with forward declaration // to avoid including this header in many places in plugin // which may cause symbols conflicts with oneDNN @@ -77,6 +81,10 @@ static inline int alignment_for_ld(int ld) { return dnnl::impl::gpu::intel::jit::alignmentForLD(ld); } +static inline uint8_t data_type_size(micro::Type dt) { + return 
uint8_t(dnnl::impl::types::data_type_size(micro::Type(dt).get_dnnl_type())); +} + } // namespace micro #undef UNUSED diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp index 6a50a55e619fc9..c63a0b27f38577 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.cpp @@ -127,17 +127,18 @@ std::shared_ptr class KVCacheCompressionMatcher : public ov::pass::MatcherPass { public: OPENVINO_MATCHER_PASS_RTTI("KVCacheCompressionMatcher"); - KVCacheCompressionMatcher(ov::element::Type compression_dt); + KVCacheCompressionMatcher(ov::element::Type compression_dt, bool supports_immad); }; -KVCacheCompressionMatcher::KVCacheCompressionMatcher(ov::element::Type compression_dt) { +KVCacheCompressionMatcher::KVCacheCompressionMatcher(ov::element::Type compression_dt, bool supports_immad) { using namespace ov::pass::pattern; if (compression_dt != element::i8 && compression_dt != element::u8) return; const auto quantization_type = ov::op::internal::DynamicQuantize::QuantizationType::Asymmetric; - const auto output_storage_type = ov::op::internal::DynamicQuantize::OutputStorageType::InterleavedScalesZP; + const auto output_storage_type = supports_immad ? 
ov::op::internal::DynamicQuantize::OutputStorageType::Planar + : ov::op::internal::DynamicQuantize::OutputStorageType::InterleavedScalesZP; bool combine_scales_and_zp = output_storage_type == ov::op::internal::DynamicQuantize::OutputStorageType::InterleavedScalesZP; GPU_DEBUG_LOG << "KV-cache compression configuration: " @@ -219,7 +220,7 @@ KVCacheCompressionMatcher::KVCacheCompressionMatcher(ov::element::Type compressi config.output_storage_type = output_storage_type; if (config.quantization_type == ov::op::internal::DynamicQuantize::QuantizationType::Asymmetric) - config.zp_dt = query_node->get_output_element_type(0); + config.zp_dt = supports_immad ? element::i8 : query_node->get_output_element_type(0); key_past_rv_node = update_past_read_value(key_past_rv_node, config); value_past_rv_node = update_past_read_value(value_past_rv_node, config); @@ -284,8 +285,8 @@ bool KVCacheCompression::run_on_model(const std::shared_ptr& m) { return pass::GraphRewrite::run_on_model(m); } -KVCacheCompression::KVCacheCompression(ov::element::Type compression_dt) { - add_matcher(compression_dt); +KVCacheCompression::KVCacheCompression(ov::element::Type compression_dt, bool supports_immad) { + add_matcher(compression_dt, supports_immad); } } // namespace intel_gpu diff --git a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp index 036fdb78914891..f4a930686520ba 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/kv_cache_compression.hpp @@ -32,8 +32,9 @@ namespace intel_gpu { class KVCacheCompression : public ov::pass::GraphRewrite { public: + OPENVINO_GRAPH_REWRITE_RTTI("KVCacheCompression"); - KVCacheCompression(ov::element::Type compression_dt); + KVCacheCompression(ov::element::Type compression_dt, bool supports_immad); bool run_on_model(const std::shared_ptr& m) override; }; diff --git 
a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp index 6721d0f9ebd608..908732dc357222 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp @@ -191,7 +191,8 @@ std::vector shape_infer(const KVCacheCompressed* op, auto quantized_data_shapes = ov::op::internal::DynamicQuantize::shape_infer(&dq_op, { input_shapes[1] }); - const auto scales_concat_axis = 2; + const auto concat_axis = ov::util::normalize(op->get_concat_axis(), input_shapes[0].size()); + const auto scales_concat_axis = op->get_quantization_attrs().scales_zp_output_order[concat_axis]; ov::PartialShape compression_scale_shape = input_shapes[3]; compression_scale_shape[scales_concat_axis] += quantized_data_shapes[1][scales_concat_axis]; out_shapes[2] = compression_scale_shape; diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index c893e14f193a93..9094354a03fbe8 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -1034,7 +1034,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { manager.register_pass(); auto kv_cache_compression_dt = config.get_property(ov::hint::kv_cache_precision); - manager.register_pass(kv_cache_compression_dt); + manager.register_pass(kv_cache_compression_dt, device_info.supports_immad); manager.register_pass(); diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 16c47b7116853b..4ba78c74ee7597 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -235,7 +235,7 @@ void ExecutionConfig::update_specific_default_properties(const cldnn::device_inf return; 
specific_default_properties_is_set = true; - // Enable KV-cache compression by default for non-systolic platforms + // Enable KV-cache compression by default for non-systolic platforms MFDNN-11755 if (get_property(ov::hint::kv_cache_precision) == ov::element::undefined && !info.supports_immad) { set_property(ov::hint::kv_cache_precision(ov::element::i8)); } diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp index fe9b917bee9aef..3bf1a9e937c37c 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/dynamic_quantize_gpu_test.cpp @@ -71,10 +71,14 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { dq_config.scales_zp_output_order.emplace_back(3); dq_config.output_storage_type = storage_type; + bool has_zp_output = dq_config.quantization_type == QuantizationType::Asymmetric && + dq_config.output_storage_type == OutputStorageType::Planar; + auto reorder_1 = reorder("reorder_1", input_info("input"), layout{ input_ps, data_types::f16, format::bfyx }); auto dyn_quan_prim = dynamic_quantize("dyn_quan_prim", input_info("reorder_1"), dq_config); auto reorder_data = reorder("reorder_data", input_info("dyn_quan_prim", 0), layout{ input_ps, data_types::f16, format::bfyx }); auto reorder_scale = reorder("reorder_scale", input_info("dyn_quan_prim", 1), layout{ scales_ps, data_types::f16, format::bfyx }); + auto reorder_zp = reorder("reorder_zp", input_info("dyn_quan_prim", 2), layout{ scales_ps, data_types::f16, format::bfyx }); // Implemented dynamic quantize kernel auto get_ref_results = [&]() { @@ -86,6 +90,9 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { reorder_scale ); + if (has_zp_output) + topology.add(reorder_zp); + auto config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); 
config.set_property(ov::intel_gpu::optimize_data(true)); @@ -98,19 +105,27 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { auto outputs = network.execute(); - auto output_layout = outputs.begin()->second.get_layout(); - auto output_mem = outputs.begin()->second.get_memory(); + std::vector output_buffers; + for (const auto& output : outputs) { + auto output_layout = output.second.get_layout(); + auto output_mem = output.second.get_memory(); + output_buffers.push_back(engine.reinterpret_buffer(*output_mem, output_layout)); + } - return engine.reinterpret_buffer(*output_mem, output_layout); + return output_buffers; }; topology topology( input_layout("input", in_layout_f32), reorder_1, dyn_quan_prim, - reorder_data + reorder_data, + reorder_scale ); + if (has_zp_output) + topology.add(reorder_zp); + auto config = get_test_default_config(engine); config.set_property(ov::intel_gpu::allow_new_shape_infer(true)); config.set_property(ov::intel_gpu::optimize_data(true)); @@ -126,23 +141,28 @@ class dynamic_quantization_gpu_tests: public ::testing::Test { auto outputs = network->execute(); - auto output_mem = outputs.begin()->second.get_memory(); - cldnn::mem_lock output_ptr (output_mem, get_test_stream()); - - auto ref_output_mem = get_ref_results(); - cldnn::mem_lock output_ptr_ref (ref_output_mem, get_test_stream()); - size_t count = 0; - float max_diff = 0.f; - float avg = 0.f; - for (size_t i = 0; i < output_ptr_ref.size(); ++i) { - auto abs_diff = std::abs(output_ptr_ref[i] - output_ptr[i]); - if (max_diff < abs_diff) - max_diff = abs_diff; - avg += abs_diff; - count++; - ASSERT_LE(abs_diff, 1); + std::vector output_buffers; + for (const auto& output : outputs) { + auto output_layout = output.second.get_layout(); + auto output_mem = output.second.get_memory(); + output_buffers.push_back(engine.reinterpret_buffer(*output_mem, output_layout)); + } + + auto ref_output_buffers = get_ref_results(); + + ASSERT_EQ(ref_output_buffers.size(), 
output_buffers.size()); + + std::cout << "Outputs number: " << ref_output_buffers.size() << "\n"; + + for (size_t i = 0; i < ref_output_buffers.size(); i++) { + cldnn::mem_lock output_ptr(output_buffers[i], get_test_stream()); + cldnn::mem_lock output_ptr_ref(ref_output_buffers[i], get_test_stream()); + + for (size_t i = 0; i < output_ptr_ref.size(); ++i) { + auto abs_diff = std::abs(output_ptr_ref[i] - output_ptr[i]); + ASSERT_LE(abs_diff, 1); + } } - GPU_DEBUG_LOG << "---> count: " << count << ", max_diff:" << max_diff << ", avg_diff: " << (avg/count) << std::endl; } }; @@ -215,21 +235,67 @@ TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_reorde data_types::i8, data_types::undefined, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); } -TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_asym) { +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_asym_planar) { + this->test_dynamic_quantization(false, {-1, 8, -1, 96}, {1, 8, 1, 96}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::f16, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_asym_planar) { + this->test_dynamic_quantization(false, {-1, 4, -1, 64}, {1, 4, 35, 64}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::f16, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_reordered_asym_planar) { + this->test_dynamic_quantization(false, {-1, -1, 8, 96}, {1, 1, 8, 96}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::f16, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_reordered_asym_planar) { + this->test_dynamic_quantization(false, {-1, -1, 4, 64}, {1, 35, 4, 64}, QuantizationType::Asymmetric, UINT64_MAX, + 
data_types::i8, data_types::f16, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_asym_interleaved) { this->test_dynamic_quantization(false, {-1, 8, -1, 96}, {1, 8, 1, 96}, QuantizationType::Asymmetric, UINT64_MAX, data_types::i8, data_types::f16, OutputStorageType::InterleavedScalesZP, "dynamic_quantize_gpu_kv_cache"); } -TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_asym) { +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_asym_interleaved) { this->test_dynamic_quantization(false, {-1, 4, -1, 64}, {1, 4, 35, 64}, QuantizationType::Asymmetric, UINT64_MAX, data_types::i8, data_types::f16, OutputStorageType::InterleavedScalesZP, "dynamic_quantize_gpu_kv_cache"); } -TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_reordered_asym) { +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_reordered_asym_interleaved) { this->test_dynamic_quantization(false, {-1, -1, 8, 96}, {1, 1, 8, 96}, QuantizationType::Asymmetric, UINT64_MAX, data_types::i8, data_types::f16, OutputStorageType::InterleavedScalesZP, "dynamic_quantize_gpu_kv_cache"); } + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_reordered_asym_interleaved) { + this->test_dynamic_quantization(false, {-1, -1, 4, 64}, {1, 35, 4, 64}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::f16, OutputStorageType::InterleavedScalesZP, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_asym_planar_i8_zp) { + this->test_dynamic_quantization(false, {-1, 8, -1, 32}, {1, 8, 1, 32}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::i8, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_asym_planar_i8_zp) { + this->test_dynamic_quantization(false, {-1, 4, 
-1, 64}, {1, 4, 35, 64}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::i8, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_reordered_asym_planar_i8_zp) { + this->test_dynamic_quantization(false, {-1, -1, 8, 96}, {1, 1, 8, 96}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::i8, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + +TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_batched_reordered_asym_planar_i8_zp) { + this->test_dynamic_quantization(false, {-1, -1, 4, 64}, {1, 35, 4, 64}, QuantizationType::Asymmetric, UINT64_MAX, + data_types::i8, data_types::i8, OutputStorageType::Planar, "dynamic_quantize_gpu_kv_cache"); +} + TEST_F(dynamic_quantization_gpu_tests, simple_quantizing_kv_cache_inner_most_dim_zero_values_asym) { this->test_dynamic_quantization(false, {-1, 8, -1, 128}, {1, 8, 52, 128}, QuantizationType::Asymmetric, UINT64_MAX, data_types::i8, data_types::f16, OutputStorageType::InterleavedScalesZP, "dynamic_quantize_gpu_kv_cache", true); diff --git a/src/plugins/intel_gpu/tests/unit/transformations/kv_cache_compression.cpp b/src/plugins/intel_gpu/tests/unit/transformations/kv_cache_compression.cpp index 67123f1d84cfe7..ca101d1b5d2c6c 100644 --- a/src/plugins/intel_gpu/tests/unit/transformations/kv_cache_compression.cpp +++ b/src/plugins/intel_gpu/tests/unit/transformations/kv_cache_compression.cpp @@ -93,7 +93,7 @@ TEST_F(TransformationTestsF, KVCacheCompression) { ov::ResultVector results{ result }; model = std::make_shared(results, params); - manager.register_pass(ov::element::i8); + manager.register_pass(ov::element::i8, false); } { ov::op::internal::DynamicQuantize::Attributes dq_config; @@ -244,7 +244,7 @@ TEST_F(TransformationTestsF, KVCacheCompressionWithInitializers) { ov::ResultVector results{ result }; model = std::make_shared(results, params); - 
manager.register_pass(ov::element::i8); + manager.register_pass(ov::element::i8, false); } { ov::op::internal::DynamicQuantize::Attributes dq_config; From 513dcc5c7b7a0a4d85ec9e59e6f7e50040008fba Mon Sep 17 00:00:00 2001 From: Michal Miotk Date: Thu, 16 Jan 2025 13:48:56 +0100 Subject: [PATCH 09/97] [GPU] prevent too long sort in experimental detectron generate proposals single image (#28422) ### Details: - changed quickSortIterative to quickSelectIterative - added changing pivot position - issue happens on ref kernel when many zeroes on fp16 inference - on UHD770 fp32 inference on model instance segmentation-security-0010.xml speed up 0.14fps->0.19fps ### Tickets: - CVS-160165 --------- Co-authored-by: Your Name --- ...ron_generate_proposals_single_image_ref.cl | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/experimental_detectron_generate_proposals_single_image_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/experimental_detectron_generate_proposals_single_image_ref.cl index aa9cd2d3c387e6..5ea7c3be62e0df 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/experimental_detectron_generate_proposals_single_image_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/experimental_detectron_generate_proposals_single_image_ref.cl @@ -102,7 +102,17 @@ inline void FUNC(swap_box)(__global Box* a, __global Box* b) { } inline int FUNC(partition)(__global Box* arr, int l, int h) { - INPUT0_TYPE pivotScore = arr[h].score; + static int static_counter = 0; + static_counter++; + int pivot_idx = l; + if (static_counter%3 == 0) { //cyclic pivot selection rotation + pivot_idx = (l+h)/2; + } + if (static_counter%3 == 1) { + pivot_idx = h; + } + INPUT0_TYPE pivotScore = arr[pivot_idx].score; + FUNC_CALL(swap_box)(&arr[h], &arr[pivot_idx]); int i = (l - 1); for (int j = l; j <= h - 1; j++) { if (arr[j].score > pivotScore) { @@ -129,7 +139,7 @@ inline void 
FUNC(bubbleSortIterative)(__global Box* arr, int l, int h) { } } -inline void FUNC(quickSortIterative)(__global Box* arr, int l, int h) { +inline void FUNC(quickSelectIterative)(__global Box* arr, int l, int h) { // Create an auxiliary stack const int kStackSize = 100; int stack[kStackSize]; @@ -153,7 +163,7 @@ inline void FUNC(quickSortIterative)(__global Box* arr, int l, int h) { // If there are elements on left side of pivot, // then push left side to stack - if (p - 1 > l) { + if (p - 1 > l && l < PRE_NMS_TOPN) { if (top >= (kStackSize - 1)) { FUNC_CALL(bubbleSortIterative)(arr, l, p - 1); } else { @@ -164,7 +174,7 @@ inline void FUNC(quickSortIterative)(__global Box* arr, int l, int h) { // If there are elements on right side of pivot, // then push right side to stack - if (p + 1 < h) { + if (p + 1 < h && p + 1 < PRE_NMS_TOPN) { if (top >= (kStackSize - 1)) { FUNC_CALL(bubbleSortIterative)(arr, p + 1, h); } else { @@ -179,7 +189,7 @@ inline void FUNC(quickSortIterative)(__global Box* arr, int l, int h) { KERNEL(edgpsi_ref_stage_1)(__global OUTPUT_TYPE* proposals) { __global Box* boxes = (__global Box*)proposals; - FUNC_CALL(quickSortIterative)(boxes, 0, NUM_PROPOSALS-1); + FUNC_CALL(quickSelectIterative)(boxes, 0, NUM_PROPOSALS-1); } #undef Box #endif /* EDGPSI_STAGE_1 */ From c849f725a662dd6bfc2d273ec3605b17532187ca Mon Sep 17 00:00:00 2001 From: Sun Xiaoxia Date: Thu, 16 Jan 2025 21:21:43 +0800 Subject: [PATCH 10/97] Reserving CPU resource in CPU inference (#27321) ### Details: - Add property `ov::hint::enable_cpu_reservation` to reserve CPU resource in CPU inference - `ov::hint::enable_cpu_reservation` defaults to false, user can explicitly set it to true to enable CPU reservation. 
- update proc_type_table before stream scheduling in compile_model() ### Tickets: - *CVS-155268* - *https://github.com/openvinotoolkit/openvino/issues/27083* --------- Co-authored-by: Shen, Wanglei Co-authored-by: Chen Peter --- .../cpu-device.rst | 1 + .../dev_api/openvino/runtime/system_conf.hpp | 7 + .../threading/cpu_streams_executor.hpp | 2 + .../runtime/threading/istreams_executor.hpp | 23 +- .../include/openvino/runtime/properties.hpp | 17 + .../dev/threading/cpu_streams_executor.cpp | 26 +- .../src/dev/threading/istreams_executor.cpp | 37 +- src/inference/src/os/cpu_map_info.hpp | 32 - src/inference/src/os/lin/lin_system_conf.cpp | 5 - src/inference/src/os/win/win_system_conf.cpp | 5 - src/inference/src/system_conf.cpp | 43 + .../unit/cpu_map_parser/update_proc_table.cpp | 173 --- .../tests/unit/executor_config_test.cpp | 285 +++- src/plugins/intel_cpu/src/compiled_model.cpp | 4 + src/plugins/intel_cpu/src/compiled_model.h | 2 + src/plugins/intel_cpu/src/config.cpp | 10 + src/plugins/intel_cpu/src/config.h | 1 + .../intel_cpu/src/cpu_map_scheduling.cpp | 9 +- .../intel_cpu/src/cpu_map_scheduling.hpp | 2 + .../intel_cpu/src/cpu_streams_calculation.cpp | 65 +- .../intel_cpu/src/cpu_streams_calculation.hpp | 15 +- src/plugins/intel_cpu/src/plugin.cpp | 4 + .../ov_executable_network/properties.cpp | 1 + .../custom/behavior/ov_plugin/properties.cpp | 1 + .../compiled_model/cpu_reservation_test.cpp | 90 ++ .../behavior/ov_plugin/properties_tests.cpp | 1 + .../unit/streams_info/cpu_pinning_test.cpp | 1 + .../unit/streams_info/streams_e2e_test.cpp | 1327 ++++++++++++++++- .../streams_info/streams_info_table_test.cpp | 208 +-- .../streams_info/update_proc_table_test.cpp | 72 + src/plugins/intel_gpu/src/graph/program.cpp | 1 + .../intel_gpu/src/plugin/compiled_model.cpp | 9 +- src/plugins/intel_gpu/src/plugin/plugin.cpp | 1 + .../src/runtime/execution_config.cpp | 11 + .../concurrency/gpu_reservation_test.cpp | 57 + .../behavior/ov_plugin/properties_tests.cpp | 1 + 
36 files changed, 2048 insertions(+), 501 deletions(-) delete mode 100644 src/inference/tests/unit/cpu_map_parser/update_proc_table.cpp create mode 100644 src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp create mode 100644 src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp create mode 100644 src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst index 30d376e18a608a..00fd19dd404b11 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst @@ -355,6 +355,7 @@ All parameters must be set before calling ``ov::Core::compile_model()`` in order - ``ov::hint::num_request`` - ``ov::hint::scheduling_core_type`` - ``ov::hint::enable_hyper_threading`` +- ``ov::hint::enable_cpu_reservation`` - ``ov::hint::enable_cpu_pinning`` - ``ov::num_streams`` - ``ov::inference_num_threads`` diff --git a/src/inference/dev_api/openvino/runtime/system_conf.hpp b/src/inference/dev_api/openvino/runtime/system_conf.hpp index 12a625c112d342..617e4eeff1146b 100644 --- a/src/inference/dev_api/openvino/runtime/system_conf.hpp +++ b/src/inference/dev_api/openvino/runtime/system_conf.hpp @@ -219,6 +219,13 @@ OPENVINO_RUNTIME_API std::vector> get_proc_type_table(); */ OPENVINO_RUNTIME_API int get_current_socket_id(); +/** + * @brief Returns the numa node ID in cpu mapping table of the currently running thread. 
+ * @ingroup ov_dev_api_system_conf + * @return numa node ID in cpu mapping + */ +OPENVINO_RUNTIME_API int get_current_numa_node_id(); + /** * @brief Returns a table of original number of processor types without filtering other plugins occupying CPU * resources. The difference from get_proc_type_table: This is used to get the configuration of current machine. For diff --git a/src/inference/dev_api/openvino/runtime/threading/cpu_streams_executor.hpp b/src/inference/dev_api/openvino/runtime/threading/cpu_streams_executor.hpp index 26b0592c94ce10..2ee4aa6fe05871 100644 --- a/src/inference/dev_api/openvino/runtime/threading/cpu_streams_executor.hpp +++ b/src/inference/dev_api/openvino/runtime/threading/cpu_streams_executor.hpp @@ -57,6 +57,8 @@ class OPENVINO_RUNTIME_API CPUStreamsExecutor : public IStreamsExecutor { std::vector get_rank() override; + void cpu_reset() override; + private: struct Impl; std::unique_ptr _impl; diff --git a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp index 4ce7a76cbd7743..efb9d41a4dd5a6 100644 --- a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp +++ b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp @@ -12,6 +12,7 @@ #include #include #include +#include #include "openvino/runtime/common.hpp" #include "openvino/runtime/properties.hpp" @@ -89,14 +90,15 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { ov::hint::SchedulingCoreType::ANY_CORE; //!< PCORE_ONLY and ECORE_ONLY are valid in hybrid core machine, //!< ANY_CORE is valid in all machines. Core type priority: //!< physical PCore, ECore, logical PCore - bool _cpu_reservation = false; //!< Whether to reserve current cores which will not be used by other plugin. - //!< If it is true, cpu_pinning defaults to true. 
+ bool _cpu_reservation = false; //!< Whether to reserve current cores which will not be used by other plugin or + //!< compiled model. If it is true, cpu_pinning defaults to true. bool _cpu_pinning = false; //!< Whether to bind threads to cores. bool _cores_limit = true; //!< Whether to limit the number of streams and threads by the number of cpu cores std::vector> _streams_info_table = {}; std::vector> _stream_processor_ids; int _sub_streams = 0; std::vector _rank = {}; + bool _add_lock = true; /** * @brief Get and reserve cpu ids based on configuration and hardware information, @@ -109,6 +111,8 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { */ void update_executor_config(); + void update_executor_config(bool lock); + /** * @brief Set _streams_info_table and _cpu_reservation in cpu streams executor config when nstreams = 0, * that is, only create one thread with TBB @@ -136,7 +140,8 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { bool cpu_pinning = false, bool cores_limit = true, std::vector> streams_info_table = {}, - std::vector rank = {}) + std::vector rank = {}, + bool add_lock = true) : _name{std::move(name)}, _streams{streams}, _threads_per_stream{threads_per_stream}, @@ -145,8 +150,9 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { _cpu_pinning{cpu_pinning}, _cores_limit{cores_limit}, _streams_info_table{std::move(streams_info_table)}, - _rank{rank} { - update_executor_config(); + _rank{rank}, + _add_lock(add_lock) { + update_executor_config(_add_lock); } // These APIs which includes set_property and get_property can not be removed until they will never be called by @@ -266,6 +272,11 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { */ virtual std::vector get_rank() = 0; + /** + * @brief Reset cpu map table when user set enable_cpu_reservation = true + */ + virtual void cpu_reset() = 0; + /** * @brief Execute the task in the current 
thread using streams executor configuration and constraints * @param task A task to start @@ -273,5 +284,7 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public ITaskExecutor { virtual void execute(Task task) = 0; }; +static std::mutex _streams_executor_mutex; + } // namespace threading } // namespace ov diff --git a/src/inference/include/openvino/runtime/properties.hpp b/src/inference/include/openvino/runtime/properties.hpp index fb4f8d2af17f67..5917cb33373e8b 100644 --- a/src/inference/include/openvino/runtime/properties.hpp +++ b/src/inference/include/openvino/runtime/properties.hpp @@ -479,6 +479,23 @@ static constexpr Property> model_distribution_ */ static constexpr Property enable_cpu_pinning{"ENABLE_CPU_PINNING"}; +/** + * @brief This property allows CPU reservation during inference. + * @ingroup ov_runtime_cpp_prop_api + * + * Cpu Reservation means reserve cpus which will not be used by other plugin or compiled model. Developer can use this + * property to enable or disable CPU reservation during inference on Windows and Linux. MacOS does not support CPU + * reservation, and this property is always disabled. This property defaults to false. + * + * The following code is example to use this property. + * + * @code + * ie.set_property(ov::hint::enable_cpu_reservation(true)); + * ie.set_property(ov::hint::enable_cpu_reservation(false)); + * @endcode + */ +static constexpr Property enable_cpu_reservation{"ENABLE_CPU_RESERVATION"}; + /** * @brief This property define if using hyper threading during inference. 
* @ingroup ov_runtime_cpp_prop_api diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index b51289dabc2735..a10709aa6db3df 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -92,14 +92,6 @@ struct CPUStreamsExecutor::Impl { _impl->_streamIdQueue.push(_streamId); } #if OV_THREAD == OV_THREAD_TBB || OV_THREAD == OV_THREAD_TBB_AUTO - if (_impl->_config.get_name().find("StreamsExecutor") == std::string::npos) { - try { - set_cpu_used(_cpu_ids, NOT_USED); - } catch (const ov::Exception&) { - // Destructor should not throw - catch needed for static analysis. - // CPU::CPU() won't throw here as cpu_info() is called from Stream constructor. - } - } if (nullptr != _observer) { _observer->observe(false); } @@ -345,6 +337,7 @@ struct CPUStreamsExecutor::Impl { _exectorMgr = executor_manager(); auto numaNodes = get_available_numa_nodes(); int streams_num = _config.get_streams(); + auto processor_ids = _config.get_stream_processor_ids(); if (streams_num != 0) { std::copy_n(std::begin(numaNodes), std::min(streams_num, numaNodes.size()), @@ -353,6 +346,10 @@ struct CPUStreamsExecutor::Impl { _usedNumaNodes = std::move(numaNodes); } for (auto streamId = 0; streamId < streams_num; ++streamId) { + if (_config.get_cpu_reservation()) { + std::lock_guard lock(_cpu_ids_mutex); + _cpu_ids_all.insert(_cpu_ids_all.end(), processor_ids[streamId].begin(), processor_ids[streamId].end()); + } _threads.emplace_back([this, streamId] { openvino::itt::threadName(_config.get_name() + "_" + std::to_string(streamId)); for (bool stopped = false; !stopped;) { @@ -457,6 +454,8 @@ struct CPUStreamsExecutor::Impl { CustomThreadLocal _streams; std::shared_ptr _exectorMgr; bool _isExit = false; + std::vector _cpu_ids_all; + std::mutex _cpu_ids_mutex; }; int CPUStreamsExecutor::get_stream_id() { @@ -492,9 +491,20 @@ std::vector 
CPUStreamsExecutor::get_rank() { return stream->_rank; } +void CPUStreamsExecutor::cpu_reset() { + if (!_impl->_cpu_ids_all.empty()) { + set_cpu_used(_impl->_cpu_ids_all, NOT_USED); + { + std::lock_guard lock(_impl->_cpu_ids_mutex); + _impl->_cpu_ids_all.clear(); + } + } +} + CPUStreamsExecutor::CPUStreamsExecutor(const IStreamsExecutor::Config& config) : _impl{new Impl{config}} {} CPUStreamsExecutor::~CPUStreamsExecutor() { + cpu_reset(); { std::lock_guard lock(_impl->_mutex); _impl->_isStopped = true; diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp index 5563232ca87e7f..59201baadfd387 100644 --- a/src/inference/src/dev/threading/istreams_executor.cpp +++ b/src/inference/src/dev/threading/istreams_executor.cpp @@ -159,27 +159,32 @@ void IStreamsExecutor::Config::update_executor_config() { const auto proc_type_table = get_proc_type_table(); bool streams_info_available = false; - if (proc_type_table.empty()) { - return; - } - - if (_cpu_reservation && !_cpu_pinning) { - _cpu_pinning = true; + if (proc_type_table.empty() || proc_type_table[0][ALL_PROC] == 0) { + if (_cpu_reservation) { + OPENVINO_THROW("[ Config ] proc_type_table is empty. 
No CPU resources available!"); + } else { + return; + } } if (!_streams_info_table.empty()) { streams_info_available = true; std::vector threads_proc_type(HYPER_THREADING_PROC + 1, 0); + int threads_all = 0; for (size_t i = 0; i < _streams_info_table.size(); i++) { if (_streams_info_table[i][NUMBER_OF_STREAMS] > 0) { - threads_proc_type[_streams_info_table[i][PROC_TYPE]] += + int num_threads = _streams_info_table[i][THREADS_PER_STREAM] * _streams_info_table[i][NUMBER_OF_STREAMS]; + threads_proc_type[_streams_info_table[i][PROC_TYPE]] += num_threads; + threads_all += num_threads; } } + if (threads_all == 0) { + OPENVINO_THROW("streams_info_table is invalid!"); + } for (size_t i = ALL_PROC; i < threads_proc_type.size(); i++) { if (threads_proc_type[i] > proc_type_table[0][i]) { - streams_info_available = false; - break; + OPENVINO_THROW("Not enough CPU resources!"); } } } @@ -269,7 +274,7 @@ void IStreamsExecutor::Config::update_executor_config() { } } - if (_cpu_pinning) { + if (_cpu_pinning || _cpu_reservation) { reserve_available_cpus(_streams_info_table, _stream_processor_ids, _cpu_reservation ? 
CPU_USED : NOT_USED); } @@ -319,6 +324,17 @@ void IStreamsExecutor::Config::update_executor_config() { #endif } +void IStreamsExecutor::Config::update_executor_config(bool lock) { + if (lock) { + { + std::lock_guard lock{_streams_executor_mutex}; + update_executor_config(); + } + } else { + update_executor_config(); + } +} + void IStreamsExecutor::Config::set_config_zero_stream() { std::vector> proc_type_table = get_proc_type_table(); int core_type = MAIN_CORE_PROC; @@ -333,6 +349,7 @@ void IStreamsExecutor::Config::set_config_zero_stream() { socket_id = std::max(0, proc_type_table[0][PROC_SOCKET_ID]); } _streams_info_table.push_back({1, core_type, 1, numa_id, socket_id}); + _cpu_reservation = false; _cpu_pinning = false; } diff --git a/src/inference/src/os/cpu_map_info.hpp b/src/inference/src/os/cpu_map_info.hpp index aaac930b167ee0..097057bc054b28 100644 --- a/src/inference/src/os/cpu_map_info.hpp +++ b/src/inference/src/os/cpu_map_info.hpp @@ -54,38 +54,6 @@ class CPU { std::mutex _cpu_mutex; int _socket_idx = 0; -private: - /** - * @brief Sort proc_type_table by CPU ID on which application is running. The numa node containing this CPU ID - * will move to first row. - * @param[in] _processor_id CPU ID on which application is running. 
- * @param[in] _proc_type_table summary table of number of processors per type - * @param[in] _cpu_mapping_table CPU mapping table for each processor - * @return - */ - void sort_table_by_cpu_id(const int _processor_id, - std::vector>& _proc_type_table, - const std::vector>& _cpu_mapping_table) { - int current_numa_node = 0; - int current_socket = 0; - - for (auto& row : _cpu_mapping_table) { - if (_processor_id == row[CPU_MAP_PROCESSOR_ID]) { - current_numa_node = row[CPU_MAP_NUMA_NODE_ID]; - current_socket = row[CPU_MAP_SOCKET_ID]; - break; - } - } - for (size_t i = 1; i < _proc_type_table.size(); i++) { - if ((current_numa_node == _proc_type_table[i][PROC_NUMA_NODE_ID]) && - (current_socket == _proc_type_table[i][PROC_SOCKET_ID])) { - std::rotate(_proc_type_table.begin() + 1, _proc_type_table.begin() + i, _proc_type_table.end()); - break; - } - } - }; - - friend class LinuxSortProcTableTests; }; CPU& cpu_info(); diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index 6a6f02799cae46..a235227a4b56f0 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -320,11 +320,6 @@ CPU::CPU() { OPENVINO_THROW("CPU affinity check failed. 
No CPU is eligible to run inference."); }; - if (_proc_type_table.size() > 1) { - int cur_processor_id = sched_getcpu(); - sort_table_by_cpu_id(cur_processor_id, _proc_type_table, _cpu_mapping_table); - } - _org_proc_type_table = _proc_type_table; cpu_debug(); diff --git a/src/inference/src/os/win/win_system_conf.cpp b/src/inference/src/os/win/win_system_conf.cpp index 8f468cbaeafe6d..fd26dcd4bc4741 100644 --- a/src/inference/src/os/win/win_system_conf.cpp +++ b/src/inference/src/os/win/win_system_conf.cpp @@ -52,11 +52,6 @@ CPU::CPU() { } } - if (_proc_type_table.size() > 1) { - int cur_processor_id = GetCurrentProcessorNumber(); - sort_table_by_cpu_id(cur_processor_id, _proc_type_table, _cpu_mapping_table); - } - cpu_debug(); } diff --git a/src/inference/src/system_conf.cpp b/src/inference/src/system_conf.cpp index 9fab3591493f6f..b25c16da6494da 100644 --- a/src/inference/src/system_conf.cpp +++ b/src/inference/src/system_conf.cpp @@ -261,6 +261,10 @@ int get_current_socket_id() { return 0; } +int get_current_numa_node_id() { + return 0; +} + std::vector> get_proc_type_table() { return {{-1}}; } @@ -322,6 +326,10 @@ int get_current_socket_id() { return 0; } +int get_current_numa_node_id() { + return 0; +} + std::vector> get_proc_type_table() { CPU& cpu = cpu_info(); std::lock_guard lock{cpu._cpu_mutex}; @@ -411,8 +419,43 @@ int get_current_socket_id() { return 0; } + +int get_current_numa_node_id() { + CPU& cpu = cpu_info(); + int cur_processor_id = sched_getcpu(); + + for (auto& row : cpu._cpu_mapping_table) { + if (cur_processor_id == row[CPU_MAP_PROCESSOR_ID]) { + return row[CPU_MAP_NUMA_NODE_ID]; + } + } + + return 0; +} # else int get_current_socket_id() { + CPU& cpu = cpu_info(); + int cur_processor_id = GetCurrentProcessorNumber(); + + for (auto& row : cpu._cpu_mapping_table) { + if (cur_processor_id == row[CPU_MAP_PROCESSOR_ID]) { + return row[CPU_MAP_SOCKET_ID]; + } + } + + return 0; +} + +int get_current_numa_node_id() { + CPU& cpu = cpu_info(); + int 
cur_processor_id = GetCurrentProcessorNumber(); + + for (auto& row : cpu._cpu_mapping_table) { + if (cur_processor_id == row[CPU_MAP_PROCESSOR_ID]) { + return row[CPU_MAP_NUMA_NODE_ID]; + } + } + return 0; } # endif diff --git a/src/inference/tests/unit/cpu_map_parser/update_proc_table.cpp b/src/inference/tests/unit/cpu_map_parser/update_proc_table.cpp deleted file mode 100644 index fd78974da3fde9..00000000000000 --- a/src/inference/tests/unit/cpu_map_parser/update_proc_table.cpp +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include - -#include "common_test_utils/test_common.hpp" -#include "openvino/runtime/system_conf.hpp" -#include "os/cpu_map_info.hpp" - -using namespace testing; - -namespace ov { - -#ifdef __linux__ - -struct LinuxSortProcTableTestCase { - int current_processor_id; - std::vector> _proc_type_table_input; - std::vector> _cpu_mapping_table; - std::vector> _proc_type_table_output; -}; - -class LinuxSortProcTableTests : public ov::test::TestsCommon, - public testing::WithParamInterface> { -public: - void SetUp() override { - const auto& test_data = std::get<0>(GetParam()); - - CPU& cpu = cpu_info(); - std::vector> test_proc_type_table = test_data._proc_type_table_input; - - cpu.sort_table_by_cpu_id(test_data.current_processor_id, test_proc_type_table, test_data._cpu_mapping_table); - - ASSERT_EQ(test_proc_type_table, test_data._proc_type_table_output); - } -}; - -LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_1 = { - 2, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 2, 1, 12, HYPER_THREADING_PROC, 12, -1}, - {2, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, {3, 2, 1, 13, HYPER_THREADING_PROC, 13, -1}, - {4, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {5, 2, 1, 14, HYPER_THREADING_PROC, 14, -1}, - {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 
2, 1, 15, HYPER_THREADING_PROC, 15, -1}, - {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 2, 1, 16, HYPER_THREADING_PROC, 16, -1}, - {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 2, 1, 17, HYPER_THREADING_PROC, 17, -1}, - {12, 1, 0, 6, HYPER_THREADING_PROC, 6, -1}, {13, 3, 1, 18, HYPER_THREADING_PROC, 18, -1}, - {14, 1, 0, 7, HYPER_THREADING_PROC, 7, -1}, {15, 3, 1, 19, HYPER_THREADING_PROC, 19, -1}, - {16, 1, 0, 8, HYPER_THREADING_PROC, 8, -1}, {17, 3, 1, 20, HYPER_THREADING_PROC, 20, -1}, - {18, 1, 0, 9, HYPER_THREADING_PROC, 9, -1}, {19, 3, 1, 21, HYPER_THREADING_PROC, 21, -1}, - {20, 1, 0, 10, HYPER_THREADING_PROC, 10, -1}, {21, 3, 1, 22, HYPER_THREADING_PROC, 22, -1}, - {22, 1, 0, 11, HYPER_THREADING_PROC, 11, -1}, {23, 3, 1, 23, HYPER_THREADING_PROC, 23, -1}, - {24, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {25, 2, 1, 12, MAIN_CORE_PROC, 12, -1}, - {26, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {27, 2, 1, 13, MAIN_CORE_PROC, 13, -1}, - {28, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {29, 2, 1, 14, MAIN_CORE_PROC, 14, -1}, - {30, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, {31, 2, 1, 15, MAIN_CORE_PROC, 15, -1}, - {32, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {33, 2, 1, 16, MAIN_CORE_PROC, 16, -1}, - {34, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, {35, 2, 1, 17, MAIN_CORE_PROC, 17, -1}, - {36, 1, 0, 6, MAIN_CORE_PROC, 6, -1}, {37, 3, 1, 18, MAIN_CORE_PROC, 18, -1}, - {38, 1, 0, 7, MAIN_CORE_PROC, 7, -1}, {39, 3, 1, 19, MAIN_CORE_PROC, 19, -1}, - {40, 1, 0, 8, MAIN_CORE_PROC, 8, -1}, {41, 3, 1, 20, MAIN_CORE_PROC, 20, -1}, - {42, 1, 0, 9, MAIN_CORE_PROC, 9, -1}, {43, 3, 1, 21, MAIN_CORE_PROC, 21, -1}, - {44, 1, 0, 10, MAIN_CORE_PROC, 10, -1}, {45, 3, 1, 22, MAIN_CORE_PROC, 22, -1}, - {46, 1, 0, 11, MAIN_CORE_PROC, 11, -1}, {47, 3, 1, 23, MAIN_CORE_PROC, 23, -1}, - }, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, -}; -LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_2 = { - 16, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, 
{12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 2, 1, 12, HYPER_THREADING_PROC, 12, -1}, - {2, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, {3, 2, 1, 13, HYPER_THREADING_PROC, 13, -1}, - {4, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {5, 2, 1, 14, HYPER_THREADING_PROC, 14, -1}, - {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 2, 1, 15, HYPER_THREADING_PROC, 15, -1}, - {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 2, 1, 16, HYPER_THREADING_PROC, 16, -1}, - {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 2, 1, 17, HYPER_THREADING_PROC, 17, -1}, - {12, 1, 0, 6, HYPER_THREADING_PROC, 6, -1}, {13, 3, 1, 18, HYPER_THREADING_PROC, 18, -1}, - {14, 1, 0, 7, HYPER_THREADING_PROC, 7, -1}, {15, 3, 1, 19, HYPER_THREADING_PROC, 19, -1}, - {16, 1, 0, 8, HYPER_THREADING_PROC, 8, -1}, {17, 3, 1, 20, HYPER_THREADING_PROC, 20, -1}, - {18, 1, 0, 9, HYPER_THREADING_PROC, 9, -1}, {19, 3, 1, 21, HYPER_THREADING_PROC, 21, -1}, - {20, 1, 0, 10, HYPER_THREADING_PROC, 10, -1}, {21, 3, 1, 22, HYPER_THREADING_PROC, 22, -1}, - {22, 1, 0, 11, HYPER_THREADING_PROC, 11, -1}, {23, 3, 1, 23, HYPER_THREADING_PROC, 23, -1}, - {24, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {25, 2, 1, 12, MAIN_CORE_PROC, 12, -1}, - {26, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {27, 2, 1, 13, MAIN_CORE_PROC, 13, -1}, - {28, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {29, 2, 1, 14, MAIN_CORE_PROC, 14, -1}, - {30, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, {31, 2, 1, 15, MAIN_CORE_PROC, 15, -1}, - {32, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {33, 2, 1, 16, MAIN_CORE_PROC, 16, -1}, - {34, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, {35, 2, 1, 17, MAIN_CORE_PROC, 17, -1}, - {36, 1, 0, 6, MAIN_CORE_PROC, 6, -1}, {37, 3, 1, 18, MAIN_CORE_PROC, 18, -1}, - {38, 1, 0, 7, MAIN_CORE_PROC, 7, -1}, {39, 3, 1, 19, MAIN_CORE_PROC, 19, -1}, - {40, 1, 0, 8, MAIN_CORE_PROC, 8, -1}, {41, 3, 1, 20, MAIN_CORE_PROC, 20, -1}, - {42, 1, 0, 9, MAIN_CORE_PROC, 9, -1}, {43, 3, 1, 21, MAIN_CORE_PROC, 21, -1}, - {44, 1, 0, 10, MAIN_CORE_PROC, 
10, -1}, {45, 3, 1, 22, MAIN_CORE_PROC, 22, -1}, - {46, 1, 0, 11, MAIN_CORE_PROC, 11, -1}, {47, 3, 1, 23, MAIN_CORE_PROC, 23, -1}, - }, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}}, -}; -LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_3 = { - 7, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 2, 1, 12, HYPER_THREADING_PROC, 12, -1}, - {2, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, {3, 2, 1, 13, HYPER_THREADING_PROC, 13, -1}, - {4, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {5, 2, 1, 14, HYPER_THREADING_PROC, 14, -1}, - {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 2, 1, 15, HYPER_THREADING_PROC, 15, -1}, - {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 2, 1, 16, HYPER_THREADING_PROC, 16, -1}, - {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 2, 1, 17, HYPER_THREADING_PROC, 17, -1}, - {12, 1, 0, 6, HYPER_THREADING_PROC, 6, -1}, {13, 3, 1, 18, HYPER_THREADING_PROC, 18, -1}, - {14, 1, 0, 7, HYPER_THREADING_PROC, 7, -1}, {15, 3, 1, 19, HYPER_THREADING_PROC, 19, -1}, - {16, 1, 0, 8, HYPER_THREADING_PROC, 8, -1}, {17, 3, 1, 20, HYPER_THREADING_PROC, 20, -1}, - {18, 1, 0, 9, HYPER_THREADING_PROC, 9, -1}, {19, 3, 1, 21, HYPER_THREADING_PROC, 21, -1}, - {20, 1, 0, 10, HYPER_THREADING_PROC, 10, -1}, {21, 3, 1, 22, HYPER_THREADING_PROC, 22, -1}, - {22, 1, 0, 11, HYPER_THREADING_PROC, 11, -1}, {23, 3, 1, 23, HYPER_THREADING_PROC, 23, -1}, - {24, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {25, 2, 1, 12, MAIN_CORE_PROC, 12, -1}, - {26, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {27, 2, 1, 13, MAIN_CORE_PROC, 13, -1}, - {28, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {29, 2, 1, 14, MAIN_CORE_PROC, 14, -1}, - {30, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, {31, 2, 1, 15, MAIN_CORE_PROC, 15, -1}, - {32, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {33, 2, 1, 16, MAIN_CORE_PROC, 16, -1}, - {34, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, {35, 2, 
1, 17, MAIN_CORE_PROC, 17, -1}, - {36, 1, 0, 6, MAIN_CORE_PROC, 6, -1}, {37, 3, 1, 18, MAIN_CORE_PROC, 18, -1}, - {38, 1, 0, 7, MAIN_CORE_PROC, 7, -1}, {39, 3, 1, 19, MAIN_CORE_PROC, 19, -1}, - {40, 1, 0, 8, MAIN_CORE_PROC, 8, -1}, {41, 3, 1, 20, MAIN_CORE_PROC, 20, -1}, - {42, 1, 0, 9, MAIN_CORE_PROC, 9, -1}, {43, 3, 1, 21, MAIN_CORE_PROC, 21, -1}, - {44, 1, 0, 10, MAIN_CORE_PROC, 10, -1}, {45, 3, 1, 22, MAIN_CORE_PROC, 22, -1}, - {46, 1, 0, 11, MAIN_CORE_PROC, 11, -1}, {47, 3, 1, 23, MAIN_CORE_PROC, 23, -1}, - }, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}}, -}; -LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_4 = { - 21, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, - { - {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 2, 1, 12, HYPER_THREADING_PROC, 12, -1}, - {2, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, {3, 2, 1, 13, HYPER_THREADING_PROC, 13, -1}, - {4, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {5, 2, 1, 14, HYPER_THREADING_PROC, 14, -1}, - {6, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, {7, 2, 1, 15, HYPER_THREADING_PROC, 15, -1}, - {8, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {9, 2, 1, 16, HYPER_THREADING_PROC, 16, -1}, - {10, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, {11, 2, 1, 17, HYPER_THREADING_PROC, 17, -1}, - {12, 1, 0, 6, HYPER_THREADING_PROC, 6, -1}, {13, 3, 1, 18, HYPER_THREADING_PROC, 18, -1}, - {14, 1, 0, 7, HYPER_THREADING_PROC, 7, -1}, {15, 3, 1, 19, HYPER_THREADING_PROC, 19, -1}, - {16, 1, 0, 8, HYPER_THREADING_PROC, 8, -1}, {17, 3, 1, 20, HYPER_THREADING_PROC, 20, -1}, - {18, 1, 0, 9, HYPER_THREADING_PROC, 9, -1}, {19, 3, 1, 21, HYPER_THREADING_PROC, 21, -1}, - {20, 1, 0, 10, HYPER_THREADING_PROC, 10, -1}, {21, 3, 1, 22, HYPER_THREADING_PROC, 22, -1}, - {22, 1, 0, 11, HYPER_THREADING_PROC, 11, -1}, {23, 3, 1, 23, HYPER_THREADING_PROC, 23, -1}, - {24, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {25, 2, 1, 12, 
MAIN_CORE_PROC, 12, -1}, - {26, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, {27, 2, 1, 13, MAIN_CORE_PROC, 13, -1}, - {28, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {29, 2, 1, 14, MAIN_CORE_PROC, 14, -1}, - {30, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, {31, 2, 1, 15, MAIN_CORE_PROC, 15, -1}, - {32, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {33, 2, 1, 16, MAIN_CORE_PROC, 16, -1}, - {34, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, {35, 2, 1, 17, MAIN_CORE_PROC, 17, -1}, - {36, 1, 0, 6, MAIN_CORE_PROC, 6, -1}, {37, 3, 1, 18, MAIN_CORE_PROC, 18, -1}, - {38, 1, 0, 7, MAIN_CORE_PROC, 7, -1}, {39, 3, 1, 19, MAIN_CORE_PROC, 19, -1}, - {40, 1, 0, 8, MAIN_CORE_PROC, 8, -1}, {41, 3, 1, 20, MAIN_CORE_PROC, 20, -1}, - {42, 1, 0, 9, MAIN_CORE_PROC, 9, -1}, {43, 3, 1, 21, MAIN_CORE_PROC, 21, -1}, - {44, 1, 0, 10, MAIN_CORE_PROC, 10, -1}, {45, 3, 1, 22, MAIN_CORE_PROC, 22, -1}, - {46, 1, 0, 11, MAIN_CORE_PROC, 11, -1}, {47, 3, 1, 23, MAIN_CORE_PROC, 23, -1}, - }, - {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}}, -}; - -TEST_P(LinuxSortProcTableTests, LinuxProcTable) {} - -INSTANTIATE_TEST_SUITE_P(CPUMap, - LinuxSortProcTableTests, - testing::Values(proc_table_2sockets_24cores_hyperthreading_1, - proc_table_2sockets_24cores_hyperthreading_2, - proc_table_2sockets_24cores_hyperthreading_3, - proc_table_2sockets_24cores_hyperthreading_4)); -#endif -} // namespace ov diff --git a/src/inference/tests/unit/executor_config_test.cpp b/src/inference/tests/unit/executor_config_test.cpp index 5da183d5257899..90ab9c630c70f1 100644 --- a/src/inference/tests/unit/executor_config_test.cpp +++ b/src/inference/tests/unit/executor_config_test.cpp @@ -5,6 +5,7 @@ #include #include "common_test_utils/test_common.hpp" +#include "openvino/runtime/threading/cpu_streams_info.hpp" #include "openvino/runtime/threading/istreams_executor.hpp" #include "os/cpu_map_info.hpp" @@ -22,6 +23,7 @@ struct ExecutorConfigTestCase { int _num_streams; int _threads_per_stream; ov::hint::SchedulingCoreType 
_core_type; + bool _cpu_reservation; bool _cpu_pinning; bool _cores_limit; std::vector> _streams_info_table_in; @@ -46,11 +48,11 @@ class ExecutorConfigTest : public ov::test::TestsCommon, test_data._num_streams, test_data._threads_per_stream, test_data._core_type, - false, + test_data._cpu_reservation, test_data._cpu_pinning, test_data._cores_limit, test_data._streams_info_table_in}; - + ASSERT_EQ(test_data._cpu_reservation, config.get_cpu_reservation()); ASSERT_EQ(test_data._cpu_pinning, config.get_cpu_pinning()); ASSERT_EQ(test_data._streams_info_table, config.get_streams_info_table()); ASSERT_EQ(test_data._stream_processors, config.get_stream_processor_ids()); @@ -58,6 +60,35 @@ class ExecutorConfigTest : public ov::test::TestsCommon, ASSERT_EQ(test_data._num_streams, config.get_streams()); ASSERT_EQ(0, config.get_threads_per_stream()); } + if (test_data._cpu_reservation) { + std::vector> proc_type_table = test_data._proc_type_table; + for (size_t i = 0; i < test_data._streams_info_table.size(); i++) { + if (test_data._streams_info_table[i][PROC_TYPE] >= MAIN_CORE_PROC && + test_data._streams_info_table[i][PROC_TYPE] <= HYPER_THREADING_PROC) { + int nstreams = test_data._streams_info_table[i][NUMBER_OF_STREAMS] > 0 + ? 
test_data._streams_info_table[i][NUMBER_OF_STREAMS] + : 1; + int nthreads = nstreams * test_data._streams_info_table[i][THREADS_PER_STREAM]; + if (proc_type_table.size() > 1) { + for (size_t j = 0; j < proc_type_table.size(); j++) { + if (proc_type_table[j][PROC_NUMA_NODE_ID] == + test_data._streams_info_table[i][STREAM_NUMA_NODE_ID] && + proc_type_table[j][PROC_SOCKET_ID] == + test_data._streams_info_table[i][STREAM_SOCKET_ID]) { + proc_type_table[j][test_data._streams_info_table[i][PROC_TYPE]] -= nthreads; + proc_type_table[j][ALL_PROC] -= nthreads; + proc_type_table[0][test_data._streams_info_table[i][PROC_TYPE]] -= nthreads; + proc_type_table[0][ALL_PROC] -= nthreads; + } + } + } else { + proc_type_table[0][test_data._streams_info_table[i][PROC_TYPE]] -= nthreads; + proc_type_table[0][ALL_PROC] -= nthreads; + } + } + } + ASSERT_EQ(proc_type_table, cpu._proc_type_table); + } } }; @@ -85,6 +116,7 @@ ExecutorConfigTestCase _1sockets_streams_4_threads_1 = { 4, // param[in]: the number of streams 1, // param[in]: the number of threads per stream ov::hint::SchedulingCoreType::ANY_CORE, // param[in]: specified cpu core type + false, // param[in]: specified cpu reservation false, // param[in]: specified cpu pinning true, // param[in]: specified cores limit {}, // param[in]: streams info table @@ -119,6 +151,7 @@ ExecutorConfigTestCase _1sockets_streams_4_threads_0 = { 0, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, {}, @@ -147,6 +180,7 @@ ExecutorConfigTestCase _1sockets_streams_1_threads_12 = { 12, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -179,6 +213,7 @@ ExecutorConfigTestCase _1sockets_streams_1_threads_10 = { 10, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -211,6 +246,7 @@ ExecutorConfigTestCase _1sockets_streams_12_threads_1 = { 1, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -242,6 +278,7 @@ ExecutorConfigTestCase _1sockets_streams_13_threads_1 = { 1, 
ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -273,6 +310,7 @@ ExecutorConfigTestCase _1sockets_streams_6_threads_1_core_e = { 1, ov::hint::SchedulingCoreType::ECORE_ONLY, false, + false, true, {}, { @@ -303,6 +341,69 @@ ExecutorConfigTestCase _1sockets_streams_5_threads_1_binding = { 5, 1, ov::hint::SchedulingCoreType::ANY_CORE, + false, + true, + true, + {}, + { + {5, MAIN_CORE_PROC, 1, 0, 0}, + }, + {{0}, {2}, {4}, {6}, {8}}, +}; + +ExecutorConfigTestCase _1sockets_streams_5_threads_1_reservation = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 5, + 1, + ov::hint::SchedulingCoreType::ANY_CORE, + true, + false, + true, + {}, + { + {5, MAIN_CORE_PROC, 1, 0, 0}, + }, + {{0}, {2}, {4}, {6}, {8}}, +}; + +ExecutorConfigTestCase _1sockets_streams_5_threads_1_binding_reservation = { + { + {12, 6, 0, 6, 0, 0}, + }, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, + {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, + {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, + {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + }, + 5, + 1, + ov::hint::SchedulingCoreType::ANY_CORE, + true, true, true, {}, @@ -360,6 +461,7 @@ ExecutorConfigTestCase 
_2sockets_streams_36_threads_1 = { 1, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -417,6 +519,7 @@ ExecutorConfigTestCase _2sockets_streams_4_threads_5 = { 5, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -474,6 +577,7 @@ ExecutorConfigTestCase _2sockets_streams_1_threads_36 = { 36, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -532,6 +636,7 @@ ExecutorConfigTestCase _2sockets_streams_1_threads_30 = { 30, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -542,6 +647,126 @@ ExecutorConfigTestCase _2sockets_streams_1_threads_30 = { {}, }; +ExecutorConfigTestCase _2sockets_streams_1_threads_30_binding = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 
1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 1, + 30, + ov::hint::SchedulingCoreType::ANY_CORE, + 
false, + true, + true, + {}, + { + {1, ALL_PROC, 30, -1, -1}, + {0, MAIN_CORE_PROC, 18, 0, 0}, + {0, MAIN_CORE_PROC, 12, 1, 1}, + }, + {{36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65}}, +}; + +ExecutorConfigTestCase _2sockets_streams_1_threads_30_reservation = { + { + {72, 36, 0, 36, -1, -1}, + {36, 18, 0, 18, 0, 0}, + {36, 18, 0, 18, 1, 1}, + }, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 1, 1, 18, HYPER_THREADING_PROC, 18, -1}, {19, 1, 1, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 1, 1, 20, HYPER_THREADING_PROC, 20, -1}, {21, 1, 1, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 1, 1, 22, HYPER_THREADING_PROC, 22, -1}, {23, 1, 1, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + 
{34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 0, 0, 36, MAIN_CORE_PROC, 36, -1}, {37, 0, 0, 37, MAIN_CORE_PROC, 37, -1}, + {38, 0, 0, 38, MAIN_CORE_PROC, 38, -1}, {39, 0, 0, 39, MAIN_CORE_PROC, 39, -1}, + {40, 0, 0, 40, MAIN_CORE_PROC, 40, -1}, {41, 0, 0, 41, MAIN_CORE_PROC, 41, -1}, + {42, 0, 0, 42, MAIN_CORE_PROC, 42, -1}, {43, 0, 0, 43, MAIN_CORE_PROC, 43, -1}, + {44, 0, 0, 44, MAIN_CORE_PROC, 44, -1}, {45, 0, 0, 45, MAIN_CORE_PROC, 45, -1}, + {46, 0, 0, 46, MAIN_CORE_PROC, 46, -1}, {47, 0, 0, 47, MAIN_CORE_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 1, 1, 54, MAIN_CORE_PROC, 54, -1}, {55, 1, 1, 55, MAIN_CORE_PROC, 55, -1}, + {56, 1, 1, 56, MAIN_CORE_PROC, 56, -1}, {57, 1, 1, 57, MAIN_CORE_PROC, 57, -1}, + {58, 1, 1, 58, MAIN_CORE_PROC, 58, -1}, {59, 1, 1, 59, MAIN_CORE_PROC, 59, -1}, + {60, 1, 1, 60, MAIN_CORE_PROC, 60, -1}, {61, 1, 1, 61, MAIN_CORE_PROC, 61, -1}, + {62, 1, 1, 62, MAIN_CORE_PROC, 62, -1}, {63, 1, 1, 63, MAIN_CORE_PROC, 63, -1}, + {64, 1, 1, 64, MAIN_CORE_PROC, 64, -1}, {65, 1, 1, 65, MAIN_CORE_PROC, 65, -1}, + {66, 1, 1, 66, MAIN_CORE_PROC, 66, -1}, {67, 1, 1, 67, MAIN_CORE_PROC, 67, -1}, + {68, 1, 1, 68, MAIN_CORE_PROC, 68, -1}, {69, 1, 1, 69, MAIN_CORE_PROC, 69, -1}, + {70, 1, 1, 70, MAIN_CORE_PROC, 70, -1}, {71, 1, 1, 71, MAIN_CORE_PROC, 71, -1}, + }, + 1, + 30, + ov::hint::SchedulingCoreType::ANY_CORE, + true, + false, + true, + {}, + { + {1, ALL_PROC, 30, -1, -1}, + {0, MAIN_CORE_PROC, 18, 0, 0}, + {0, MAIN_CORE_PROC, 12, 1, 1}, + }, + {{36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65}}, +}; + ExecutorConfigTestCase _pecore_streams_5_threads_2 = { { {24, 8, 8, 8, 0, 0}, @@ -564,6 +789,7 @@ 
ExecutorConfigTestCase _pecore_streams_5_threads_2 = { 2, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -595,6 +821,7 @@ ExecutorConfigTestCase _pecore_streams_5_threads_5 = { 5, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -627,6 +854,7 @@ ExecutorConfigTestCase _pecore_streams_4_threads_5 = { 5, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -659,6 +887,7 @@ ExecutorConfigTestCase _pecore_streams_4_threads_1 = { 1, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -689,6 +918,7 @@ ExecutorConfigTestCase _pecore_streams_5_threads_10 = { 10, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -721,6 +951,7 @@ ExecutorConfigTestCase _pecore_streams_26_threads_1 = { 1, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, { @@ -753,6 +984,7 @@ ExecutorConfigTestCase _pecore_streams_26_threads_1_p = { 1, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, {}, { @@ -784,6 +1016,7 @@ ExecutorConfigTestCase _pecore_streams_26_threads_1_e = { 1, ov::hint::SchedulingCoreType::ECORE_ONLY, false, + false, true, {}, { @@ -814,6 +1047,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_0 = { 0, ov::hint::SchedulingCoreType::ANY_CORE, false, + false, true, {}, {}, @@ -842,6 +1076,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_1_p = { 1, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, {}, { @@ -872,6 +1107,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_1_e = { 1, ov::hint::SchedulingCoreType::ECORE_ONLY, false, + false, true, {}, { @@ -902,6 +1138,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_16_p = { 16, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, {}, { @@ -934,6 +1171,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_18_p = { 18, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, {}, { @@ -966,6 +1204,7 @@ ExecutorConfigTestCase _pecore_streams_1_threads_10_p = { 
10, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, {}, { @@ -998,6 +1237,7 @@ ExecutorConfigTestCase _pecore_streams_10_threads_1_e = { 1, ov::hint::SchedulingCoreType::ECORE_ONLY, false, + false, true, {}, { @@ -1029,6 +1269,7 @@ ExecutorConfigTestCase _pecore_streams_10_threads_1_binding = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + true, {}, { {4, MAIN_CORE_PROC, 2, 0, 0}, @@ -1038,7 +1279,7 @@ ExecutorConfigTestCase _pecore_streams_10_threads_1_binding = { {{0, 2}, {4, 6}, {8, 10}, {12, 14}, {16, 17}, {18, 19}, {20, 21}, {22, 23}, {1, 3}, {5, 7}}, }; -ExecutorConfigTestCase _pecore_streams_info_table_1 = { +ExecutorConfigTestCase _pecore_streams_10_threads_2_reservation = { { {24, 8, 8, 8, 0, 0}, }, @@ -1056,23 +1297,22 @@ ExecutorConfigTestCase _pecore_streams_info_table_1 = { {20, 0, 0, 12, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 13, EFFICIENT_CORE_PROC, 21, -1}, {22, 0, 0, 14, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 15, EFFICIENT_CORE_PROC, 23, -1}, }, - 1, - 8, - ov::hint::SchedulingCoreType::PCORE_ONLY, + 10, + 2, + ov::hint::SchedulingCoreType::ANY_CORE, + true, false, true, + {}, { - {2, MAIN_CORE_PROC, 2, 0, 0}, - {2, EFFICIENT_CORE_PROC, 2, 0, 0}, - }, - { - {2, MAIN_CORE_PROC, 2, 0, 0}, - {2, EFFICIENT_CORE_PROC, 2, 0, 0}, + {4, MAIN_CORE_PROC, 2, 0, 0}, + {4, EFFICIENT_CORE_PROC, 2, 0, 0}, + {2, HYPER_THREADING_PROC, 2, 0, 0}, }, - {}, + {{0, 2}, {4, 6}, {8, 10}, {12, 14}, {16, 17}, {18, 19}, {20, 21}, {22, 23}, {1, 3}, {5, 7}}, }; -ExecutorConfigTestCase _pecore_streams_info_table_2 = { +ExecutorConfigTestCase _pecore_streams_info_table_1 = { { {24, 8, 8, 8, 0, 0}, }, @@ -1094,18 +1334,20 @@ ExecutorConfigTestCase _pecore_streams_info_table_2 = { 8, ov::hint::SchedulingCoreType::PCORE_ONLY, false, + false, true, { - {5, MAIN_CORE_PROC, 2, 0, 0}, + {2, MAIN_CORE_PROC, 2, 0, 0}, {2, EFFICIENT_CORE_PROC, 2, 0, 0}, }, { - {1, MAIN_CORE_PROC, 8, 0, 0}, + {2, MAIN_CORE_PROC, 2, 0, 0}, + {2, EFFICIENT_CORE_PROC, 2, 0, 0}, }, 
{}, }; -ExecutorConfigTestCase _pecore_streams_info_table_3 = { +ExecutorConfigTestCase _pecore_streams_info_table_2 = { { {24, 8, 8, 8, 0, 0}, }, @@ -1128,6 +1370,7 @@ ExecutorConfigTestCase _pecore_streams_info_table_3 = { ov::hint::SchedulingCoreType::PCORE_ONLY, true, true, + true, { {2, MAIN_CORE_PROC, 2, 0, 0}, {2, EFFICIENT_CORE_PROC, 2, 0, 0}, @@ -1153,6 +1396,7 @@ ExecutorConfigTestCase _streams_info_table_cores_limit_false_1 = { ov::hint::SchedulingCoreType::PCORE_ONLY, false, false, + false, {}, {}, {}, @@ -1170,6 +1414,7 @@ ExecutorConfigTestCase _streams_info_table_cores_limit_false_2 = { ov::hint::SchedulingCoreType::PCORE_ONLY, false, false, + false, {}, {}, {}, @@ -1187,10 +1432,14 @@ INSTANTIATE_TEST_SUITE_P(smoke_ExecutorConfig, _1sockets_streams_13_threads_1, _1sockets_streams_6_threads_1_core_e, _1sockets_streams_5_threads_1_binding, + _1sockets_streams_5_threads_1_reservation, + _1sockets_streams_5_threads_1_binding_reservation, _2sockets_streams_36_threads_1, _2sockets_streams_4_threads_5, _2sockets_streams_1_threads_36, _2sockets_streams_1_threads_30, + _2sockets_streams_1_threads_30_binding, + _2sockets_streams_1_threads_30_reservation, _pecore_streams_5_threads_2, _pecore_streams_5_threads_5, _pecore_streams_4_threads_5, @@ -1207,9 +1456,9 @@ INSTANTIATE_TEST_SUITE_P(smoke_ExecutorConfig, _pecore_streams_1_threads_10_p, _pecore_streams_10_threads_1_e, _pecore_streams_10_threads_1_binding, + _pecore_streams_10_threads_2_reservation, _pecore_streams_info_table_1, _pecore_streams_info_table_2, - _pecore_streams_info_table_3, _streams_info_table_cores_limit_false_1, _streams_info_table_cores_limit_false_2)); #endif diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index 7c1eb44ab37bc4..3b560cf5518ba4 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -248,6 +248,7 @@ ov::Any CompiledModel::get_property(const std::string& name) 
const { RO_property(ov::hint::execution_mode.name()), RO_property(ov::hint::num_requests.name()), RO_property(ov::hint::enable_cpu_pinning.name()), + RO_property(ov::hint::enable_cpu_reservation.name()), RO_property(ov::hint::scheduling_core_type.name()), RO_property(ov::hint::model_distribution_policy.name()), RO_property(ov::hint::enable_hyper_threading.name()), @@ -293,6 +294,9 @@ ov::Any CompiledModel::get_property(const std::string& name) const { } else if (name == ov::hint::enable_cpu_pinning.name()) { const bool use_pin = config.enableCpuPinning; return decltype(ov::hint::enable_cpu_pinning)::value_type(use_pin); + } else if (name == ov::hint::enable_cpu_reservation.name()) { + const bool use_reserve = config.enableCpuReservation; + return decltype(ov::hint::enable_cpu_reservation)::value_type(use_reserve); } else if (name == ov::hint::scheduling_core_type) { const auto stream_mode = config.schedulingCoreType; return stream_mode; diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index ee55002dbb3eb9..68e82bee77ae38 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -43,6 +43,8 @@ class CompiledModel : public ov::ICompiledModel { m_sub_compiled_models.clear(); m_sub_memory_manager->_memorys_table.clear(); } + auto streamsExecutor = std::dynamic_pointer_cast(m_task_executor); + streamsExecutor->cpu_reset(); } std::shared_ptr create_infer_request() const override; diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 8267a0c6c66e34..db53bb0c531b1a 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -115,6 +115,16 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { ov::hint::enable_cpu_pinning.name(), ". 
Expected only true/false."); } + } else if (key == ov::hint::enable_cpu_reservation.name()) { + try { + enableCpuReservation = val.as(); + } catch (ov::Exception&) { + OPENVINO_THROW("Wrong value ", + val.as(), + "for property key ", + ov::hint::enable_cpu_reservation.name(), + ". Expected only true/false."); + } } else if (key == ov::hint::scheduling_core_type.name()) { try { schedulingCoreType = val.as(); diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index 2abf45d20ca46a..c8a215e76a7573 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -81,6 +81,7 @@ struct Config { uint32_t hintNumRequests = 0; bool enableCpuPinning = true; bool changedCpuPinning = false; + bool enableCpuReservation = false; ov::hint::SchedulingCoreType schedulingCoreType = ov::hint::SchedulingCoreType::ANY_CORE; std::set modelDistributionPolicy = {}; int streamsRankLevel = 1; diff --git a/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp b/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp index c8b3f400bbc8dc..e9855a01309564 100644 --- a/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp +++ b/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp @@ -72,6 +72,7 @@ std::vector> apply_hyper_threading(bool& input_ht_hint, bool get_cpu_pinning(bool& input_value, const bool input_changed, + const bool cpu_reservation, const std::vector>& proc_type_table, const std::vector>& streams_info_table) { bool result_value; @@ -79,7 +80,11 @@ bool get_cpu_pinning(bool& input_value, #if defined(__APPLE__) result_value = false; #elif defined(_WIN32) - result_value = ((input_changed) && (proc_type_table.size() == 1)) ? input_value : false; + if (proc_type_table.size() == 1) { + result_value = input_changed ? 
input_value : cpu_reservation; + } else { + result_value = false; + } #else if (input_changed) { result_value = input_value; @@ -90,7 +95,7 @@ bool get_cpu_pinning(bool& input_value, if ((streams_info_table[0][PROC_TYPE] == ALL_PROC) && (streams_info_table[1][PROC_TYPE] != EFFICIENT_CORE_PROC) && (streams_info_table[2][PROC_TYPE] == EFFICIENT_CORE_PROC)) { - result_value = false; + result_value = cpu_reservation; } } } diff --git a/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp b/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp index 43cd0e8ec33f01..2d6a29f38bf595 100644 --- a/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp +++ b/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp @@ -44,12 +44,14 @@ std::vector> apply_hyper_threading(bool& input_ht_hint, * @brief whether pinning cpu cores according to enableCpuPinning property * @param[in] input_type indicate value of property enableCpuPinning. * @param[in] input_changed indicate if value is set by user. + * @param[in] cpu_reservation indicate if cpu need to be reserved * @param[in] proc_type_table indicate processors information of this platform * @param[in] streams_info_table indicate streams detail of this model * @return whether pinning threads to cpu cores */ bool get_cpu_pinning(bool& input_value, const bool input_changed, + const bool cpu_reservation, const std::vector>& proc_type_table, const std::vector>& streams_info_table); diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index e30dfe42fcba4d..a88ec8e4a1da4a 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -30,13 +30,25 @@ using namespace ov::threading; namespace ov { namespace intel_cpu { +void sort_table_by_numa_node_id(const int current_numa_node, std::vector>& proc_type_table) { + if (proc_type_table.size() > 1) { + for (size_t i = 1; i < proc_type_table.size(); i++) { + if (current_numa_node == 
proc_type_table[i][PROC_NUMA_NODE_ID]) { + std::rotate(proc_type_table.begin() + 1, proc_type_table.begin() + i, proc_type_table.end()); + break; + } + } + } + + return; +}; + std::vector> get_streams_info_table( const int input_streams, const bool input_streams_changed, const int input_threads, const int input_infer_requests, const int model_prefer_threads, - const int input_current_socket_id, const std::string input_perf_hint, const std::set hint_model_distribution_policy, const std::vector>& proc_type_table) { @@ -179,11 +191,7 @@ std::vector> get_streams_info_table( std::unordered_set socket_id_list(proc_type_table.size()); for (size_t i = 1; i < proc_type_table.size(); i++) { if (!socket_id_list.count(proc_type_table[i][PROC_SOCKET_ID])) { - if (proc_type_table[i][PROC_SOCKET_ID] == input_current_socket_id) { - proc_socket_table.insert(proc_socket_table.begin(), proc_type_table[i]); - } else { - proc_socket_table.push_back(proc_type_table[i]); - } + proc_socket_table.push_back(proc_type_table[i]); socket_id_list.insert(proc_type_table[i][PROC_SOCKET_ID]); } else { for (auto& row : proc_socket_table) { @@ -205,7 +213,12 @@ std::vector> get_streams_info_table( ((input_streams_changed == true) && (input_streams == 1))) { n_streams = 1; stream_info[NUMBER_OF_STREAMS] = n_streams; - current_socket_id = input_current_socket_id == -1 ? 
get_current_socket_id() : input_current_socket_id; + for (size_t n = 0; n < proc_socket_table.size(); n++) { + if (proc_socket_table[n][ALL_PROC] > 0) { + current_socket_id = proc_socket_table[n][PROC_SOCKET_ID]; + break; + } + } if (input_threads > 0) { if (hint_model_distribution_policy.size() == 0) { n_threads_per_stream = std::min(input_threads, proc_type_table[0][ALL_PROC]); @@ -677,11 +690,14 @@ int get_model_prefer_threads(const int num_streams, } std::vector> generate_stream_info(const int streams, - const int input_current_socket_id, + const int input_numa_node_id, const std::shared_ptr& model, Config& config, std::vector>& proc_type_table, int preferred_nthreads_per_stream) { + if (proc_type_table.empty() || proc_type_table[0][ALL_PROC] == 0) { + OPENVINO_THROW("proc_type_table is empty. No CPU resources available!"); + } int model_prefer_threads = preferred_nthreads_per_stream; proc_type_table = apply_scheduling_core_type(config.schedulingCoreType, proc_type_table); @@ -693,41 +709,56 @@ std::vector> generate_stream_info(const int streams, model_prefer_threads = get_model_prefer_threads(streams, proc_type_table, model, config); } + if (proc_type_table.size() > 1) { + const auto cur_numa_node_id = input_numa_node_id < 0 ? get_current_numa_node_id() : input_numa_node_id; + sort_table_by_numa_node_id(cur_numa_node_id, proc_type_table); + } + if (proc_type_table.empty() || proc_type_table[0][ALL_PROC] == 0) { + OPENVINO_THROW("proc_type_table is empty. 
No valid CPU resources available!"); + } auto streams_info_table = get_streams_info_table(config.streams, config.streamsChanged, config.threads, config.hintNumRequests, model_prefer_threads, - input_current_socket_id, ov::util::to_string(config.hintPerfMode), config.modelDistributionPolicy, proc_type_table); - // streams_info_table = {{1, 1, 56, 1, 1}, {-1, 1, 28, 1, 1}, {-1, 1, 28, 0, 0}}; + if (streams_info_table.empty()) { + OPENVINO_THROW("streams_info_table is empty!"); + } if (config.modelDistributionPolicy.find(ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL) != config.modelDistributionPolicy.end()) { config.streamsRankTable = get_streams_rank_table(streams_info_table, config.streamsRankLevel, config.numSubStreams); } - auto cpu_pinning = - get_cpu_pinning(config.enableCpuPinning, config.changedCpuPinning, proc_type_table, streams_info_table); + auto cpu_pinning = get_cpu_pinning(config.enableCpuPinning, + config.changedCpuPinning, + config.enableCpuReservation, + proc_type_table, + streams_info_table); config.streamExecutorConfig = IStreamsExecutor::Config{"CPUStreamsExecutor", config.streams, config.threadsPerStream, ov::hint::SchedulingCoreType::ANY_CORE, - false, + config.enableCpuReservation, cpu_pinning, true, - std::move(streams_info_table)}; - + std::move(streams_info_table), + {}, + false}; return proc_type_table; } void get_num_streams(const int streams, const std::shared_ptr& model, Config& config) { - std::vector> proc_type_table = get_proc_type_table(); + { + std::lock_guard lock{_streams_executor_mutex}; + std::vector> proc_type_table = get_proc_type_table(); - generate_stream_info(streams, -1, model, config, proc_type_table); + generate_stream_info(streams, -1, model, config, proc_type_table); + } } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp index 96564e0a22eb81..2af57e05bfd53d 100644 --- 
a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp @@ -35,8 +35,6 @@ namespace intel_cpu { * function. * - input "0" indicates that the function generates the optimal number of threads per stream based on * processors type information. - * @param[in] input_current_socket_id is the socket ID in cpu mapping table of the currently running thread - * - input "-1" indicates that the function get_streams_info_table will query this id internally. * @param[in] input_perf_hint is performance hint set by user via ov::hint::performance_mode or the default value. * @param[in] hint_llm_distribution_policy is the distribution policy for Large language models * @param[in] proc_type_table is currently available candidate processors. @@ -50,7 +48,6 @@ std::vector> get_streams_info_table( const int input_threads, const int input_infer_requests, const int model_prefer_threads, - const int input_current_socket_id, const std::string input_perf_hint, const std::set hint_llm_distribution_policy, const std::vector>& proc_type_table); @@ -85,7 +82,7 @@ int get_model_prefer_threads(const int num_streams, /** * @brief Generate streams information according to processors type table * @param[in] streams number of streams - * @param[in] input_current_socket_id is the socket ID in cpu mapping table of the currently running thread + * @param[in] input_numa_node_id is the numa node ID in cpu mapping table of the currently running thread * - input "-1" indicates that the function get_streams_info_table will query this id internally. 
* @param[in] model graph handle * @param[in] config intel cpu configuration @@ -95,7 +92,7 @@ int get_model_prefer_threads(const int num_streams, * ov::hint::enable_hyper_threading */ std::vector> generate_stream_info(const int streams, - const int input_current_socket_id, + const int input_numa_node_id, const std::shared_ptr& model, Config& config, std::vector>& proc_type_table, @@ -109,5 +106,13 @@ std::vector> generate_stream_info(const int streams, */ void get_num_streams(const int streams, const std::shared_ptr& model, Config& config); +/** + * @brief Sort proc_type_table by numa node id on which application is running. The numa node will move to first + * row. + * @param[in] current_numa_node numa node ID on which application is running. + * @param[in] proc_type_table summary table of number of processors per type + */ +void sort_table_by_numa_node_id(const int current_numa_node, std::vector>& proc_type_table); + } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index 4369588182ac5c..f2494e061c8301 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -335,6 +335,9 @@ ov::Any Plugin::get_property(const std::string& name, const ov::AnyMap& options) } else if (name == ov::hint::enable_cpu_pinning) { const bool pin_value = engConfig.enableCpuPinning; return decltype(ov::hint::enable_cpu_pinning)::value_type(pin_value); + } else if (name == ov::hint::enable_cpu_reservation) { + const bool reserve_value = engConfig.enableCpuReservation; + return decltype(ov::hint::enable_cpu_reservation)::value_type(reserve_value); } else if (name == ov::hint::scheduling_core_type) { const auto core_type = engConfig.schedulingCoreType; return core_type; @@ -419,6 +422,7 @@ ov::Any Plugin::get_ro_property(const std::string& name, const ov::AnyMap& optio RW_property(ov::hint::execution_mode.name()), RW_property(ov::hint::num_requests.name()), 
RW_property(ov::hint::enable_cpu_pinning.name()), + RW_property(ov::hint::enable_cpu_reservation.name()), RW_property(ov::hint::scheduling_core_type.name()), RW_property(ov::hint::model_distribution_policy.name()), RW_property(ov::hint::enable_hyper_threading.name()), diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp index 6655a2a5e7d48d..fd9b5084a0c768 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_executable_network/properties.cpp @@ -32,6 +32,7 @@ TEST_F(OVClassConfigTestCPU, smoke_CpuExecNetworkSupportedPropertiesAreAvailable RO_property(ov::hint::execution_mode.name()), RO_property(ov::hint::num_requests.name()), RO_property(ov::hint::enable_cpu_pinning.name()), + RO_property(ov::hint::enable_cpu_reservation.name()), RO_property(ov::hint::scheduling_core_type.name()), RO_property(ov::hint::model_distribution_policy.name()), RO_property(ov::hint::enable_hyper_threading.name()), diff --git a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp index 5adf6cbb125185..f8a9558b308dad 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/behavior/ov_plugin/properties.cpp @@ -46,6 +46,7 @@ TEST_F(OVClassConfigTestCPU, smoke_PluginAllSupportedPropertiesAreAvailable) { RW_property(ov::hint::execution_mode.name()), RW_property(ov::hint::num_requests.name()), RW_property(ov::hint::enable_cpu_pinning.name()), + RW_property(ov::hint::enable_cpu_reservation.name()), RW_property(ov::hint::scheduling_core_type.name()), RW_property(ov::hint::model_distribution_policy.name()), 
RW_property(ov::hint::enable_hyper_threading.name()), diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp new file mode 100644 index 00000000000000..78ee401d169cbb --- /dev/null +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include "common_test_utils/test_constants.hpp" +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" +#include "openvino/openvino.hpp" +#include "common_test_utils/ov_plugin_cache.hpp" +#include "openvino/runtime/properties.hpp" +#include "openvino/util/file_util.hpp" + +using namespace testing; +using Device = std::string; +using Config = ov::AnyMap; +using CpuReservationTest = ::testing::Test; + +TEST_F(CpuReservationTest, Mutiple_CompiledModel_Reservation) { + std::vector> models; + Config config = {ov::enable_profiling(true)}; + Device target_device(ov::test::utils::DEVICE_CPU); + std::atomic counter{0u}; + + models.emplace_back(ov::test::utils::make_2_input_subtract()); + models.emplace_back(ov::test::utils::make_multi_single_conv()); + + std::shared_ptr core = ov::test::utils::PluginCache::get().core(); + core->set_property(target_device, config); + ov::AnyMap property_config_reserve = {{ov::num_streams.name(), ov::streams::Num(1)}, + {ov::inference_num_threads.name(), 1}, + {ov::hint::enable_cpu_reservation.name(), true}}; + ov::AnyMap property_config = {{ov::num_streams.name(), ov::streams::Num(1)}, {ov::inference_num_threads.name(), 1}}; + + std::vector threads(2); + for (auto& thread : threads) { + thread = std::thread([&]() { + auto value = counter++; + auto 
compiled_model = core->compile_model(models[value % models.size()], + target_device, + value == 1 ? property_config : property_config_reserve); + auto cpu_reservation = compiled_model.get_property(ov::hint::enable_cpu_reservation.name()); + auto num_streams = compiled_model.get_property(ov::num_streams.name()); + ASSERT_EQ(cpu_reservation, value == 1 ? false : true); + ASSERT_EQ(num_streams, ov::streams::Num(1)); + }); + } + + for (auto& thread : threads) { + if (thread.joinable()) + thread.join(); + } +} + +TEST_F(CpuReservationTest, Cpu_Reservation_NoAvailableCores) { + std::vector> models; + Config config = {ov::enable_profiling(true)}; + Device target_device(ov::test::utils::DEVICE_CPU); + models.emplace_back(ov::test::utils::make_2_input_subtract()); + + std::shared_ptr core = ov::test::utils::PluginCache::get().core(); + core->set_property(target_device, config); + ov::AnyMap property_config = {{ov::num_streams.name(), 1}, + {ov::inference_num_threads.name(), 2000}, + {ov::hint::enable_hyper_threading.name(), true}, + {ov::hint::enable_cpu_reservation.name(), true}}; + auto compiled_model = core->compile_model(models[0], target_device, property_config); + EXPECT_THROW(core->compile_model(models[0], target_device, property_config), ov::Exception); +} + +#if defined(__linux__) +TEST_F(CpuReservationTest, Cpu_Reservation_CpuPinning) { + std::vector> models; + Config config = {ov::enable_profiling(true)}; + Device target_device(ov::test::utils::DEVICE_CPU); + models.emplace_back(ov::test::utils::make_2_input_subtract()); + + std::shared_ptr core = ov::test::utils::PluginCache::get().core(); + core->set_property(target_device, config); + ov::AnyMap property_config = {{ov::inference_num_threads.name(), 1}, + {ov::hint::enable_cpu_reservation.name(), true}}; + auto compiled_model = core->compile_model(models[0], target_device, property_config); + auto cpu_pinning = compiled_model.get_property(ov::hint::enable_cpu_pinning.name()); + ASSERT_EQ(cpu_pinning, true); +} 
+#endif diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 12231dfb3c72f0..6d288d9f5ede8b 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -30,6 +30,7 @@ auto cpu_properties = []() -> std::vector { // check that hints doesn't override customer value (now for streams and later for other config opts) {{ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, {ov::hint::num_requests(3)}}, {{ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}, {ov::hint::num_requests(3)}}, + {{ov::hint::enable_cpu_reservation(true)}, {ov::num_streams(1)}, {ov::inference_num_threads(2)}}, }; auto numa_nodes = ov::get_available_numa_nodes(); diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/cpu_pinning_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/cpu_pinning_test.cpp index 23cc7a04cdfb9e..60d058a6cd0ce0 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/cpu_pinning_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/cpu_pinning_test.cpp @@ -29,6 +29,7 @@ class CpuPinningTests : public ov::test::TestsCommon, auto test_output = ov::intel_cpu::get_cpu_pinning(test_data.input_cpu_pinning, test_data.input_changed, + false, test_data.input_proc_type_table, test_data.input_stream_info_table); diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp index 5cd43bbcb56d04..a6981cfbbe1498 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_e2e_test.cpp @@ -8,6 +8,7 @@ #include "cpu_map_scheduling.hpp" #include 
"cpu_streams_calculation.hpp" #include "openvino/runtime/system_conf.hpp" +#include "openvino/runtime/threading/cpu_streams_info.hpp" #include "os/cpu_map_info.hpp" using namespace testing; @@ -21,14 +22,16 @@ struct StreamGenerateionTestCase { int input_thread; int input_request; int input_model_prefer; - int input_socket_id; + int input_numa_node_id; ov::hint::SchedulingCoreType input_type; bool input_ht_value; bool input_ht_changed; + bool input_cpu_reservation; bool input_cpu_value; bool input_cpu_changed; ov::hint::PerformanceMode input_pm_hint; std::set hint_llm_distribution_policy; + std::vector> cpu_mapping_table; std::vector> input_proc_type_table; ov::hint::SchedulingCoreType output_type; bool output_ht_value; @@ -40,6 +43,7 @@ struct StreamGenerateionTestCase { void make_config(StreamGenerateionTestCase& test_data, ov::intel_cpu::Config& config) { config.schedulingCoreType = test_data.input_type; + config.enableCpuReservation = test_data.input_cpu_reservation; config.enableCpuPinning = test_data.input_cpu_value; config.changedCpuPinning = test_data.input_cpu_changed; config.enableHyperThreading = test_data.input_ht_value; @@ -62,21 +66,65 @@ class StreamGenerationTests : public ov::test::TestsCommon, make_config(test_data, config); CPU& cpu = cpu_info(); + cpu._cpu_mapping_table = test_data.cpu_mapping_table; cpu._proc_type_table = test_data.input_proc_type_table; + cpu._org_proc_type_table = test_data.input_proc_type_table; + cpu._numa_nodes = cpu._proc_type_table.size() > 1 ? 
static_cast(cpu._proc_type_table.size()) - 1 : 1; + cpu._sockets = cpu._numa_nodes; + std::vector> res_proc_type_table = test_data.input_proc_type_table; - auto proc_type_table = ov::intel_cpu::generate_stream_info(test_data.input_stream, - test_data.input_socket_id, - nullptr, - config, - test_data.input_proc_type_table, - test_data.input_model_prefer); - - ASSERT_EQ(test_data.output_stream_info_table, config.streamExecutorConfig.get_streams_info_table()); - ASSERT_EQ(test_data.output_proc_type_table, proc_type_table); - ASSERT_EQ(test_data.output_cpu_value, config.streamExecutorConfig.get_cpu_pinning()); - ASSERT_EQ(test_data.output_ht_value, config.enableHyperThreading); - ASSERT_EQ(test_data.output_type, config.schedulingCoreType); - ASSERT_EQ(test_data.output_pm_hint, config.hintPerfMode); + if (cpu._cpu_mapping_table.empty()) { + EXPECT_THROW(ov::intel_cpu::generate_stream_info(test_data.input_stream, + test_data.input_numa_node_id, + nullptr, + config, + test_data.input_proc_type_table, + test_data.input_model_prefer), + ov::Exception); + } else { + auto proc_type_table = ov::intel_cpu::generate_stream_info(test_data.input_stream, + test_data.input_numa_node_id, + nullptr, + config, + test_data.input_proc_type_table, + test_data.input_model_prefer); + ASSERT_EQ(test_data.output_stream_info_table, config.streamExecutorConfig.get_streams_info_table()); + ASSERT_EQ(test_data.output_proc_type_table, proc_type_table); + ASSERT_EQ(test_data.output_cpu_value, config.streamExecutorConfig.get_cpu_pinning()); + ASSERT_EQ(test_data.output_ht_value, config.enableHyperThreading); + ASSERT_EQ(test_data.output_type, config.schedulingCoreType); + ASSERT_EQ(test_data.output_pm_hint, config.hintPerfMode); + if (config.enableCpuReservation) { + for (size_t i = 0; i < test_data.output_stream_info_table.size(); i++) { + if (test_data.output_stream_info_table[i][PROC_TYPE] >= MAIN_CORE_PROC && + test_data.output_stream_info_table[i][PROC_TYPE] <= HYPER_THREADING_PROC) { + int 
nstreams = test_data.output_stream_info_table[i][NUMBER_OF_STREAMS] > 0 + ? test_data.output_stream_info_table[i][NUMBER_OF_STREAMS] + : 1; + int nthreads = nstreams * test_data.output_stream_info_table[i][THREADS_PER_STREAM]; + if (res_proc_type_table.size() > 1) { + for (size_t j = 0; j < proc_type_table.size(); j++) { + if (res_proc_type_table[j][PROC_NUMA_NODE_ID] == + test_data.output_stream_info_table[i][STREAM_NUMA_NODE_ID] && + res_proc_type_table[j][PROC_SOCKET_ID] == + test_data.output_stream_info_table[i][STREAM_SOCKET_ID]) { + res_proc_type_table[j][test_data.output_stream_info_table[i][PROC_TYPE]] -= + nthreads; + res_proc_type_table[j][ALL_PROC] -= nthreads; + res_proc_type_table[0][test_data.output_stream_info_table[i][PROC_TYPE]] -= + nthreads; + res_proc_type_table[0][ALL_PROC] -= nthreads; + } + } + } else { + res_proc_type_table[0][test_data.output_stream_info_table[i][PROC_TYPE]] -= nthreads; + res_proc_type_table[0][ALL_PROC] -= nthreads; + } + } + } + ASSERT_EQ(res_proc_type_table, cpu._proc_type_table); + } + } } }; @@ -93,10 +141,23 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_1_pinning = { // (PCORE_ONLY/ECORE_ONLY/ANY_CORE) true, // param[in]: simulated setting for enableHyperThreading true, // param[in]: simulated settting for changedHyperThreading + false, // param[in]: simulated settting for enableCpuReservation true, // param[in]: simulated setting for enableCpuPinning true, // param[in]: simulated setting for changedCpuPinning ov::hint::PerformanceMode::LATENCY, // param[in]: simulated setting for performance mode (throughput/latency) - {}, // param[in]: simulated setting for model distribution policy + {}, // param[in]: simulated setting for model distribution policy + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + 
{6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, // param[in]: simulated proc_type_table for platform which has one socket, 6 Pcores, 8 // Ecores and hyper threading enabled ov::hint::SchedulingCoreType::ANY_CORE, // param[expected out]: scheduling core type needs to be the same as input @@ -122,10 +183,27 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_2_pinning = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + false, true, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, + {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, + {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, + {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + }, {{14, 6, 8, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -145,10 +223,23 @@ StreamGenerateionTestCase generation_tput_1sockets_14cores_1_pinning = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + false, true, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, 
MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, true, @@ -168,10 +259,23 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_1_unpinning = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + false, true, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + 
}, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, true, @@ -194,10 +298,27 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_2_unpinning = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + false, true, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, + {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, + {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, + {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + }, {{14, 6, 8, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -217,10 +338,23 @@ StreamGenerateionTestCase generation_tput_1sockets_14cores_1_unpinning = { ov::hint::SchedulingCoreType::ANY_CORE, true, true, + false, true, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 
0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, true, @@ -241,9 +375,66 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_3 = { true, true, false, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, + {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, + {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, + {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + }, + {{14, 6, 8, 0, 0, 0}}, + ov::hint::SchedulingCoreType::PCORE_ONLY, + false, + false, + ov::hint::PerformanceMode::LATENCY, + {{6, 6, 0, 0, 0, 0}}, + {{1, MAIN_CORE_PROC, 6, 0, 0}}, +}; + +StreamGenerateionTestCase generation_latency_1sockets_14cores_3_reservation = { + 1, + false, + 0, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + true, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, + {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, + {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, + {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + }, {{14, 6, 8, 0, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ 
-253,6 +444,46 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_3 = { {{1, MAIN_CORE_PROC, 6, 0, 0}}, }; +StreamGenerateionTestCase generation_latency_1sockets_14cores_4_reservation = { + 1, + false, + 0, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + true, + false, + false, + ov::hint::PerformanceMode::LATENCY, + {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, + {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, + {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, + {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, + {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, + {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, + {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, + {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + }, + {{14, 6, 8, 0, 0, 0}}, + ov::hint::SchedulingCoreType::PCORE_ONLY, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {{6, 6, 0, 0, 0, 0}}, + {{1, MAIN_CORE_PROC, 6, 0, 0}}, +}; + StreamGenerateionTestCase generation_latency_1sockets_14cores_4 = { 1, false, @@ -264,9 +495,22 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_4 = { true, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 
0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, true, @@ -287,9 +531,22 @@ StreamGenerateionTestCase generation_latency_1sockets_14cores_5 = { false, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -310,9 +567,134 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_6 = { false, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, 
HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, 
MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, + {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, + ov::hint::SchedulingCoreType::PCORE_ONLY, + false, + false, + 
ov::hint::PerformanceMode::LATENCY, + {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, + {{1, MAIN_CORE_PROC, 24, 0, 0}}, +}; + +StreamGenerateionTestCase generation_latency_2sockets_48cores_6_reservation = { + 1, + false, + 0, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + false, + true, + true, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, 
HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 
1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -333,9 +715,36 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_7 = { true, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {8, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {10, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, + {12, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, + {14, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, + {16, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, {17, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, + {18, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, {19, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, + {20, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {21, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {22, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {23, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {24, 1, 1, 24, MAIN_CORE_PROC, 24, -1}, {25, 1, 1, 25, MAIN_CORE_PROC, 25, -1}, + {26, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {27, 1, 1, 27, 
MAIN_CORE_PROC, 27, -1}, + {28, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {29, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {30, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {31, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {32, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {33, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {34, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {35, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {36, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {37, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {38, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {39, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {40, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {41, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {42, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {43, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {44, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {45, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {46, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {47, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + }, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -356,9 +765,60 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_8 = { false, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, 
-1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, 
MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -379,9 +839,36 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_9 = { true, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {8, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 9, 
MAIN_CORE_PROC, 9, -1}, + {10, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, + {12, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, + {14, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, + {16, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, {17, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, + {18, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, {19, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, + {20, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {21, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {22, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {23, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {24, 1, 1, 24, MAIN_CORE_PROC, 24, -1}, {25, 1, 1, 25, MAIN_CORE_PROC, 25, -1}, + {26, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {27, 1, 1, 27, MAIN_CORE_PROC, 27, -1}, + {28, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {29, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {30, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {31, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {32, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {33, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {34, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {35, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {36, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {37, 1, 1, 37, MAIN_CORE_PROC, 37, -1}, + {38, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {39, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {40, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {41, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {42, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {43, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {44, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {45, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {46, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {47, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + }, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -402,9 +889,60 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_10 = { false, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, 
HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, 
HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, 
MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -425,9 +963,36 @@ StreamGenerateionTestCase generation_latency_2sockets_48cores_11 = { true, true, false, + false, true, ov::hint::PerformanceMode::LATENCY, {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 1, MAIN_CORE_PROC, 1, -1}, + {2, 0, 0, 2, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 3, MAIN_CORE_PROC, 3, -1}, + {4, 0, 0, 4, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 5, MAIN_CORE_PROC, 5, -1}, + {6, 0, 0, 6, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 7, MAIN_CORE_PROC, 7, -1}, + {8, 0, 0, 8, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 9, MAIN_CORE_PROC, 9, -1}, + {10, 0, 0, 10, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 11, MAIN_CORE_PROC, 11, -1}, + {12, 0, 0, 12, MAIN_CORE_PROC, 12, -1}, {13, 0, 0, 13, MAIN_CORE_PROC, 13, -1}, + {14, 0, 0, 14, MAIN_CORE_PROC, 14, -1}, {15, 0, 0, 15, MAIN_CORE_PROC, 15, -1}, + {16, 0, 0, 16, MAIN_CORE_PROC, 16, -1}, {17, 0, 0, 17, MAIN_CORE_PROC, 17, -1}, + {18, 0, 0, 18, MAIN_CORE_PROC, 18, -1}, {19, 0, 0, 19, MAIN_CORE_PROC, 19, -1}, + {20, 0, 0, 20, MAIN_CORE_PROC, 20, -1}, {21, 0, 0, 21, MAIN_CORE_PROC, 21, -1}, + {22, 0, 0, 22, MAIN_CORE_PROC, 22, -1}, {23, 0, 0, 23, MAIN_CORE_PROC, 23, -1}, + {24, 1, 1, 24, MAIN_CORE_PROC, 24, -1}, {25, 1, 1, 25, MAIN_CORE_PROC, 25, -1}, + {26, 1, 1, 26, MAIN_CORE_PROC, 26, -1}, {27, 1, 1, 27, MAIN_CORE_PROC, 27, -1}, + {28, 1, 1, 28, MAIN_CORE_PROC, 28, -1}, {29, 1, 1, 29, MAIN_CORE_PROC, 29, -1}, + {30, 1, 1, 30, MAIN_CORE_PROC, 30, -1}, {31, 1, 1, 31, MAIN_CORE_PROC, 31, -1}, + {32, 1, 1, 32, MAIN_CORE_PROC, 32, -1}, {33, 1, 1, 33, MAIN_CORE_PROC, 33, -1}, + {34, 1, 1, 34, MAIN_CORE_PROC, 34, -1}, {35, 1, 1, 35, MAIN_CORE_PROC, 35, -1}, + {36, 1, 1, 36, MAIN_CORE_PROC, 36, -1}, {37, 1, 1, 37, 
MAIN_CORE_PROC, 37, -1}, + {38, 1, 1, 38, MAIN_CORE_PROC, 38, -1}, {39, 1, 1, 39, MAIN_CORE_PROC, 39, -1}, + {40, 1, 1, 40, MAIN_CORE_PROC, 40, -1}, {41, 1, 1, 41, MAIN_CORE_PROC, 41, -1}, + {42, 1, 1, 42, MAIN_CORE_PROC, 42, -1}, {43, 1, 1, 43, MAIN_CORE_PROC, 43, -1}, + {44, 1, 1, 44, MAIN_CORE_PROC, 44, -1}, {45, 1, 1, 45, MAIN_CORE_PROC, 45, -1}, + {46, 1, 1, 46, MAIN_CORE_PROC, 46, -1}, {47, 1, 1, 47, MAIN_CORE_PROC, 47, -1}, + }, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -448,9 +1013,22 @@ StreamGenerateionTestCase generation_tput_1sockets_14cores_2 = { false, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, false, @@ -471,9 +1049,22 @@ StreamGenerateionTestCase generation_tput_1sockets_14cores_3 = { true, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + 
{4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, true, @@ -494,9 +1085,22 @@ StreamGenerateionTestCase generation_tput_1sockets_14cores_4 = { true, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, MAIN_CORE_PROC, 0, -1}, {1, 0, 0, 0, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 1, MAIN_CORE_PROC, 2, -1}, {3, 0, 0, 1, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 2, MAIN_CORE_PROC, 4, -1}, {5, 0, 0, 2, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 3, MAIN_CORE_PROC, 6, -1}, {7, 0, 0, 3, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 4, MAIN_CORE_PROC, 8, -1}, {9, 0, 0, 4, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 5, MAIN_CORE_PROC, 10, -1}, {11, 0, 0, 5, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 6, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 7, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 8, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 9, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 10, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 11, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 12, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 13, EFFICIENT_CORE_PROC, 19, -1}, + }, {{20, 6, 8, 6, 0, 0}}, ov::hint::SchedulingCoreType::PCORE_ONLY, true, @@ -517,9 +1121,60 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_5 = { true, true, false, + false, true, 
ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 
1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + 
{90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::ANY_CORE, true, @@ -543,9 +1198,60 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_6 = { false, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, 
HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, 
MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -566,9 +1272,60 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_7 = { false, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, 
HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, 
{65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -589,9 +1346,134 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_8 = { false, true, false, + false, + true, + ov::hint::PerformanceMode::THROUGHPUT, + {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, 
HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, 
MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, + {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::THROUGHPUT, + {{48, 48, 0, 0, -1, -1}, {24, 
24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, + {{2, MAIN_CORE_PROC, 10, 0, 0}}, +}; + +StreamGenerateionTestCase generation_tput_2sockets_48cores_8_reservation = { + 2, + true, + 20, + 0, + 1, + 0, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + true, + true, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 
36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 
83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -612,9 +1494,60 @@ StreamGenerateionTestCase generation_tput_2sockets_48cores_9 = { true, false, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, HYPER_THREADING_PROC, 0, -1}, {1, 0, 0, 1, HYPER_THREADING_PROC, 1, -1}, + {2, 0, 0, 2, HYPER_THREADING_PROC, 2, -1}, {3, 0, 0, 3, HYPER_THREADING_PROC, 3, -1}, + {4, 0, 0, 4, HYPER_THREADING_PROC, 4, -1}, {5, 0, 0, 5, HYPER_THREADING_PROC, 5, -1}, + {6, 0, 0, 6, HYPER_THREADING_PROC, 6, -1}, {7, 0, 0, 7, HYPER_THREADING_PROC, 7, -1}, + {8, 0, 0, 8, HYPER_THREADING_PROC, 8, -1}, {9, 0, 0, 9, HYPER_THREADING_PROC, 9, -1}, + {10, 0, 0, 10, HYPER_THREADING_PROC, 10, -1}, {11, 0, 0, 11, HYPER_THREADING_PROC, 11, -1}, + {12, 0, 0, 12, HYPER_THREADING_PROC, 12, -1}, {13, 0, 0, 13, HYPER_THREADING_PROC, 13, -1}, + {14, 0, 0, 14, HYPER_THREADING_PROC, 14, -1}, {15, 0, 0, 15, HYPER_THREADING_PROC, 15, -1}, + {16, 0, 0, 16, HYPER_THREADING_PROC, 16, -1}, {17, 0, 0, 17, HYPER_THREADING_PROC, 17, -1}, + {18, 0, 0, 18, HYPER_THREADING_PROC, 18, -1}, {19, 0, 0, 19, HYPER_THREADING_PROC, 19, -1}, + {20, 0, 0, 20, HYPER_THREADING_PROC, 20, -1}, {21, 0, 0, 21, HYPER_THREADING_PROC, 21, -1}, + {22, 0, 0, 22, HYPER_THREADING_PROC, 22, -1}, {23, 0, 0, 23, HYPER_THREADING_PROC, 23, -1}, + {24, 1, 1, 24, HYPER_THREADING_PROC, 24, -1}, {25, 1, 1, 25, HYPER_THREADING_PROC, 
25, -1}, + {26, 1, 1, 26, HYPER_THREADING_PROC, 26, -1}, {27, 1, 1, 27, HYPER_THREADING_PROC, 27, -1}, + {28, 1, 1, 28, HYPER_THREADING_PROC, 28, -1}, {29, 1, 1, 29, HYPER_THREADING_PROC, 29, -1}, + {30, 1, 1, 30, HYPER_THREADING_PROC, 30, -1}, {31, 1, 1, 31, HYPER_THREADING_PROC, 31, -1}, + {32, 1, 1, 32, HYPER_THREADING_PROC, 32, -1}, {33, 1, 1, 33, HYPER_THREADING_PROC, 33, -1}, + {34, 1, 1, 34, HYPER_THREADING_PROC, 34, -1}, {35, 1, 1, 35, HYPER_THREADING_PROC, 35, -1}, + {36, 1, 1, 36, HYPER_THREADING_PROC, 36, -1}, {37, 1, 1, 37, HYPER_THREADING_PROC, 37, -1}, + {38, 1, 1, 38, HYPER_THREADING_PROC, 38, -1}, {39, 1, 1, 39, HYPER_THREADING_PROC, 39, -1}, + {40, 1, 1, 40, HYPER_THREADING_PROC, 40, -1}, {41, 1, 1, 41, HYPER_THREADING_PROC, 41, -1}, + {42, 1, 1, 42, HYPER_THREADING_PROC, 42, -1}, {43, 1, 1, 43, HYPER_THREADING_PROC, 43, -1}, + {44, 1, 1, 44, HYPER_THREADING_PROC, 44, -1}, {45, 1, 1, 45, HYPER_THREADING_PROC, 45, -1}, + {46, 1, 1, 46, HYPER_THREADING_PROC, 46, -1}, {47, 1, 1, 47, HYPER_THREADING_PROC, 47, -1}, + {48, 0, 0, 48, MAIN_CORE_PROC, 48, -1}, {49, 0, 0, 49, MAIN_CORE_PROC, 49, -1}, + {50, 0, 0, 50, MAIN_CORE_PROC, 50, -1}, {51, 0, 0, 51, MAIN_CORE_PROC, 51, -1}, + {52, 0, 0, 52, MAIN_CORE_PROC, 52, -1}, {53, 0, 0, 53, MAIN_CORE_PROC, 53, -1}, + {54, 0, 0, 54, MAIN_CORE_PROC, 54, -1}, {55, 0, 0, 55, MAIN_CORE_PROC, 55, -1}, + {56, 0, 0, 56, MAIN_CORE_PROC, 56, -1}, {57, 0, 0, 57, MAIN_CORE_PROC, 57, -1}, + {58, 0, 0, 58, MAIN_CORE_PROC, 58, -1}, {59, 0, 0, 59, MAIN_CORE_PROC, 59, -1}, + {60, 0, 0, 60, MAIN_CORE_PROC, 60, -1}, {61, 0, 0, 61, MAIN_CORE_PROC, 61, -1}, + {62, 0, 0, 62, MAIN_CORE_PROC, 62, -1}, {63, 0, 0, 63, MAIN_CORE_PROC, 63, -1}, + {64, 0, 0, 64, MAIN_CORE_PROC, 64, -1}, {65, 0, 0, 65, MAIN_CORE_PROC, 65, -1}, + {66, 0, 0, 66, MAIN_CORE_PROC, 66, -1}, {67, 0, 0, 67, MAIN_CORE_PROC, 67, -1}, + {68, 0, 0, 68, MAIN_CORE_PROC, 68, -1}, {69, 0, 0, 69, MAIN_CORE_PROC, 69, -1}, + {70, 0, 0, 70, MAIN_CORE_PROC, 70, -1}, {71, 0, 0, 
71, MAIN_CORE_PROC, 71, -1}, + {72, 1, 1, 72, MAIN_CORE_PROC, 72, -1}, {73, 1, 1, 73, MAIN_CORE_PROC, 73, -1}, + {74, 1, 1, 74, MAIN_CORE_PROC, 74, -1}, {75, 1, 1, 75, MAIN_CORE_PROC, 75, -1}, + {76, 1, 1, 76, MAIN_CORE_PROC, 76, -1}, {77, 1, 1, 77, MAIN_CORE_PROC, 77, -1}, + {78, 1, 1, 78, MAIN_CORE_PROC, 78, -1}, {79, 1, 1, 79, MAIN_CORE_PROC, 79, -1}, + {80, 1, 1, 80, MAIN_CORE_PROC, 80, -1}, {81, 1, 1, 81, MAIN_CORE_PROC, 81, -1}, + {82, 1, 1, 82, MAIN_CORE_PROC, 82, -1}, {83, 1, 1, 83, MAIN_CORE_PROC, 83, -1}, + {84, 1, 1, 84, MAIN_CORE_PROC, 84, -1}, {85, 1, 1, 85, MAIN_CORE_PROC, 85, -1}, + {86, 1, 1, 86, MAIN_CORE_PROC, 86, -1}, {87, 1, 1, 87, MAIN_CORE_PROC, 87, -1}, + {88, 1, 1, 88, MAIN_CORE_PROC, 88, -1}, {89, 1, 1, 89, MAIN_CORE_PROC, 89, -1}, + {90, 1, 1, 90, MAIN_CORE_PROC, 90, -1}, {91, 1, 1, 91, MAIN_CORE_PROC, 91, -1}, + {92, 1, 1, 92, MAIN_CORE_PROC, 92, -1}, {93, 1, 1, 93, MAIN_CORE_PROC, 93, -1}, + {94, 1, 1, 94, MAIN_CORE_PROC, 94, -1}, {95, 1, 1, 95, MAIN_CORE_PROC, 95, -1}, + }, {{96, 48, 0, 48, -1, -1}, {48, 24, 0, 24, 0, 0}, {48, 24, 0, 24, 1, 1}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -633,10 +1566,61 @@ StreamGenerateionTestCase generation_latency_1sockets_96cores_pinning = { ov::hint::SchedulingCoreType::ANY_CORE, false, false, + false, true, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, 
EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + {28, 0, 0, 28, EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 42, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 
57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 85, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -655,10 +1639,61 @@ StreamGenerateionTestCase generation_tput_1sockets_96cores_pinning = { ov::hint::SchedulingCoreType::ANY_CORE, false, false, + false, true, 
true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + {28, 0, 0, 28, EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 42, 
-1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 85, -1}, + {86, 0, 0, 86, 
EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -677,10 +1712,61 @@ StreamGenerateionTestCase generation_tput_1sockets_96cores_2_pinning = { ov::hint::SchedulingCoreType::PCORE_ONLY, true, true, + false, true, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + 
{28, 0, 0, 28, EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 42, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, 
EFFICIENT_CORE_PROC, 71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 85, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -699,10 +1785,61 @@ StreamGenerateionTestCase generation_latency_1sockets_96cores_unpinning = { ov::hint::SchedulingCoreType::ANY_CORE, false, false, + false, true, true, ov::hint::PerformanceMode::LATENCY, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, 
{13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + {28, 0, 0, 28, EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 42, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, 
EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 85, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -723,8 +1860,59 @@ StreamGenerateionTestCase generation_tput_1sockets_96cores_unpinning = { 
false, false, false, + false, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + {28, 0, 0, 28, EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, 
EFFICIENT_CORE_PROC, 42, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 
85, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -744,9 +1932,60 @@ StreamGenerateionTestCase generation_tput_1sockets_96cores_2_unpinning = { true, true, false, + false, true, ov::hint::PerformanceMode::THROUGHPUT, {}, + { + {0, 0, 0, 0, EFFICIENT_CORE_PROC, 0, -1}, {1, 0, 0, 1, EFFICIENT_CORE_PROC, 1, -1}, + {2, 0, 0, 2, EFFICIENT_CORE_PROC, 2, -1}, {3, 0, 0, 3, EFFICIENT_CORE_PROC, 3, -1}, + {4, 0, 0, 4, EFFICIENT_CORE_PROC, 4, -1}, {5, 0, 0, 5, EFFICIENT_CORE_PROC, 5, -1}, + {6, 0, 0, 6, EFFICIENT_CORE_PROC, 6, -1}, {7, 0, 0, 7, EFFICIENT_CORE_PROC, 7, -1}, + {8, 0, 0, 8, EFFICIENT_CORE_PROC, 8, -1}, {9, 0, 0, 9, EFFICIENT_CORE_PROC, 9, -1}, + {10, 0, 0, 10, EFFICIENT_CORE_PROC, 10, -1}, {11, 0, 0, 11, EFFICIENT_CORE_PROC, 11, -1}, + {12, 0, 0, 12, EFFICIENT_CORE_PROC, 12, -1}, {13, 0, 0, 13, EFFICIENT_CORE_PROC, 13, -1}, + {14, 0, 0, 14, EFFICIENT_CORE_PROC, 14, -1}, {15, 0, 0, 15, EFFICIENT_CORE_PROC, 15, -1}, + {16, 0, 0, 16, EFFICIENT_CORE_PROC, 16, -1}, {17, 0, 0, 17, EFFICIENT_CORE_PROC, 17, -1}, + {18, 0, 0, 18, EFFICIENT_CORE_PROC, 18, -1}, {19, 0, 0, 19, EFFICIENT_CORE_PROC, 19, -1}, + {20, 0, 0, 20, EFFICIENT_CORE_PROC, 20, -1}, {21, 0, 0, 21, EFFICIENT_CORE_PROC, 21, -1}, + {22, 0, 0, 22, EFFICIENT_CORE_PROC, 22, -1}, {23, 0, 0, 23, EFFICIENT_CORE_PROC, 23, -1}, + {24, 0, 0, 24, EFFICIENT_CORE_PROC, 24, -1}, {25, 0, 0, 25, EFFICIENT_CORE_PROC, 25, -1}, + {26, 0, 0, 26, EFFICIENT_CORE_PROC, 26, -1}, {27, 0, 0, 27, EFFICIENT_CORE_PROC, 27, -1}, + {28, 0, 0, 28, 
EFFICIENT_CORE_PROC, 28, -1}, {29, 0, 0, 29, EFFICIENT_CORE_PROC, 29, -1}, + {30, 0, 0, 30, EFFICIENT_CORE_PROC, 30, -1}, {31, 0, 0, 31, EFFICIENT_CORE_PROC, 31, -1}, + {32, 0, 0, 32, EFFICIENT_CORE_PROC, 32, -1}, {33, 0, 0, 33, EFFICIENT_CORE_PROC, 33, -1}, + {34, 0, 0, 34, EFFICIENT_CORE_PROC, 34, -1}, {35, 0, 0, 35, EFFICIENT_CORE_PROC, 35, -1}, + {36, 0, 0, 36, EFFICIENT_CORE_PROC, 36, -1}, {37, 0, 0, 37, EFFICIENT_CORE_PROC, 37, -1}, + {38, 0, 0, 38, EFFICIENT_CORE_PROC, 38, -1}, {39, 0, 0, 39, EFFICIENT_CORE_PROC, 39, -1}, + {40, 0, 0, 40, EFFICIENT_CORE_PROC, 40, -1}, {41, 0, 0, 41, EFFICIENT_CORE_PROC, 41, -1}, + {42, 0, 0, 42, EFFICIENT_CORE_PROC, 42, -1}, {43, 0, 0, 43, EFFICIENT_CORE_PROC, 43, -1}, + {44, 0, 0, 44, EFFICIENT_CORE_PROC, 44, -1}, {45, 0, 0, 45, EFFICIENT_CORE_PROC, 45, -1}, + {46, 0, 0, 46, EFFICIENT_CORE_PROC, 46, -1}, {47, 0, 0, 47, EFFICIENT_CORE_PROC, 47, -1}, + {48, 0, 0, 48, EFFICIENT_CORE_PROC, 48, -1}, {49, 0, 0, 49, EFFICIENT_CORE_PROC, 49, -1}, + {50, 0, 0, 50, EFFICIENT_CORE_PROC, 50, -1}, {51, 0, 0, 51, EFFICIENT_CORE_PROC, 51, -1}, + {52, 0, 0, 52, EFFICIENT_CORE_PROC, 52, -1}, {53, 0, 0, 53, EFFICIENT_CORE_PROC, 53, -1}, + {54, 0, 0, 54, EFFICIENT_CORE_PROC, 54, -1}, {55, 0, 0, 55, EFFICIENT_CORE_PROC, 55, -1}, + {56, 0, 0, 56, EFFICIENT_CORE_PROC, 56, -1}, {57, 0, 0, 57, EFFICIENT_CORE_PROC, 57, -1}, + {58, 0, 0, 58, EFFICIENT_CORE_PROC, 58, -1}, {59, 0, 0, 59, EFFICIENT_CORE_PROC, 59, -1}, + {60, 0, 0, 60, EFFICIENT_CORE_PROC, 60, -1}, {61, 0, 0, 61, EFFICIENT_CORE_PROC, 61, -1}, + {62, 0, 0, 62, EFFICIENT_CORE_PROC, 62, -1}, {63, 0, 0, 63, EFFICIENT_CORE_PROC, 63, -1}, + {64, 0, 0, 64, EFFICIENT_CORE_PROC, 64, -1}, {65, 0, 0, 65, EFFICIENT_CORE_PROC, 65, -1}, + {66, 0, 0, 66, EFFICIENT_CORE_PROC, 66, -1}, {67, 0, 0, 67, EFFICIENT_CORE_PROC, 67, -1}, + {68, 0, 0, 68, EFFICIENT_CORE_PROC, 68, -1}, {69, 0, 0, 69, EFFICIENT_CORE_PROC, 69, -1}, + {70, 0, 0, 70, EFFICIENT_CORE_PROC, 70, -1}, {71, 0, 0, 71, EFFICIENT_CORE_PROC, 
71, -1}, + {72, 0, 0, 72, EFFICIENT_CORE_PROC, 72, -1}, {73, 0, 0, 73, EFFICIENT_CORE_PROC, 73, -1}, + {74, 0, 0, 74, EFFICIENT_CORE_PROC, 74, -1}, {75, 0, 0, 75, EFFICIENT_CORE_PROC, 75, -1}, + {76, 0, 0, 76, EFFICIENT_CORE_PROC, 76, -1}, {77, 0, 0, 77, EFFICIENT_CORE_PROC, 77, -1}, + {78, 0, 0, 78, EFFICIENT_CORE_PROC, 78, -1}, {79, 0, 0, 79, EFFICIENT_CORE_PROC, 79, -1}, + {80, 0, 0, 80, EFFICIENT_CORE_PROC, 80, -1}, {81, 0, 0, 81, EFFICIENT_CORE_PROC, 81, -1}, + {82, 0, 0, 82, EFFICIENT_CORE_PROC, 82, -1}, {83, 0, 0, 83, EFFICIENT_CORE_PROC, 83, -1}, + {84, 0, 0, 84, EFFICIENT_CORE_PROC, 84, -1}, {85, 0, 0, 85, EFFICIENT_CORE_PROC, 85, -1}, + {86, 0, 0, 86, EFFICIENT_CORE_PROC, 86, -1}, {87, 0, 0, 87, EFFICIENT_CORE_PROC, 87, -1}, + {88, 0, 0, 88, EFFICIENT_CORE_PROC, 88, -1}, {89, 0, 0, 89, EFFICIENT_CORE_PROC, 89, -1}, + {90, 0, 0, 90, EFFICIENT_CORE_PROC, 90, -1}, {91, 0, 0, 91, EFFICIENT_CORE_PROC, 91, -1}, + {92, 0, 0, 92, EFFICIENT_CORE_PROC, 92, -1}, {93, 0, 0, 93, EFFICIENT_CORE_PROC, 93, -1}, + {94, 0, 0, 94, EFFICIENT_CORE_PROC, 94, -1}, {95, 0, 0, 95, EFFICIENT_CORE_PROC, 95, -1}, + }, {{96, 0, 96, 0, 0, 0}}, ov::hint::SchedulingCoreType::ANY_CORE, false, @@ -756,13 +1995,66 @@ StreamGenerateionTestCase generation_tput_1sockets_96cores_2_unpinning = { {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, }; +StreamGenerateionTestCase generation_tput_1sockets_0cores_1 = { + 1, + false, + 0, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + false, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {}, + {}, + {{0, 0, 0, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::LATENCY, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; + +StreamGenerateionTestCase generation_tput_1sockets_0cores_1_reservation = { + 1, + false, + 0, + 0, + 0, + 0, + ov::hint::SchedulingCoreType::PCORE_ONLY, + true, + true, + true, + false, + true, + ov::hint::PerformanceMode::LATENCY, + {}, + 
{}, + {{0, 0, 0, 0, 0, 0}}, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + false, + ov::hint::PerformanceMode::LATENCY, + {{96, 0, 96, 0, 0, 0}}, + {{24, EFFICIENT_CORE_PROC, 4, 0, 0}}, +}; + #if defined(__linux__) || defined(_WIN32) INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, StreamGenerationTests, ::testing::Values(generation_latency_1sockets_14cores_3, + generation_latency_1sockets_14cores_3_reservation, + generation_latency_1sockets_14cores_4_reservation, generation_latency_1sockets_14cores_4, generation_latency_1sockets_14cores_5, generation_latency_2sockets_48cores_6, + generation_latency_2sockets_48cores_6_reservation, generation_latency_2sockets_48cores_7, generation_latency_2sockets_48cores_8, generation_latency_2sockets_48cores_9, @@ -778,10 +2070,13 @@ INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, generation_tput_2sockets_48cores_6, generation_tput_2sockets_48cores_7, generation_tput_2sockets_48cores_8, + generation_tput_2sockets_48cores_8_reservation, generation_tput_2sockets_48cores_9, generation_latency_1sockets_96cores_pinning, generation_tput_1sockets_96cores_pinning, - generation_tput_1sockets_96cores_2_pinning)); + generation_tput_1sockets_96cores_2_pinning, + generation_tput_1sockets_0cores_1, + generation_tput_1sockets_0cores_1_reservation)); #else INSTANTIATE_TEST_SUITE_P(smoke_StreamsGeneration, StreamGenerationTests, diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp index 0c7d85571661e5..9884ceb09f6122 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp @@ -20,7 +20,6 @@ struct StreamsCalculationTestCase { int input_threads; int input_infer_requests; int model_prefer_threads; - int input_socket_id; std::string input_perf_hint; std::set hint_llm_distribution_policy; std::vector> proc_type_table; @@ -39,7 
+38,6 @@ class StreamsCalculationTests : public ov::test::TestsCommon, test_data.input_threads, test_data.input_infer_requests, test_data.model_prefer_threads, - test_data.input_socket_id, test_data.input_perf_hint, test_data.hint_llm_distribution_policy, test_data.proc_type_table); @@ -55,7 +53,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_auto_1 = { 0, // param[in]: the number of threads in this simulation 0, // param[in]: the number of infer requests in this simulation 0, // param[in]: the model preferred number of threads in this simulation - 0, // param[in]: the current socket id of the running thread in this simulation "LATENCY", // param[in]: the performance hint in this simulation {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, // param[in]: the hint of max threads per stream in this // simulation @@ -74,7 +71,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_auto_2 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -86,7 +82,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_auto_3 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{208, 104, 0, 104, -1, -1}, @@ -108,7 +103,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_auto_4 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, @@ -126,7 +120,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_platform_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -138,7 +131,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_platform_2 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -150,7 +142,6 @@ StreamsCalculationTestCase 
_2sockets_104cores_latency_platform_3 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, @@ -170,7 +161,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_platform_4 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, @@ -182,7 +172,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -194,7 +183,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_2 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -206,7 +194,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_3 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, @@ -226,7 +213,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_4 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, @@ -238,7 +224,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_5 = { 0, 0, 0, - 0, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {10, 10, 0, 0, 1, 0}, {20, 20, 0, 0, 2, 1}, {20, 20, 0, 0, 3, 1}}, @@ -250,7 +235,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_6 = { 0, 0, 0, - 0, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {20, 20, 0, 0, 1, 1}, {10, 10, 0, 0, 2, 0}, {20, 20, 0, 0, 3, 1}}, @@ -262,7 +246,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_7 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, @@ -274,7 +257,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_8 = { 208, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -290,7 +272,6 
@@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_9 = { 104, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -302,7 +283,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_10 = { 52, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -315,7 +295,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_1 = { 20, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -327,7 +306,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_2 = { 20, 5, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -339,7 +317,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_4 = { 20, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -351,7 +328,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_5 = { 20, 5, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -363,7 +339,6 @@ StreamsCalculationTestCase _2sockets_104cores_latency_6 = { 208, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -379,7 +354,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -394,7 +368,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -411,7 +384,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_3 = { 20, 0, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -423,7 +395,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_4 = { 20, 0, 0, - 0, "THROUGHPUT", {}, {{208, 104, 
0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -435,7 +406,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_5 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -450,7 +420,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_6 = { 0, 0, 2, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -465,7 +434,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_7 = { 0, 0, 8, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -486,7 +454,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_7_1 = { 0, 0, 8, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -507,7 +474,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_7_2 = { 0, 0, 4, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, @@ -529,7 +495,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_8 = { 40, 0, 8, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -542,7 +507,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_9 = { 20, 2, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -555,7 +519,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_10 = { 0, 2, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -573,7 +536,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_11 = { 0, 5, 0, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -591,7 +553,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_12 = { 0, 2, 2, - 0, "THROUGHPUT", {}, {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, @@ -609,7 +570,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_13 = { 0, 0, 0, - 0, 
"THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -622,7 +582,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_14 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -635,7 +594,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_15 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -648,7 +606,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_16 = { 0, 0, 2, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -661,7 +618,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_17 = { 0, 0, 8, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -678,7 +634,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_18 = { 0, 2, 0, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -691,7 +646,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_19 = { 0, 5, 0, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -704,7 +658,6 @@ StreamsCalculationTestCase _2sockets_104cores_tput_20 = { 0, 2, 2, - 0, "THROUGHPUT", {}, {{104, 104, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 1}}, @@ -717,7 +670,6 @@ StreamsCalculationTestCase _2sockets_48cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -729,7 +681,6 @@ StreamsCalculationTestCase _2sockets_48cores_latency_2 = { 0, 0, 0, - 0, "LATENCY", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -741,7 +692,6 @@ StreamsCalculationTestCase _2sockets_48cores_latency_3 = { 96, 0, 0, - 0, "LATENCY", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -753,7 +703,6 @@ StreamsCalculationTestCase _2sockets_48cores_tput_1 = { 0, 0, 0, - 0, 
"THROUGHPUT", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -766,7 +715,6 @@ StreamsCalculationTestCase _2sockets_48cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -779,7 +727,6 @@ StreamsCalculationTestCase _2sockets_48cores_tput_3 = { 100, 0, 0, - 0, "THROUGHPUT", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -792,7 +739,6 @@ StreamsCalculationTestCase _2sockets_48cores_tput_4 = { 20, 0, 1, - 0, "THROUGHPUT", {}, {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -805,7 +751,6 @@ StreamsCalculationTestCase _2sockets_20cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{20, 20, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {10, 10, 0, 0, 1, 1}}, @@ -818,7 +763,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -831,7 +775,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_2 = { 10, 0, 0, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -844,7 +787,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_3 = { 0, 0, 6, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -857,7 +799,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_4 = { 0, 0, 14, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -873,7 +814,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_5 = { 0, 2, 14, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -889,7 +829,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_6 = { 0, 0, 0, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -905,7 +844,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_7 = { 0, 0, 6, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -918,7 +856,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_8 = { 0, 0, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -931,7 +868,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_9 = { 0, 2, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -944,7 
+880,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_10 = { 0, 0, 0, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -957,7 +892,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_11 = { 10, 0, 0, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -970,7 +904,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_12 = { 0, 0, 6, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -983,7 +916,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_13 = { 0, 0, 14, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -999,7 +931,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_14 = { 0, 2, 14, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1015,7 +946,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_15 = { 0, 0, 0, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1031,7 +961,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_16 = { 0, 0, 6, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -1044,7 +973,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_17 = { 0, 0, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -1057,7 +985,6 @@ StreamsCalculationTestCase _1sockets_14cores_latency_18 = { 0, 2, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -1070,7 +997,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1083,7 +1009,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1096,7 +1021,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_3 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1109,7 +1033,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_4 = { 12, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1122,7 +1045,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_5 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1135,7 +1057,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_6 = { 0, 0, 2, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1148,7 
+1069,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_7 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1161,7 +1081,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_8 = { 100, 0, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1174,7 +1093,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_9 = { 0, 8, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1187,7 +1105,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_10 = { 0, 4, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1200,7 +1117,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_11 = { 0, 2, 0, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1213,7 +1129,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_12 = { 0, 2, 2, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1226,7 +1141,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_13 = { 1, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1239,7 +1153,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_14 = { 9, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1252,7 +1165,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_15 = { 12, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1265,7 +1177,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_16 = { 15, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1278,7 +1189,6 @@ StreamsCalculationTestCase _1sockets_14cores_tput_17 = { 14, 0, 6, - 0, "LATENCY", {}, {{20, 6, 8, 6, 0, 0}}, @@ -1291,7 +1201,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1307,7 +1216,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_2 = { 8, 0, 0, - 0, "LATENCY", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1320,7 +1228,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_3 = { 0, 0, 2, - 0, "LATENCY", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1333,7 +1240,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_4 = { 0, 0, 10, - 0, "LATENCY", {}, {{12, 2, 8, 2, 0, 0}}, @@ 
-1349,7 +1255,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_5 = { 0, 0, 0, - 0, "LATENCY", {}, {{10, 2, 8, 0, 0, 0}}, @@ -1362,7 +1267,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_6 = { 0, 0, 2, - 0, "LATENCY", {}, {{10, 2, 8, 0, 0, 0}}, @@ -1375,7 +1279,6 @@ StreamsCalculationTestCase _1sockets_10cores_latency_7 = { 0, 0, 10, - 0, "LATENCY", {}, {{10, 2, 8, 0, 0, 0}}, @@ -1388,7 +1291,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1401,7 +1303,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1414,7 +1315,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_3 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1427,7 +1327,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_4 = { 6, 0, 0, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1440,7 +1339,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_5 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1453,7 +1351,6 @@ StreamsCalculationTestCase _1sockets_10cores_tput_6 = { 0, 0, 2, - 0, "THROUGHPUT", {}, {{12, 2, 8, 2, 0, 0}}, @@ -1466,7 +1363,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1482,7 +1378,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_2 = { 100, 0, 0, - 0, "LATENCY", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1498,7 +1393,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_3 = { 0, 0, 4, - 0, "LATENCY", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1511,7 +1405,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_4 = { 0, 0, 8, - 0, "LATENCY", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1527,7 +1420,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_5 = { 0, 0, 0, - 0, "LATENCY", {}, {{8, 4, 4, 0, 0, 0}}, @@ -1540,7 +1432,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_6 = { 0, 0, 4, - 0, "LATENCY", {}, {{8, 4, 4, 0, 0, 0}}, @@ -1553,7 
+1444,6 @@ StreamsCalculationTestCase _1sockets_8cores_latency_7 = { 0, 0, 8, - 0, "LATENCY", {}, {{8, 4, 4, 0, 0, 0}}, @@ -1566,7 +1456,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1579,7 +1468,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1592,7 +1480,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_3 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1605,7 +1492,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_4 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1618,7 +1504,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_5 = { 6, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1631,7 +1516,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_6 = { 8, 0, 0, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1644,7 +1528,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_7 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{12, 4, 4, 4, 0, 0}}, @@ -1657,7 +1540,6 @@ StreamsCalculationTestCase _1sockets_8cores_tput_8 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{8, 4, 4, 0, 0, 0}}, @@ -1670,7 +1552,6 @@ StreamsCalculationTestCase _1sockets_6cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1683,7 +1564,6 @@ StreamsCalculationTestCase _1sockets_6cores_latency_2 = { 100, 0, 0, - 0, "LATENCY", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1696,7 +1576,6 @@ StreamsCalculationTestCase _1sockets_6cores_latency_3 = { 0, 0, 0, - 0, "LATENCY", {}, {{6, 6, 0, 0, 0, 0}}, @@ -1709,7 +1588,6 @@ StreamsCalculationTestCase _1sockets_6cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1722,7 +1600,6 @@ StreamsCalculationTestCase _1sockets_6cores_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1735,7 +1612,6 @@ StreamsCalculationTestCase _1sockets_6cores_tput_3 = { 8, 0, 0, - 0, "THROUGHPUT", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1748,7 +1624,6 @@ 
StreamsCalculationTestCase _1sockets_6cores_tput_4 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{12, 6, 0, 6, 0, 0}}, @@ -1761,7 +1636,6 @@ StreamsCalculationTestCase _1sockets_4cores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{4, 4, 0, 0, 0, 0}}, @@ -1774,7 +1648,6 @@ StreamsCalculationTestCase _1sockets_4cores_tput_1 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{4, 4, 0, 0, 0, 0}}, @@ -1787,7 +1660,6 @@ StreamsCalculationTestCase _1sockets_4cores_tput_2 = { 0, 0, 8, - -1, "THROUGHPUT", {}, {{4, 4, 0, 0, 0, 0}}, @@ -1800,7 +1672,6 @@ StreamsCalculationTestCase _1sockets_4cores_tput_3 = { 0, 0, 8, - -1, "THROUGHPUT", {}, {{4, 4, 0, 0, 0, 0}}, @@ -1813,7 +1684,6 @@ StreamsCalculationTestCase _1sockets_ecores_latency_1 = { 0, 0, 0, - 0, "LATENCY", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1826,7 +1696,6 @@ StreamsCalculationTestCase _1sockets_ecores_latency_2 = { 4, 0, 0, - 0, "LATENCY", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1839,7 +1708,6 @@ StreamsCalculationTestCase _1sockets_ecores_latency_3 = { 0, 4, 0, - 0, "LATENCY", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1852,7 +1720,6 @@ StreamsCalculationTestCase _1sockets_ecores_latency_4 = { 0, 0, 4, - 0, "LATENCY", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1865,7 +1732,6 @@ StreamsCalculationTestCase _1sockets_ecores_tput_1 = { 0, 0, 1, - 0, "THROUGHPUT", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1878,7 +1744,6 @@ StreamsCalculationTestCase _1sockets_ecores_tput_2 = { 0, 0, 4, - 0, "THROUGHPUT", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1891,7 +1756,6 @@ StreamsCalculationTestCase _1sockets_ecores_tput_3 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1904,7 +1768,6 @@ StreamsCalculationTestCase _1sockets_ecores_tput_4 = { 0, 4, 0, - 0, "THROUGHPUT", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1917,7 +1780,6 @@ StreamsCalculationTestCase _1sockets_ecores_tput_5 = { 0, 0, 4, - 0, "THROUGHPUT", {}, {{16, 0, 16, 0, 0, 0}}, @@ -1930,7 +1792,6 @@ StreamsCalculationTestCase _1sockets_mock_tput_1 = { 15, 0, 1, - 0, "THROUGHPUT", {}, {{20, 6, 7, 6, 0, 0}}, @@ -1943,7 +1804,6 @@ 
StreamsCalculationTestCase _1sockets_mock_tput_2 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{27, 27, 0, 0, -1, -1}, {19, 19, 0, 0, 0, 0}, {8, 8, 0, 0, 1, 1}}, @@ -1956,7 +1816,6 @@ StreamsCalculationTestCase _1sockets_mock_tput_3 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{19, 19, 0, 0, -1, -1}, {11, 11, 0, 0, 0, 0}, {8, 8, 0, 0, 1, 1}}, @@ -1969,7 +1828,6 @@ StreamsCalculationTestCase _1sockets_mock_tput_4 = { 0, 0, 0, - 0, "THROUGHPUT", {}, {{8, 8, 0, 0, 0, 0}}, @@ -1982,7 +1840,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_1 = { 20, 0, 0, - 0, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {20, 20, 0, 0, 1, 0}, {10, 10, 0, 0, 2, 1}, {20, 20, 0, 0, 3, 1}}, @@ -1994,7 +1851,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_2 = { 0, 0, 0, - 1, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {20, 20, 0, 0, 1, 1}, {40, 40, 0, 0, 0, 0}}, @@ -2006,7 +1862,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_3 = { 0, 0, 0, - 1, "THROUGHPUT", {}, {{60, 60, 0, 0, -1, -1}, {20, 20, 0, 0, 1, 1}, {40, 40, 0, 0, 0, 0}}, @@ -2018,7 +1873,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_4 = { 10, 0, 0, - 1, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {20, 20, 0, 0, 1, 1}, {40, 40, 0, 0, 0, 0}}, @@ -2030,7 +1884,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_5 = { 50, 0, 0, - 1, "LATENCY", {}, {{60, 60, 0, 0, -1, -1}, {20, 20, 0, 0, 1, 1}, {40, 40, 0, 0, 0, 0}}, @@ -2042,7 +1895,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_6 = { 0, 0, 0, - 1, "LATENCY", {}, {{60, 0, 60, 0, -1, -1}, {20, 0, 20, 0, 1, 1}, {40, 0, 40, 0, 0, 0}}, @@ -2054,7 +1906,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_7 = { 0, 0, 0, - 1, "THROUGHPUT", {}, {{60, 0, 60, 0, -1, -1}, {20, 0, 20, 0, 1, 1}, {40, 0, 40, 0, 0, 0}}, @@ -2066,7 +1917,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_8 = { 10, 0, 0, - 1, "LATENCY", {}, {{60, 0, 60, 0, -1, -1}, {20, 0, 20, 0, 1, 1}, {40, 0, 40, 0, 0, 0}}, @@ -2078,7 +1928,6 @@ StreamsCalculationTestCase 
_2sockets_mock_latency_9 = { 50, 0, 0, - 1, "LATENCY", {}, {{60, 0, 60, 0, -1, -1}, {20, 0, 20, 0, 1, 1}, {40, 0, 40, 0, 0, 0}}, @@ -2090,7 +1939,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_10 = { 0, 0, 0, - 1, "LATENCY", {}, {{60, 30, 0, 30, -1, -1}, {20, 10, 0, 10, 1, 1}, {40, 20, 0, 20, 0, 0}}, @@ -2102,7 +1950,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_11 = { 0, 0, 0, - 1, "THROUGHPUT", {}, {{60, 30, 0, 30, -1, -1}, {20, 10, 0, 10, 1, 1}, {40, 20, 0, 20, 0, 0}}, @@ -2114,7 +1961,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_12 = { 15, 0, 0, - 1, "LATENCY", {}, {{60, 30, 0, 30, -1, -1}, {20, 10, 0, 10, 1, 1}, {40, 20, 0, 20, 0, 0}}, @@ -2126,7 +1972,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_13 = { 50, 0, 0, - 1, "LATENCY", {}, {{60, 30, 0, 30, -1, -1}, {20, 10, 0, 10, 1, 1}, {40, 20, 0, 20, 0, 0}}, @@ -2142,7 +1987,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_14 = { 0, 0, 0, - 3, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2158,7 +2002,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_15 = { 0, 0, 0, - 3, "THROUGHPUT", {}, {{200, 100, 0, 100, -1, -1}, @@ -2174,7 +2017,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_16 = { 15, 0, 0, - 3, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2190,7 +2032,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_17 = { 50, 0, 0, - 3, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2209,7 +2050,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_18 = { 0, 0, 0, - 6, "LATENCY", {}, {{440, 220, 0, 220, -1, -1}, @@ -2229,7 +2069,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_19 = { 0, 0, 0, - 6, "THROUGHPUT", {}, {{440, 220, 0, 220, -1, -1}, @@ -2249,7 +2088,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_20 = { 25, 0, 0, - 6, "LATENCY", {}, {{440, 220, 0, 220, -1, -1}, @@ -2269,7 +2107,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_21 = { 50, 0, 0, - 6, "LATENCY", {}, {{440, 220, 0, 220, -1, -1}, @@ -2292,7 +2129,6 @@ 
StreamsCalculationTestCase _2sockets_mock_latency_22 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{60, 30, 0, 30, -1, -1}, {40, 20, 0, 20, 0, 0}, {20, 10, 0, 10, 1, 1}}, @@ -2310,7 +2146,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_23 = { 0, 0, 0, - 0, "LATENCY", {}, {{60, 30, 0, 30, -1, -1}, {40, 20, 0, 20, 0, 0}, {20, 10, 0, 10, 1, 1}}, @@ -2322,7 +2157,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_24 = { 0, 0, 0, - 0, "LATENCY", {}, {{60, 30, 0, 30, -1, -1}, {40, 20, 0, 20, 0, 0}, {20, 10, 0, 10, 1, 1}}, @@ -2334,7 +2168,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_25 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{200, 100, 0, 100, -1, -1}, @@ -2355,7 +2188,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_26 = { 0, 0, 0, - 0, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2375,7 +2207,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_27 = { 0, 0, 0, - 0, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2395,7 +2226,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_28 = { 200, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{200, 100, 0, 100, -1, -1}, @@ -2416,7 +2246,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_29 = { 200, 0, 0, - 0, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2440,7 +2269,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_31 = { 140, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{200, 100, 0, 100, -1, -1}, @@ -2461,7 +2289,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_32 = { 70, 0, 0, - 0, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2477,7 +2304,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_33 = { 20, 0, 0, - 0, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, @@ -2493,7 +2319,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_34 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{48, 48, 0, 0, 
-1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, @@ -2505,7 +2330,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_1 = { 0, 0, 14, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{14, 6, 8, 0, 0, 0}}, @@ -2517,7 +2341,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_2 = { 0, 0, 6, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{14, 6, 8, 0, 0, 0}}, @@ -2529,7 +2352,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_3 = { 0, 0, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -2541,7 +2363,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_4 = { 0, 0, 6, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -2553,7 +2374,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_5 = { 0, 0, 14, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -2565,7 +2385,6 @@ StreamsCalculationTestCase _1sockets_mock_latency_6 = { 0, 0, 6, - 0, "LATENCY", {}, {{14, 6, 8, 0, 0, 0}}, @@ -2577,14 +2396,13 @@ StreamsCalculationTestCase _2sockets_mock_latency_35 = { 200, 0, 0, - 3, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, + {20, 10, 0, 10, 3, 3}, {80, 40, 0, 40, 0, 0}, {60, 30, 0, 30, 1, 1}, - {40, 20, 0, 20, 2, 2}, - {20, 10, 0, 10, 3, 3}}, + {40, 20, 0, 20, 2, 2}}, {{1, ALL_PROC, 200, -1, -1}, {0, MAIN_CORE_PROC, 10, 3, 3}, {0, HYPER_THREADING_PROC, 10, 3, 3}, @@ -2601,14 +2419,13 @@ StreamsCalculationTestCase _2sockets_mock_latency_36 = { 200, 0, 0, - 3, "LATENCY", {}, {{200, 100, 0, 100, -1, -1}, + {20, 10, 0, 10, 3, 3}, {80, 40, 0, 40, 0, 0}, {60, 30, 0, 30, 1, 1}, - {40, 20, 0, 20, 2, 2}, - {20, 10, 0, 10, 3, 3}}, + {40, 20, 0, 20, 2, 2}}, {{1, ALL_PROC, 200, -1, -1}, {0, MAIN_CORE_PROC, 10, 3, 3}, {0, HYPER_THREADING_PROC, 10, 3, 3}, @@ -2625,10 +2442,9 @@ StreamsCalculationTestCase _2sockets_mock_latency_37 = { 0, 0, 0, - 1, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, - {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, + {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 1, 1}, {24, 24, 
0, 0, 0, 0}}, {{1, MAIN_CORE_PROC, 48, -1, -1}, {-1, MAIN_CORE_PROC, 24, 1, 1}, {-1, MAIN_CORE_PROC, 24, 0, 0}}, }; StreamsCalculationTestCase _2sockets_mock_latency_38 = { @@ -2637,7 +2453,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_38 = { 0, 0, 0, - 0, "LATENCY", {ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL}, {{256, 128, 0, 128, 0, 0}, @@ -2653,7 +2468,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_39 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, @@ -2665,7 +2479,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_40 = { 0, 0, 0, - 0, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}}, @@ -2677,7 +2490,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_41 = { 0, 0, 0, - 1, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}}, @@ -2689,7 +2501,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_42 = { 0, 0, 0, - 1, "LATENCY", {}, {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 3, 1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}}, @@ -2701,7 +2512,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_43 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, @@ -2721,7 +2531,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_44 = { 0, 0, 0, - 1, "LATENCY", {}, {{208, 104, 0, 104, -1, -1}, @@ -2741,7 +2550,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_45 = { 0, 0, 0, - 0, "LATENCY", {}, {{208, 208, 0, 0, -1, -1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 0}, {52, 52, 0, 0, 2, 1}, {52, 52, 0, 0, 3, 1}}, @@ -2753,7 +2561,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_46 = { 0, 0, 0, - 1, "LATENCY", {}, {{208, 208, 0, 0, -1, -1}, {52, 52, 0, 0, 2, 1}, {52, 52, 0, 0, 3, 1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 0}}, @@ -2765,7 +2572,6 @@ 
StreamsCalculationTestCase _2sockets_mock_latency_47 = { 0, 0, 0, - 0, "LATENCY", {}, {{416, 208, 0, 208, -1, -1}, @@ -2785,7 +2591,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_48 = { 0, 0, 0, - 1, "LATENCY", {}, {{416, 208, 0, 208, -1, -1}, @@ -2805,7 +2610,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_49 = { 80, 0, 0, - 1, "LATENCY", {}, {{416, 208, 0, 208, -1, -1}, @@ -2823,7 +2627,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_50 = { 80, 0, 0, - 1, "LATENCY", {}, {{208, 208, 0, 0, -1, -1}, {52, 52, 0, 0, 2, 1}, {52, 52, 0, 0, 3, 1}, {52, 52, 0, 0, 0, 0}, {52, 52, 0, 0, 1, 0}}, @@ -2835,7 +2638,6 @@ StreamsCalculationTestCase _2sockets_mock_latency_51 = { 16, 0, 0, - 0, "LATENCY", {}, {{16, 16, 0, 0, -1, -1}, {8, 8, 0, 0, 0, 0}, {8, 8, 0, 0, 1, 1}}, diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp new file mode 100644 index 00000000000000..08c0cf2c9089e9 --- /dev/null +++ b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "common_test_utils/test_common.hpp" +#include "cpu_streams_calculation.hpp" + +using namespace testing; + +namespace ov { + +namespace intel_cpu { + +struct LinuxSortProcTableTestCase { + int current_numa_node_id; + std::vector> _proc_type_table_input; + std::vector> _proc_type_table_output; +}; + +class LinuxSortProcTableTests : public ov::test::TestsCommon, + public testing::WithParamInterface> { +public: + void SetUp() override { + const auto& test_data = std::get<0>(GetParam()); + + std::vector> test_proc_type_table = test_data._proc_type_table_input; + + sort_table_by_numa_node_id(test_data.current_numa_node_id, test_proc_type_table); + + ASSERT_EQ(test_proc_type_table, test_data._proc_type_table_output); + } +}; + +LinuxSortProcTableTestCase 
proc_table_2sockets_24cores_hyperthreading_1 = { + 0, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, +}; +LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_2 = { + 1, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}}, +}; +LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_3 = { + 2, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}}, +}; +LinuxSortProcTableTestCase proc_table_2sockets_24cores_hyperthreading_4 = { + 3, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}, {12, 6, 0, 6, 3, 1}}, + {{48, 24, 0, 24, -1, -1}, {12, 6, 0, 6, 3, 1}, {12, 6, 0, 6, 0, 0}, {12, 6, 0, 6, 1, 0}, {12, 6, 0, 6, 2, 1}}, +}; +LinuxSortProcTableTestCase proc_table_1sockets_mock = { + 3, + {{48, 24, 0, 24, 0, 0}}, + {{48, 24, 0, 24, 0, 0}}, +}; + +TEST_P(LinuxSortProcTableTests, UpdateProcTable) {} + +INSTANTIATE_TEST_SUITE_P(UpdateProcTableList, + LinuxSortProcTableTests, + testing::Values(proc_table_2sockets_24cores_hyperthreading_1, + proc_table_2sockets_24cores_hyperthreading_2, + proc_table_2sockets_24cores_hyperthreading_3, + proc_table_2sockets_24cores_hyperthreading_4, + proc_table_1sockets_mock)); +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp index f6046621463645..5072740240e2a5 100644 --- a/src/plugins/intel_gpu/src/graph/program.cpp +++ 
b/src/plugins/intel_gpu/src/graph/program.cpp @@ -122,6 +122,7 @@ static ov::threading::IStreamsExecutor::Config make_task_executor_config(const E streams, 1, core_type, + false, enable_cpu_pinning); return task_executor_config; diff --git a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp index 21344a403937a0..ed973666fd2e76 100644 --- a/src/plugins/intel_gpu/src/plugin/compiled_model.cpp +++ b/src/plugins/intel_gpu/src/plugin/compiled_model.cpp @@ -25,13 +25,17 @@ std::shared_ptr create_task_executor(const std::sh // exclusive_async_requests essentially disables the streams (and hence should be checked first) => aligned with // the CPU behavior return plugin->get_executor_manager()->get_executor("GPU"); - } else if (config.get_property(ov::hint::enable_cpu_pinning)) { + } else if (config.get_property(ov::hint::enable_cpu_pinning) || + config.get_property(ov::hint::enable_cpu_reservation)) { + bool enable_cpu_pinning = config.get_property(ov::hint::enable_cpu_pinning); + bool enable_cpu_reservation = config.get_property(ov::hint::enable_cpu_reservation); return std::make_shared( ov::threading::IStreamsExecutor::Config{"Intel GPU plugin executor", config.get_property(ov::num_streams), 1, ov::hint::SchedulingCoreType::PCORE_ONLY, - true}); + enable_cpu_reservation, + enable_cpu_pinning}); } else { return std::make_shared( ov::threading::IStreamsExecutor::Config{"Intel GPU plugin executor", @@ -262,6 +266,7 @@ ov::Any CompiledModel::get_property(const std::string& name) const { // Configs ov::PropertyName{ov::enable_profiling.name(), PropertyMutability::RO}, ov::PropertyName{ov::hint::enable_cpu_pinning.name(), PropertyMutability::RO}, + ov::PropertyName{ov::hint::enable_cpu_reservation.name(), PropertyMutability::RO}, ov::PropertyName{ov::hint::model_priority.name(), PropertyMutability::RO}, ov::PropertyName{ov::intel_gpu::hint::host_task_priority.name(), PropertyMutability::RO}, 
ov::PropertyName{ov::intel_gpu::hint::queue_priority.name(), PropertyMutability::RO}, diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index 4058b38dd78584..f98ffd0128bf6a 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -643,6 +643,7 @@ std::vector Plugin::get_supported_properties() const { ov::PropertyName{ov::hint::num_requests.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::inference_precision.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::enable_cpu_pinning.name(), PropertyMutability::RW}, + ov::PropertyName{ov::hint::enable_cpu_reservation.name(), PropertyMutability::RW}, ov::PropertyName{ov::device::id.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::dynamic_quantization_group_size.name(), PropertyMutability::RW}, ov::PropertyName{ov::hint::activations_scale_factor.name(), PropertyMutability::RW}, diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 4ba78c74ee7597..89edba4a69eee1 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -46,6 +46,7 @@ void ExecutionConfig::set_default() { std::make_tuple(ov::hint::execution_mode, ov::hint::ExecutionMode::PERFORMANCE), std::make_tuple(ov::hint::num_requests, 0), std::make_tuple(ov::hint::enable_cpu_pinning, false), + std::make_tuple(ov::hint::enable_cpu_reservation, false), std::make_tuple(ov::intel_gpu::hint::host_task_priority, ov::hint::Priority::MEDIUM), std::make_tuple(ov::intel_gpu::hint::queue_throttle, ov::intel_gpu::hint::ThrottleLevel::MEDIUM), @@ -268,6 +269,16 @@ void ExecutionConfig::apply_user_properties(const cldnn::device_info& info) { if (get_property(ov::intel_gpu::use_onednn)) { set_property(ov::intel_gpu::queue_type(QueueTypes::in_order)); } + if 
(!is_set_by_user(ov::hint::enable_cpu_reservation)) { + if (get_property(ov::hint::enable_cpu_pinning)) { + set_property(ov::hint::enable_cpu_reservation(true)); + } + } + if (get_property(ov::hint::enable_cpu_reservation)) { + if (!is_set_by_user(ov::hint::enable_cpu_pinning)) { + set_property(ov::hint::enable_cpu_pinning(true)); + } + } user_properties.clear(); } diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp new file mode 100644 index 00000000000000..07d4879257185c --- /dev/null +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include + +#include "common_test_utils/file_utils.hpp" +#include "common_test_utils/subgraph_builders/2_input_subtract.hpp" +#include "common_test_utils/subgraph_builders/multi_single_conv.hpp" +#include "common_test_utils/ov_plugin_cache.hpp" +#include "openvino/openvino.hpp" +#include "openvino/runtime/intel_gpu/properties.hpp" + +using Device = std::string; +using Config = ov::AnyMap; +using GpuReservationTest = ::testing::Test; + +TEST_F(GpuReservationTest, Mutiple_CompiledModel_Reservation) { + std::vector> models; + Config config = {ov::enable_profiling(true)}; + std::vector target_devices = {ov::test::utils::DEVICE_CPU, ov::test::utils::DEVICE_GPU}; + std::atomic counter{0u}; + + models.emplace_back(ov::test::utils::make_2_input_subtract()); + models.emplace_back(ov::test::utils::make_multi_single_conv()); + + auto core = ov::test::utils::PluginCache::get().core(); + core->set_property(target_devices[1], config); + + ov::AnyMap property_config = {{ov::num_streams.name(), 1}, + {ov::inference_num_threads.name(), 1}, + {ov::hint::enable_cpu_reservation.name(), true}}; + ov::AnyMap property_config_gpu = {{ov::num_streams.name(), ov::streams::Num(1)}, + 
{ov::hint::enable_cpu_reservation.name(), true}}; + + std::vector threads(2); + for (auto& thread : threads) { + thread = std::thread([&]() { + auto value = counter++; + auto compiled_model = core->compile_model(models[value % models.size()], + target_devices[value % target_devices.size()], + value == 0 ? property_config : property_config_gpu); + auto num_streams = compiled_model.get_property(ov::num_streams.name()); + auto cpu_reservation = compiled_model.get_property(ov::hint::enable_cpu_reservation.name()); + ASSERT_EQ(num_streams, ov::streams::Num(1)); + ASSERT_EQ(cpu_reservation, true); + }); + } + + for (auto& thread : threads) { + if (thread.joinable()) + thread.join(); + } +} \ No newline at end of file diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 17b2b91d034bd6..e026a44fa74ce0 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -33,6 +33,7 @@ const std::vector gpu_compileModel_properties = { {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY), ov::hint::num_requests(10), ov::hint::enable_cpu_pinning(true), + ov::hint::enable_cpu_reservation(false), ov::enable_profiling(true)}}; INSTANTIATE_TEST_SUITE_P(smoke_gpuCompileModelBehaviorTests, From 9d06bdfa243ed786c994a6668308de57d3e6b33f Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Thu, 16 Jan 2025 15:52:54 +0000 Subject: [PATCH 11/97] Bump cache-apt-pkgs-action in documentation workflow (#28495) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Details: Old versions of `cache-apt-pkgs-action` used `upload-artifact@v3` action, which is deprecated and going to be disable on January 30. 
I wonder why Dependabot didn't do this 🤔 --- .github/workflows/build_doc.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index a68f5dbd976f33..df85b1ef3aa385 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -25,7 +25,7 @@ jobs: lfs: 'true' - name: Install apt-get dependencies - uses: awalsh128/cache-apt-pkgs-action@a6c3917cc929dd0345bfb2d3feaf9101823370ad # v1.4.2 + uses: awalsh128/cache-apt-pkgs-action@5902b33ae29014e6ca012c5d8025d4346556bd40 # v1.4.3 with: packages: graphviz texlive liblua5.2-0 libclang1-9 libclang-cpp9 version: 3.0 From 83c047443def45bea7b70fd4d1319bd43fdc6825 Mon Sep 17 00:00:00 2001 From: Alina Kladieva Date: Thu, 16 Jan 2025 18:58:11 +0100 Subject: [PATCH 12/97] Bump product version 2025.1 (#28490) Signed-off-by: Alina Kladieva --- cmake/packaging/debian.cmake | 1 + cmake/packaging/rpm.cmake | 1 + src/core/include/openvino/core/version.hpp | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmake/packaging/debian.cmake b/cmake/packaging/debian.cmake index cec21073cf43b2..b9d7cd1040ddf3 100644 --- a/cmake/packaging/debian.cmake +++ b/cmake/packaging/debian.cmake @@ -100,6 +100,7 @@ macro(ov_cpack_settings) 2024.4.0 2024.5.0 2024.6.0 + 2025.0.0 ) ov_check_conflicts_versions(conflicting_versions) diff --git a/cmake/packaging/rpm.cmake b/cmake/packaging/rpm.cmake index e0c89273383328..4d2e8452d53613 100644 --- a/cmake/packaging/rpm.cmake +++ b/cmake/packaging/rpm.cmake @@ -88,6 +88,7 @@ macro(ov_cpack_settings) 2024.4.0 2024.5.0 2024.6.0 + 2025.0.0 ) ov_check_conflicts_versions(conflicting_versions) diff --git a/src/core/include/openvino/core/version.hpp b/src/core/include/openvino/core/version.hpp index aa8130e246415c..e3230e7434ec76 100644 --- a/src/core/include/openvino/core/version.hpp +++ b/src/core/include/openvino/core/version.hpp @@ -20,7 +20,7 @@ */ #define OPENVINO_VERSION_MAJOR 2025 -#define 
OPENVINO_VERSION_MINOR 0 +#define OPENVINO_VERSION_MINOR 1 #define OPENVINO_VERSION_PATCH 0 namespace ov { From 518b23f02bf4b68441d28003550dd5cfa591a976 Mon Sep 17 00:00:00 2001 From: Attila Csok Date: Thu, 16 Jan 2025 15:32:19 +0200 Subject: [PATCH 13/97] [intel-npu] Quickfix for core.get_available_devices exception on hosts with no NPU compiler (#28484) ### Details: - Quickfix for core.get_available_devices exception on hosts with no NPU compiler - get_available_devices calls get_property(supported_properties), which throws exception if there is no compiler. catching that exception. ### Tickets: - [*CVS-160690*](https://jira.devtools.intel.com/browse/CVS-160690) --- src/plugins/intel_npu/src/plugin/src/plugin.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_npu/src/plugin/src/plugin.cpp b/src/plugins/intel_npu/src/plugin/src/plugin.cpp index a0af187d42944d..cfcec542e6219e 100644 --- a/src/plugins/intel_npu/src/plugin/src/plugin.cpp +++ b/src/plugins/intel_npu/src/plugin/src/plugin.cpp @@ -585,10 +585,17 @@ void Plugin::reset_supported_properties() const { } void Plugin::reset_compiler_dependent_properties() const { + uint32_t active_compiler_version = 0; // get active compiler version - CompilerAdapterFactory compilerAdapterFactory; - auto dummyCompiler = compilerAdapterFactory.getCompiler(_backends->getIEngineBackend(), _globalConfig); - uint32_t active_compiler_version = dummyCompiler->get_version(); + try { + CompilerAdapterFactory compilerAdapterFactory; + auto dummyCompiler = compilerAdapterFactory.getCompiler(_backends->getIEngineBackend(), _globalConfig); + active_compiler_version = dummyCompiler->get_version(); + } catch (...) { + _logger.warning( + "No available compiler. 
Can not determine version > compiler dependent properties remain hidden"); + return; + } // NPU_COMPILER_DYNAMIC_QUANTIZATION // unpublish if compiler version requirement is not met From 4e3c4572e64384dfd4779111123211cca8d7daf8 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Thu, 16 Jan 2025 15:15:42 +0100 Subject: [PATCH 14/97] Check for 'dot' tool presence in PATH before calling in VisualizeTree pass and make it debug only (#28432) ### Details: Check for dot tool availability in PATH before execution: - Ensure the Graphviz `dot` tool is installed and accessible in the system PATH before attempting to execute it - If the dot tool is not found, fail gracefully with a clear error message instead of producing the uninformative output: `sh: 1: dot: not found` - Make external 'dot' process call only when `ENABLE_OPENVINO_DEBUG` define is on - Add missing `if (VISUALIZE_TESTS_TREE)` in tests where `VisualizeTree` pass is invoked ### Tickets: - N/A --- .../offline_transformations/pruning_test.cpp | 18 +++++++++++------- src/core/src/pass/visualize_tree.cpp | 5 ++++- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/src/common/transformations/tests/offline_transformations/pruning_test.cpp b/src/common/transformations/tests/offline_transformations/pruning_test.cpp index 3f8e9356692ec8..924e25f6f42d22 100644 --- a/src/common/transformations/tests/offline_transformations/pruning_test.cpp +++ b/src/common/transformations/tests/offline_transformations/pruning_test.cpp @@ -4738,9 +4738,11 @@ TEST_F(TransformationTestsF, MaskPropagationComplexReshape) { }; manager.register_pass(); - manager.register_pass( - std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationComplexReshapeWithMasks.svg", - modifier); + if (VISUALIZE_TESTS_TREE) { + manager.register_pass( + std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationComplexReshapeWithMasks.svg", + modifier); + } } comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } @@ -4929,10 +4931,12 @@ 
TEST_P(TransformationTestsBoolParamF, MaskPropagationReshapedPassThroughP) { }; manager.register_pass(); - auto postfix = (add_shape_of) ? "True" : "False"; - manager.register_pass( - std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReverseFlattenWithMasks" + postfix + ".svg", - modifier); + if (VISUALIZE_TESTS_TREE) { + auto postfix = (add_shape_of) ? "True" : "False"; + manager.register_pass( + std::string(VISUALIZE_TREE_ROOT) + "MaskPropagationReverseFlattenWithMasks" + postfix + ".svg", + modifier); + } comparator.enable(FunctionsComparator::CmpValues::ACCURACY); } diff --git a/src/core/src/pass/visualize_tree.cpp b/src/core/src/pass/visualize_tree.cpp index 62e86abcc34b63..b74a8fcebe388a 100644 --- a/src/core/src/pass/visualize_tree.cpp +++ b/src/core/src/pass/visualize_tree.cpp @@ -690,8 +690,11 @@ void ov::pass::VisualizeTree::render() const { out.close(); if (!m_dot_only && ov::util::to_lower(ext) != ".dot") { -#ifndef _WIN32 +#if defined(ENABLE_OPENVINO_DEBUG) && !defined(_WIN32) std::stringstream ss; + if (system("command -v dot > /dev/null 2>&1") != 0) { + OPENVINO_THROW("Graphviz 'dot' command not found in PATH"); + } ss << "dot -T" << output_format << " " << dot_file << " -o" << m_name; auto cmd = ss.str(); auto stream = popen(cmd.c_str(), "r"); From 9f0a52b5d72c7afa02503741ae1cf0dc925d22b2 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Thu, 16 Jan 2025 15:09:45 +0000 Subject: [PATCH 15/97] NPUW: LLMInferRequest - not copy kvcache for last generated token (#28489) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp index 2e987036483e34..647a4b9f53142b 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/llm_infer_request.cpp @@ -229,6 
+229,10 @@ void ov::npuw::LLMInferRequest::infer_generate(ov::SoPtr input_ids, m_logits = m_kvcache_request->get_tensor(m_kvcache_out_ports.at("logits")); kvcache_desc.num_stored_tokens += 1; + if (kvcache_desc.num_stored_tokens == kvcache_desc.total_size) { + return; + } + LOG_DEBUG("Write KV-cache for the new token to the correct input position for next iteration."); const std::size_t kStartOutputKVCacheLayers = 1u; const auto& kvcache_compiled = m_kvcache_request->get_compiled_model(); From 0a20c6f465eda48db46e4c5e572bc7fb8c534ece Mon Sep 17 00:00:00 2001 From: Ivan Novoselov Date: Thu, 16 Jan 2025 15:22:08 +0000 Subject: [PATCH 16/97] [Snippets] Registers' assignment optimization (#27391) ### Details: - *Refactor and optimize the AssignRegisters pass* - *This will address load time issues for large IRs* - *This is also a prerequisite for ABI reg spills optimization* ### Tickets: - *121642*, *113661* (Load time issues) - *154722* (ABI reg spills overhead elimination) - *139932* (Separate reg pools and counters for gp and vec regs) - *157742* (fix RTTI for lowered passes) --- .../snippets/docs/snippets_design_guide.md | 18 +- .../snippets/include/snippets/emitter.hpp | 13 +- .../snippets/include/snippets/generator.hpp | 2 +- .../include/snippets/lowered/expression.hpp | 3 + .../snippets/lowered/expression_factory.hpp | 5 + .../lowered/pass/allocate_buffers.hpp | 4 +- .../lowered/pass/assign_registers.hpp | 12 +- .../snippets/lowered/pass/brgemm_blocking.hpp | 2 +- .../pass/clean_repeated_ptr_shifts.hpp | 2 +- .../lowered/pass/init_live_ranges.hpp | 33 ++ .../snippets/lowered/pass/init_registers.hpp | 36 ++ .../lowered/pass/insert_reg_spills.hpp | 44 +++ .../pass/mha_parallel_wa_optimizer.hpp | 1 + .../snippets/lowered/pass/optimize_domain.hpp | 2 +- .../include/snippets/lowered/pass/pass.hpp | 6 +- .../lowered/pass/runtime_optimizer.hpp | 1 + .../lowered/pass/serialize_control_flow.hpp | 2 +- .../lowered/pass/serialize_data_flow.hpp | 2 +- 
.../lowered/pass/set_buffer_reg_group.hpp | 2 +- .../lowered/pass/set_load_store_scalar.hpp | 2 +- .../snippets/lowered/pass/split_loops.hpp | 4 +- .../lowered/pass/validate_expanded_loops.hpp | 2 +- .../snippets/lowered/pass/validate_shapes.hpp | 2 +- .../lowered/pass/validate_unified_loops.hpp | 2 +- .../include/snippets/lowered/reg_manager.hpp | 69 ++++ .../snippets/include/snippets/op/kernel.hpp | 15 +- .../include/snippets/op/reg_spill.hpp | 81 ++++ .../include/snippets/snippets_isa.hpp | 1 + .../include/snippets/target_machine.hpp | 20 +- .../include/snippets/utils/reg_utils.hpp | 30 ++ src/common/snippets/src/emitter.cpp | 36 +- src/common/snippets/src/generator.cpp | 60 +-- .../snippets/src/lowered/expression.cpp | 21 +- .../src/lowered/expression_factory.cpp | 34 ++ src/common/snippets/src/lowered/linear_ir.cpp | 4 +- .../src/lowered/pass/assign_registers.cpp | 372 +++++------------- .../src/lowered/pass/init_live_ranges.cpp | 90 +++++ .../src/lowered/pass/init_registers.cpp | 35 ++ .../src/lowered/pass/insert_broadcastmove.cpp | 3 + .../src/lowered/pass/insert_reg_spills.cpp | 88 +++++ .../load_movebroadcast_to_broadcastload.cpp | 8 +- .../snippets/src/lowered/pass/validate.cpp | 10 +- .../snippets/src/lowered/port_descriptor.cpp | 2 +- src/common/snippets/src/op/kernel.cpp | 8 - src/common/snippets/src/op/reg_spill.cpp | 87 ++++ src/common/snippets/src/op/subgraph.cpp | 12 +- .../src/shape_inference/shape_inference.cpp | 4 +- .../snippets/tests/include/lowering_utils.hpp | 5 +- .../snippets/tests/src/lir_comparator.cpp | 4 +- .../snippets/tests/src/lowering_utils.cpp | 27 ++ .../plugin/aarch64/jit_eltwise_emitters.cpp | 2 +- .../emitters/plugin/aarch64/jit_emitter.cpp | 4 + .../emitters/plugin/aarch64/jit_emitter.hpp | 1 + .../src/emitters/plugin/x64/utils.cpp | 253 +++++++----- .../src/emitters/plugin/x64/utils.hpp | 52 ++- .../snippets/aarch64/cpu_generator.cpp | 37 +- .../snippets/aarch64/cpu_generator.hpp | 6 +- 
.../snippets/aarch64/jit_kernel_emitter.cpp | 242 ++++++------ .../snippets/aarch64/jit_kernel_emitter.hpp | 31 +- .../snippets/jit_container_emitter.cpp | 58 --- .../snippets/jit_container_emitter.hpp | 29 -- .../emitters/snippets/x64/cpu_generator.cpp | 67 +++- .../emitters/snippets/x64/cpu_generator.hpp | 6 +- .../snippets/x64/jit_binary_call_emitter.cpp | 68 ++++ .../snippets/x64/jit_binary_call_emitter.hpp | 73 ++++ .../x64/jit_brgemm_copy_b_emitter.cpp | 24 +- .../x64/jit_brgemm_copy_b_emitter.hpp | 3 +- .../snippets/x64/jit_brgemm_emitter.cpp | 28 +- .../snippets/x64/jit_brgemm_emitter.hpp | 3 +- .../snippets/x64/jit_kernel_emitter.cpp | 194 ++++----- .../snippets/x64/jit_kernel_emitter.hpp | 45 +-- .../x64/jit_perf_count_chrono_emitters.cpp | 4 +- .../snippets/x64/jit_reg_spill_emitters.cpp | 87 ++++ .../snippets/x64/jit_reg_spill_emitters.hpp | 71 ++++ .../x64/jit_segfault_detector_emitter.cpp | 2 +- .../x64/kernel_executors/brgemm_copy_b.cpp | 2 +- .../src/emitters/snippets/x64/utils.cpp | 17 +- .../src/emitters/snippets/x64/utils.hpp | 21 +- .../src/emitters/snippets/x64/verbose.cpp | 14 +- .../emitters/tpp/x64/jit_brgemm_emitter.hpp | 21 +- .../emitters/tpp/x64/jit_eltwise_emitters.cpp | 59 +-- .../emitters/tpp/x64/jit_eltwise_emitters.hpp | 81 ++-- .../emitters/tpp/x64/jit_equation_emitter.cpp | 98 ++--- .../emitters/tpp/x64/jit_equation_emitter.hpp | 16 +- .../src/emitters/tpp/x64/jit_tpp_emitter.cpp | 61 +-- .../src/emitters/tpp/x64/jit_tpp_emitter.hpp | 35 +- .../adjust_brgemm_copy_b_loop_ports.hpp | 2 +- .../brgemm_copy_b_loop_ports_adjuster.hpp | 1 + .../x64/pass/lowered/brgemm_cpu_blocking.hpp | 4 +- .../lowered/external_repacking_adjuster.hpp | 1 + .../lowered/insert_brgemm_copy_buffers.hpp | 2 +- 91 files changed, 2067 insertions(+), 1086 deletions(-) create mode 100644 src/common/snippets/include/snippets/lowered/pass/init_live_ranges.hpp create mode 100644 src/common/snippets/include/snippets/lowered/pass/init_registers.hpp create mode 100644 
src/common/snippets/include/snippets/lowered/pass/insert_reg_spills.hpp create mode 100644 src/common/snippets/include/snippets/lowered/reg_manager.hpp create mode 100644 src/common/snippets/include/snippets/op/reg_spill.hpp create mode 100644 src/common/snippets/include/snippets/utils/reg_utils.hpp create mode 100644 src/common/snippets/src/lowered/pass/init_live_ranges.cpp create mode 100644 src/common/snippets/src/lowered/pass/init_registers.cpp create mode 100644 src/common/snippets/src/lowered/pass/insert_reg_spills.cpp create mode 100644 src/common/snippets/src/op/reg_spill.cpp delete mode 100644 src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.cpp delete mode 100644 src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.hpp create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.cpp create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.hpp create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.cpp create mode 100644 src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.hpp diff --git a/src/common/snippets/docs/snippets_design_guide.md b/src/common/snippets/docs/snippets_design_guide.md index 2cb50744d0b3dd..ea9b13b6e7dae7 100644 --- a/src/common/snippets/docs/snippets_design_guide.md +++ b/src/common/snippets/docs/snippets_design_guide.md @@ -630,13 +630,15 @@ Let's discuss the target-independent stage first. The `Preparation` consists of low-level target-independent transformations needed to prepare the `IR` for code generation. There are currently two such transformations: -1. `AssignRegisters` assigns abstract registers to `Expressions` based on their data dependencies. -The easiest way to think about the assignment logic is that a register is assigned to every `PortConnector` to ensure appropriate data propagation. 
-However, every `Expression` needs to know the assigned registers, so they are stored is the `PortDescriptors` (and could be obtained via `get_reg()`). -Consequently, all the ports connected to the same `PortConnector` will have the same register in their `PortDescriptors`. -`AssignRegisters` also supports register re-utilization, so if all `ExpressionPorts` connected to this `PortConnector` are evaluated, then the corresponding register may be reused by other `PortConnector`. -In other words, when all the `Expressions` that required input data in a certain register are evaluated, the register may be reused to hold another `Expression's` output. -`AssignRegisters` also supports two types of registers: general-purpose and vector ones. +1. `InitRegisters` assigns registers to `Expressions` based on their data dependencies. +This register assignment is organized in three steps implemented as separate passes: `InitLiveRanges`, `AssignRegisters` and `InsertRegSpills`. + * `InitLiveRanges` assigns an abstract register to every `PortConnector` and determines their live intervals based on data dependencies. +Note that the assigned registers are stored in `PortDescriptors` and could be obtained via `get_reg()`. +Similarly, the `get_live_regs()` method returns live registers for the given expression. + * `AssignRegisters` uses the information collected by `InitLiveRanges` to map abstract register to the physical ones. +Physical registers that have non-overlapping live intervals will be reused. + * Finally, `InsertRegSpills` inserts `RegSpillBegin` and `RegSpillEnd` operations to spill (and restore) some registers to the stack and update live registers for affected expressions. +For example, this is needed to reduce ABI call overheads if we need to call a binary inside a loop. 
Different types of registers are managed and assigned independently, and a particular register type required by an `Expression` is provided by the `ov::snippets::Generator` (or a derived generator for target-specific `Ops`). 2. `InsertSpecificIterations` injects initialization section before a loop body and tail-processing section after a loop body if needed. Note that every loop has two parameters that specify how its body is evaluated: `work_amount` and `increment` The `work_amount` indicates how much of the data needs to be processed, it often equals to the dimension's size the loop is working on. @@ -647,7 +649,7 @@ So if a loop's `work_amount` is not evenly divisible by its `increment`, it mean 4. `OptimizeLoopSingleEvaluation` moves all pointer arithmetic to finalization offsets in `LoopEnd`, and marks the loops that will be executed only once. This information will be used during code emission to eliminate redundant instructions. -Please see [assign_registers.cpp](../src/lowered/pass/assign_registers.cpp) and [insert_specific_iterations.cpp](../src/lowered/pass/insert_specific_iterations.cpp) for more info regarding the main passes in the `Preparation` stage. +Please see [init_registers.cpp](../src/lowered/pass/init_registers.cpp) and [insert_specific_iterations.cpp](../src/lowered/pass/insert_specific_iterations.cpp) for more info regarding the main passes in the `Preparation` stage. When the `Preparation` is finished, the `Generator` constructs target-specific emitters by calling `init_emitter(target)` method for every `Expression` in the `LinearIR`, where the `target` is a `TargetMachine` instance. The `TargetMachine` is a class that provides generator with target-specific information, such as supported instruction sets, vector register size etc. 
diff --git a/src/common/snippets/include/snippets/emitter.hpp b/src/common/snippets/include/snippets/emitter.hpp index ef4f0368afc070..0143fd56df6eee 100644 --- a/src/common/snippets/include/snippets/emitter.hpp +++ b/src/common/snippets/include/snippets/emitter.hpp @@ -16,25 +16,28 @@ namespace snippets { * @interface RegType * @brief Register type of input and output operations */ -enum class RegType { gpr, vec, undefined }; +enum class RegType { gpr, vec, mask, undefined }; /** * @interface Reg * @brief Register representation: type of register and index */ struct Reg { + enum {UNDEFINED_IDX = std::numeric_limits<size_t>::max()}; Reg() = default; Reg(RegType type_, size_t idx_) : type(type_), idx(idx_) {} - RegType type = RegType::gpr; - size_t idx = 0; + bool is_defined() const { return type != RegType::undefined && idx != UNDEFINED_IDX; } + RegType type = RegType::undefined; + size_t idx = UNDEFINED_IDX; friend bool operator==(const Reg& lhs, const Reg& rhs); + friend bool operator<(const Reg& lhs, const Reg& rhs); + friend bool operator>(const Reg& lhs, const Reg& rhs); friend bool operator!=(const Reg& lhs, const Reg& rhs); + friend std::ostream& operator<<(std::ostream& s, const Reg& r); }; using RegInfo = std::pair<std::vector<Reg>, std::vector<Reg>>; -std::string regTypeToStr(const RegType& type); - /** * @interface Emitter * @brief Base class for all target specific code emitters used by generator.
diff --git a/src/common/snippets/include/snippets/generator.hpp b/src/common/snippets/include/snippets/generator.hpp index 42d2d7fb28a077..065bd275bdecc2 100644 --- a/src/common/snippets/include/snippets/generator.hpp +++ b/src/common/snippets/include/snippets/generator.hpp @@ -65,7 +65,7 @@ class Schedule { * @brief Target independent code generator interface * @ingroup snippets */ -class Generator { +class Generator : public std::enable_shared_from_this<Generator>{ public: /** * @brief Default constructor diff --git a/src/common/snippets/include/snippets/lowered/expression.hpp b/src/common/snippets/include/snippets/lowered/expression.hpp index 286f561b5bcb03..18ad65ad1d7304 100644 --- a/src/common/snippets/include/snippets/lowered/expression.hpp +++ b/src/common/snippets/include/snippets/lowered/expression.hpp @@ -35,6 +35,8 @@ class Expression : public std::enable_shared_from_this<Expression> { RegInfo get_reg_info() const; void set_reg_info(const RegInfo& rinfo); + const std::set<Reg>& get_live_regs() const {return m_live_regs; } + void set_live_regs(std::set<Reg> live_regs) { m_live_regs = std::move(live_regs); } double get_exec_num() const { return m_exec_num; } @@ -130,6 +132,7 @@ class Expression : public std::enable_shared_from_this<Expression> { // 2. This number can be changed and updated during whole pipeline, so its absolute values are meaningless. // 3. This number can be negative, positive and zero.
double m_exec_num = 0; + std::set m_live_regs{}; }; } // namespace lowered diff --git a/src/common/snippets/include/snippets/lowered/expression_factory.hpp b/src/common/snippets/include/snippets/lowered/expression_factory.hpp index d617eb3d03b410..30d789380e5386 100644 --- a/src/common/snippets/include/snippets/lowered/expression_factory.hpp +++ b/src/common/snippets/include/snippets/lowered/expression_factory.hpp @@ -10,6 +10,7 @@ #include "snippets/op/loop.hpp" #include "snippets/op/buffer.hpp" #include "snippets/op/perf_count.hpp" +#include "snippets/op/reg_spill.hpp" namespace ov { namespace snippets { @@ -35,6 +36,10 @@ class ExpressionFactory { const std::shared_ptr& shape_infer_factory); static ExpressionPtr create(const std::shared_ptr& n, const std::vector& inputs, const std::shared_ptr& shape_infer_factory); + static ExpressionPtr create(const std::shared_ptr& n, const std::vector& inputs, + const std::shared_ptr& shape_infer_factory); + static ExpressionPtr create(const std::shared_ptr& n, const std::vector& inputs, + const std::shared_ptr& shape_infer_factory); // Note: PerfCountBegin nodes have a PerfCountEnd ov::Output, but corresponding expression should not have any outputs to avoid register allocation #ifdef SNIPPETS_DEBUG_CAPS diff --git a/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp b/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp index 00ccd14925969d..1572d1653e35f9 100644 --- a/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/allocate_buffers.hpp @@ -25,11 +25,11 @@ namespace pass { */ class AllocateBuffers: public RangedPass { public: - OPENVINO_RTTI("AllocateBuffers", "", RangedPass); + OPENVINO_RTTI("AllocateBuffers", "", RangedPass) AllocateBuffers(bool is_optimized = true); /** - * @brief Apply the pass to the Linear IR + * @brief Apply the pass to the Linear IR` * @param linear_ir the target Linear IR * @return 
status of the pass */ diff --git a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp index 986a099246804d..37d87373e3f5dc 100644 --- a/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/assign_registers.hpp @@ -6,6 +6,7 @@ #include "pass.hpp" #include "snippets/generator.hpp" +#include "snippets/lowered/reg_manager.hpp" namespace ov { namespace snippets { @@ -20,16 +21,15 @@ namespace pass { */ class AssignRegisters : public Pass { public: - OPENVINO_RTTI("AssignRegisters", "", Pass); - explicit AssignRegisters(const std::function& out)>& mapper, const size_t reg_cnt) - : m_reg_type_mapper(mapper), reg_count(reg_cnt) {} + OPENVINO_RTTI("AssignRegisters", "0", Pass) + explicit AssignRegisters(RegManager& reg_manager) : m_reg_manager(reg_manager) {} bool run(LinearIR& linear_ir) override; private: - void set_reg_types(LinearIR& linear_ir); + using RegMap = std::map; + RegMap assign_regs_manually(const LinearIR& linear_ir, std::set& gpr_pool, std::set& vec_pool); - std::function& out)> m_reg_type_mapper; - size_t reg_count; + RegManager& m_reg_manager; }; } // namespace pass diff --git a/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp b/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp index 3b59f83003d565..90a81e2512d3b1 100644 --- a/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/brgemm_blocking.hpp @@ -94,7 +94,7 @@ template ::value, bool>::type = true> class BrgemmBlocking : public snippets::lowered::pass::RangedPass, public BrgemmBlockingBase { public: - OPENVINO_RTTI("BrgemmBlocking", "", RangedPass); + OPENVINO_RTTI("BrgemmBlocking", "", RangedPass) bool run(snippets::lowered::LinearIR& linear_ir, snippets::lowered::LinearIR::constExprIt begin, diff --git 
a/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp b/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp index 10e0635be553d4..478db3d9522163 100644 --- a/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/clean_repeated_ptr_shifts.hpp @@ -23,7 +23,7 @@ namespace pass { */ class CleanRepeatedDataPointerShifts: public RangedPass { public: - OPENVINO_RTTI("CleanRepeatedDataPointerShifts", "", RangedPass); + OPENVINO_RTTI("CleanRepeatedDataPointerShifts", "", RangedPass) CleanRepeatedDataPointerShifts() = default; bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/init_live_ranges.hpp b/src/common/snippets/include/snippets/lowered/pass/init_live_ranges.hpp new file mode 100644 index 00000000000000..7591dd28d092e0 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/init_live_ranges.hpp @@ -0,0 +1,33 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" +#include "snippets/generator.hpp" +#include "snippets/lowered/reg_manager.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface InitLiveRanges + * @brief Calculates live ranges of registers. This information will be used to assign registers and optimize ABI reg spills. 
+ * @ingroup snippets + */ +class InitLiveRanges : public Pass { +public: + OPENVINO_RTTI("InitLiveRanges", "", Pass) + explicit InitLiveRanges(RegManager& reg_manager) : m_reg_manager(reg_manager) {} + bool run(LinearIR& linear_ir) override; +private: + RegManager& m_reg_manager; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/init_registers.hpp b/src/common/snippets/include/snippets/lowered/pass/init_registers.hpp new file mode 100644 index 00000000000000..d0f881636afbbc --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/init_registers.hpp @@ -0,0 +1,36 @@ +// Copyright (C) 2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" +#include "snippets/generator.hpp" +#include "snippets/lowered/reg_manager.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface InitRegisters + * @brief This pass combines all register-related transformations that are needed to initialize register info. 
+ * @ingroup snippets + */ +class InitRegisters : public Pass { +public: + OPENVINO_RTTI("InitRegisters", "0", Pass) + InitRegisters(const std::shared_ptr& generator, + const std::shared_ptr& pass_config); + bool run(LinearIR& linear_ir) override; + +private: + lowered::RegManager m_reg_manager; + const std::shared_ptr& m_pass_config; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/insert_reg_spills.hpp b/src/common/snippets/include/snippets/lowered/pass/insert_reg_spills.hpp new file mode 100644 index 00000000000000..fa51db7df1f271 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/insert_reg_spills.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" +#include "snippets/lowered/reg_manager.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @brief Default function to enable RegSpill insertion + * @return True if RegSpill is required around a certain op, False otherwise. + */ +inline bool needs_reg_spill_default(const ExpressionPtr& expr) { + return ov::is_type(expr->get_node()); +} + +/** + * @interface InsertRegSpills + * @brief Insert RegSpill and RegRestore operations for binary call emitters to comply with ABI conventions. 
+ * @ingroup snippets + */ +class InsertRegSpills : public Pass { +public: + OPENVINO_RTTI("InsertRegSpills", "", Pass) + explicit InsertRegSpills(RegManager& reg_manager, + std::function needs_reg_spill = needs_reg_spill_default) : + m_reg_manager(reg_manager), m_needs_reg_spill(std::move(needs_reg_spill)) {} + bool run(LinearIR& linear_ir) override; + +private: + RegManager& m_reg_manager; + std::function m_needs_reg_spill; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp index 9af247cd52ecab..2f42a523ec4eac 100644 --- a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp @@ -23,6 +23,7 @@ namespace pass { */ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer { public: + OPENVINO_RTTI("MHAParallelWAOptimizer", "", RuntimeOptimizer) MHAParallelWAOptimizer() = default; MHAParallelWAOptimizer(const lowered::LinearIRCPtr& linear_ir, const RuntimeConfigurator* configurator); diff --git a/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp index 607006d1a836bf..b39707c7ca086a 100644 --- a/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/optimize_domain.hpp @@ -46,7 +46,7 @@ namespace pass { class OptimizeDomain : public snippets::lowered::pass::Pass { public: - OPENVINO_RTTI("OptimizeDomain", "", snippets::lowered::pass::Pass) + OPENVINO_RTTI("OptimizeDomain", "", Pass) explicit OptimizeDomain(size_t& tile_rank); bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/pass.hpp 
b/src/common/snippets/include/snippets/lowered/pass/pass.hpp index bf08f653e83277..b9f7aaeda9d8c9 100644 --- a/src/common/snippets/include/snippets/lowered/pass/pass.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/pass.hpp @@ -59,7 +59,7 @@ class PassBase : public std::enable_shared_from_this { */ class Pass : public PassBase { public: - OPENVINO_RTTI("snippets::lowered::pass::Pass", "", PassBase) + OPENVINO_RTTI("snippets::lowered::pass::Pass") /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR @@ -75,7 +75,7 @@ class Pass : public PassBase { */ class ConstPass : public PassBase { public: - OPENVINO_RTTI("snippets::lowered::pass::ConstPass", "", PassBase) + OPENVINO_RTTI("snippets::lowered::pass::ConstPass") /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR @@ -91,7 +91,7 @@ class ConstPass : public PassBase { */ class RangedPass : public PassBase { public: - OPENVINO_RTTI("snippets::lowered::pass::RangedPass", "", PassBase) + OPENVINO_RTTI("snippets::lowered::pass::RangedPass") /** * @brief Apply the pass to the Linear IR * @param linear_ir the target Linear IR diff --git a/src/common/snippets/include/snippets/lowered/pass/runtime_optimizer.hpp b/src/common/snippets/include/snippets/lowered/pass/runtime_optimizer.hpp index ed37a1c6c58bca..ec878e7a3bed93 100644 --- a/src/common/snippets/include/snippets/lowered/pass/runtime_optimizer.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/runtime_optimizer.hpp @@ -19,6 +19,7 @@ namespace pass { */ class RuntimeOptimizer : public ConstPass { public: + OPENVINO_RTTI("RuntimeOptimizer", "0", ConstPass) RuntimeOptimizer() = default; RuntimeOptimizer(const RuntimeConfigurator* configurator) : m_configurator(configurator) { OPENVINO_ASSERT(configurator, "RuntimeConfigurator musn't be nullptr"); diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp 
b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp index 2e8f91aed6c08d..63ff92e88b628c 100644 --- a/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_control_flow.hpp @@ -19,7 +19,7 @@ namespace pass { */ class SerializeControlFlow : public SerializeBase { public: - OPENVINO_RTTI("SerializeControlFlow", "Pass", SerializeBase) + OPENVINO_RTTI("SerializeControlFlow", "", SerializeBase) SerializeControlFlow(const std::string& xml_path, bool update_dynamic_ops = false) : SerializeBase(xml_path), m_update_dynamic_ops{update_dynamic_ops} {} bool run(const LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp b/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp index ecbc1a834ce388..b85515916ac2d7 100644 --- a/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/serialize_data_flow.hpp @@ -21,7 +21,7 @@ namespace pass { */ class SerializeDataFlow : public SerializeBase { public: - OPENVINO_RTTI("SerializeDataFlow", "Pass", SerializeBase) + OPENVINO_RTTI("SerializeDataFlow", "", SerializeBase) SerializeDataFlow(const std::string& xml_path) : SerializeBase(xml_path) {} bool run(const LinearIR& linear_ir) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp index bdcdcfe7165f85..f59e2f77adea6e 100644 --- a/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/set_buffer_reg_group.hpp @@ -31,7 +31,7 @@ namespace pass { */ class SetBufferRegGroup: public RangedPass { public: - OPENVINO_RTTI("SetBufferRegGroup", "", RangedPass); + OPENVINO_RTTI("SetBufferRegGroup", "", RangedPass) 
SetBufferRegGroup() = default; /** diff --git a/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp b/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp index 9a5ecb4c82fdcd..74e9d450af44db 100644 --- a/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/set_load_store_scalar.hpp @@ -20,7 +20,7 @@ namespace pass { */ class SetLoadStoreScalar : public RangedPass { public: - OPENVINO_RTTI("SetLoadStoreScalar", "", RangedPass); + OPENVINO_RTTI("SetLoadStoreScalar", "", RangedPass) SetLoadStoreScalar() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp index 9a1843e34b134e..faad32e8510e6c 100644 --- a/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/split_loops.hpp @@ -31,7 +31,7 @@ namespace pass { class SplitLoops : public RangedPass { public: - OPENVINO_RTTI("SplitLoops", "", RangedPass); + OPENVINO_RTTI("SplitLoops", "", RangedPass) SplitLoops() = default; bool run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; @@ -49,7 +49,7 @@ class SplitLoops : public RangedPass { class TransformInnerSplitLoop : public pass::RangedPass { public: TransformInnerSplitLoop() = default; - OPENVINO_RTTI("TransformInnerSplitLoop", "", RangedPass); + OPENVINO_RTTI("TransformInnerSplitLoop", "", RangedPass) bool run(LinearIR& linear_ir, LinearIR::constExprIt begin, LinearIR::constExprIt end) override; std::shared_ptr merge(const std::shared_ptr& other) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp 
b/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp index 8e97bf9b83d0ef..55338bd8200a5c 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_expanded_loops.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ValidateExpandedLoops : public Pass { public: - OPENVINO_RTTI("ValidateExpandedLoops", "", Pass); + OPENVINO_RTTI("ValidateExpandedLoops", "", Pass) ValidateExpandedLoops() = default; bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp index 28e37b99a8646d..e1f3a90b7db275 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_shapes.hpp @@ -18,7 +18,7 @@ namespace pass { */ class ValidateShapes : public RangedPass { public: - OPENVINO_RTTI("ValidateShapes", "", RangedPass); + OPENVINO_RTTI("ValidateShapes", "", RangedPass) ValidateShapes() = default; bool run(lowered::LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lowered::LinearIR::constExprIt end) override; }; diff --git a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp index 89380416298471..0fb02e4ae13c23 100644 --- a/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/validate_unified_loops.hpp @@ -27,7 +27,7 @@ namespace pass { */ class ValidateUnifiedLoops : public Pass { public: - OPENVINO_RTTI("ValidateUnifiedLoops", "", Pass); + OPENVINO_RTTI("ValidateUnifiedLoops", "", Pass) ValidateUnifiedLoops() = default; bool run(LinearIR& linear_ir) override; diff --git a/src/common/snippets/include/snippets/lowered/reg_manager.hpp 
b/src/common/snippets/include/snippets/lowered/reg_manager.hpp new file mode 100644 index 00000000000000..bd8bfd2daf178c --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/reg_manager.hpp @@ -0,0 +1,69 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once +#include "openvino/core/node.hpp" +#include "snippets/emitter.hpp" +#include "snippets/lowered/expression.hpp" +#include "snippets/generator.hpp" +#include "snippets/op/kernel.hpp" +#include + +/** + * @interface RegManager + * @brief The class holds supplementary info about assigned registers and live ranges + * @ingroup snippets + */ +namespace ov { +namespace snippets { +namespace lowered { + +// LiveInterval is a pair of (start, stop) expression execution numbers, where: +// start - exec number of the expression that produced the value +// stop - exec number of the last consumer of the value +using LiveInterval = std::pair; +class RegManager { +public: + RegManager() = delete; + RegManager(const std::shared_ptr& generator) : m_generator(generator) {} + inline RegType get_reg_type(const ov::Output& out) const { return m_generator->get_op_out_reg_type(out); } + inline std::vector get_vec_reg_pool() const { return m_generator->get_target_machine()->get_vec_reg_pool(); } + + inline void set_live_range(const Reg& reg, const LiveInterval& interval) { + OPENVINO_ASSERT(m_reg_live_range.insert({reg, interval}).second, "Live range for this reg is already set"); + } + + inline std::vector get_kernel_call_regs(const std::shared_ptr& kernel) const { + const auto& abi_regs = m_generator->get_target_machine()->get_abi_arg_regs(); + const auto num_kernel_args = kernel->get_num_call_args(); + OPENVINO_ASSERT(abi_regs.size() > num_kernel_args, "Too many kernel args requested"); + return {abi_regs.begin(), abi_regs.begin() + static_cast(num_kernel_args)}; + } + + inline std::vector get_gp_regs_except_kernel_call(const std::shared_ptr& kernel) const { + auto res = 
m_generator->get_target_machine()->get_gp_reg_pool(); + std::set kernel_call; + for (auto r : get_kernel_call_regs(kernel)) + kernel_call.insert(r); + res.erase(std::remove_if(res.begin(), res.end(), [&kernel_call](const Reg& r) {return kernel_call.count(r) != 0; }), res.end()); + return res; + } + + inline const LiveInterval& get_live_range(const Reg& reg) const { + OPENVINO_ASSERT(m_reg_live_range.count(reg), "Live range for this reg was not set"); + return m_reg_live_range.at(reg); + } + inline const std::map& get_live_range_map() const { + return m_reg_live_range; + } + +private: + // Maps Register to {Start, Stop} pairs + std::map m_reg_live_range; + const std::shared_ptr m_generator; +}; + +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/op/kernel.hpp b/src/common/snippets/include/snippets/op/kernel.hpp index 0f0c176ae386b2..af13ddca017fcc 100644 --- a/src/common/snippets/include/snippets/op/kernel.hpp +++ b/src/common/snippets/include/snippets/op/kernel.hpp @@ -22,7 +22,9 @@ class Kernel : public ov::op::Op { Kernel() = default; Kernel(lowered::LinearIR region); - static std::shared_ptr make_kernel(const lowered::LinearIR& region); + template + static std::shared_ptr make_kernel(bool is_dynamic, ArgTypes&&... 
args); + virtual size_t get_num_call_args() const = 0; std::shared_ptr region; const void *compile_params = nullptr; @@ -33,6 +35,7 @@ class KernelStatic : public Kernel { OPENVINO_OP("KernelStatic", "SnippetsOpset", Kernel); KernelStatic() = default; KernelStatic(lowered::LinearIR region); + size_t get_num_call_args() const override { return 2; } std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; }; @@ -41,9 +44,19 @@ class KernelDynamic : public Kernel { OPENVINO_OP("KernelDynamic", "SnippetsOpset", Kernel); KernelDynamic() = default; KernelDynamic(lowered::LinearIR region); + size_t get_num_call_args() const override { return 1; } std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; }; +template +std::shared_ptr Kernel::make_kernel(bool is_dynamic, ArgTypes&&... args) { + if (is_dynamic) { + return std::make_shared(std::forward(args)...); + } else { + return std::make_shared(std::forward(args)...); + } +} + } // namespace op } // namespace snippets } // namespace ov diff --git a/src/common/snippets/include/snippets/op/reg_spill.hpp b/src/common/snippets/include/snippets/op/reg_spill.hpp new file mode 100644 index 00000000000000..84fe0b4da609c1 --- /dev/null +++ b/src/common/snippets/include/snippets/op/reg_spill.hpp @@ -0,0 +1,81 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "snippets/emitter.hpp" + +#include "openvino/op/op.hpp" +#include "snippets/shape_inference/shape_inference.hpp" + +namespace ov { +namespace snippets { +namespace op { + +/** + * @interface RegSpillBase + * @brief Base class for RegSpillBegin and RegSpillEnd ops + * @ingroup snippets + */ +class RegSpillBase : public ov::op::Op { +public: + OPENVINO_OP("RegSpillBaseBase", "SnippetsOpset"); + RegSpillBase(const std::vector>& args); + RegSpillBase() = default; + virtual const std::set& get_regs_to_spill() const = 0; + bool visit_attributes(AttributeVisitor& 
visitor) override; +}; +class RegSpillEnd; +/** + * @interface RegSpillBegin + * @brief Marks the start of the register spill region. + * @ingroup snippets + */ +class RegSpillBegin : public RegSpillBase { +public: + OPENVINO_OP("RegSpillBegin", "SnippetsOpset", RegSpillBase); + RegSpillBegin(std::set regs_to_spill); + + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + std::shared_ptr get_reg_spill_end() const; + const std::set& get_regs_to_spill() const override { return m_regs_to_spill; } + + class ShapeInfer : public IShapeInferSnippets { + size_t num_out_shapes = 0; + public: + explicit ShapeInfer(const std::shared_ptr& n); + Result infer(const std::vector& input_shapes) override; + }; +protected: + void validate_and_infer_types_except_RegSpillEnd(); + std::set m_regs_to_spill = {}; +}; +/** + * @interface RegSpillEnd + * @brief Marks the end of the register spill region. + * @ingroup snippets + */ +class RegSpillEnd : public RegSpillBase { +public: + OPENVINO_OP("RegSpillEnd", "SnippetsOpset", RegSpillBase); + RegSpillEnd() = default; + RegSpillEnd(const Output& reg_spill_begin); + + void validate_and_infer_types() override; + + std::shared_ptr clone_with_new_inputs(const OutputVector& inputs) const override; + std::shared_ptr get_reg_spill_begin() const { + auto reg_spill_begin = ov::as_type_ptr(get_input_node_shared_ptr(0)); + OPENVINO_ASSERT(reg_spill_begin, "Can't get reg_spill_begin from reg_spill_end"); + return reg_spill_begin; + } + const std::set& get_regs_to_spill() const override { + return get_reg_spill_begin()->get_regs_to_spill(); + } +}; + +} // namespace op +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/include/snippets/snippets_isa.hpp b/src/common/snippets/include/snippets/snippets_isa.hpp index 5e948b20989aad..409da7ce46ed75 100644 --- a/src/common/snippets/include/snippets/snippets_isa.hpp +++ 
b/src/common/snippets/include/snippets/snippets_isa.hpp @@ -29,6 +29,7 @@ #include "op/rank_normalization.hpp" #include "op/perf_count.hpp" #include "op/reduce.hpp" +#include "op/reg_spill.hpp" namespace ov { namespace snippets { diff --git a/src/common/snippets/include/snippets/target_machine.hpp b/src/common/snippets/include/snippets/target_machine.hpp index d9d89264fe1926..a5e95a70a2ab3f 100644 --- a/src/common/snippets/include/snippets/target_machine.hpp +++ b/src/common/snippets/include/snippets/target_machine.hpp @@ -57,10 +57,24 @@ class TargetMachine { virtual size_t get_lanes() const = 0; /** - * @brief gets number of registers for a target machine - * @return number of registers + * @brief Get all possible ABI argument registers. + * The number of actually used registers depends on the signature of the called binary. + * @return vector of snippets::Reg */ - virtual size_t get_reg_count() const = 0; + virtual std::vector get_abi_arg_regs() const = 0; + + /** + * @brief Get all available general-purpose registers. + * Returns only registers that are not reserved for special purposes (e.g. stack pointer or instruction address). + * @return vector of snippets::Reg + */ + virtual std::vector get_gp_reg_pool() const = 0; + /** + * @brief Get all available vector registers. 
+ * Returns only registers that are not reserved for special purposes + * @return vector of snippets::Reg + */ + virtual std::vector get_vec_reg_pool() const = 0; /** * @brief called by generator to all the emitter for a target machine diff --git a/src/common/snippets/include/snippets/utils/reg_utils.hpp b/src/common/snippets/include/snippets/utils/reg_utils.hpp new file mode 100644 index 00000000000000..04d622837aa5a9 --- /dev/null +++ b/src/common/snippets/include/snippets/utils/reg_utils.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "snippets/emitter.hpp" + +namespace ov { +namespace snippets { +namespace utils { +inline static std::vector transform_snippets_regs_to_idxs(const std::vector& regs, snippets::RegType expected_type) { + std::vector idxs; + idxs.reserve(regs.size()); + for (const auto& reg : regs) { + OPENVINO_ASSERT(expected_type == snippets::RegType::undefined || reg.type == expected_type, "Reg type mismatch during to_idxs conversion"); + idxs.emplace_back(reg.idx); + } + return idxs; +} +inline static std::vector transform_snippets_regs_to_idxs(const std::vector& regs) { + std::vector idxs; + std::transform(regs.begin(), regs.end(), std::back_inserter(idxs), [](const snippets::Reg& r) { return r.idx; }); + return idxs; +} + + +} // namespace utils +} // namespace snippets +} // namespace ov \ No newline at end of file diff --git a/src/common/snippets/src/emitter.cpp b/src/common/snippets/src/emitter.cpp index 1f2200accfa9a3..1c159841784c33 100644 --- a/src/common/snippets/src/emitter.cpp +++ b/src/common/snippets/src/emitter.cpp @@ -13,16 +13,34 @@ bool operator==(const Reg& lhs, const Reg& rhs) { bool operator!=(const Reg& lhs, const Reg& rhs) { return !(lhs == rhs); } +bool operator<(const Reg& lhs, const Reg& rhs) { + return lhs.type < rhs.type || + (lhs.type == rhs.type && lhs.idx < rhs.idx); +} +bool operator>(const Reg& lhs, const Reg& rhs) { + return 
lhs.type > rhs.type || + (lhs.type == rhs.type && lhs.idx > rhs.idx); +} -std::string regTypeToStr(const RegType& type) { - switch (type) { - case RegType::vec: - return "vec"; - case RegType::gpr: - return "gpr"; - default: - OPENVINO_THROW("Unexpected RegType"); - } +std::ostream& operator<<(std::ostream& s, const Reg& r) { + auto regTypeToStr = [](const RegType& type) { + switch (type) { + case RegType::vec: + return "vec"; + case RegType::gpr: + return "gpr"; + case RegType::mask: + return "mask"; + case RegType::undefined: + return "undefined"; + default: + OPENVINO_THROW("Unexpected RegType"); + } + }; + s << regTypeToStr(r.type) << "[" << + (r.idx == Reg::UNDEFINED_IDX ? "undefined" : std::to_string(r.idx)) + << "]"; + return s; } } // namespace snippets diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp index 783d66858c6436..144fab766e739b 100644 --- a/src/common/snippets/src/generator.cpp +++ b/src/common/snippets/src/generator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2025 Intel Corporation +// Copyright (C) 2018-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -8,6 +8,8 @@ #include "snippets/runtime_configurator.hpp" #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/expression.hpp" +#include "snippets/lowered/reg_manager.hpp" +#include "snippets/utils/reg_utils.hpp" #include "snippets/op/kernel.hpp" #include "snippets/op/memory_access.hpp" @@ -26,13 +28,15 @@ LoweringResult Generator::generate(const lowered::LinearIRPtr& linear_ir, const linear_ir->init_emitters(target); OV_ITT_TASK_NEXT(GENERATE, "::EmitCode") - - const auto kernel_op = op::Kernel::make_kernel(*linear_ir); + const auto kernel_op = op::Kernel::make_kernel(linear_ir->is_dynamic(), *linear_ir); kernel_op->compile_params = compile_params; + const lowered::RegManager& reg_manager(shared_from_this()); const auto kernel_expr = linear_ir->get_expr_factory()->build(kernel_op, std::vector{}); const auto kernel = 
target->get(kernel_expr->get_node()->get_type_info())(kernel_expr); - kernel->emit_code({}, {}); + kernel->emit_code(utils::transform_snippets_regs_to_idxs(reg_manager.get_kernel_call_regs(kernel_op)), {}, + utils::transform_snippets_regs_to_idxs(reg_manager.get_vec_reg_pool()), + utils::transform_snippets_regs_to_idxs(reg_manager.get_gp_regs_except_kernel_call(kernel_op))); OV_ITT_TASK_NEXT(GENERATE, "::EmitData") for (auto& l : linear_ir->get_ops()) { @@ -69,38 +73,38 @@ RegType Generator::get_op_out_reg_type(const ov::Output& out) const { if (reg_type != RegType::undefined) return reg_type; const auto op = out.get_node_shared_ptr(); - if (ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) + if (is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) #ifdef SNIPPETS_DEBUG_CAPS - || ov::as_type_ptr(op) - || ov::as_type_ptr(op) + || is_type(op) + || is_type(op) #endif ) return RegType::gpr; - else if (ov::as_type_ptr(op) || - ov::as_type_ptr(op) || + else if (is_type(op) || + is_type(op) || ov::op::util::is_unary_elementwise_arithmetic(op) || ov::op::util::is_binary_elementwise_arithmetic(op) || ov::op::util::is_binary_elementwise_comparison(op) || ov::op::util::is_binary_elementwise_logical(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op) || - ov::as_type_ptr(op)) + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op) || + is_type(op)) return RegType::vec; else OPENVINO_THROW("Register type of the 
operation " + std::string(op->get_type_name()) + " isn't determined!"); diff --git a/src/common/snippets/src/lowered/expression.cpp b/src/common/snippets/src/lowered/expression.cpp index 1952b93017aab5..245470ae1a48af 100644 --- a/src/common/snippets/src/lowered/expression.cpp +++ b/src/common/snippets/src/lowered/expression.cpp @@ -182,8 +182,7 @@ bool Expression::visit_attributes(AttributeVisitor &visitor) { return ss.str(); }; - std::vector in_regs, out_regs; - std::vector in_reg_types, out_reg_types; + std::ostringstream in_regs, out_regs; std::vector> shapes; std::vector> subtensors; std::vector>> layouts; @@ -201,8 +200,7 @@ bool Expression::visit_attributes(AttributeVisitor &visitor) { if (!layout.empty() && !utils::is_planar_layout(layout)) layouts.emplace_back("in_layout_" + std::to_string(i), layout); - in_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); - in_regs.emplace_back(desc->get_reg().idx); + in_regs << desc->get_reg() << " "; } for (size_t i = 0; i < get_output_count(); i++) { const auto& desc = m_output_port_descriptors[i]; @@ -218,17 +216,16 @@ bool Expression::visit_attributes(AttributeVisitor &visitor) { if (!layout.empty() && !utils::is_planar_layout(layout)) layouts.emplace_back("out_layout_" + std::to_string(i), layout); - out_reg_types.emplace_back(regTypeToStr(desc->get_reg().type)); - out_regs.emplace_back(desc->get_reg().idx); + out_regs << desc->get_reg() << " "; } - if (!in_regs.empty()) { - visitor.on_attribute("in_regs", in_regs); - visitor.on_attribute("in_reg_types", in_reg_types); + if (!in_regs.str().empty()) { + std::vector tmp {in_regs.str()}; + visitor.on_attribute("in_regs", tmp); } - if (!out_regs.empty()) { - visitor.on_attribute("out_regs", out_regs); - visitor.on_attribute("out_reg_types", out_reg_types); + if (!out_regs.str().empty()) { + std::vector tmp {out_regs.str()}; + visitor.on_attribute("out_regs", tmp); } for (auto& s : shapes) visitor.on_attribute(s.first, s.second); diff --git 
a/src/common/snippets/src/lowered/expression_factory.cpp b/src/common/snippets/src/lowered/expression_factory.cpp index 668df3b65c415e..139abfd3cb1b55 100644 --- a/src/common/snippets/src/lowered/expression_factory.cpp +++ b/src/common/snippets/src/lowered/expression_factory.cpp @@ -20,6 +20,10 @@ std::shared_ptr ExpressionFactory::build(const std::shared_ptr return create(loop_begin, inputs, m_shape_infer_factory); } else if (const auto loop_end = ov::as_type_ptr(n)) { return create(loop_end, inputs, m_shape_infer_factory); + } else if (const auto spill_begin = ov::as_type_ptr(n)) { + return create(spill_begin, inputs, m_shape_infer_factory); + } else if (const auto spill_end = ov::as_type_ptr(n)) { + return create(spill_end, inputs, m_shape_infer_factory); } else if (const auto buffer = ov::as_type_ptr(n)) { return create(buffer, inputs, m_shape_infer_factory); #ifdef SNIPPETS_DEBUG_CAPS @@ -110,6 +114,36 @@ ExpressionPtr ExpressionFactory::create(const std::shared_ptr& n, c return expr; } +ExpressionPtr ExpressionFactory::create(const std::shared_ptr& n, const std::vector& inputs, + const std::shared_ptr& shape_infer_factory) { + auto expr = std::shared_ptr(new Expression(n, shape_infer_factory, false)); + OPENVINO_ASSERT(inputs.empty(), "RegSpillBegin expression expects no inputs"); + const auto num_to_spill = n->get_regs_to_spill().size(); + expr->m_output_port_descriptors.resize(num_to_spill, nullptr); + for (size_t i = 0; i < num_to_spill; i++) + expr->m_output_port_descriptors[i] = std::make_shared(); + expr->m_output_port_connectors.resize(num_to_spill, nullptr); + for (size_t i = 0; i < num_to_spill; i++) { + const auto source = expr->get_output_port(i); + expr->m_output_port_connectors[i] = std::make_shared(source); + } + expr->validate(); + return expr; +} + +ExpressionPtr ExpressionFactory::create(const std::shared_ptr& n, const std::vector& inputs, + const std::shared_ptr& shape_infer_factory) { + auto expr = std::shared_ptr(new Expression(n, 
shape_infer_factory, false)); + const auto spill_begin_node = n->get_reg_spill_begin(); + const auto num_to_spill = spill_begin_node->get_regs_to_spill().size(); + OPENVINO_ASSERT(inputs.size() == num_to_spill, "Invalid num inputs for RegSpillEnd expression"); + expr->m_input_port_descriptors.resize(num_to_spill, std::make_shared()); + init_expression_inputs(expr, inputs); + expr->m_output_port_descriptors.clear(); + expr->validate(); + return expr; +} + #ifdef SNIPPETS_DEBUG_CAPS ExpressionPtr ExpressionFactory::create(const std::shared_ptr& n, const std::vector& inputs, const std::shared_ptr& shape_infer_factory) { diff --git a/src/common/snippets/src/lowered/linear_ir.cpp b/src/common/snippets/src/lowered/linear_ir.cpp index 36ab2e235880af..cff3bcbe927d04 100644 --- a/src/common/snippets/src/lowered/linear_ir.cpp +++ b/src/common/snippets/src/lowered/linear_ir.cpp @@ -126,10 +126,10 @@ void LinearIR::debug_print(bool tds_as_pointers) const { auto print_rinfo = [](const RegInfo& rinfo) { std::cerr << " : {"; for (auto i : rinfo.first) - std::cerr << regTypeToStr(i.type) << "[" << i.idx << "] "; + std::cerr << i << " "; std::cerr << " => "; for (auto i : rinfo.second) - std::cerr << regTypeToStr(i.type) << "[" << i.idx << "] "; + std::cerr << i << " "; std::cerr << "}"; }; std::map td2int; diff --git a/src/common/snippets/src/lowered/pass/assign_registers.cpp b/src/common/snippets/src/lowered/pass/assign_registers.cpp index 2f921214bffed4..cea308ce645f8a 100644 --- a/src/common/snippets/src/lowered/pass/assign_registers.cpp +++ b/src/common/snippets/src/lowered/pass/assign_registers.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2023 Intel Corporation +// Copyright (C) 2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // @@ -8,6 +8,8 @@ #include "snippets/snippets_isa.hpp" #include "snippets/itt.hpp" #include "snippets/utils/utils.hpp" +#include "snippets/op/kernel.hpp" + // This header is needed to avoid MSVC warning "C2039: 'inserter': is not a member of 
'std'" #include @@ -17,273 +19,125 @@ namespace snippets { namespace lowered { namespace pass { -void AssignRegisters::set_reg_types(LinearIR& linear_ir) { - for (const auto& expr : linear_ir) { - const auto op = expr->get_node(); - if (ov::is_type(op) || - ov::is_type(op) -#ifdef SNIPPETS_DEBUG_CAPS - || ov::is_type(op) - || ov::is_type(op) -#endif - ) - continue; - - OPENVINO_ASSERT(expr->get_output_count() == op->get_output_size(), "Incorrect count of output port descriptors!"); - for (size_t i = 0; i < expr->get_output_count(); ++i) { - const auto reg_type = m_reg_type_mapper(op->output(i)); - expr->get_output_port_descriptor(i)->set_reg_type(reg_type); - // propogate to consumers - for (const auto& consumer : expr->get_output_port_connector(i)->get_consumers()) { - consumer.get_descriptor_ptr()->set_reg_type(reg_type); - } - } +AssignRegisters::RegMap AssignRegisters::assign_regs_manually(const LinearIR& linear_ir, std::set& gpr_pool, std::set& vec_pool) { + RegMap manually_assigned; + OPENVINO_ASSERT(gpr_pool.size() >= (linear_ir.get_parameters().size() + linear_ir.get_results().size()), + "Not enough gp registers in the pool to perform manual assignment"); + for (const auto& param : linear_ir.get_parameters()) { + manually_assigned[param->get_output_port_descriptor(0)->get_reg()] = *gpr_pool.begin(); + gpr_pool.erase(gpr_pool.begin()); } -} - -bool AssignRegisters::run(LinearIR& linear_ir) { - OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::AssignRegisters") - using Reg = size_t; - using tensor = PortConnectorPtr; - - set_reg_types(linear_ir); - const auto& exprs = linear_ir.get_ops(); - const auto& params = linear_ir.get_parameters(); - const auto& results = linear_ir.get_results(); - Reg num_expressions = exprs.size(); - Reg num_parameters = params.size(); - Reg num_results = results.size(); - - size_t io_index = 0; - // Define a set of immune tensors that will be ignored by auto reg allocation => their reg allocation is done 
manually - std::map manually_assigned_gprs, manually_assigned_vecs; - for (const auto& param : params) { - manually_assigned_gprs[param->get_output_port_connector(0)] = io_index; - // TODO [96434]: Support shape infer ops in arbitrary place in pipeline, not just after inputs - // shape infer ops sequence after input - const auto& shape_infer_consumers = utils::get_first_child_shape_infer_expr_seq(param); - for (const auto& child_shape_infer_expr : shape_infer_consumers) { - manually_assigned_gprs[child_shape_infer_expr->get_output_port_connector(0)] = io_index; - } - io_index++; - } - for (const auto& result : results) { - manually_assigned_gprs[result->get_input_port_connector(0)] = io_index; - // shape infer ops sequence before result - const auto& shape_infer_sources = utils::get_first_parent_shape_infer_expr_seq(result); - for (const auto& parent_shape_infer_expr : shape_infer_sources) { - manually_assigned_gprs[parent_shape_infer_expr->get_input_port_connector(0)] = io_index; - } - io_index++; + for (const auto& result : linear_ir.get_results()) { + manually_assigned[result->get_input_port_descriptor(0)->get_reg()] = *gpr_pool.begin(); + gpr_pool.erase(gpr_pool.begin()); } - size_t counter_vec = 0; - size_t counter_gpr = 0; - std::map regs_vec, regs_gpr; - const auto IS_MANUALLY_ALLOCATED_REG = SIZE_MAX; - auto accumulator_reg = 0lu; - for (const auto& expr : exprs) { + long int max_buffer_group = -1; + for (const auto& expr : linear_ir) { auto op = expr->get_node(); - if (const auto& buffer_expr = ov::as_type_ptr(expr)) { - const auto reg_group = buffer_expr->get_reg_group(); + if (const auto& buffer = ov::as_type_ptr(expr)) { // All buffers have one common data pointer - const auto assigned_reg = num_results + num_parameters + reg_group; - for (const auto& input : expr->get_input_port_connectors()) { - manually_assigned_gprs[input] = static_cast(assigned_reg); - // shape infer ops in the middle of subgraph. 
Buffer is inserted before reshape as new loop should start. - // child shape info ops share the same memory as Buffer. - const auto& shape_infer_consumers = utils::get_first_child_shape_infer_expr_seq(expr); - for (const auto& child_shape_infer_expr : shape_infer_consumers) { - manually_assigned_gprs[child_shape_infer_expr->get_input_port_connector(0)] = - manually_assigned_gprs[child_shape_infer_expr->get_output_port_connector(0)] = - static_cast(assigned_reg); - } - } - manually_assigned_gprs[expr->get_output_port_connector(0)] = static_cast(assigned_reg); + const auto reg_group = static_cast(buffer->get_reg_group()); + max_buffer_group = std::max(max_buffer_group, reg_group); + OPENVINO_ASSERT(gpr_pool.size() > static_cast(max_buffer_group), + "Not enough gp registers in the pool to perform manual assignment"); + const auto& assigned = *std::next(gpr_pool.begin(), reg_group); + const auto& out_reg = buffer->get_output_port_descriptor(0)->get_reg(); + manually_assigned[out_reg] = assigned; + // Buffer abstract registers validation: + bool all_equal = true; + for (const auto& pd : buffer->get_input_port_descriptors()) + all_equal &= pd->get_reg() == out_reg; + for (const auto& pd : buffer->get_output_port_descriptors()) + all_equal &= pd->get_reg() == out_reg; + OPENVINO_ASSERT(all_equal, "Buffer must have same register on all inputs and outputs"); } else if (ov::is_type(op) || ov::is_type(op)) { // Only in ReduceDecomposition Reduce ops use HorizonMax/HorizonSum and VectorBuffer. 
// We should manually set the one vector register for VectorBuffer and Max/Sum output to simulate a accumulator // TODO [96351]: We should rewrite accumulator pattern using another way const auto& input_tensor = expr->get_input_port_connector(0); - const auto& input_expr = input_tensor->get_source().get_expr(); - const auto& input_expr_input_tensors = input_expr->get_input_port_connectors(); - for (const auto& tensor : input_expr_input_tensors) { - const auto parent_expr = tensor->get_source().get_expr(); + const auto& input = input_tensor->get_source(); + OPENVINO_ASSERT(!vec_pool.empty(), "Not enough vector registers in the pool to perform manual assignment"); + const auto& assigned = *vec_pool.begin(); + for (const auto& tensor : input.get_expr()->get_input_port_connectors()) { + const auto parent = tensor->get_source(); + const auto parent_expr = parent.get_expr(); if (ov::is_type(parent_expr->get_node())) { if (ov::is_type(parent_expr->get_input_port_connector(0)->get_source().get_expr()->get_node())) { - manually_assigned_vecs[tensor] = static_cast(accumulator_reg); - manually_assigned_vecs[parent_expr->get_input_port_connector(0)] = static_cast(accumulator_reg); + manually_assigned[parent.get_descriptor_ptr()->get_reg()] = + manually_assigned[parent_expr->get_input_port_descriptor(0)->get_reg()] = assigned; } } } - manually_assigned_vecs[input_tensor] = static_cast(accumulator_reg); - accumulator_reg++; + manually_assigned[input.get_descriptor_ptr()->get_reg()] = assigned; + vec_pool.erase(vec_pool.begin()); } } - // Note: have to specify default capture "=" due to MSVC bug (it doesn't capture const expressions implicitly) - // Otherwise WIN build fails with "IS_MANUALLY_ALLOCATED_REG cannot be implicitly captured because no default capture mode has been specified" - // the same problem with all the other lambdas in this file - auto enumerate_out_tensor = [=] (const tensor& out_tensor, - decltype(regs_vec)& reg_map, - const std::map& manually_assigned_regs, 
- size_t& counter) { - // Note that some ops might have identical input&output tensors (Result and Tile* for ex.) - // so we have to check that the tensor has not been enumerated already - if (reg_map.count(out_tensor) == 0) { - reg_map[out_tensor] = manually_assigned_regs.count(out_tensor) == 0 ? counter++ : IS_MANUALLY_ALLOCATED_REG; - } - }; - for (const auto& expr : exprs) { - for (size_t i = 0; i < expr->get_output_count(); ++i) { - const auto& out = expr->get_output_port(i); - switch (out.get_descriptor_ptr()->get_reg().type) { - case RegType::vec: - enumerate_out_tensor(out.get_port_connector_ptr(), regs_vec, manually_assigned_vecs, counter_vec); - break; - case RegType::gpr: - enumerate_out_tensor(out.get_port_connector_ptr(), regs_gpr, manually_assigned_gprs, counter_gpr); - break; - default: - OPENVINO_THROW("Unsupported reg type detected"); - } - } - } - // todo: make one for gpr and one for vector - std::vector> used_gpr, used_vec; // used = used as an input - std::vector> defined_gpr, defined_vec; // defined = used as output - used_gpr.reserve(num_expressions); - used_vec.reserve(num_expressions); - defined_gpr.reserve(num_expressions); - defined_vec.reserve(num_expressions); + gpr_pool.erase(gpr_pool.begin(), std::next(gpr_pool.begin(), max_buffer_group + 1)); + return manually_assigned; +} - auto tensor2reg = [=] (const std::vector& tensors, const std::map& reg_map) { - std::set result; - for (const auto& t : tensors) { - if (reg_map.count(t) == 0) - OPENVINO_THROW("Assign registers: attempt to access not enumerated tensor"); - Reg reg_id = reg_map.at(t); - if (reg_id != IS_MANUALLY_ALLOCATED_REG) - result.insert(reg_id); - } - return result; +bool AssignRegisters::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::AssignRegisters") + + const auto& exprs = linear_ir.get_ops(); + + const auto& kernel = snippets::op::Kernel::make_kernel(linear_ir.is_dynamic()); + auto vec2set = [](std::vector&& v){ + 
std::set res; + std::copy(v.begin(), v.end(), std::inserter(res, res.begin())); + return res; }; - for (const auto& expr : exprs) { - std::vector used_gpr_tensors, used_vec_tensors, defined_gpr_tensors, defined_vec_tensors; - for (size_t i = 0; i < expr->get_input_count(); ++i) { - const auto& in = expr->get_input_port(i); - switch (in.get_descriptor_ptr()->get_reg().type) { - case RegType::vec: - used_vec_tensors.push_back(in.get_port_connector_ptr()); - break; - case RegType::gpr: - used_gpr_tensors.push_back(in.get_port_connector_ptr()); - break; - default: - OPENVINO_THROW("Unsupported reg type detected"); - } - } - for (size_t i = 0; i < expr->get_output_count(); ++i) { - const auto& out = expr->get_output_port(i); - switch (out.get_descriptor_ptr()->get_reg().type) { - case RegType::vec: - defined_vec_tensors.push_back(out.get_port_connector_ptr()); - break; - case RegType::gpr: - defined_gpr_tensors.push_back(out.get_port_connector_ptr()); - break; - default: - OPENVINO_THROW("Unsupported reg type detected"); - } - } - used_vec.emplace_back(tensor2reg(used_vec_tensors, regs_vec)); - used_gpr.emplace_back(tensor2reg(used_gpr_tensors, regs_gpr)); - defined_vec.emplace_back(tensor2reg(defined_vec_tensors, regs_vec)); - defined_gpr.emplace_back(tensor2reg(defined_gpr_tensors, regs_gpr)); - } - // define life intervals - // liveOut[i] - regs that are live on exit from i-th (topologically ordered) operation - // liveIn[i] - regs that are live on entering the i-th (topologically ordered) operation - std::vector> life_in_vec(std::move(used_vec)), - life_in_gpr(std::move(used_gpr)); - std::vector> life_out_vec(num_expressions, std::set()), - life_out_gpr(num_expressions, std::set()); + std::set global_regs = vec2set(m_reg_manager.get_kernel_call_regs(kernel)); + std::set gpr_pool = vec2set(m_reg_manager.get_gp_regs_except_kernel_call(kernel)); + std::set vec_pool = vec2set(m_reg_manager.get_vec_reg_pool()); + auto assigned_reg_map = assign_regs_manually(linear_ir, 
gpr_pool, vec_pool); + + for (const auto& item : assigned_reg_map) + global_regs.insert(item.second); - // todo: this part if O(N*N), so it's slow for large subgraphs. Can we simplify it? At least add an early stopping criteria - for (size_t i = 0; i < num_expressions; i++) { - for (size_t n = 0; n < num_expressions; n++) { - // Regs that are live on entering the operation = regs used by the op + (all other regs alive - regs defined by the op) - // copy regs from lifeOut to lifeIn while ignoring regs in def - std::set_difference(life_out_gpr[n].begin(), life_out_gpr[n].end(), - defined_gpr[n].begin(), defined_gpr[n].end(), - std::inserter(life_in_gpr[n], life_in_gpr[n].begin())); - std::set_difference(life_out_vec[n].begin(), life_out_vec[n].end(), - defined_vec[n].begin(), defined_vec[n].end(), - std::inserter(life_in_vec[n], life_in_vec[n].begin())); - } - size_t n = 0; - for (const auto& expr : exprs) { - if (is_type(expr->get_node())) - continue; - for (const auto& out : expr->get_output_port_connectors()) { - for (const auto& child_expr_input : out->get_consumers()) { - const auto& child_expr = child_expr_input.get_expr(); - auto child_it = linear_ir.begin(); - std::advance(child_it, n); - size_t k = n; - while (child_it != linear_ir.end() && *child_it != child_expr) { - child_it++; - k++; - } - if (k == num_expressions) - OPENVINO_THROW("assign registers can't find target op in the body"); - life_out_vec[n].insert(life_in_vec[k].begin(), life_in_vec[k].end()); - life_out_gpr[n].insert(life_in_gpr[k].begin(), life_in_gpr[k].end()); - } - } - n++; - } - } struct by_starting { - auto operator()(const std::pair& lhs, const std::pair& rhs) const -> bool { + auto operator()(const LiveInterval& lhs, const LiveInterval& rhs) const -> bool { return lhs.first < rhs.first|| (lhs.first == rhs.first && lhs.second < rhs.second); } }; struct by_ending { - auto operator()(const std::pair& lhs, const std::pair& rhs) const -> bool { + auto operator()(const LiveInterval& lhs, 
const LiveInterval& rhs) const -> bool { return lhs.second < rhs.second || (lhs.second == rhs.second && lhs.first < rhs.first); } }; - // A variable live interval - is a range (start, stop) of op indexes, such that - // the variable is alive within this range (defined but not used by the last user) - std::map, Reg, by_starting> live_intervals_vec, live_intervals_gpr; - std::reverse(life_in_vec.begin(), life_in_vec.end()); - std::reverse(life_in_gpr.begin(), life_in_gpr.end()); - auto find_last_use = [](decltype(life_in_gpr) life_in, int i) -> int { - int ln = static_cast(life_in.size()) - 1; - for (auto& x : life_in) { - if (x.find(i) != x.end()) { - return ln; - } - ln--; + // A variable LiveInterval - is a range (start, stop) of op indexes, such that + // the variable is alive within this range (defined but not used by the last user) + std::map live_intervals_vec, live_intervals_gpr; + for (const auto& regint : m_reg_manager.get_live_range_map()) { + const auto& reg = regint.first; + const auto& interval = regint.second; + // If a register is assigned manually, we should ignore it during automatic assignment + if (assigned_reg_map.count(reg)) + continue; + switch (reg.type) { + case (RegType::gpr): + OPENVINO_ASSERT(!live_intervals_gpr.count(interval), "GPR live interval is already in the map"); + live_intervals_gpr[interval] = reg; + break; + case (RegType::vec): + OPENVINO_ASSERT(!live_intervals_vec.count(interval), "VEC live interval is already in the map"); + live_intervals_vec[interval] = reg; + break; + case (RegType::undefined): + default: + OPENVINO_THROW("Unhandled register type"); } - return i; - }; - for (int i = 0; i < static_cast(num_expressions); i++) { - for (const auto& def : defined_vec[i]) - live_intervals_vec[std::make_pair(i, find_last_use(life_in_vec, static_cast(def)))] = def; - for (const auto& def : defined_gpr[i]) - live_intervals_gpr[std::make_pair(i, find_last_use(life_in_gpr, static_cast(def)))] = def; } auto linescan_assign_registers 
= [](const decltype(live_intervals_vec)& live_intervals, const std::set& reg_pool) { // http://web.cs.ucla.edu/~palsberg/course/cs132/linearscan.pdf - // todo: do we need multimap? <=> can an op have two inputs from the same op? - std::map, Reg, by_ending> active; + std::map active; // uniquely defined register => reused reg (reduced subset enabled by reg by reusage) std::map register_map; std::stack bank; @@ -291,7 +145,7 @@ bool AssignRegisters::run(LinearIR& linear_ir) { for (auto rit = reg_pool.crbegin(); rit != reg_pool.crend(); rit++) bank.push(*rit); - std::pair interval, active_interval; + LiveInterval interval, active_interval; Reg unique_reg, active_unique_reg; for (const auto& interval_reg : live_intervals) { std::tie(interval, unique_reg) = interval_reg; @@ -306,51 +160,29 @@ bool AssignRegisters::run(LinearIR& linear_ir) { bank.push(register_map[active_unique_reg]); } // allocate - if (active.size() == reg_pool.size()) { - // todo: if it is LoopBegin or LoopEnd that requires gpr, and we don't have any in the pool, - // then assign SIZE_MAX-1 as a flag to spill a reg inside emitter - OPENVINO_THROW("can't allocate registers for a snippet "); - } else { - register_map[unique_reg] = bank.top(); - bank.pop(); - active.insert(interval_reg); - } + OPENVINO_ASSERT(active.size() != reg_pool.size(), "Can't allocate registers for a snippet: not enough registers"); + register_map[unique_reg] = bank.top(); + bank.pop(); + active.insert(interval_reg); } return register_map; }; - // todo: vec_/gpr_pool are hardware-specific and should be provided by a backend, e.g. 
overloaded generator - std::set vec_pool; - for (Reg i = 0; i < reg_count; i++) - vec_pool.insert(i); - std::set gpr_pool(vec_pool); - for (const auto& t_reg : manually_assigned_vecs) - vec_pool.erase(t_reg.second); - for (const auto& t_reg : manually_assigned_gprs) - gpr_pool.erase(t_reg.second); - auto unique2reused_map_vec = linescan_assign_registers(live_intervals_vec, vec_pool); - auto unique2reused_map_gpr = linescan_assign_registers(live_intervals_gpr, gpr_pool); - std::map assigned_regs(std::move(manually_assigned_gprs)); - assigned_regs.insert(manually_assigned_vecs.begin(), manually_assigned_vecs.end()); - auto register_assigned_regs = [=, &assigned_regs](const std::map& unique_regs, const std::map& unique2reused) { - for (const auto& reg : unique_regs) { - if (reg.second == IS_MANUALLY_ALLOCATED_REG) - continue; - if (unique2reused.count(reg.second) == 0) - OPENVINO_THROW("Assign registers failed to allocate register for a tensor"); - assigned_regs[reg.first] = unique2reused.at(reg.second); - } - }; - register_assigned_regs(regs_vec, unique2reused_map_vec); - register_assigned_regs(regs_gpr, unique2reused_map_gpr); + const auto& map_vec = linescan_assign_registers(live_intervals_vec, vec_pool); + assigned_reg_map.insert(map_vec.begin(), map_vec.end()); + const auto& map_gpr = linescan_assign_registers(live_intervals_gpr, gpr_pool); + assigned_reg_map.insert(map_gpr.begin(), map_gpr.end()); for (const auto& expr : exprs) { - for (size_t i = 0; i < expr->get_input_count(); ++i) { - expr->get_input_port_descriptor(i)->set_reg_idx(assigned_regs[expr->get_input_port_connector(i)]); - } - for (size_t i = 0; i < expr->get_output_count(); ++i) { - expr->get_output_port_descriptor(i)->set_reg_idx(assigned_regs[expr->get_output_port_connector(i)]); - } + // Note: manually assigned regs are always live => add them to all expressions + std::set mapped_live_regs = global_regs; + for (const auto& live_reg : expr->get_live_regs()) + 
mapped_live_regs.insert(assigned_reg_map[live_reg]); + expr->set_live_regs(mapped_live_regs); + for (const auto& in : expr->get_input_port_descriptors()) + in->set_reg(assigned_reg_map[in->get_reg()]); + for (const auto& out : expr->get_output_port_descriptors()) + out->set_reg(assigned_reg_map[out->get_reg()]); } return false; } diff --git a/src/common/snippets/src/lowered/pass/init_live_ranges.cpp b/src/common/snippets/src/lowered/pass/init_live_ranges.cpp new file mode 100644 index 00000000000000..d12827bc52e43c --- /dev/null +++ b/src/common/snippets/src/lowered/pass/init_live_ranges.cpp @@ -0,0 +1,90 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/init_live_ranges.hpp" +#include "snippets/itt.hpp" +#include "snippets/op/subgraph.hpp" +#include "snippets/lowered/expressions/buffer_expression.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { +namespace { +// Expressions that don't affect lifetime of registers, e.g. 
Buffer or RankNormalization +inline bool pass_through_expr(const ExpressionPtr& expr) { + const auto& node = expr->get_node(); + return op::Subgraph::is_shape_infer_op(node) +#ifdef SNIPPETS_DEBUG_CAPS + || ov::is_type(node) + || ov::is_type(node) +#endif + || ov::is_type(expr); +} + +} // namespace + +bool InitLiveRanges::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InitLiveRanges") + std::map reg_counter; + + // Note: map expiring time to register + std::map> regs_to_expire; + + for (auto expr_it = linear_ir.begin(); expr_it != linear_ir.end(); expr_it++) { + const auto& expr = *expr_it; + const auto op = expr->get_node(); + if (pass_through_expr(expr)) { + if (expr_it != linear_ir.begin()) + expr->set_live_regs(std::prev(expr_it)->get()->get_live_regs()); + continue; + } + const double start = expr->get_exec_num(); + // Remove all regs that expired before start + regs_to_expire.erase(regs_to_expire.begin(), regs_to_expire.lower_bound(start)); // remove all elements lower than start (not equal) + std::set live_regs; + for (const auto& time_reg : regs_to_expire) + live_regs.insert(time_reg.second.begin(), time_reg.second.end()); + + expr->set_live_regs(std::move(live_regs)); + + for (size_t i = 0; i < expr->get_output_count(); ++i) { + const auto& out_pd = expr->get_output_port_descriptor(i); + if (out_pd->get_reg().is_defined()) + continue; + const auto reg_type = m_reg_manager.get_reg_type(op->output(i)); + const auto& reg = Reg(reg_type, reg_counter[reg_type]++); + double stop = start; + // propagate to consumers + std::stack to_visit; + to_visit.push(expr->get_output_port_connector(i)); + while (!to_visit.empty()) { + const auto& current = to_visit.top(); + current->get_source().get_descriptor_ptr()->set_reg(reg); + to_visit.pop(); + for (const auto& consumer : current->get_consumers()) { + consumer.get_descriptor_ptr()->set_reg(reg); + const auto& consumer_expr = consumer.get_expr(); + stop = 
std::max(stop, consumer_expr->get_exec_num()); + // Note: pass_through expression don't affect registers' life times, + // so we should examine their consumers to understand when the register will actually be used + if (pass_through_expr(consumer_expr)) { + for (const auto& connector : consumer_expr->get_output_port_connectors()) + to_visit.push(connector); + } + } + } + regs_to_expire[stop].insert(reg); + m_reg_manager.set_live_range(reg, std::make_pair(start, stop)); + } + } + + return true; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov + diff --git a/src/common/snippets/src/lowered/pass/init_registers.cpp b/src/common/snippets/src/lowered/pass/init_registers.cpp new file mode 100644 index 00000000000000..9c069b3165e8b6 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/init_registers.cpp @@ -0,0 +1,35 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/init_registers.hpp" +#include "snippets/lowered/pass/init_live_ranges.hpp" +#include "snippets/lowered/pass/assign_registers.hpp" +#include "snippets/lowered/pass/insert_reg_spills.hpp" +#include "snippets/itt.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +InitRegisters::InitRegisters(const std::shared_ptr& generator, + const std::shared_ptr& pass_config) : + Pass(), m_reg_manager(generator), m_pass_config(pass_config) { +} + +bool InitRegisters::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::InitRegisters"); + lowered::pass::PassPipeline reg_pipeline(m_pass_config); + reg_pipeline.register_pass(m_reg_manager); + reg_pipeline.register_pass(m_reg_manager); + reg_pipeline.register_pass(m_reg_manager); + reg_pipeline.run(linear_ir); + return true; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov + diff --git 
a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp index 541628f71928f6..c6c26cec16ef96 100644 --- a/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp +++ b/src/common/snippets/src/lowered/pass/insert_broadcastmove.cpp @@ -75,6 +75,9 @@ bool InsertBroadcastMove::run(LinearIR& linear_ir, lowered::LinearIR::constExprI const auto broadcast = std::make_shared(node->get_input_source_output(i), broadcasted_dim); const auto broadcast_expr = *linear_ir.insert_node(broadcast, std::vector{ input }, expr->get_loop_ids(), true, expr_it, { expr->get_input_port(i) }); + // Note: We have to set live regs manually, since this transformation is applied after all register-related passes. + // Since BroadcastMove sets in_regs the same as out_regs, live regs are the same as for the child. + broadcast_expr->set_live_regs(expr->get_live_regs()); // Note that BroadcastMove modified the next expr input shape, so we need to set update // expr's input port descriptor to reflect the changes expr->get_input_port_descriptor(i)->set_shape(broadcast_expr->get_output_port_descriptor(0)->get_shape()); diff --git a/src/common/snippets/src/lowered/pass/insert_reg_spills.cpp b/src/common/snippets/src/lowered/pass/insert_reg_spills.cpp new file mode 100644 index 00000000000000..f1c55de3b2f153 --- /dev/null +++ b/src/common/snippets/src/lowered/pass/insert_reg_spills.cpp @@ -0,0 +1,88 @@ +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/insert_reg_spills.hpp" + +#include "snippets/lowered/linear_ir.hpp" +#include "snippets/op/reg_spill.hpp" +#include "snippets/op/brgemm.hpp" +#include "snippets/itt.hpp" +#include "snippets/utils/utils.hpp" + + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +bool InsertRegSpills::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, 
"Snippets::InsertRegSpills") + + bool modified = false; + for (auto it = linear_ir.begin(); it != linear_ir.end(); it++) { + const auto& expr = *it; + if (!m_needs_reg_spill(expr)) + continue; + auto start_it = std::prev(it); + auto stop_it = std::next(it); + while (ov::is_type(start_it->get()->get_node()) && + ov::is_type(stop_it->get()->get_node())) { + start_it--; + stop_it++; + } + // Note: we need to insert immediately before LoopBegin => increment start_it + start_it++; + const auto& loop_begin_live = start_it->get()->get_live_regs(); + std::set used; + const auto& reg_info = expr->get_reg_info(); + used.insert(reg_info.first.begin(), reg_info.first.end()); + used.insert(reg_info.second.begin(), reg_info.second.end()); + // Note: before the loop, we need to spill all live regs except for the ones used by the target expression + std::set regs_to_spill; + std::set_difference(loop_begin_live.begin(), loop_begin_live.end(), + used.begin(), used.end(), + std::inserter(regs_to_spill, regs_to_spill.begin())); + // we also need to keep kernel regs alive (actually only abi_param_1 is used in emitters, but save all for consistency) + for (const auto& r : m_reg_manager.get_kernel_call_regs( snippets::op::Kernel::make_kernel(linear_ir.is_dynamic()))) + regs_to_spill.erase(r); + if (regs_to_spill.empty()) + continue; + // All spilled regs are not live anymore => update live_regs for affected expressions + for (auto affected_it = start_it; affected_it != stop_it; affected_it++) { + const auto& affected_expr = *affected_it; + const auto& live_old = affected_expr->get_live_regs(); + std::set live_new; + std::set_difference(live_old.begin(), live_old.end(), + regs_to_spill.begin(), regs_to_spill.end(), + std::inserter(live_new, live_new.begin())); + affected_expr->set_live_regs(live_new); + } + + const auto begin = std::make_shared(regs_to_spill); + const auto end = std::make_shared(begin); + const auto loop_ids = start_it->get()->get_loop_ids(); + OPENVINO_ASSERT(loop_ids == 
std::prev(stop_it)->get()->get_loop_ids(), "Inconsistent loop ids for RegSpill expressions"); + const auto spill_begin_it = linear_ir.insert_node(begin, std::vector{}, loop_ids, + false, start_it, std::vector>{}); + std::vector vregs{regs_to_spill.begin(), regs_to_spill.end()}; + spill_begin_it->get()->set_reg_info({{}, vregs}); + // Note: spill_begin and spill_end do not use any registers, so: + // - the regs that are live on entry of spill_begin are the same as for its predecessor (since no regs consumed) + // - similarly, live regs for spill_end are the same as for its successor (since no regs produced) + spill_begin_it->get()->set_live_regs(std::prev(spill_begin_it)->get()->get_live_regs()); + + const auto spill_end_it = linear_ir.insert_node(end, spill_begin_it->get()->get_output_port_connectors(), loop_ids, + false, stop_it, std::vector>{}); + spill_end_it->get()->set_reg_info({vregs, {}}); + spill_end_it->get()->set_live_regs(std::next(spill_end_it)->get()->get_live_regs()); + modified = true; + } + return modified; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov + diff --git a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp index 27232f1605ea0e..741ab936055c20 100644 --- a/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp +++ b/src/common/snippets/src/lowered/pass/load_movebroadcast_to_broadcastload.cpp @@ -47,10 +47,10 @@ bool LoadMoveBroadcastToBroadcastLoad::run(LinearIR& linear_ir, lowered::LinearI const auto& load_parent_node = load_expr->get_input_port_connector(0)->get_source().get_expr()->get_node(); const auto& outshape = move_broadcast->get_output_partial_shape(0); const auto broadcastload = std::make_shared(load_parent_node, *outshape.rbegin(), load->get_offset()); - // insert at position of load_expr. As BroadcastMove and Load will be removed, preserve expr_it. 
- expr_it = std::next(expr_it); - linear_ir.replace_with_node({ load_expr, expr }, broadcastload, load_expr->get_loop_ids(), linear_ir.find(load_expr)); - expr_it = std::prev(expr_it); + auto live_regs = load_expr->get_live_regs(); + expr_it = linear_ir.replace_with_node({ load_expr, expr }, broadcastload, load_expr->get_loop_ids(), linear_ir.find(load_expr)); + // Note: We have to set live regs manually, since this transformation is applied after all register-related passes. + expr_it->get()->set_live_regs(std::move(live_regs)); modified |= true; } } diff --git a/src/common/snippets/src/lowered/pass/validate.cpp b/src/common/snippets/src/lowered/pass/validate.cpp index e76f994d1284e1..5e6f31ae3f80ea 100644 --- a/src/common/snippets/src/lowered/pass/validate.cpp +++ b/src/common/snippets/src/lowered/pass/validate.cpp @@ -97,8 +97,13 @@ void validate_buffer(const ExpressionPtr& expr, const LinearIR& linear_ir) { void validate_loop_end(const ExpressionPtr& expr, const LinearIR& linear_ir) { const auto loop_end = ov::as_type_ptr(expr->get_node()); OPENVINO_ASSERT(loop_end, "LoopEnd validation expects LoopEnd op"); - OPENVINO_ASSERT(loop_end->get_loop_begin() != nullptr, + const auto& loop_begin = loop_end->get_loop_begin(); + OPENVINO_ASSERT(loop_begin != nullptr, "LoopEnd must be connected to the LoopBegin"); + const auto num_inputs = expr->get_input_count(); + OPENVINO_ASSERT(num_inputs >= 1, "LoopEnd expression must have at least 1 input"); + OPENVINO_ASSERT(expr->get_input_port_connector(num_inputs - 1)->get_source().get_expr()->get_node() == loop_begin, + "LoopEnd expression must have LoopBegin attached to the last connector"); const auto& loop_manager = linear_ir.get_loop_manager(); const auto& loop_info = loop_manager->get_loop_info(loop_end->get_id()); @@ -148,6 +153,9 @@ bool Validate::run(LinearIR& linear_ir, lowered::LinearIR::constExprIt begin, lo if (found != m_validation_map.cend()) { (found->second)(expr, linear_ir); } + 
OPENVINO_ASSERT(expr->get_output_count() == node->get_output_size() || + ov::is_type(node) || + ov::is_type(node), "Incorrect count of output port descriptors!"); expr->validate(); // Loop expr doesn't have shapes and layouts if (!ov::is_type(node)) diff --git a/src/common/snippets/src/lowered/port_descriptor.cpp b/src/common/snippets/src/lowered/port_descriptor.cpp index f5c99c8c983a6b..7e3234d788d0e6 100644 --- a/src/common/snippets/src/lowered/port_descriptor.cpp +++ b/src/common/snippets/src/lowered/port_descriptor.cpp @@ -74,7 +74,7 @@ std::string PortDescriptor::serialize() const { ss << m_layout.size() << " "; for (auto val : m_layout) ss << val << " "; - ss << regTypeToStr(m_reg.type) << "["<< m_reg.idx << "]"; + ss << m_reg; return ss.str(); } bool operator==(const PortDescriptor& lhs, const PortDescriptor& rhs) { diff --git a/src/common/snippets/src/op/kernel.cpp b/src/common/snippets/src/op/kernel.cpp index 53767fe11776a0..51d4848a31cad9 100644 --- a/src/common/snippets/src/op/kernel.cpp +++ b/src/common/snippets/src/op/kernel.cpp @@ -12,14 +12,6 @@ namespace op { Kernel::Kernel(lowered::LinearIR nested) : Op(), region(std::make_shared(std::move(nested))) {} -std::shared_ptr Kernel::make_kernel(const lowered::LinearIR& region) { - if (region.is_dynamic()) { - return std::make_shared(region); - } else { - return std::make_shared(region); - } -} - KernelStatic::KernelStatic(lowered::LinearIR nested) : Kernel(std::move(nested)) {} KernelDynamic::KernelDynamic(lowered::LinearIR nested) : Kernel(std::move(nested)) {} diff --git a/src/common/snippets/src/op/reg_spill.cpp b/src/common/snippets/src/op/reg_spill.cpp new file mode 100644 index 00000000000000..0eef459a47ac62 --- /dev/null +++ b/src/common/snippets/src/op/reg_spill.cpp @@ -0,0 +1,87 @@ +// Copyright (C) 2018-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/op/reg_spill.hpp" + +#include "snippets/utils/utils.hpp" + +namespace ov { +namespace snippets { 
+namespace op { + +RegSpillBase::RegSpillBase(const std::vector> &args) : Op(args) {} + +bool RegSpillBase::visit_attributes(AttributeVisitor &visitor) { + std::stringstream ss; + const auto& regs_to_spill = get_regs_to_spill(); + for (auto reg_it = regs_to_spill.begin(); reg_it != regs_to_spill.end(); reg_it++) { + ss << *reg_it; + if (std::next(reg_it) != regs_to_spill.end()) + ss << ", "; + } + std::string spilled = ss.str(); + visitor.on_attribute("regs_to_spill", spilled); + return true; +} + +RegSpillBegin::RegSpillBegin(std::set regs_to_spill) : m_regs_to_spill(std::move(regs_to_spill)) { + validate_and_infer_types_except_RegSpillEnd(); +} + +void RegSpillBegin::validate_and_infer_types_except_RegSpillEnd() { + NODE_VALIDATION_CHECK(this, get_input_size() == 0, "RegSpillBegin doesn't expect any inputs"); + set_output_type(0, element::f32, ov::PartialShape{ov::Shape{}}); +} + +void RegSpillBegin::validate_and_infer_types() { + validate_and_infer_types_except_RegSpillEnd(); + OPENVINO_ASSERT(get_output_size() == 1, "RegSpillBegin must have only one output"); + const auto& last_output_inputs = get_output_target_inputs(0); + OPENVINO_ASSERT(last_output_inputs.size() == 1, "RegSpillBegin must have exactly one input attached to the last output"); + OPENVINO_ASSERT(ov::is_type(last_output_inputs.begin()->get_node()), + "RegSpillBegin must have RegSpillEnd connected to its last output"); +} + +std::shared_ptr RegSpillBegin::clone_with_new_inputs(const OutputVector& inputs) const { + OPENVINO_ASSERT(inputs.empty(), "RegSpillBegin should not contain inputs"); + return std::make_shared(m_regs_to_spill); +} + +std::shared_ptr RegSpillBegin::get_reg_spill_end() const { + const auto& last_output_inputs = get_output_target_inputs(0); + OPENVINO_ASSERT(last_output_inputs.size() == 1, "RegSpillBegin has more than one inputs attached to the last output"); + const auto& loop_end = ov::as_type_ptr(last_output_inputs.begin()->get_node()->shared_from_this()); + 
OPENVINO_ASSERT(loop_end != nullptr, "RegSpillBegin must have RegSpillEnd connected to its last output"); + return loop_end; +} + +RegSpillBegin::ShapeInfer::ShapeInfer(const std::shared_ptr& n) { + auto reg_spill_begin = ov::as_type_ptr(n); + OPENVINO_ASSERT(reg_spill_begin, "Invalid node passed to RegSpillBegin::ShapeInfer"); + num_out_shapes = reg_spill_begin->get_regs_to_spill().size(); +} + +RegSpillBegin::ShapeInfer::Result RegSpillBegin::ShapeInfer::infer(const std::vector& input_shapes) { + return {std::vector(num_out_shapes, VectorDims{1}), ShapeInferStatus::success}; +} + +RegSpillEnd::RegSpillEnd(const Output& reg_spill_begin) : RegSpillBase({reg_spill_begin}) { + constructor_validate_and_infer_types(); +} + +void RegSpillEnd::validate_and_infer_types() { + NODE_VALIDATION_CHECK(this, get_input_size() == 1 && ov::is_type(get_input_node_shared_ptr(0)), + "RegSpillEnd must have one input of RegSPillBegin type"); + set_output_type(0, element::f32, ov::PartialShape{}); +} + +std::shared_ptr RegSpillEnd::clone_with_new_inputs(const OutputVector& inputs) const { + check_new_args_count(this, inputs); + return std::make_shared(inputs.at(0)); +} + + +} // namespace op +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index baaf34e6c6403f..ecfa72bcb20919 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -26,7 +26,6 @@ #include "snippets/lowered/port_descriptor.hpp" #include "snippets/lowered/linear_ir.hpp" #include "snippets/lowered/linear_ir_builder.hpp" -#include "snippets/lowered/pass/assign_registers.hpp" #include "snippets/lowered/pass/mark_loops.hpp" #include "snippets/lowered/pass/split_loops.hpp" #include "snippets/lowered/pass/fuse_loops.hpp" @@ -48,7 +47,6 @@ #include "snippets/lowered/pass/validate.hpp" #include "snippets/lowered/pass/pass_config.hpp" #include "snippets/lowered/pass/reduce_decomposition.hpp" 
-#include "snippets/lowered/pass/assign_registers.hpp" #include "snippets/lowered/pass/cleanup_loop_offsets.hpp" #include "snippets/lowered/pass/insert_specific_iterations.hpp" #include "snippets/lowered/pass/optimize_loop_single_evaluation.hpp" @@ -57,12 +55,13 @@ #include "snippets/lowered/pass/set_load_store_scalar.hpp" #include "snippets/lowered/pass/extract_loop_invariants.hpp" +#include "snippets/lowered/pass/init_registers.hpp" + #include "transformations/utils/utils.hpp" #include "snippets/pass/manager.hpp" #include "openvino/pass/constant_folding.hpp" #include "ov_ops/type_relaxed.hpp" -#include "openvino/pass/serialize.hpp" #include #include @@ -497,10 +496,6 @@ void Subgraph::control_flow_transformations(size_t min_parallel_work_amount, siz OV_ITT_TASK_NEXT(CONTROL_FLOW, "::pre_generation_pipeline") - std::function& out)> reg_type_mapper = [&](const ov::Output& out) -> RegType { - return get_generator()->get_op_out_reg_type(out); - }; - lowered::pass::PassPipeline gen_pipeline(lowered_pass_config); // Note: the order of all passes in this pipeline must not be changed since they have hard dependencies // 1. InsertSpecificIterations must be called after AssignRegisters since tail loop expressions must have the same @@ -509,7 +504,8 @@ void Subgraph::control_flow_transformations(size_t min_parallel_work_amount, siz // (this might happen if tail loop and main loop have different increments) // 3. 
OptimizeLoopSingleEvaluation must be called after CleanupLoopOffsets // since CleanupLoopOffsets can't handle loops with evaluate_once = true - gen_pipeline.register_pass(reg_type_mapper, get_generator()->get_target_machine()->get_reg_count()); + + gen_pipeline.register_pass(get_generator(), lowered_pass_config); gen_pipeline.register_pass(); gen_pipeline.register_pass(); gen_pipeline.register_pass(); diff --git a/src/common/snippets/src/shape_inference/shape_inference.cpp b/src/common/snippets/src/shape_inference/shape_inference.cpp index 0e3060501a87d5..694afce198c763 100644 --- a/src/common/snippets/src/shape_inference/shape_inference.cpp +++ b/src/common/snippets/src/shape_inference/shape_inference.cpp @@ -46,10 +46,12 @@ const IShapeInferSnippetsFactory::TRegistry IShapeInferSnippetsFactory::registry SHAPE_INFER_PREDEFINED(op::HorizonMax, HorizonOpShapeInfer), SHAPE_INFER_PREDEFINED(op::HorizonSum, HorizonOpShapeInfer), // - SHAPE_INFER_PREDEFINED(op::LoopBegin, SingleElementShapeInfer), SHAPE_INFER_PREDEFINED(op::Scalar, SingleElementShapeInfer), SHAPE_INFER_PREDEFINED(op::VectorBuffer, SingleElementShapeInfer), + SHAPE_INFER_PREDEFINED(op::LoopBegin, SingleElementShapeInfer), SHAPE_INFER_PREDEFINED(op::LoopEnd, EmptyShapeInfer), + SHAPE_INFER_OP_SPECIFIC(op::RegSpillBegin), + SHAPE_INFER_PREDEFINED(op::RegSpillEnd, EmptyShapeInfer), #ifdef SNIPPETS_DEBUG_CAPS SHAPE_INFER_PREDEFINED(op::PerfCountBegin, EmptyShapeInfer), SHAPE_INFER_PREDEFINED(op::PerfCountEnd, EmptyShapeInfer), diff --git a/src/common/snippets/tests/include/lowering_utils.hpp b/src/common/snippets/tests/include/lowering_utils.hpp index b9ecbb5570481c..ba87b3d1a067fa 100644 --- a/src/common/snippets/tests/include/lowering_utils.hpp +++ b/src/common/snippets/tests/include/lowering_utils.hpp @@ -49,7 +49,9 @@ class DummyTargetMachine : public ov::snippets::TargetMachine { ov::snippets::CompiledSnippetPtr get_snippet() override { return std::make_shared(); } size_t get_lanes() const override { 
return 10; } std::shared_ptr clone() const override { return std::make_shared(); } - size_t get_reg_count() const override { return 16; } + std::vector get_abi_arg_regs() const override; + std::vector get_gp_reg_pool() const override; + std::vector get_vec_reg_pool() const override; }; class DummyGenerator : public ov::snippets::Generator { @@ -58,7 +60,6 @@ class DummyGenerator : public ov::snippets::Generator { DummyGenerator(const std::shared_ptr& t) : ov::snippets::Generator(t) {} std::shared_ptr clone() const override { return std::make_shared(target); } -protected: ov::snippets::RegType get_op_out_reg_type(const ov::Output& out) const override { return ov::snippets::RegType::vec; }; }; diff --git a/src/common/snippets/tests/src/lir_comparator.cpp b/src/common/snippets/tests/src/lir_comparator.cpp index c4bd81c279b2dd..f9455fb6dae397 100644 --- a/src/common/snippets/tests/src/lir_comparator.cpp +++ b/src/common/snippets/tests/src/lir_comparator.cpp @@ -17,7 +17,9 @@ inline string to_string(const vector& vec) { } inline string to_string(const ov::snippets::Reg& reg) { - return string("Reg(type = " + ov::snippets::regTypeToStr(reg.type) + ", idx = " + to_string(reg.idx) + ")"); + stringstream ss; + ss << reg; + return ss.str(); } inline string to_string(const ov::Node::type_info_t& info) { diff --git a/src/common/snippets/tests/src/lowering_utils.cpp b/src/common/snippets/tests/src/lowering_utils.cpp index e9ed04bf8da5a4..75832185a1dbc8 100644 --- a/src/common/snippets/tests/src/lowering_utils.cpp +++ b/src/common/snippets/tests/src/lowering_utils.cpp @@ -63,6 +63,33 @@ DummyTargetMachine::DummyTargetMachine(const std::vector& } } +std::vector DummyTargetMachine::get_abi_arg_regs() const { + const auto num_abi_regs = 4; + std::vector reg_pool; + reg_pool.reserve(num_abi_regs); + for (size_t i = 0; i < num_abi_regs; i++) + reg_pool.emplace_back(ov::snippets::RegType::gpr, i); + return reg_pool; +} + +std::vector DummyTargetMachine::get_gp_reg_pool() const { + 
const auto num_gp_regs = 16; + std::vector reg_pool; + reg_pool.reserve(num_gp_regs); + for (size_t i = 0; i < num_gp_regs; i++) + reg_pool.emplace_back(ov::snippets::RegType::gpr, i); + return reg_pool; +} + +std::vector DummyTargetMachine::get_vec_reg_pool() const { + const auto num_vec_regs = 16; + std::vector reg_pool; + reg_pool.reserve(num_vec_regs); + for (size_t i = 0; i < num_vec_regs; i++) + reg_pool.emplace_back(ov::snippets::RegType::vec, i); + return reg_pool; +} + LoweringTests::LoweringTests() : TransformationTestsF() { // external subgraph input shape and internal parameters shapes // might differ due to the blocked layout diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp index 416b03ff4f0f6a..a2041718a14875 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp @@ -1810,7 +1810,7 @@ void jit_mish_emitter::emit_isa(const std::vector& in_vec_idxs, const st const TReg vmm_src(in_vec_idxs[0]); const TReg vmm_dst(out_vec_idxs[0]); const TReg vmm_aux0(aux_vec_idxs[0]); - const TReg vmm_aux2(std::max(exp_emitter->get_aux_vecs_count(), 1)); + const TReg vmm_aux2(aux_vec_idxs[std::max(exp_emitter->get_aux_vecs_count(), 1)]); h->ld1r(vmm_aux0.s, table_val2("fwd_mish_max_x_for_equation_f")); h->fminnm(vmm_aux2.s, vmm_src.s, vmm_aux0.s); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.cpp index 4c0b0f95f783c2..5033f645413557 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.cpp @@ -72,6 +72,10 @@ void jit_emitter::emit_data() const { } } +emitter_in_out_map jit_emitter::get_in_out_type() const { + return in_out_type_; +} + std::set> 
jit_emitter::get_supported_precisions(const std::shared_ptr& node) { return {}; } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp index 9ce8203afe7783..c0bfb4114f9c17 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp @@ -60,6 +60,7 @@ class jit_emitter : public ov::snippets::Emitter { virtual size_t get_inputs_count() const = 0; virtual size_t get_aux_vecs_count() const; virtual size_t get_aux_gprs_count() const; + emitter_in_out_map get_in_out_type() const; /** * @brief Returns supported precisions. diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp index f0c9fa9ef62c2b..0001eac9fd272d 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp @@ -12,110 +12,180 @@ namespace intel_cpu { using namespace Xbyak; using namespace dnnl::impl::cpu::x64; -EmitABIRegSpills::EmitABIRegSpills(jit_generator* h) : h(h), isa(get_isa()) {} +namespace { +inline snippets::Reg Xbyak2SnippetsReg(const Xbyak::Reg& xb_reg) { + auto get_reg_type = [](const Xbyak::Reg& xb_reg) { + switch (xb_reg.getKind()) { + case Xbyak::Reg::REG: + return snippets::RegType::gpr; + case Xbyak::Reg::XMM: + case Xbyak::Reg::YMM: + case Xbyak::Reg::ZMM: + return snippets::RegType::vec; + case Xbyak::Reg::OPMASK: + return snippets::RegType::mask; + default: + OPENVINO_THROW("Unhandled Xbyak reg type in conversion to snippets reg type"); + } + }; + return {get_reg_type(xb_reg), static_cast(xb_reg.getIdx())}; +} + +template < + cpu_isa_t isa, + typename std::enable_if::type = true> +struct regs_to_spill { + static std::vector get(const std::set& live_regs) { + std::vector regs_to_spill; + auto push_if_live = [&live_regs, ®s_to_spill](Xbyak::Reg&& reg) { + if 
(live_regs.empty() || live_regs.count(Xbyak2SnippetsReg(reg))) + regs_to_spill.emplace_back(reg); + }; + for (int i = 0; i < 16; i++) { + // do not spill rsp; + if (i != Xbyak::Reg::RSP) + push_if_live(Reg64(i)); + } + + for (int i = 0; i < cpu_isa_traits::n_vregs; ++i) + push_if_live(typename cpu_isa_traits::Vmm(i)); + + const int num_k_mask = isa == cpu_isa_t::avx512_core ? 8 : 0; + for (int i = 0; i < num_k_mask; ++i) + push_if_live(Xbyak::Opmask(i)); + return regs_to_spill; + } +}; + +std::vector get_regs_to_spill(cpu_isa_t isa, const std::set& live_regs) { + switch (isa) { + case cpu_isa_t::sse41: + return regs_to_spill::get(live_regs); + case cpu_isa_t::avx2: + return regs_to_spill::get(live_regs); + case cpu_isa_t::avx512_core: + return regs_to_spill::get(live_regs); + default: + OPENVINO_THROW("Unhandled isa in get_regs_to_spill"); + } +} +} // namespace + +std::set get_callee_saved_reg_idxs() { + return {std::begin(abi_save_gpr_regs), std::end(abi_save_gpr_regs)}; +} + +size_t get_callee_saved_aux_gpr(std::vector& available_gprs, + const std::vector& used_gprs, + bool& spill_required) { + const auto& callee_saved = get_callee_saved_reg_idxs(); + spill_required = false; + size_t aux_idx = SIZE_MAX; + auto available_it = std::find_if(available_gprs.begin(), available_gprs.end(), [&callee_saved](size_t r) { + return callee_saved.count(r) != 0; + }); + if (available_it != available_gprs.end()) { + aux_idx = *available_it; + available_gprs.erase(available_it); + } else { + spill_required = true; + std::set blacklist(used_gprs.begin(), used_gprs.end()); + auto callee_it = std::find_if(callee_saved.begin(), callee_saved.end(), [&blacklist](size_t r) { + return blacklist.count(r) == 0; + }); + OPENVINO_ASSERT(callee_it != callee_saved.end(), + "All callee-saved gpr are already in use. 
Spill used_gprs manually"); + aux_idx = *callee_it; + } + return aux_idx; +} + +EmitABIRegSpills::EmitABIRegSpills(jit_generator* h_arg) : h(h_arg), isa(get_isa()) {} EmitABIRegSpills::~EmitABIRegSpills() { OPENVINO_ASSERT(spill_status, "postamble or preamble is missed"); OPENVINO_ASSERT(rsp_status, "rsp_align or rsp_restore is missed"); } -void EmitABIRegSpills::preamble() { - // gprs - Xbyak::Operand gprs_to_save[] = {h->r8, - h->r9, - h->r10, - h->r11, - h->r12, - h->r13, - h->r14, - h->r15, - h->rax, - h->rbx, - h->rcx, - h->rdx, - h->rdi, - h->rsi, - h->rbp}; - size_t n_gprs_to_save = sizeof(gprs_to_save) / sizeof(gprs_to_save[0]); - - h->sub(h->rsp, n_gprs_to_save * gpr_size); - for (size_t i = 0; i < n_gprs_to_save; ++i) - h->mov(h->ptr[h->rsp + i * gpr_size], gprs_to_save[i]); - - if (isa == avx512_core) { - h->sub(h->rsp, k_mask_num * k_mask_size); - for (size_t i = 0; i < k_mask_num; ++i) { - h->kmovq(h->ptr[h->rsp + i * k_mask_size], Xbyak::Opmask(static_cast(i))); - } +void EmitABIRegSpills::preamble(const std::set& live_regs) { + OPENVINO_ASSERT(spill_status, "Attempt to spill ABI registers twice in a row"); + // all regs to spill according to ABI + m_regs_to_spill = get_regs_to_spill(isa, live_regs); + for (const auto& reg : m_regs_to_spill) { + const auto reg_bit_size = reg.getBit(); + OPENVINO_ASSERT(reg_bit_size % 8 == 0, "Unexpected reg bit size"); + m_bytes_to_spill += reg_bit_size / 8; } - - h->sub(h->rsp, get_max_vecs_count() * get_vec_length()); - for (size_t i = 0; i < get_max_vecs_count(); ++i) { - const auto addr = h->ptr[h->rsp + i * get_vec_length()]; - if (isa == sse41) { - h->uni_vmovups(addr, Xmm(i)); - } else if (isa == avx2) { - h->uni_vmovups(addr, Ymm(i)); - } else { - h->uni_vmovups(addr, Zmm(i)); + h->sub(h->rsp, m_bytes_to_spill); + uint32_t byte_stack_offset = 0; + for (const auto& reg : m_regs_to_spill) { + Xbyak::Address addr = h->ptr[h->rsp + byte_stack_offset]; + byte_stack_offset += reg.getBit() / 8; + switch 
(reg.getKind()) { + case Xbyak::Reg::REG: + h->mov(addr, reg); + break; + case Xbyak::Reg::XMM: + h->uni_vmovups(addr, Xmm(reg.getIdx())); + break; + case Xbyak::Reg::YMM: + h->uni_vmovups(addr, Ymm(reg.getIdx())); + break; + case Xbyak::Reg::ZMM: + h->uni_vmovups(addr, Zmm(reg.getIdx())); + break; + case Xbyak::Reg::OPMASK: + h->kmovq(addr, Opmask(reg.getIdx())); + break; + default: + OPENVINO_THROW("Unhandled Xbyak reg type in conversion"); } } - // Update the status spill_status = false; } void EmitABIRegSpills::postamble() { - // restore vector registers - for (int i = static_cast(get_max_vecs_count()) - 1; i >= 0; --i) { - const auto addr = h->ptr[h->rsp + i * get_vec_length()]; - if (isa == sse41) { - h->uni_vmovups(Xmm(i), addr); - } else if (isa == avx2) { - h->uni_vmovups(Ymm(i), addr); - } else { - h->uni_vmovups(Zmm(i), addr); + OPENVINO_ASSERT(!spill_status, "Attempt to restore ABI registers that were not spilled"); + uint32_t byte_stack_offset = m_bytes_to_spill; + for (size_t i = m_regs_to_spill.size(); i > 0; i--) { + const auto& reg = m_regs_to_spill[i - 1]; + byte_stack_offset -= reg.getBit() / 8; + Xbyak::Address addr = h->ptr[h->rsp + byte_stack_offset]; + switch (reg.getKind()) { + case Xbyak::Reg::REG: + h->mov(reg, addr); + break; + case Xbyak::Reg::XMM: + h->uni_vmovups(Xmm(reg.getIdx()), addr); + break; + case Xbyak::Reg::YMM: + h->uni_vmovups(Ymm(reg.getIdx()), addr); + break; + case Xbyak::Reg::ZMM: + h->uni_vmovups(Zmm(reg.getIdx()), addr); + break; + case Xbyak::Reg::OPMASK: + h->kmovq(Xbyak::Opmask(reg.getIdx()), addr); + break; + default: + OPENVINO_THROW("Unhandled Xbyak reg type in conversion"); } } - h->add(h->rsp, (get_max_vecs_count()) * get_vec_length()); - - // restore k reg - if (isa == avx512_core) { - for (int i = k_mask_num - 1; i >= 0; --i) { - h->kmovq(Xbyak::Opmask(i), h->ptr[h->rsp + i * k_mask_size]); - } - h->add(h->rsp, k_mask_num * k_mask_size); - } - - // restore gpr registers - Xbyak::Operand gprs_to_save[] = 
{h->r8, - h->r9, - h->r10, - h->r11, - h->r12, - h->r13, - h->r14, - h->r15, - h->rax, - h->rbx, - h->rcx, - h->rdx, - h->rdi, - h->rsi, - h->rbp}; - size_t n_gprs_to_save = sizeof(gprs_to_save) / sizeof(gprs_to_save[0]); - for (int i = n_gprs_to_save - 1; i >= 0; --i) - h->mov(gprs_to_save[i], h->ptr[h->rsp + i * gpr_size]); - h->add(h->rsp, n_gprs_to_save * gpr_size); - + h->add(h->rsp, m_bytes_to_spill); + m_regs_to_spill.clear(); // Update the status spill_status = true; } -void EmitABIRegSpills::rsp_align() { - h->mov(h->rbx, h->rsp); - h->and_(h->rbx, 0xf); - h->sub(h->rsp, h->rbx); +void EmitABIRegSpills::rsp_align(size_t callee_saved_gpr_idx) { + OPENVINO_ASSERT(get_callee_saved_reg_idxs().count(callee_saved_gpr_idx), + "rsp_align requires a callee-saved register"); + m_rsp_align_reg = Xbyak::Reg64(static_cast(callee_saved_gpr_idx)); + h->mov(m_rsp_align_reg, h->rsp); + h->and_(m_rsp_align_reg, 0xf); + h->sub(h->rsp, m_rsp_align_reg); #ifdef _WIN32 // Allocate shadow space (home space) according to ABI h->sub(h->rsp, 32); @@ -126,11 +196,12 @@ void EmitABIRegSpills::rsp_align() { } void EmitABIRegSpills::rsp_restore() { + OPENVINO_ASSERT(!rsp_status, "rsp_restore can be called only after rsp_align"); #ifdef _WIN32 // Release shadow space (home space) h->add(h->rsp, 32); #endif - h->add(h->rsp, h->rbx); + h->add(h->rsp, m_rsp_align_reg); // Update the status rsp_status = true; @@ -142,12 +213,12 @@ cpu_isa_t EmitABIRegSpills::get_isa() { // e.g. other emitters isa is avx512, while this emitter isa is avx2, and internal call is used. Internal call may // use avx512 and spoil k-reg, ZMM. do not care about platform w/ avx512_common but w/o avx512_core(knight landing), // which is obsoleted. 
- if (mayiuse(avx512_core)) - return avx512_core; - if (mayiuse(avx2)) - return avx2; - if (mayiuse(sse41)) - return sse41; + if (mayiuse(cpu_isa_t::avx512_core)) + return cpu_isa_t::avx512_core; + if (mayiuse(cpu_isa_t::avx2)) + return cpu_isa_t::avx2; + if (mayiuse(cpu_isa_t::sse41)) + return cpu_isa_t::sse41; OV_CPU_JIT_EMITTER_THROW("unsupported isa"); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.hpp index 447c44ad71d8e5..7a4b48a8971a2c 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.hpp @@ -5,44 +5,56 @@ #pragma once #include "cpu/x64/jit_generator.hpp" +#include "snippets/emitter.hpp" namespace ov { namespace intel_cpu { +std::set get_callee_saved_reg_idxs(); +/** + * @brief Chooses a callee-saved gpr from the provided pool of registers (`available_gprs`) and removes it from the + * pool. If there are no callee-saved regs in the pool, chooses any register that is not in the `used_gprs`. Raises + * exception if it fails to do so. + * @arg available_gprs - pool of available registers + * @arg used_gprs - registers that are already in use and should not be selected + * @arg spill_required - reference to a bool flag that will be set to `true` if spill is required, i.e. 
the register was + * not selected from the pool + * @return reg_idx - idx of callee-saved gpr + */ +size_t get_callee_saved_aux_gpr(std::vector& available_gprs, + const std::vector& used_gprs, + bool& spill_required); + // The class emit register spills for the possible call of external binary code class EmitABIRegSpills { public: EmitABIRegSpills(dnnl::impl::cpu::x64::jit_generator* h); ~EmitABIRegSpills(); - - // push (save) all registers on the stack - void preamble(); - // pop (take) all registers from the stack + size_t get_num_spilled_regs() const { + return m_regs_to_spill.size(); + } + /** + * @brief Spills registers to stack + * @arg live_regs - set of registers to spill (optional). All registers will be spilled if live_regs is not + * provided. + */ + void preamble(const std::set& live_regs = {}); + /** + * @brief Restores registers previously spilled in preamble(live_regs) call. + */ void postamble(); - // align stack on 16-byte and allocate shadow space as ABI reqiures - // callee is responsible to save and restore `rbx`. `rbx` must not be changed after call callee. 
- void rsp_align(); + void rsp_align(size_t callee_saved_gpr_idx); void rsp_restore(); private: EmitABIRegSpills() = default; - static dnnl::impl::cpu::x64::cpu_isa_t get_isa(); - - inline size_t get_max_vecs_count() const { - return dnnl::impl::cpu::x64::isa_num_vregs(isa); - } - inline size_t get_vec_length() const { - return dnnl::impl::cpu::x64::isa_max_vlen(isa); - } - dnnl::impl::cpu::x64::jit_generator* h{nullptr}; const dnnl::impl::cpu::x64::cpu_isa_t isa{dnnl::impl::cpu::x64::cpu_isa_t::isa_undef}; - - static constexpr int k_mask_size = 8; - static constexpr int k_mask_num = 8; - static constexpr int gpr_size = 8; + std::vector m_regs_to_spill; + Xbyak::Reg m_rsp_align_reg; + uint32_t m_bytes_to_spill = 0; bool spill_status = true; bool rsp_status = true; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp index 9beae08cecb1eb..a915fb0fe17e21 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp @@ -242,9 +242,40 @@ size_t CPUTargetMachine::get_lanes() const { } } -// TODO [139932]: Support separate vec_count and gpr_count -size_t CPUTargetMachine::get_reg_count() const { - return 32; +std::vector CPUTargetMachine::get_abi_arg_regs() const { + using namespace dnnl::impl::cpu::aarch64; + std::vector res; + for (const auto& r : + {abi_param1, abi_param2, abi_param3, abi_param4, abi_param5, abi_param6, abi_param7, abi_param8}) + res.emplace_back(snippets::RegType::gpr, r.getIdx()); + return res; +} + +std::vector CPUTargetMachine::get_gp_reg_pool() const { + using Xbyak_aarch64::Operand; + const auto num_gp_regs = 32; + std::vector reg_pool; + for (size_t i = 0; i < num_gp_regs; i++) { + // Note: more details on the usage of reserved registers in aarch64/jit_kernel_emitter.cpp + if (!one_of(i, Operand::SP, Operand::X18, Operand::X23, Operand::X24, 
Operand::X28, Operand::X29)) + reg_pool.emplace_back(snippets::RegType::gpr, i); + } + return reg_pool; +} + +std::vector CPUTargetMachine::get_vec_reg_pool() const { + const auto num_vec_regs = [this]() { + switch (isa) { + case dnnl::impl::cpu::aarch64::asimd: + return dnnl::impl::cpu::aarch64::cpu_isa_traits::n_vregs; + default: + OPENVINO_THROW("unknown isa ", isa); + } + }(); + std::vector reg_pool; + for (int i = 0; i < num_vec_regs; i++) + reg_pool.emplace_back(snippets::RegType::vec, static_cast(i)); + return reg_pool; } dnnl::impl::cpu::aarch64::cpu_isa_t CPUTargetMachine::get_isa() const { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.hpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.hpp index 90c2662e33d070..34881742f1812e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.hpp @@ -31,7 +31,11 @@ class CPUTargetMachine : public snippets::TargetMachine { bool is_supported() const override; snippets::CompiledSnippetPtr get_snippet() override; size_t get_lanes() const override; - size_t get_reg_count() const override; + + std::vector get_abi_arg_regs() const override; + std::vector get_gp_reg_pool() const override; + std::vector get_vec_reg_pool() const override; + dnnl::impl::cpu::aarch64::cpu_isa_t get_isa() const; private: diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp index 32ed1a844b6724..417e33d339816c 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp @@ -5,6 +5,8 @@ #include "jit_kernel_emitter.hpp" #include "emitters/utils.hpp" +#include "jit_snippets_emitters.hpp" +#include "snippets/utils/reg_utils.hpp" #include "snippets/utils/utils.hpp" using namespace 
Xbyak_aarch64; @@ -24,55 +26,7 @@ inline static std::vector transform_idxs_to_regs(const std::vector }); return regs; } - -inline static std::vector transform_snippets_regs_to_idxs(const std::vector& regs) { - std::vector idxs(regs.size()); - std::transform(regs.cbegin(), regs.cend(), idxs.begin(), [](const snippets::Reg& reg) { - return reg.idx; - }); - return idxs; -} - -jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, - cpu_isa_t isa, - const ov::snippets::lowered::ExpressionPtr& expr) - : jit_emitter(h, isa), - reg_runtime_params_idx(Operand::X0) { - const auto kernel = ov::as_type_ptr(expr->get_node()); - OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "Invoked with invalid op argument"); - OV_CPU_JIT_EMITTER_ASSERT(!kernel->region->empty(), "Invoked with empty body"); - body = kernel->region; - jcp = *reinterpret_cast(kernel->compile_params); - const auto& parameters = body->get_parameters(); - const auto& results = body->get_results(); - const auto& buffers = body->get_buffers(); - num_inputs = parameters.size(); - num_outputs = results.size(); - for (const auto& param : parameters) - mem_access_exprs.push_back(param); - for (const auto& result : results) - mem_access_exprs.push_back(result); - - std::set unique_buffers; - for (const auto& buffer_expr : buffers) { - const auto buffer_reg_group = buffer_expr->get_reg_group(); - if (unique_buffers.count(buffer_reg_group) == 0) { - mem_access_exprs.push_back(buffer_expr); - unique_buffers.insert(buffer_reg_group); - } - } - - using ExprSet = std::unordered_set; - const ExprSet params_set(parameters.cbegin(), parameters.cend()); - const ExprSet results_set(results.cbegin(), results.cend()); - const ExprSet buffers_set(buffers.cbegin(), buffers.cend()); - for (const auto& expr : *body) { - if (params_set.count(expr) == 0 && results_set.count(expr) == 0 && buffers_set.count(expr) == 0) - general_exprs.emplace_back(expr); - } - num_unique_buffers = unique_buffers.size(); -} - +// Useful register mapping info: 
//==================================================================================== // GPR | Description | Usage | Purpose // =================================================================================== @@ -112,31 +66,39 @@ jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, // Note that 2 of the 25 marked Data pointer registers will be used as work_amounts in // two-level loops, so the actual number of Data pointer register is 23. //==================================================================================== -void jit_kernel_emitter::init_reg_pools(const std::set& gpr_blacklist, const std::set& vec_blacklist) { - gp_regs_pool.resize(32); - vec_regs_pool.resize(32); - // It's easier to remove the last item during mapping, so fill descending to map ascending - for (size_t i = 0; i < 32; i++) - gp_regs_pool[i] = vec_regs_pool[i] = 31 - i; - auto remove_regs_from_pool = [](std::vector& pool, const std::set& to_remove) { - // It's important to keep the order of other elements - pool.erase(std::remove_if(pool.begin(), - pool.end(), - [&](size_t x) { - return to_remove.count(x) != 0; - }), - pool.end()); - }; - std::set gprs_blacklist_extended{Operand::X18, - Operand::X23, - Operand::X24, - Operand::X28, - Operand::X29, - Operand::SP}; - gprs_blacklist_extended.insert(gpr_blacklist.begin(), gpr_blacklist.end()); - // Reserve reg_indexes_idx and reg_runtime_params_idx, since they'll be used to pass runtime call args to kernel - remove_regs_from_pool(gp_regs_pool, gprs_blacklist_extended); - remove_regs_from_pool(vec_regs_pool, vec_blacklist); + +jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, + cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr) + : jit_emitter(h, isa) { + const auto kernel = ov::as_type_ptr(expr->get_node()); + OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "Invoked with invalid op argument"); + OV_CPU_JIT_EMITTER_ASSERT(!kernel->region->empty(), "Invoked with empty body"); + body = kernel->region; + jcp = 
*reinterpret_cast(kernel->compile_params); + const auto& parameters = body->get_parameters(); + const auto& results = body->get_results(); + const auto& buffers = body->get_buffers(); + num_inputs = parameters.size(); + num_outputs = results.size(); + std::vector data_ptr_regs; + data_ptr_regs.reserve(num_inputs + num_outputs); + for (const auto& param : parameters) + data_ptr_regs.push_back(param->get_output_port_descriptor(0)->get_reg()); + for (const auto& result : results) + data_ptr_regs.push_back(result->get_input_port_descriptor(0)->get_reg()); + + std::set unique_buffers; + for (const auto& buffer_expr : buffers) { + const auto buffer_reg_group = buffer_expr->get_reg_group(); + if (unique_buffers.count(buffer_reg_group) == 0) { + data_ptr_regs.push_back(buffer_expr->get_output_port_descriptor(0)->get_reg()); + unique_buffers.insert(buffer_reg_group); + } + } + + num_unique_buffers = unique_buffers.size(); + data_ptr_regs_idx = snippets::utils::transform_snippets_regs_to_idxs(data_ptr_regs, snippets::RegType::gpr); } void jit_kernel_emitter::emit_code(const std::vector& in, @@ -144,11 +106,14 @@ void jit_kernel_emitter::emit_code(const std::vector& in, const std::vector& pool_vec_idxs, const std::vector& pool_gpr_idxs) const { validate_arguments(in, out); + aux_vec_idxs = pool_vec_idxs; + aux_gpr_idxs = pool_gpr_idxs; emit_impl(in, out); } void jit_kernel_emitter::validate_arguments(const std::vector& in, const std::vector& out) const { - OV_CPU_JIT_EMITTER_ASSERT(in.empty() && out.empty(), ": Expects 0 registers on input and output"); + OV_CPU_JIT_EMITTER_ASSERT(in.size() == get_inputs_count() && out.empty(), + "Unexpected number of input/output arguments"); const auto num_params = num_inputs + num_outputs + num_unique_buffers; // The number of used gpr may be >= num_params since LoopBegin+LoopEnd could also use gpr to store work_amount OV_CPU_JIT_EMITTER_ASSERT(data_ptr_regs_idx.size() == num_params, @@ -158,40 +123,78 @@ void 
jit_kernel_emitter::validate_arguments(const std::vector& in, const data_ptr_regs_idx.size()); } -void jit_kernel_emitter::init_body_regs(const std::set& kernel_regs, - const std::vector& pool_vec_idxs, - const std::vector& pool_gpr_idxs) { - // Initialize pools of gp and vec registers - // Reserve kernel regs (reg_indexes_idx and, if there is, reg_runtime_params_idx), since they'll be used to pass - // runtime call args to kernel - init_reg_pools(kernel_regs, {}); - - mapping_info gpr_map_pool({}, gp_regs_pool); - mapping_info vec_map_pool({}, vec_regs_pool); - - // Note that we can't use kernel_regs to store data pointers because - // these regs are used to calculate offsets for the data pointers - map_abstract_registers(gpr_map_pool, vec_map_pool, mem_access_exprs); - for (const auto& abstract_to_physical : gpr_map_pool.first) - data_ptr_regs_idx.push_back(abstract_to_physical.second); - - vec_map_pool.second.insert(vec_map_pool.second.end(), pool_vec_idxs.cbegin(), pool_vec_idxs.cend()); - gpr_map_pool.second.insert(gpr_map_pool.second.end(), pool_gpr_idxs.cbegin(), pool_gpr_idxs.cend()); - map_abstract_registers(gpr_map_pool, vec_map_pool, general_exprs); -} - void jit_kernel_emitter::emit_impl(const std::vector& in, const std::vector& out) const { h->preamble(); + std::set available_gpr; + std::set available_vec; + auto reg_type = snippets::RegType::gpr; + auto convert = [®_type](size_t i) -> snippets::Reg { + return {reg_type, i}; + }; + std::transform(aux_gpr_idxs.begin(), + aux_gpr_idxs.end(), + std::inserter(available_gpr, available_gpr.begin()), + convert); + // Note: data_ptr regs are globally live, so it makes no sense to keep them in the pool + for (auto idx : data_ptr_regs_idx) + available_gpr.erase({snippets::RegType::gpr, idx}); + reg_type = snippets::RegType::vec; + std::transform(aux_vec_idxs.begin(), + aux_vec_idxs.end(), + std::inserter(available_vec, available_vec.begin()), + convert); + auto data_ptr_regs = 
transform_idxs_to_regs(data_ptr_regs_idx); - init_data_pointers(data_ptr_regs); + auto get_expected_reg_types = + [](const std::shared_ptr& emitter) -> std::pair { + switch (emitter->get_in_out_type()) { + case emitter_in_out_map::gpr_to_vec: + return {snippets::RegType::gpr, snippets::RegType::vec}; + case emitter_in_out_map::gpr_to_gpr: + return {snippets::RegType::gpr, snippets::RegType::gpr}; + case emitter_in_out_map::vec_to_gpr: + return {snippets::RegType::vec, snippets::RegType::gpr}; + case emitter_in_out_map::vec_to_vec: + return {snippets::RegType::vec, snippets::RegType::vec}; + default: + OV_CPU_JIT_EMITTER_THROW("Unsupported emitter_in_ou_map instance"); + } + }; + init_data_pointers(transform_idxs_to_regs(in), data_ptr_regs); for (const auto& expression : *body) { const auto reg_info = expression->get_reg_info(); - auto in_regs = transform_snippets_regs_to_idxs(reg_info.first); - auto out_regs = transform_snippets_regs_to_idxs(reg_info.second); - const auto& emitter = expression->get_emitter(); - emitter->emit_code(in_regs, out_regs, vec_regs_pool, gp_regs_pool); + const auto& emitter = std::dynamic_pointer_cast(expression->get_emitter()); + OV_CPU_JIT_EMITTER_ASSERT(emitter, "Unexpected emitter type"); + auto expected_in_type = snippets::RegType::undefined; + auto expected_out_type = snippets::RegType::undefined; + const auto& node = expression->get_node(); + // Note: currently only a few operations are allowed to have mixed in/out register types => skip validation here + if (!ov::is_type(node) && !ov::is_type(node) && + !std::dynamic_pointer_cast(emitter)) + std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter); + // Note: live regs = regs live on input of the expression. 
We also need to exclude output regs from the pool + auto live_regs = expression->get_live_regs(); + for (auto r : reg_info.second) + live_regs.insert(r); + std::vector pool_gp_reg; + std::vector pool_vec_reg; + std::set_difference(available_gpr.begin(), + available_gpr.end(), + live_regs.begin(), + live_regs.end(), + std::back_inserter(pool_gp_reg)); + std::set_difference(available_vec.begin(), + available_vec.end(), + live_regs.begin(), + live_regs.end(), + std::back_inserter(pool_vec_reg)); + auto in_regs = snippets::utils::transform_snippets_regs_to_idxs(reg_info.first, expected_in_type); + auto out_regs = snippets::utils::transform_snippets_regs_to_idxs(reg_info.second, expected_out_type); + auto gpr_pool = snippets::utils::transform_snippets_regs_to_idxs(pool_gp_reg); + auto vec_pool = snippets::utils::transform_snippets_regs_to_idxs(pool_vec_reg); + emitter->emit_code(in_regs, out_regs, vec_pool, gpr_pool); } h->postamble(); @@ -200,25 +203,22 @@ void jit_kernel_emitter::emit_impl(const std::vector& in, const std::vec jit_kernel_static_emitter::jit_kernel_static_emitter(dnnl::impl::cpu::aarch64::jit_generator* h, dnnl::impl::cpu::aarch64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) - : jit_kernel_emitter(h, isa, expr), - reg_indexes_idx(Operand::X1) { + : jit_kernel_emitter(h, isa, expr) { const auto kernel = ov::as_type_ptr(expr->get_node()); - OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "Expectes KernelStatic expression"); + OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "expects KernelStatic expression"); jcp = *reinterpret_cast(kernel->compile_params); master_shape = jcp.exec_domain; data_offsets = jcp.data_offsets; OV_CPU_JIT_EMITTER_ASSERT(data_offsets.size() == num_inputs + num_outputs, "Incompatible count of data offsets!"); OV_CPU_JIT_EMITTER_ASSERT(data_offsets.front().size() == master_shape.size(), "Incompatible rank of data offsets!"); - - // - Reserve reg_indexes_idx and reg_runtime_params_idx, since they'll be used to pass 
runtime call args to kernel - // - However we can use reg_indexes_idx for non memory access operations - // since we won't need them after offsets calculation - init_body_regs({reg_indexes_idx, reg_runtime_params_idx}, {}, {reg_indexes_idx}); } -void jit_kernel_static_emitter::init_data_pointers(const std::vector& data_ptr_regs) const { - XReg reg_indexes = XReg(static_cast(reg_indexes_idx)); - XReg reg_runtime_params = XReg(static_cast(reg_runtime_params_idx)); +void jit_kernel_static_emitter::init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs) const { + OV_CPU_JIT_EMITTER_ASSERT(arg_regs.size() == 2, "Invalid arg regs size"); + XReg reg_runtime_params = arg_regs[0]; + XReg reg_indexes = arg_regs[1]; + XReg reg_tmp = XReg(h->X_TMP_0); XReg reg_aux = XReg(h->X_TMP_1); @@ -261,18 +261,14 @@ jit_kernel_dynamic_emitter::jit_kernel_dynamic_emitter(dnnl::impl::cpu::aarch64: dnnl::impl::cpu::aarch64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) : jit_kernel_emitter(h, isa, expr) { - const auto kernel = ov::as_type_ptr(expr->get_node()); - OV_CPU_JIT_EMITTER_ASSERT(kernel, "Expectes KernelDynamic expression"); - - // - Reserve reg_runtime_params_idx, since it wll be used to pass runtime call args to all dynamic emitters that - // needs runtime args - // - We cannot assign this register to the body emitters since runtime params MUST be valid during whole execution - // for all dynamic emitters - init_body_regs({reg_runtime_params_idx}); + OV_CPU_JIT_EMITTER_ASSERT(ov::is_type(expr->get_node()), + "expects KernelDynamic expression"); } -void jit_kernel_dynamic_emitter::init_data_pointers(const std::vector& data_ptr_regs) const { - XReg reg_runtime_params = XReg(static_cast(reg_runtime_params_idx)); +void jit_kernel_dynamic_emitter::init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs) const { + OV_CPU_JIT_EMITTER_ASSERT(arg_regs.size() == 1, "Invalid arg regs size"); + XReg reg_runtime_params 
= arg_regs[0]; const auto num_params = num_inputs + num_outputs; for (size_t i = 0; i < num_unique_buffers; ++i) { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.hpp index 0ede91f100f110..c4844bc2545f82 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.hpp @@ -5,7 +5,6 @@ #pragma once #include "emitters/plugin/aarch64/jit_emitter.hpp" -#include "emitters/snippets/jit_container_emitter.hpp" #include "emitters/snippets/jit_snippets_call_args.hpp" namespace ov { @@ -30,7 +29,7 @@ namespace aarch64 { /// Note that Kernel doesn't accept any input arguments. /// -class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { +class jit_kernel_emitter : public jit_emitter { public: jit_kernel_emitter(dnnl::impl::cpu::aarch64::jit_generator* h, dnnl::impl::cpu::aarch64::cpu_isa_t isa, @@ -41,14 +40,11 @@ class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { } void emit_code(const std::vector& in_idxs, const std::vector& out_idxs, - const std::vector& pool_vec_idxs = {}, - const std::vector& pool_gpr_idxs = {}) const override; + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const override; protected: void validate_arguments(const std::vector& in, const std::vector& out) const override; - void init_body_regs(const std::set& kernel_regs, - const std::vector& pool_vec_idxs = {}, - const std::vector& pool_gpr_idxs = {}); /** * @brief populates physical registers pools for x86 (both vec and gp). * Skips stack-related gprs and extra gprs passed as arguments. 
@@ -57,7 +53,8 @@ class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { */ void init_reg_pools(const std::set& gpr_blacklist, const std::set& vec_blacklist); - virtual void init_data_pointers(const std::vector& data_ptr_regs) const = 0; + virtual void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs) const = 0; void emit_impl(const std::vector& in, const std::vector& out) const override; @@ -70,11 +67,6 @@ class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { size_t num_outputs = 0; size_t num_unique_buffers = 0; - snippets::lowered::LinearIR::container mem_access_exprs; - snippets::lowered::LinearIR::container general_exprs; - - const size_t reg_runtime_params_idx{0}; - std::shared_ptr body; #ifdef SNIPPETS_DEBUG_CAPS @@ -87,11 +79,14 @@ class jit_kernel_static_emitter : public jit_kernel_emitter { jit_kernel_static_emitter(dnnl::impl::cpu::aarch64::jit_generator* h, dnnl::impl::cpu::aarch64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); + size_t get_inputs_count() const override { + return 2; + } private: - void init_data_pointers(const std::vector& data_ptr_regs) const override; + void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs) const override; - const size_t reg_indexes_idx{1}; std::vector master_shape; std::vector> data_offsets; @@ -105,9 +100,13 @@ class jit_kernel_dynamic_emitter : public jit_kernel_emitter { jit_kernel_dynamic_emitter(dnnl::impl::cpu::aarch64::jit_generator* h, dnnl::impl::cpu::aarch64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); + size_t get_inputs_count() const override { + return 1; + } private: - void init_data_pointers(const std::vector& data_ptr_regs) const override; + void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs) const override; #ifdef SNIPPETS_DEBUG_CAPS friend std::string init_info_jit_kernel_dynamic_emitter(const 
jit_kernel_dynamic_emitter* emitter); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.cpp deleted file mode 100644 index ceee57f3c0cd28..00000000000000 --- a/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "jit_container_emitter.hpp" - -#include "emitters/utils.hpp" -#include "utils/general_utils.h" - -namespace ov { -namespace intel_cpu { - -void jit_container_emitter::map_abstract_registers(mapping_info& gpr_map_pool, - mapping_info& vec_map_pool, - snippets::lowered::LinearIR::container& expressions) const { - OV_CPU_JIT_EMITTER_ASSERT(!expressions.empty(), - "Cannot map registers when there is no allocated_emitters provided"); - - auto map_regs = [&](const std::vector& abstract_regs) { - std::vector physical_regs = abstract_regs; - for (size_t i = 0; i < abstract_regs.size(); ++i) { - const auto& abstract_reg = abstract_regs[i]; - const auto& type = abstract_reg.type; - const auto& abstract = abstract_reg.idx; - OV_CPU_JIT_EMITTER_ASSERT(one_of(type, snippets::RegType::gpr, snippets::RegType::vec), - "Incorrect reg type detected!"); - auto& mapping = type == snippets::RegType::gpr ? 
gpr_map_pool : vec_map_pool; - auto& abstract_to_physical = mapping.first; - auto& regs_pool = mapping.second; - auto& physical = physical_regs[i]; - if (abstract_to_physical.count(abstract) == 0) { - OV_CPU_JIT_EMITTER_ASSERT( - !regs_pool.empty(), - "Cannot map registers for jit_container_emitter: not enough regs in the pool"); - physical.idx = regs_pool.back(); - regs_pool.pop_back(); - abstract_to_physical[abstract] = physical.idx; - } else { - physical.idx = abstract_to_physical[abstract]; - } - } - return physical_regs; - }; - - for (const auto& expression : expressions) { - std::vector in_physical_regs, out_physical_regs; - std::vector in_abstract_regs, out_abstract_regs; - std::tie(in_abstract_regs, out_abstract_regs) = expression->get_reg_info(); - in_physical_regs = map_regs(in_abstract_regs); - out_physical_regs = map_regs(out_abstract_regs); - expression->set_reg_info({in_physical_regs, out_physical_regs}); - if (auto container = std::dynamic_pointer_cast(expression->get_emitter())) - container->map_abstract_registers(gpr_map_pool, vec_map_pool, expressions); - } -} - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.hpp deleted file mode 100644 index 7737e7e1150926..00000000000000 --- a/src/plugins/intel_cpu/src/emitters/snippets/jit_container_emitter.hpp +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "snippets/lowered/linear_ir.hpp" - -namespace ov { -namespace intel_cpu { - -/// -/// \brief jit_container_emitter designed provide common interface for register mapping -/// (abstract to physical) and nested code access. 
-/// -class jit_container_emitter { -public: - // mapping info contains abstract_to_physical map + regs_pool - using mapping_info = std::pair, std::vector&>; - -protected: - // maps gpr and vec abstract registers to physical ones. - void map_abstract_registers(mapping_info& gpr_map_pool, - mapping_info& vec_map_pool, - snippets::lowered::LinearIR::container& expressions) const; -}; - -} // namespace intel_cpu -} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp index 6bc05dbc43b41d..de2160e0053808 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp @@ -7,10 +7,8 @@ #include #include "emitters/plugin/x64/jit_conversion_emitters.hpp" -#include "emitters/plugin/x64/jit_dnnl_emitters.hpp" #include "emitters/plugin/x64/jit_dnnl_ext_emitters.hpp" #include "emitters/plugin/x64/jit_eltwise_emitters.hpp" -#include "emitters/snippets/cpu_kernel_executor_table.hpp" #include "emitters/snippets/cpu_runtime_configurator.hpp" #include "emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp" #include "emitters/snippets/x64/jit_brgemm_emitter.hpp" @@ -19,6 +17,7 @@ #include "emitters/snippets/x64/jit_kernel_emitter.hpp" #include "emitters/snippets/x64/jit_loop_emitters.hpp" #include "emitters/snippets/x64/jit_memory_emitters.hpp" +#include "emitters/snippets/x64/jit_reg_spill_emitters.hpp" #include "emitters/snippets/x64/jit_snippets_emitters.hpp" #include "snippets/snippets_isa.hpp" #include "transformations/cpu_opset/common/op/swish_cpu.hpp" @@ -259,13 +258,6 @@ intel_cpu::CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::x64::cpu_isa_t ho jitters[snippets::op::HorizonMax::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(intel_cpu::jit_horizon_emitter); jitters[snippets::op::HorizonSum::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(intel_cpu::jit_horizon_emitter); - 
jitters[snippets::op::KernelStatic::get_type_info_static()] = - CREATE_SNIPPETS_EMITTER(intel_cpu::jit_kernel_static_emitter); - jitters[snippets::op::KernelDynamic::get_type_info_static()] = - CREATE_SNIPPETS_EMITTER(intel_cpu::jit_kernel_dynamic_emitter); - jitters[snippets::op::LoopBegin::get_type_info_static()] = - CREATE_SNIPPETS_EMITTER(intel_cpu::jit_loop_begin_emitter); - jitters[snippets::op::LoopEnd::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(intel_cpu::jit_loop_end_emitter); // Note: jit_brgemm_emitter and jit_brgemm_copy_b_emitter support runtime recompilation, so their constructor takes // additional arguments jitters[intel_cpu::BrgemmCPU::get_type_info_static()] = @@ -278,6 +270,18 @@ intel_cpu::CPUTargetMachine::CPUTargetMachine(dnnl::impl::cpu::x64::cpu_isa_t ho compiled_kernel_cache); jitters[snippets::op::ReduceMax::get_type_info_static()] = CREATE_UNDEFINED_EMITTER({{ov::element::f32}}); jitters[snippets::op::ReduceSum::get_type_info_static()] = CREATE_UNDEFINED_EMITTER({{ov::element::f32}}); + // Service + jitters[snippets::op::KernelStatic::get_type_info_static()] = + CREATE_SNIPPETS_EMITTER(intel_cpu::jit_kernel_static_emitter); + jitters[snippets::op::KernelDynamic::get_type_info_static()] = + CREATE_SNIPPETS_EMITTER(intel_cpu::jit_kernel_dynamic_emitter); + jitters[snippets::op::LoopBegin::get_type_info_static()] = + CREATE_SNIPPETS_EMITTER(intel_cpu::jit_loop_begin_emitter); + jitters[snippets::op::LoopEnd::get_type_info_static()] = CREATE_SNIPPETS_EMITTER(intel_cpu::jit_loop_end_emitter); + jitters[snippets::op::RegSpillBegin::get_type_info_static()] = + CREATE_SNIPPETS_EMITTER(intel_cpu::jit_reg_spill_begin_emitter); + jitters[snippets::op::RegSpillEnd::get_type_info_static()] = + CREATE_SNIPPETS_EMITTER(intel_cpu::jit_reg_spill_end_emitter); #ifdef SNIPPETS_DEBUG_CAPS jitters[snippets::op::PerfCountBegin::get_type_info_static()] = @@ -337,8 +341,41 @@ size_t intel_cpu::CPUTargetMachine::get_lanes() const { } } -size_t 
intel_cpu::CPUTargetMachine::get_reg_count() const { - return 16; +std::vector intel_cpu::CPUTargetMachine::get_abi_arg_regs() const { + const auto& abi_regs = dnnl::impl::cpu::x64::abi_param_regs; + std::vector res; + for (const auto& r : abi_regs) + res.emplace_back(snippets::RegType::gpr, r); + return res; +} + +std::vector intel_cpu::CPUTargetMachine::get_gp_reg_pool() const { + const auto num_gp_regs = 16; + std::vector reg_pool; + for (size_t i = 0; i < num_gp_regs; i++) { + if (!one_of(i, Xbyak::Operand::RSP)) + reg_pool.emplace_back(snippets::RegType::gpr, i); + } + return reg_pool; +} + +std::vector intel_cpu::CPUTargetMachine::get_vec_reg_pool() const { + const auto num_vec_regs = [this]() { + switch (isa) { + case dnnl::impl::cpu::x64::avx2: + return dnnl::impl::cpu::x64::cpu_isa_traits::n_vregs; + case dnnl::impl::cpu::x64::sse41: + return dnnl::impl::cpu::x64::cpu_isa_traits::n_vregs; + case dnnl::impl::cpu::x64::avx512_core: + return dnnl::impl::cpu::x64::cpu_isa_traits::n_vregs; + default: + OPENVINO_THROW("unknown isa ", isa); + } + }(); + std::vector reg_pool; + for (int i = 0; i < num_vec_regs; i++) + reg_pool.emplace_back(snippets::RegType::vec, static_cast(i)); + return reg_pool; } dnnl::impl::cpu::x64::cpu_isa_t intel_cpu::CPUTargetMachine::get_isa() const { @@ -390,14 +427,14 @@ std::shared_ptr intel_cpu::CPUGenerator::clone() const { ov::snippets::RegType intel_cpu::CPUGenerator::get_specific_op_out_reg_type(const ov::Output& out) const { const auto op = out.get_node_shared_ptr(); - if (ov::as_type_ptr(op) || + if (is_type(op) || #ifdef SNIPPETS_LIBXSMM_TPP std::dynamic_pointer_cast(op) || - ov::as_type_ptr(op) || + is_type(op) || #endif - ov::as_type_ptr(op)) + is_type(op)) return ov::snippets::RegType::gpr; - else if (ov::as_type_ptr(op) || ov::as_type_ptr(op)) + else if (is_type(op) || is_type(op)) return ov::snippets::RegType::vec; else return ov::snippets::RegType::undefined; diff --git 
a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp index a86f3050580ed4..c4476d38088c97 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.hpp @@ -35,7 +35,11 @@ class CPUTargetMachine : public snippets::TargetMachine { bool is_supported() const override; snippets::CompiledSnippetPtr get_snippet() override; size_t get_lanes() const override; - size_t get_reg_count() const override; + + std::vector get_abi_arg_regs() const override; + std::vector get_gp_reg_pool() const override; + std::vector get_vec_reg_pool() const override; + dnnl::impl::cpu::x64::cpu_isa_t get_isa() const; #ifdef SNIPPETS_DEBUG_CAPS SnippetsDebugCapsConfig debug_config; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.cpp new file mode 100644 index 00000000000000..568e3dffab35e7 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.cpp @@ -0,0 +1,68 @@ +// Copyright (C) 2020-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "jit_binary_call_emitter.hpp" + +#include "emitters/plugin/x64/utils.hpp" + +using namespace Xbyak; +using namespace dnnl::impl; +using namespace dnnl::impl::cpu::x64; + +namespace ov { +namespace intel_cpu { + +using jit_generator = dnnl::impl::cpu::x64::jit_generator; +using cpu_isa_t = dnnl::impl::cpu::x64::cpu_isa_t; +using ExpressionPtr = ov::snippets::lowered::ExpressionPtr; + +jit_binary_call_emitter::jit_binary_call_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + std::set live_regs) + : jit_emitter(h, isa), + m_regs_to_spill(std::move(live_regs)), + m_regs_initialized(false) {} + +void jit_binary_call_emitter::init_binary_call_regs(size_t num_binary_args, + const 
std::vector& in, + const std::vector& out) const { + std::vector mem_ptr_idxs = in; + mem_ptr_idxs.insert(mem_ptr_idxs.end(), out.begin(), out.end()); + init_binary_call_regs(num_binary_args, mem_ptr_idxs); +} + +void jit_binary_call_emitter::init_binary_call_regs(size_t num_binary_args, + const std::vector& used_gpr_idxs) const { + OV_CPU_JIT_EMITTER_ASSERT(sizeof(abi_param_regs) / sizeof(*abi_param_regs) >= num_binary_args, + "Requested number of runtime arguments is not supported"); + // This regs will be corrupted, since we'll use them to pass runtime args + for (size_t i = 0; i < num_binary_args; i++) + m_regs_to_spill.emplace(snippets::RegType::gpr, abi_param_regs[i]); + // Note: aux_gpr idx must be non-empty because aux_gprs_count() returns 1 for this emitter + OV_CPU_JIT_EMITTER_ASSERT(aux_gprs_count() >= 1, "Invalid aux_gpr count"); + m_call_address_reg = Reg64(static_cast(aux_gpr_idxs.back())); + aux_gpr_idxs.pop_back(); + bool spill_required = false; + m_callee_saved_reg = Reg64(static_cast(get_callee_saved_aux_gpr(aux_gpr_idxs, used_gpr_idxs, spill_required))); + if (spill_required) + m_regs_to_spill.emplace(snippets::RegType::gpr, m_callee_saved_reg.getIdx()); + m_regs_initialized = true; +} + +const Xbyak::Reg64& jit_binary_call_emitter::get_call_address_reg() const { + OV_CPU_JIT_EMITTER_ASSERT(m_regs_initialized, "You should call init_binary_call_regs() before using this method"); + return m_call_address_reg; +} +const Xbyak::Reg64& jit_binary_call_emitter::get_callee_saved_reg() const { + OV_CPU_JIT_EMITTER_ASSERT(m_regs_initialized, "You should call init_binary_call_regs() before using this method"); + return m_callee_saved_reg; +} + +const std::set& jit_binary_call_emitter::get_regs_to_spill() const { + OV_CPU_JIT_EMITTER_ASSERT(m_regs_initialized, "You should call init_binary_call_regs() before using this method"); + return m_regs_to_spill; +} + +} // namespace intel_cpu +} // namespace ov diff --git 
a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.hpp new file mode 100644 index 00000000000000..96ec9e9815fa45 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_binary_call_emitter.hpp @@ -0,0 +1,73 @@ +// Copyright (C) 2020-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "emitters/plugin/x64/jit_emitter.hpp" + +namespace ov { +namespace intel_cpu { +/** + * @brief Base class for binary call emitters. Its main function is to allocate 2 auxiliary registers needed for binary + * call emission: one is any gpr to store callable address, the second one is a callee-saved reg to organize rsp + * alignment before the call. It also creates a set of registers to spill that can be passed directly to + * EmitABIRegSpills. + */ +class jit_binary_call_emitter : public jit_emitter { +public: + jit_binary_call_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + std::set live_regs); + // Note: we need at least one register to allocate a gpr to store the callable address + size_t aux_gprs_count() const override { + return 1; + } + +protected: + /** + * @brief Returns a set of snippets::Reg that should be spilled in the derived emitter. This set includes live_regs + * passed in constructor, plus a callee-saved reg and regs for ABI params. This method can be used only after + * init_binary_call_regs(...) + */ + const std::set& get_regs_to_spill() const; + /** + * @brief Returns a gpr that can be used to store the address of the callable. This method can be used only after + * init_binary_call_regs(...) + */ + const Xbyak::Reg64& get_call_address_reg() const; + /** + * @brief Returns a callee-saved gpr that can be used align rsp before the call instruction. This method can be used + * only after init_binary_call_regs(...) 
+ */ + const Xbyak::Reg64& get_callee_saved_reg() const; + /** + * @brief Initializes registers that can be then obtained via get_regs_to_spill(), get_call_address_reg() or + * get_callee_saved_reg(). + * @param num_binary_args - the number of arguments of the binary that will be called + * @param used_gpr_idxs - indices of registers that must be preserved during aux reg allocation, usually in/out + * memory pointers + */ + void init_binary_call_regs(size_t num_binary_args, const std::vector& used_gpr_idxs) const; + /** + * @brief Initializes registers that can be then obtained via get_regs_to_spill(), get_call_address_reg() or + * get_callee_saved_reg(). + * @param num_binary_args - the number of arguments of the binary that will be called + * @param in - indices of input registers that must be preserved during aux reg allocation + * @param out - indices of output registers that must be preserved during aux reg allocation + */ + void init_binary_call_regs(size_t num_binary_args, + const std::vector& in, + const std::vector& out) const; + +private: + // Note: init_regs() can be called only from emit_impl, since it needs initialized regs + // init_impl is a constant method, so all these fields have to be mutable + mutable std::set m_regs_to_spill{}; + mutable Xbyak::Reg64 m_callee_saved_reg; + mutable Xbyak::Reg64 m_call_address_reg; + mutable bool m_regs_initialized = false; +}; + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.cpp index 861b9779c25533..0bd6c02b42783e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.cpp @@ -9,7 +9,6 @@ #include "emitters/plugin/x64/utils.hpp" #include "emitters/snippets/x64/utils.hpp" -#include "snippets/lowered/expression.hpp" #include 
"snippets/utils/utils.hpp" #include "transformations/snippets/x64/op/brgemm_cpu.hpp" @@ -27,7 +26,7 @@ jit_brgemm_copy_b_emitter::jit_brgemm_copy_b_emitter(jit_generator* h, const ov::snippets::lowered::ExpressionPtr& expr, const snippets::KernelExecutorTablePtr& kernel_table, const ov::intel_cpu::MultiCacheWeakPtr& compiled_kernel_cache) - : jit_emitter(h, isa) { + : jit_binary_call_emitter(h, isa, expr->get_live_regs()) { in_out_type_ = emitter_in_out_map::gpr_to_gpr; const auto brgemm_repack = ov::as_type_ptr(expr->get_node()); OV_CPU_JIT_EMITTER_ASSERT(brgemm_repack, "expects BrgemmCopyB node"); @@ -71,43 +70,44 @@ void jit_brgemm_copy_b_emitter::emit_impl(const std::vector& in, const s std::vector mem_ptrs_idxs{in[0], out[0]}; if (out.size() > 1) mem_ptrs_idxs.emplace_back(out[1]); + init_binary_call_regs(2, mem_ptrs_idxs); + + const Xbyak::Reg64& aux_reg = get_call_address_reg(); + const Xbyak::Reg64& callee_saved_reg = get_callee_saved_reg(); EmitABIRegSpills spill(h); - spill.preamble(); + spill.preamble(get_regs_to_spill()); - h->mov(h->rbp, reinterpret_cast(BrgemmCopyBKernelExecutor::execute)); auto reserved_stack_size = sizeof(BrgemmCopyBKernel::call_args); // Reserve memory on the stack h->sub(h->rsp, reserved_stack_size); - const bool is_dynamic_case = - std::any_of(m_memory_offsets.cbegin(), m_memory_offsets.cend(), ov::snippets::utils::is_dynamic_value); - Xbyak::Reg64 aux_reg = is_dynamic_case ? 
ov::intel_cpu::utils::get_aux_gpr(mem_ptrs_idxs) : Xbyak::Reg64(); - const std::vector args_offsets{GET_OFF_BRGEMM_COPY_B_ARGS(src), GET_OFF_BRGEMM_COPY_B_ARGS(tr_src), GET_OFF_BRGEMM_COPY_B_ARGS(compensation_ptr)}; const auto& mem_ptrs = ov::intel_cpu::utils::transform_idxs_to_regs(mem_ptrs_idxs); for (size_t i = 0; i < mem_ptrs.size(); i++) { - if (ov::snippets::utils::is_dynamic_value(m_memory_offsets[i])) + if (ov::snippets::utils::is_dynamic_value(m_memory_offsets[i])) { utils::push_ptr_with_runtime_offset_on_stack(h, args_offsets[i], mem_ptrs[i], aux_reg, GET_OFF(buffer_offsets) + m_buffer_ids[i] * sizeof(size_t)); - else + } else { utils::push_ptr_with_static_offset_on_stack(h, args_offsets[i], mem_ptrs[i], m_memory_offsets[i]); + } } // No scratchpad => need to write nullptr manually if (!m_with_comp) h->mov(h->qword[h->rsp + args_offsets.back()], reinterpret_cast(nullptr)); + h->mov(aux_reg, reinterpret_cast(BrgemmCopyBKernelExecutor::execute)); h->mov(abi_param1, reinterpret_cast(m_kernel_executor.get())); h->mov(abi_param2, h->rsp); - spill.rsp_align(); - h->call(h->rbp); + spill.rsp_align(callee_saved_reg.getIdx()); + h->call(aux_reg); spill.rsp_restore(); h->add(h->rsp, reserved_stack_size); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp index d937e646b603da..ad4f39194cd0fe 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_copy_b_emitter.hpp @@ -5,12 +5,13 @@ #pragma once #include "emitters/plugin/x64/jit_emitter.hpp" +#include "jit_binary_call_emitter.hpp" #include "kernel_executors/brgemm_copy_b.hpp" namespace ov { namespace intel_cpu { -class jit_brgemm_copy_b_emitter : public jit_emitter { +class jit_brgemm_copy_b_emitter : public jit_binary_call_emitter { public: 
jit_brgemm_copy_b_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp index 8d343cec908732..ab6c9d0d0e567f 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp @@ -24,7 +24,7 @@ jit_brgemm_emitter::jit_brgemm_emitter(jit_generator* h, const ov::snippets::lowered::ExpressionPtr& expr, const snippets::KernelExecutorTablePtr& kernel_table, const ov::intel_cpu::MultiCacheWeakPtr& compiled_kernel_cache) - : jit_emitter(h, isa) { + : jit_binary_call_emitter(h, isa, expr->get_live_regs()) { in_out_type_ = emitter_in_out_map::gpr_to_gpr; const auto& brgemm_node = as_type_ptr(expr->get_node()); const auto& brg0Prc = brgemm_node->get_input_element_type(0); @@ -93,6 +93,7 @@ void jit_brgemm_emitter::validate_arguments(const std::vector& in, const void jit_brgemm_emitter::emit_impl(const std::vector& in, const std::vector& out) const { validate_arguments(in, out); std::vector mem_ptrs_idxs{in[0], in[1], out[0]}; + init_binary_call_regs(2, mem_ptrs_idxs); if (in.size() > 2) mem_ptrs_idxs.emplace_back(in[2]); @@ -106,18 +107,16 @@ void jit_brgemm_emitter::emit_impl(const std::vector& in, const std::vec template ::value, bool>::type> void jit_brgemm_emitter::emit_call(const std::vector& mem_ptrs_idxs) const { + const Xbyak::Reg64& aux_reg = get_call_address_reg(); + const Xbyak::Reg64& callee_saved_reg = get_callee_saved_reg(); + EmitABIRegSpills spill(h); - spill.preamble(); + spill.preamble(get_regs_to_spill()); - h->mov(h->rbp, reinterpret_cast(T::execute)); auto reserved_stack_size = sizeof(typename T::call_args); // Reserve memory on the stack h->sub(h->rsp, reserved_stack_size); - const bool is_dynamic_case = - std::any_of(m_memory_offsets.cbegin(), m_memory_offsets.cend(), 
ov::snippets::utils::is_dynamic_value); - Xbyak::Reg64 aux_reg = is_dynamic_case ? ov::intel_cpu::utils::get_aux_gpr(mem_ptrs_idxs) : Xbyak::Reg64(); - #define GET_OFF_CALL_ARGS(field) offsetof(typename T::call_args, field) const std::vector brgemm_args_offsets = {GET_OFF_CALL_ARGS(A), GET_OFF_CALL_ARGS(B), @@ -127,14 +126,15 @@ void jit_brgemm_emitter::emit_call(const std::vector& mem_ptrs_idxs) con const auto& mem_ptrs = utils::transform_idxs_to_regs(mem_ptrs_idxs); for (size_t i = 0; i < mem_ptrs.size(); i++) { - if (ov::snippets::utils::is_dynamic_value(m_memory_offsets[i])) + if (ov::snippets::utils::is_dynamic_value(m_memory_offsets[i])) { utils::push_ptr_with_runtime_offset_on_stack(h, brgemm_args_offsets[i], mem_ptrs[i], aux_reg, GET_OFF(buffer_offsets) + m_buffer_ids[i] * sizeof(size_t)); - else + } else { utils::push_ptr_with_static_offset_on_stack(h, brgemm_args_offsets[i], mem_ptrs[i], m_memory_offsets[i]); + } } // No scratchpad => need to write nullptr manually @@ -143,15 +143,15 @@ void jit_brgemm_emitter::emit_call(const std::vector& mem_ptrs_idxs) con // abi_param1 always contains jit_snippets_call_args which has amx tile config for each thread if (std::is_same()) { - h->lea(h->r10, h->ptr[abi_param1 + GET_OFF(amx_tile_config)]); - h->mov(h->qword[h->rsp + GET_OFF_BRGEMM_AMX_ARGS(amx_tile_config)], h->r10); + h->lea(aux_reg, h->ptr[abi_param1 + GET_OFF(amx_tile_config)]); + h->mov(h->qword[h->rsp + GET_OFF_BRGEMM_AMX_ARGS(amx_tile_config)], aux_reg); } - + h->mov(aux_reg, reinterpret_cast(T::execute)); h->mov(abi_param1, reinterpret_cast(m_kernel_executor.get())); h->mov(abi_param2, h->rsp); - spill.rsp_align(); - h->call(h->rbp); + spill.rsp_align(callee_saved_reg.getIdx()); + h->call(aux_reg); spill.rsp_restore(); h->add(h->rsp, reserved_stack_size); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp index 9d072065c0fe52..59508f46154f28 
100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.hpp @@ -6,11 +6,12 @@ #include "emitters/plugin/x64/jit_emitter.hpp" #include "emitters/snippets/x64/kernel_executors/brgemm_base.hpp" +#include "jit_binary_call_emitter.hpp" namespace ov { namespace intel_cpu { -class jit_brgemm_emitter : public jit_emitter { +class jit_brgemm_emitter : public jit_binary_call_emitter { public: jit_brgemm_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp index bd5a3227e1e125..d81f0937f48aba 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp @@ -4,7 +4,8 @@ #include "jit_kernel_emitter.hpp" -#include "snippets/utils/utils.hpp" +#include "jit_snippets_emitters.hpp" +#include "snippets/utils/reg_utils.hpp" #include "utils.hpp" using namespace Xbyak; @@ -17,8 +18,7 @@ namespace intel_cpu { jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) - : jit_emitter(h, isa), - reg_runtime_params_idx(abi_param1.getIdx()) { + : jit_emitter(h, isa) { const auto kernel = ov::as_type_ptr(expr->get_node()); OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "invoked with invalid op argument"); OV_CPU_JIT_EMITTER_ASSERT(!kernel->region->empty(), "invoked with empty body"); @@ -29,52 +29,23 @@ jit_kernel_emitter::jit_kernel_emitter(jit_generator* h, const auto& buffers = body->get_buffers(); num_inputs = parameters.size(); num_outputs = results.size(); + std::vector data_ptr_regs; + data_ptr_regs.reserve(num_inputs + num_outputs); for (const auto& param : parameters) - mem_access_exprs.push_back(param); + 
data_ptr_regs.push_back(param->get_output_port_descriptor(0)->get_reg()); for (const auto& result : results) - mem_access_exprs.push_back(result); + data_ptr_regs.push_back(result->get_input_port_descriptor(0)->get_reg()); std::set unique_buffers; for (const auto& buffer_expr : buffers) { const auto buffer_reg_group = buffer_expr->get_reg_group(); if (unique_buffers.count(buffer_reg_group) == 0) { - mem_access_exprs.push_back(buffer_expr); + data_ptr_regs.push_back(buffer_expr->get_output_port_descriptor(0)->get_reg()); unique_buffers.insert(buffer_reg_group); } } - - using ExprSet = std::unordered_set; - const ExprSet params_set(parameters.cbegin(), parameters.cend()); - const ExprSet results_set(results.cbegin(), results.cend()); - const ExprSet buffers_set(buffers.cbegin(), buffers.cend()); - for (const auto& expr : *body) { - if (params_set.count(expr) == 0 && results_set.count(expr) == 0 && buffers_set.count(expr) == 0) - general_exprs.emplace_back(expr); - } num_unique_buffers = unique_buffers.size(); -} - -void jit_kernel_emitter::init_reg_pools(const std::set& gpr_blacklist, const std::set& vec_blacklist) { - gp_regs_pool.resize(16); - vec_regs_pool.resize(16); - // It's easier to remove the last item during mapping, so fill descending to map ascending - for (size_t i = 0; i < 16; i++) - gp_regs_pool[i] = vec_regs_pool[i] = 15 - i; - auto remove_regs_from_pool = [](std::vector& pool, const std::set& to_remove) { - // It's important to keep the order of other elements - pool.erase(std::remove_if(pool.begin(), - pool.end(), - [&](size_t x) { - return to_remove.count(x) != 0; - }), - pool.end()); - }; - // Reserve stack base and pointer for push(...) and pop(...) 
operations - std::set gprs_blacklist_extended{Xbyak::Operand::RSP, Xbyak::Operand::RBP}; - gprs_blacklist_extended.insert(gpr_blacklist.begin(), gpr_blacklist.end()); - // Reserve abi_param1 and abi_param2, since they'll be used to pass runtime call args to kernel - remove_regs_from_pool(gp_regs_pool, gprs_blacklist_extended); - remove_regs_from_pool(vec_regs_pool, vec_blacklist); + data_ptr_regs_idx = snippets::utils::transform_snippets_regs_to_idxs(data_ptr_regs, snippets::RegType::gpr); } void jit_kernel_emitter::emit_code(const std::vector& in, @@ -82,54 +53,99 @@ void jit_kernel_emitter::emit_code(const std::vector& in, const std::vector& pool_vec_idxs, const std::vector& pool_gpr_idxs) const { validate_arguments(in, out); + aux_vec_idxs = pool_vec_idxs; + aux_gpr_idxs = pool_gpr_idxs; emit_impl(in, out); } void jit_kernel_emitter::validate_arguments(const std::vector& in, const std::vector& out) const { - OV_CPU_JIT_EMITTER_ASSERT(in.empty() && out.empty(), ": expects 0 registers on input and output"); + OV_CPU_JIT_EMITTER_ASSERT(in.size() == get_inputs_num() && out.empty(), + "Unexpected number of input/output arguments"); const auto num_params = num_inputs + num_outputs + num_unique_buffers; // The number of used gpr may be >= num_params since LoopBegin+LoopEnd could also use gpr to store work_amount OV_CPU_JIT_EMITTER_ASSERT(data_ptr_regs_idx.size() == num_params, - "number of inputs and outputs is inconsistent with the number of allocated registers ", + "Number of inputs and outputs is inconsistent with the number of allocated registers ", num_params, " data_ptr_regs_idx.size() = ", data_ptr_regs_idx.size()); } -void jit_kernel_emitter::init_body_regs(const std::set& kernel_regs, - const std::vector& pool_vec_idxs, - const std::vector& pool_gpr_idxs) { - // Initialize pools of gp and vec registers - // Reserve kernel regs (abi_param1 and, if there is, abi_param2), since they'll be used to pass runtime call args to - // kernel - init_reg_pools(kernel_regs, 
{}); - - mapping_info gpr_map_pool({}, gp_regs_pool); - mapping_info vec_map_pool({}, vec_regs_pool); - - // Note that we can't use kernel_regs to store data pointers because - // these regs are used to calculate offsets for the data pointers - map_abstract_registers(gpr_map_pool, vec_map_pool, mem_access_exprs); - for (const auto& abstract_to_physical : gpr_map_pool.first) - data_ptr_regs_idx.push_back(abstract_to_physical.second); - - gpr_map_pool.second.insert(gpr_map_pool.second.end(), pool_gpr_idxs.cbegin(), pool_gpr_idxs.cend()); - vec_map_pool.second.insert(vec_map_pool.second.end(), pool_vec_idxs.cbegin(), pool_vec_idxs.cend()); - map_abstract_registers(gpr_map_pool, vec_map_pool, general_exprs); -} - void jit_kernel_emitter::emit_impl(const std::vector& in, const std::vector& out) const { h->preamble(); + std::set available_gpr; + std::set available_vec; + auto reg_type = snippets::RegType::gpr; + auto convert = [®_type](size_t i) -> snippets::Reg { + return {reg_type, i}; + }; + std::transform(aux_gpr_idxs.begin(), + aux_gpr_idxs.end(), + std::inserter(available_gpr, available_gpr.begin()), + convert); + // Note: data_ptr regs are globally live, so it makes no sense to keep them in the pool + for (auto idx : data_ptr_regs_idx) + available_gpr.erase({snippets::RegType::gpr, idx}); + reg_type = snippets::RegType::vec; + std::transform(aux_vec_idxs.begin(), + aux_vec_idxs.end(), + std::inserter(available_vec, available_vec.begin()), + convert); + auto data_ptr_regs = utils::transform_idxs_to_regs(data_ptr_regs_idx); - init_data_pointers(data_ptr_regs); + auto get_expected_reg_types = + [](const std::shared_ptr& emitter) -> std::pair { + switch (emitter->get_in_out_type()) { + case emitter_in_out_map::gpr_to_vec: + return {snippets::RegType::gpr, snippets::RegType::vec}; + case emitter_in_out_map::gpr_to_gpr: + return {snippets::RegType::gpr, snippets::RegType::gpr}; + case emitter_in_out_map::vec_to_gpr: + return {snippets::RegType::vec, 
snippets::RegType::gpr}; + case emitter_in_out_map::vec_to_vec: + return {snippets::RegType::vec, snippets::RegType::vec}; + default: + OV_CPU_JIT_EMITTER_THROW("Unsupported emitter_in_out_map instance"); + } + }; + std::vector aux_tmp_regs{}; + if (!available_gpr.empty()) + aux_tmp_regs.emplace_back(available_gpr.begin()->idx); + init_data_pointers(utils::transform_idxs_to_regs(in), data_ptr_regs, aux_tmp_regs); for (const auto& expression : *body) { const auto reg_info = expression->get_reg_info(); - auto in_regs = utils::transform_snippets_regs_to_idxs(reg_info.first); - auto out_regs = utils::transform_snippets_regs_to_idxs(reg_info.second); - const auto& emitter = expression->get_emitter(); - emitter->emit_code(in_regs, out_regs, vec_regs_pool, gp_regs_pool); + const auto& emitter = std::dynamic_pointer_cast(expression->get_emitter()); + OV_CPU_JIT_EMITTER_ASSERT(emitter, "Unexpected emitter type"); + auto expected_in_type = snippets::RegType::undefined; + auto expected_out_type = snippets::RegType::undefined; + const auto& node = expression->get_node(); + // Note: A few operations are allowed to have mixed register types on their inputs (or outputs) => skip + // validation here + if (!ov::is_type(node) && !ov::is_type(node) && + !std::dynamic_pointer_cast(emitter)) + std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter); + // Note: live regs = regs live on input of the expression. 
We also need to exclude output regs from the pool + auto live_regs = expression->get_live_regs(); + for (auto r : reg_info.second) + live_regs.insert(r); + std::vector pool_gp_reg; + std::vector pool_vec_reg; + std::set_difference(available_gpr.begin(), + available_gpr.end(), + live_regs.begin(), + live_regs.end(), + std::back_inserter(pool_gp_reg)); + std::set_difference(available_vec.begin(), + available_vec.end(), + live_regs.begin(), + live_regs.end(), + std::back_inserter(pool_vec_reg)); + auto in_regs = snippets::utils::transform_snippets_regs_to_idxs(reg_info.first, expected_in_type); + auto out_regs = snippets::utils::transform_snippets_regs_to_idxs(reg_info.second, expected_out_type); + auto gpr_pool = snippets::utils::transform_snippets_regs_to_idxs(pool_gp_reg); + auto vec_pool = snippets::utils::transform_snippets_regs_to_idxs(pool_vec_reg); + emitter->emit_code(in_regs, out_regs, vec_pool, gpr_pool); } h->postamble(); @@ -138,25 +154,22 @@ void jit_kernel_emitter::emit_impl(const std::vector& in, const std::vec jit_kernel_static_emitter::jit_kernel_static_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) - : jit_kernel_emitter(h, isa, expr), - reg_indexes_idx(abi_param2.getIdx()) { + : jit_kernel_emitter(h, isa, expr) { const auto kernel = ov::as_type_ptr(expr->get_node()); - OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "expectes KernelStatic expression"); + OV_CPU_JIT_EMITTER_ASSERT(kernel != nullptr, "expects KernelStatic expression"); jcp = *reinterpret_cast(kernel->compile_params); master_shape = jcp.exec_domain; data_offsets = jcp.data_offsets; OV_CPU_JIT_EMITTER_ASSERT(data_offsets.size() == num_inputs + num_outputs, "Incompatible count of data offsets!"); OV_CPU_JIT_EMITTER_ASSERT(data_offsets.front().size() == master_shape.size(), "Incompatible rank of data offsets!"); - - // - Reserve abi_param1 and abi_param2, since they'll be used to pass runtime call args 
to kernel - // - However we can use reg_indexes_idx for non memory access operations - // since we won't need them after offsets calculation - init_body_regs({reg_indexes_idx, reg_runtime_params_idx}, {}, {reg_indexes_idx}); } -void jit_kernel_static_emitter::init_data_pointers(const std::vector& data_ptr_regs) const { - Xbyak::Reg64 reg_indexes = Xbyak::Reg64(static_cast(reg_indexes_idx)); - Xbyak::Reg64 reg_runtime_params = Xbyak::Reg64(static_cast(reg_runtime_params_idx)); +void jit_kernel_static_emitter::init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs, + const std::vector& aux_gprs) const { + OV_CPU_JIT_EMITTER_ASSERT(arg_regs.size() == 2, "Invalid arg regs size"); + Xbyak::Reg64 reg_runtime_params = arg_regs[0]; + Xbyak::Reg64 reg_indexes = arg_regs[1]; const auto num_params = num_inputs + num_outputs; // Note that we don't need offset for the last dim, since it's handled directly by Tile emitter @@ -173,12 +186,8 @@ void jit_kernel_static_emitter::init_data_pointers(const std::vector(*spare_corruptable_gpr)); + const bool last_iter_explicitly = aux_gprs.empty(); + Reg64 reg_tmp = last_iter_explicitly ? data_ptr_regs[num_params - 1] : *aux_gprs.begin(); // Vector "data_ptr_regs" is sorted by abstract regs. 
// It means that the vector contains the physical registers in order [src, .., src, dst, .., dst, buffer] // So we can initialize buffer register firstly as last value of vector "data_ptr_regs" @@ -212,18 +221,15 @@ jit_kernel_dynamic_emitter::jit_kernel_dynamic_emitter(dnnl::impl::cpu::x64::jit dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) : jit_kernel_emitter(h, isa, expr) { - const auto kernel = ov::as_type_ptr(expr->get_node()); - OV_CPU_JIT_EMITTER_ASSERT(kernel, "expectes KernelDynamic expression"); - - // - Reserve abi_param1, since it wll be used to pass runtime call args to all dynamic emitters that needs runtime - // args - // - We cannot assign this register to the body emitters since runtime params MUST be valid during whole execution - // for all dynamic emitters - init_body_regs({reg_runtime_params_idx}); + OV_CPU_JIT_EMITTER_ASSERT(ov::is_type(expr->get_node()), + "expects KernelDynamic expression"); } -void jit_kernel_dynamic_emitter::init_data_pointers(const std::vector& data_ptr_regs) const { - Xbyak::Reg64 reg_runtime_params = Xbyak::Reg64(static_cast(reg_runtime_params_idx)); +void jit_kernel_dynamic_emitter::init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs, + const std::vector& aux_gprs) const { + OV_CPU_JIT_EMITTER_ASSERT(arg_regs.size() == 1, "Invalid arg regs size"); + Xbyak::Reg64 reg_runtime_params = arg_regs[0]; const auto num_params = num_inputs + num_outputs; for (size_t i = 0; i < num_unique_buffers; ++i) { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp index 68ea6684cbcd17..995dad15c734d1 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.hpp @@ -5,7 +5,6 @@ #pragma once #include "emitters/plugin/x64/jit_emitter.hpp" -#include 
"emitters/snippets/jit_container_emitter.hpp" #include "emitters/snippets/jit_snippets_call_args.hpp" namespace ov { @@ -29,7 +28,7 @@ namespace intel_cpu { /// Note that Kernel doesn't accept any input arguments. /// -class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { +class jit_kernel_emitter : public jit_emitter { public: jit_kernel_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, @@ -40,40 +39,25 @@ class jit_kernel_emitter : public jit_emitter, public jit_container_emitter { } void emit_code(const std::vector& in_idxs, const std::vector& out_idxs, - const std::vector& pool_vec_idxs = {}, - const std::vector& pool_gpr_idxs = {}) const override; + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const override; protected: void validate_arguments(const std::vector& in, const std::vector& out) const override; - void init_body_regs(const std::set& kernel_regs, - const std::vector& pool_vec_idxs = {}, - const std::vector& pool_gpr_idxs = {}); - /** - * @brief populates physical registers pools for x86 (both vec and gp). - * Skips stack-related gprs and extra gprs passed as arguments. 
- * @arg gpr_blacklist - set of gp registers that should not be added to register pool - * @arg vec_blacklist - set of vec registers should not be added to register pool - */ - void init_reg_pools(const std::set& gpr_blacklist, const std::set& vec_blacklist); - - virtual void init_data_pointers(const std::vector& data_ptr_regs) const = 0; + + virtual void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs, + const std::vector& aux_gprs) const = 0; void emit_impl(const std::vector& in, const std::vector& out) const override; jit_snippets_compile_args jcp; // gpr's used to store data pointers, track them to apply offsets in Kernel std::vector data_ptr_regs_idx; - std::vector vec_regs_pool; - std::vector gp_regs_pool; size_t num_inputs = 0; size_t num_outputs = 0; size_t num_unique_buffers = 0; - snippets::lowered::LinearIR::container mem_access_exprs; - snippets::lowered::LinearIR::container general_exprs; - - const size_t reg_runtime_params_idx{0}; - std::shared_ptr body; #ifdef SNIPPETS_DEBUG_CAPS @@ -86,11 +70,15 @@ class jit_kernel_static_emitter : public jit_kernel_emitter { jit_kernel_static_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); + size_t get_inputs_num() const override { + return 2; + } private: - void init_data_pointers(const std::vector& data_ptr_regs) const override; + void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs, + const std::vector& aux_gprs) const override; - const size_t reg_indexes_idx{1}; std::vector master_shape; std::vector> data_offsets; @@ -104,9 +92,14 @@ class jit_kernel_dynamic_emitter : public jit_kernel_emitter { jit_kernel_dynamic_emitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); + size_t get_inputs_num() const override { + return 1; + } private: - void init_data_pointers(const 
std::vector& data_ptr_regs) const override; + void init_data_pointers(const std::vector& arg_regs, + const std::vector& data_ptr_regs, + const std::vector& aux_gprs) const override; #ifdef SNIPPETS_DEBUG_CAPS friend std::string init_info_jit_kernel_dynamic_emitter(const jit_kernel_dynamic_emitter* emitter); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_chrono_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_chrono_emitters.cpp index 7691114b663dec..a302e99367d8d5 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_chrono_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_perf_count_chrono_emitters.cpp @@ -41,7 +41,7 @@ void jit_perf_count_chrono_start_emitter::emit_impl(const std::vector& i h->mov(h->rax, reinterpret_cast(set_start_time_overload)); h->mov(abi_param1, reinterpret_cast(m_start_node.get())); - spill.rsp_align(); + spill.rsp_align(h->rbx.getIdx()); h->call(h->rax); spill.rsp_restore(); @@ -74,7 +74,7 @@ void jit_perf_count_chrono_end_emitter::emit_impl(const std::vector& in_ h->mov(h->rax, reinterpret_cast(set_accumulated_time_overload)); h->mov(abi_param1, reinterpret_cast(m_end_node.get())); - spill.rsp_align(); + spill.rsp_align(h->rbx.getIdx()); h->call(h->rax); spill.rsp_restore(); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.cpp new file mode 100644 index 00000000000000..324bb5205df3bf --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.cpp @@ -0,0 +1,87 @@ +// Copyright (C) 2020-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "jit_reg_spill_emitters.hpp" + +#include "emitters/plugin/x64/utils.hpp" + +using namespace Xbyak; +using namespace dnnl::impl; +using namespace dnnl::impl::cpu::x64; + +namespace ov { +namespace intel_cpu { + +/* ================== 
jit_reg_spill_begin_emitters ====================== */ + +jit_reg_spill_begin_emitter::jit_reg_spill_begin_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr) + : jit_emitter(h, isa) { + const auto& reg_spill_node = ov::as_type_ptr(expr->get_node()); + OV_CPU_JIT_EMITTER_ASSERT(reg_spill_node, "expects RegSpillBegin expression"); + const auto& rinfo = expr->get_reg_info(); + m_regs_to_spill = std::set(rinfo.second.begin(), rinfo.second.end()); + m_abi_reg_spiller = std::make_shared(h); + in_out_type_ = emitter_in_out_map::gpr_to_gpr; +} + +void jit_reg_spill_begin_emitter::validate_arguments(const std::vector& in, + const std::vector& out) const { + OV_CPU_JIT_EMITTER_ASSERT(in.empty(), "In regs should be empty for reg_spill_begin emitter"); + OV_CPU_JIT_EMITTER_ASSERT(out.size() == m_regs_to_spill.size(), + "Invalid number of out regs for reg_spill_begin emitter"); +} + +void jit_reg_spill_begin_emitter::emit_code(const std::vector& in, + const std::vector& out, + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const { + validate_arguments(in, out); + emit_impl(in, out); +} + +void jit_reg_spill_begin_emitter::emit_impl(const std::vector& in, const std::vector& out) const { + m_abi_reg_spiller->preamble(m_regs_to_spill); +} + +/* ============================================================== */ + +/* ================== jit_reg_spill_end_emitter ====================== */ + +jit_reg_spill_end_emitter::jit_reg_spill_end_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr) + : jit_emitter(h, isa) { + in_out_type_ = emitter_in_out_map::gpr_to_gpr; + OV_CPU_JIT_EMITTER_ASSERT(ov::is_type(expr->get_node()) && expr->get_input_count() > 0, + "Invalid expression in RegSpillEnd emitter"); + const auto& parent_expr = expr->get_input_port_connector(0)->get_source().get_expr(); + const 
auto& reg_spill_begin_emitter = + std::dynamic_pointer_cast(parent_expr->get_emitter()); + OV_CPU_JIT_EMITTER_ASSERT(reg_spill_begin_emitter, "Failed to obtain reg_spill_begin emitter"); + m_abi_reg_spiller = reg_spill_begin_emitter->m_abi_reg_spiller; +} + +void jit_reg_spill_end_emitter::validate_arguments(const std::vector& in, + const std::vector& out) const { + OV_CPU_JIT_EMITTER_ASSERT(out.empty(), "Out regs should be empty for reg_spill_end emitter"); + OV_CPU_JIT_EMITTER_ASSERT(in.size() == m_abi_reg_spiller->get_num_spilled_regs(), + "Invalid number of in regs for reg_spill_end emitter"); +} + +void jit_reg_spill_end_emitter::emit_code(const std::vector& in, + const std::vector& out, + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const { + validate_arguments(in, out); + emit_impl(in, out); +} + +void jit_reg_spill_end_emitter::emit_impl(const std::vector& in, const std::vector& out) const { + m_abi_reg_spiller->postamble(); +} + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.hpp new file mode 100644 index 00000000000000..d5fcbc35120d73 --- /dev/null +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_reg_spill_emitters.hpp @@ -0,0 +1,71 @@ +// Copyright (C) 2020-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "emitters/plugin/x64/jit_emitter.hpp" + +namespace ov { +namespace intel_cpu { + +/* ================== jit_reg_spill_begin_emitters ====================== */ +class EmitABIRegSpills; +class jit_reg_spill_end_emitter; +class jit_reg_spill_begin_emitter : public jit_emitter { + friend jit_reg_spill_end_emitter; + +public: + jit_reg_spill_begin_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr); + + size_t get_inputs_num() const 
override { + return 0; + } + + void emit_code(const std::vector& in_idxs, + const std::vector& out_idxs, + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const override; + +protected: + void validate_arguments(const std::vector& in, const std::vector& out) const override; + void emit_impl(const std::vector& in, const std::vector& out) const override; + std::set m_regs_to_spill; + std::shared_ptr m_abi_reg_spiller; +}; + +/* ============================================================== */ + +/* ================== jit_reg_spill_end_emitter ====================== */ + +class jit_reg_spill_end_emitter : public jit_emitter { +public: + jit_reg_spill_end_emitter(dnnl::impl::cpu::x64::jit_generator* h, + dnnl::impl::cpu::x64::cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr); + + size_t get_inputs_num() const override { + return 0; + } + + void emit_code(const std::vector& in_idxs, + const std::vector& out_idxs, + const std::vector& pool_vec_idxs, + const std::vector& pool_gpr_idxs) const override; + +protected: + void validate_arguments(const std::vector& in, const std::vector& out) const override; + void emit_impl(const std::vector& in, const std::vector& out) const override; + + size_t aux_gprs_count() const override { + return 0; + } + std::shared_ptr m_abi_reg_spiller; +}; + +/* ============================================================== */ + +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp index c513e969144d1c..e9744ae5098c5f 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp @@ -59,7 +59,7 @@ void jit_uni_segfault_detector_emitter::save_target_emitter() const { h->mov(h->rax, 
reinterpret_cast(set_local_handler_overload)); h->mov(abi_param1, reinterpret_cast(this)); - spill.rsp_align(); + spill.rsp_align(h->rbx.getIdx()); h->call(h->rax); spill.rsp_restore(); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp index 7aca5f6c6a696f..6d4fdf738ab355 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp @@ -284,7 +284,7 @@ void BrgemmCopyBKernel::emit_brgemm_copy_b_kernel_call(size_t N, mov(abi_param6, K); #endif - spill.rsp_align(); + spill.rsp_align(rbx.getIdx()); call(rbp); spill.rsp_restore(); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.cpp index 0604792fc22573..ce5096211cc945 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.cpp @@ -44,10 +44,10 @@ size_t get_buffer_cluster_id(const ov::snippets::lowered::ExpressionPort& port) } Xbyak::Reg64 get_aux_gpr(const std::vector& used_gpr_idxs) { - // RSP, RBP - stack-related registers, abi_param1 - runtime parameter register in the kernel + // RSP - stack pointer should be preserved, abi_param1 and abi_param2 - runtime parameter register in the kernel static std::unordered_set blacklist_gpr_idxs = {Xbyak::Operand::RSP, - Xbyak::Operand::RBP, - static_cast(abi_param1.getIdx())}; + static_cast(abi_param1.getIdx()), + static_cast(abi_param2.getIdx())}; for (size_t gpr_idx = 0; gpr_idx <= Xbyak::Operand::R15; ++gpr_idx) { size_t _idx = Xbyak::Operand::R15 - gpr_idx; // we allocate from the end if (std::find(used_gpr_idxs.cbegin(), used_gpr_idxs.cend(), _idx) != used_gpr_idxs.cend()) @@ -59,6 +59,17 @@ Xbyak::Reg64 get_aux_gpr(const std::vector& used_gpr_idxs) { 
OV_CPU_JIT_EMITTER_THROW("Failed to allocate aux GPR"); } +Xbyak::Reg64 init_memory_access_aux_gpr(const std::vector& used_gpr_reg_idxs, + const std::vector& aux_gpr_idxs, + std::set& regs_to_spill) { + if (!aux_gpr_idxs.empty()) { + return Xbyak::Reg64(static_cast(aux_gpr_idxs[0])); + } + const auto aux_reg = ov::intel_cpu::utils::get_aux_gpr(used_gpr_reg_idxs); + regs_to_spill.emplace(snippets::RegType::gpr, aux_reg.getIdx()); + return aux_reg; +} + void push_ptr_with_runtime_offset_on_stack(dnnl::impl::cpu::x64::jit_generator* h, size_t stack_offset, Xbyak::Reg64 ptr_reg, diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.hpp index 3d8026ea33c750..be9c470129ed6a 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/utils.hpp @@ -19,14 +19,6 @@ inline static std::vector transform_idxs_to_regs(const std::vector return regs; } -inline static std::vector transform_snippets_regs_to_idxs(const std::vector& regs) { - std::vector idxs(regs.size()); - std::transform(regs.cbegin(), regs.cend(), idxs.begin(), [](const snippets::Reg& reg) { - return reg.idx; - }); - return idxs; -} - /** * @brief If the passed `port` is connected to a Buffer, return its cluster ID. * Otherwise returns SIZE_MAX @@ -36,12 +28,23 @@ inline static std::vector transform_snippets_regs_to_idxs(const std::vec size_t get_buffer_cluster_id(const ov::snippets::lowered::ExpressionPort& port); /** - * @brief Find the available register from the pool excepting: abi_param1, RSP, RBP and `used_gpr_idxs` + * @brief Find the available register from the pool excepting: abi_param1, abi_param2, RSP and `used_gpr_idxs` * @param used_gpr_idxs current used gpr register indexes * @return register */ Xbyak::Reg64 get_aux_gpr(const std::vector& used_gpr_idxs); +/** + * @brief Returns aux gpr register for dynamic memory access emitters. 
Returns a register from `aux_gpr_idxs`. + * If it's empty, then choose a register that is not in `mem_ptr_reg_idxs` and add it to `regs_to_spill`. + * @param mem_ptr_reg_idxs register indexes reserved to store memory pointers in this emitter + * @param aux_gpr_idxs pool of available gp register indexes + * @param regs_to_spill set of live registers to be spilled before ABI call + */ +Xbyak::Reg64 init_memory_access_aux_gpr(const std::vector& used_gpr_reg_idxs, + const std::vector& aux_gpr_idxs, + std::set& regs_to_spill); + /** * @brief Push data pointer on stack adding offset. The offset is taken from runtime params `abi_param1` * @param h generator diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp index 9ac7f0d5cd0ffc..4338000d6982fa 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/verbose.cpp @@ -107,13 +107,9 @@ std::string init_info_jit_kernel_static_emitter(const jit_kernel_static_emitter* std::stringstream ss; ss << "Emitter_type_name:jit_kernel_static_emitter" << " jcp.exec_domain:" << vector_to_string(emitter->jcp.exec_domain) - << " gp_regs_pool:" << vector_to_string(emitter->gp_regs_pool) << " master_shape:" << vector_to_string(emitter->master_shape) << " num_inputs:" << emitter->num_inputs << " num_outputs:" << emitter->num_outputs << " num_unique_buffers:" << emitter->num_unique_buffers - << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx) - << " vec_regs_pool:" << vector_to_string(emitter->vec_regs_pool) - << " reg_indexes_idx:" << emitter->reg_indexes_idx - << " reg_runtime_params_idx:" << emitter->reg_runtime_params_idx; + << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx); for (size_t i = 0; i < emitter->data_offsets.size(); ++i) ss << " data_offsets for " << i << " is:" << vector_to_string(emitter->data_offsets[i]); return ss.str(); @@ -122,11 
+118,9 @@ std::string init_info_jit_kernel_static_emitter(const jit_kernel_static_emitter* std::string init_info_jit_kernel_dynamic_emitter(const jit_kernel_dynamic_emitter* emitter) { std::stringstream ss; ss << "Emitter_type_name:jit_kernel_dynamic_emitter" - << " gp_regs_pool:" << vector_to_string(emitter->gp_regs_pool) << " num_inputs:" << emitter->num_inputs - << " num_outputs:" << emitter->num_outputs << " num_unique_buffers:" << emitter->num_unique_buffers - << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx) - << " vec_regs_pool:" << vector_to_string(emitter->vec_regs_pool) - << " reg_runtime_params_idx:" << emitter->reg_runtime_params_idx; + << " num_inputs:" << emitter->num_inputs << " num_outputs:" << emitter->num_outputs + << " num_unique_buffers:" << emitter->num_unique_buffers + << " data_ptr_regs_idx:" << vector_to_string(emitter->data_ptr_regs_idx); return ss.str(); } diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.hpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.hpp index b8e13ab10b5f3a..129f8fa579ce9a 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.hpp @@ -14,20 +14,25 @@ class BrgemmTppEmitter : public TppEmitter { dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); - size_t get_inputs_num() const override { return 2; } - static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + size_t get_inputs_num() const override { + return 2; + } + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); - static void execute_brgemm_kernel(libxsmm_gemmfunction brgemm_kernel, void *in0, void *in1, void *out0); + static void execute_brgemm_kernel(libxsmm_gemmfunction brgemm_kernel, void* in0, void* in1, void* out0); - const uintptr_t get_execute_function_ptr() const override { return 
reinterpret_cast(execute_brgemm_kernel); } + const uintptr_t get_execute_function_ptr() const override { + return reinterpret_cast(execute_brgemm_kernel); + } const uintptr_t get_compiled_kernel_ptr() const override; protected: - void validate_arguments(const std::vector &in, const std::vector &out) const override; + void validate_arguments(const std::vector& in, const std::vector& out) const override; static void validate_subtensors(const VectorDims& in_0, const VectorDims& in_1, const VectorDims& out_0); libxsmm_gemm_shape m_shape; - libxsmm_bitfield m_prefetching_flags {0}; + libxsmm_bitfield m_prefetching_flags{0}; }; -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.cpp index 8809bf96a12349..82467596b5d6af 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.cpp @@ -3,6 +3,7 @@ // #include "jit_eltwise_emitters.hpp" + #include "transformations/tpp/x64/op/eltwise.hpp" namespace ov { @@ -11,8 +12,8 @@ using jit_generator = dnnl::impl::cpu::x64::jit_generator; using cpu_isa_t = dnnl::impl::cpu::x64::cpu_isa_t; using ExpressionPtr = ov::snippets::lowered::ExpressionPtr; -BinaryEltwiseTppEmitter::BinaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : - TppEmitter(h, isa, expr) { +BinaryEltwiseTppEmitter::BinaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) + : TppEmitter(h, isa, expr) { const auto& subtensor_in0 = get_projected_subtensor(io_port_descriptors[0]); const auto& subtensor_in1 = get_projected_subtensor(io_port_descriptors[1]); @@ -23,8 +24,10 @@ BinaryEltwiseTppEmitter::BinaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa const auto N = std::max(N_in0, N_in1); const auto M = std::max(M_in0, M_in1); - 
OV_CPU_JIT_EMITTER_ASSERT(std::min(N_in0, N_in1) == N || std::min(N_in0, N_in1) == 1, "Invalid subtensor broadcasting: N"); - OV_CPU_JIT_EMITTER_ASSERT(std::min(M_in0, M_in1) == M || std::min(M_in0, M_in1) == 1, "Invalid subtensor broadcasting: M"); + OV_CPU_JIT_EMITTER_ASSERT(std::min(N_in0, N_in1) == N || std::min(N_in0, N_in1) == 1, + "Invalid subtensor broadcasting: N"); + OV_CPU_JIT_EMITTER_ASSERT(std::min(M_in0, M_in1) == M || std::min(M_in0, M_in1) == 1, + "Invalid subtensor broadcasting: M"); const auto& binary_eltw_tpp = std::dynamic_pointer_cast(expr->get_node()); OV_CPU_JIT_EMITTER_ASSERT(binary_eltw_tpp, "Invalid TPP node type detected"); @@ -32,29 +35,38 @@ BinaryEltwiseTppEmitter::BinaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa m_op_type = desc; m_compile_flags = desc.get_flags(); // Note: libxsmm implies column-major layout, so we have to swap M and N here - m_shape = libxsmm_create_meltw_binary_shape(N, M, - io_strides[0], io_strides[1], io_strides[2], - io_dtypes[0], io_dtypes[1], io_dtypes[2], + m_shape = libxsmm_create_meltw_binary_shape(N, + M, + io_strides[0], + io_strides[1], + io_strides[2], + io_dtypes[0], + io_dtypes[1], + io_dtypes[2], exec_dtype); } const uintptr_t BinaryEltwiseTppEmitter::get_compiled_kernel_ptr() const { - // Note: libxsmm hides memory management from the user, so we don't have to store pointer to compiled kernel to keep it alive. - // libxsmm will keep the pointer alive until the end of program execution (it doesn't matter whether we save the pointer in the emitter or not) + // Note: libxsmm hides memory management from the user, so we don't have to store pointer to compiled kernel to keep + // it alive. 
libxsmm will keep the pointer alive until the end of program execution (it doesn't matter whether we + // save the pointer in the emitter or not) return COMPILE_TPP_KERNEL(libxsmm_dispatch_meltw_binary(m_op_type, m_shape, m_compile_flags)); } -std::set> BinaryEltwiseTppEmitter::get_supported_precisions(const std::shared_ptr& node) { +std::set> BinaryEltwiseTppEmitter::get_supported_precisions( + const std::shared_ptr& node) { return {{element::f32, element::f32}}; } - -void BinaryEltwiseTppEmitter::validate_arguments(const std::vector &in, const std::vector &out) const { +void BinaryEltwiseTppEmitter::validate_arguments(const std::vector& in, const std::vector& out) const { OV_CPU_JIT_EMITTER_ASSERT(in.size() == 2, "Expects 2 input registers, got " + std::to_string(in.size())); OV_CPU_JIT_EMITTER_ASSERT(out.size() == 1, "Expects 1 output register, got " + std::to_string(out.size())); } -void BinaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_binary eltwise_kernel, void *in0, void *in1, void *out0) { +void BinaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_binary eltwise_kernel, + void* in0, + void* in1, + void* out0) { libxsmm_meltw_binary_param param; param.op.primary = nullptr; param.in0.primary = in0; @@ -63,8 +75,8 @@ void BinaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_binary eltwis eltwise_kernel(¶m); } -UnaryEltwiseTppEmitter::UnaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : - TppEmitter(h, isa, expr) { +UnaryEltwiseTppEmitter::UnaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) + : TppEmitter(h, isa, expr) { const auto& subtensor_in0 = get_projected_subtensor(io_port_descriptors[0]); const auto N = static_cast(*subtensor_in0.rbegin()); @@ -76,13 +88,11 @@ UnaryEltwiseTppEmitter::UnaryEltwiseTppEmitter(jit_generator* h, cpu_isa_t isa, m_op_type = desc; m_compile_flags = desc.get_flags(); // Note: libxsmm implies column-major layout, so we have to swap M and 
N here - m_shape = libxsmm_create_meltw_unary_shape(N, M, - io_strides[0], io_strides[1], - io_dtypes[0], io_dtypes[1], - exec_dtype); + m_shape = + libxsmm_create_meltw_unary_shape(N, M, io_strides[0], io_strides[1], io_dtypes[0], io_dtypes[1], exec_dtype); } -void UnaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_unary eltwise_kernel, void *in0, void *out0) { +void UnaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_unary eltwise_kernel, void* in0, void* out0) { libxsmm_meltw_unary_param param; param.op.primary = nullptr; param.in.primary = in0; @@ -90,17 +100,18 @@ void UnaryEltwiseTppEmitter::execute_kernel(libxsmm_meltwfunction_unary eltwise_ eltwise_kernel(¶m); } -std::set> UnaryEltwiseTppEmitter::get_supported_precisions(const std::shared_ptr& node) { +std::set> UnaryEltwiseTppEmitter::get_supported_precisions( + const std::shared_ptr& node) { return {{element::f32}}; } -void UnaryEltwiseTppEmitter::validate_arguments(const std::vector &in, const std::vector &out) const { +void UnaryEltwiseTppEmitter::validate_arguments(const std::vector& in, const std::vector& out) const { OV_CPU_JIT_EMITTER_ASSERT(in.size() == 1, "Expects 1 input registers, got " + std::to_string(in.size())); OV_CPU_JIT_EMITTER_ASSERT(out.size() == 1, "Expects 1 output register, got " + std::to_string(out.size())); } -ReduceTppEmitter::ReduceTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : - UnaryEltwiseTppEmitter(h, isa, expr) { +ReduceTppEmitter::ReduceTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) + : UnaryEltwiseTppEmitter(h, isa, expr) { m_compile_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_ROWS; // No need to set ldo for reduce, it is always assumed = 1 inside the kernel // m_shape.ldo = 1; diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.hpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.hpp index 618dacf2353d9b..98d8481ef64a15 100644 --- 
a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_eltwise_emitters.hpp @@ -13,38 +13,46 @@ class BinaryEltwiseTppEmitter : public TppEmitter { BinaryEltwiseTppEmitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); - size_t get_inputs_num() const override { return 2; } - static void execute_kernel(libxsmm_meltwfunction_binary eltwise_kernel, void *in0, void *in1, void *out0); - const uintptr_t get_execute_function_ptr() const override { return reinterpret_cast(execute_kernel); } + size_t get_inputs_num() const override { + return 2; + } + static void execute_kernel(libxsmm_meltwfunction_binary eltwise_kernel, void* in0, void* in1, void* out0); + const uintptr_t get_execute_function_ptr() const override { + return reinterpret_cast(execute_kernel); + } const uintptr_t get_compiled_kernel_ptr() const override; - static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); protected: libxsmm_meltw_binary_shape m_shape; libxsmm_meltw_binary_type m_op_type; - void validate_arguments(const std::vector &in, const std::vector &out) const override; + void validate_arguments(const std::vector& in, const std::vector& out) const override; }; class UnaryEltwiseTppEmitter : public TppEmitter { public: UnaryEltwiseTppEmitter(dnnl::impl::cpu::x64::jit_generator* h, - dnnl::impl::cpu::x64::cpu_isa_t isa, - const ov::snippets::lowered::ExpressionPtr& expr); - size_t get_inputs_num() const override { return 1; } + dnnl::impl::cpu::x64::cpu_isa_t isa, + const ov::snippets::lowered::ExpressionPtr& expr); + size_t get_inputs_num() const override { + return 1; + } - static void execute_kernel(libxsmm_meltwfunction_unary eltwise_kernel, void *in0, void *out0); + static void execute_kernel(libxsmm_meltwfunction_unary eltwise_kernel, 
void* in0, void* out0); const uintptr_t get_compiled_kernel_ptr() const override { - return COMPILE_TPP_KERNEL(libxsmm_dispatch_meltw_unary(m_op_type, - m_shape, - m_compile_flags)); + return COMPILE_TPP_KERNEL(libxsmm_dispatch_meltw_unary(m_op_type, m_shape, m_compile_flags)); + } + const uintptr_t get_execute_function_ptr() const override { + return reinterpret_cast(execute_kernel); } - const uintptr_t get_execute_function_ptr() const override { return reinterpret_cast(execute_kernel); } - static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); protected: libxsmm_meltw_unary_shape m_shape; libxsmm_meltw_unary_type m_op_type; - void validate_arguments(const std::vector &in, const std::vector &out) const override; + void validate_arguments(const std::vector& in, const std::vector& out) const override; }; class ReduceTppEmitter : public UnaryEltwiseTppEmitter { @@ -61,10 +69,10 @@ class ReferenceUnaryEltwiseTppEmitter : public UnaryEltwiseTppEmitter { ReferenceUnaryEltwiseTppEmitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr, - executor_function executor) : - UnaryEltwiseTppEmitter(h, isa, expr), executor(std::move(executor)) { - } - static void execute_unary_eltw_kernel(ReferenceUnaryEltwiseTppEmitter* ref_emitter, void *in0, void *out0) { + executor_function executor) + : UnaryEltwiseTppEmitter(h, isa, expr), + executor(std::move(executor)) {} + static void execute_unary_eltw_kernel(ReferenceUnaryEltwiseTppEmitter* ref_emitter, void* in0, void* out0) { assert(ref_emitter); // Note: we can instantiate template with different precision combinations here, if we need to ref_emitter->evaluate_reference_impl(reinterpret_cast(in0), reinterpret_cast(out0)); @@ -79,30 +87,31 @@ class ReferenceUnaryEltwiseTppEmitter : public UnaryEltwiseTppEmitter { private: executor_function 
executor{nullptr}; - template::value || !std::is_same::value, bool>::type = true> + template < + class Tin, + class Tout, + typename std::enable_if::value || !std::is_same::value, bool>::type = true> void evaluate_reference_impl(Tin* in0, Tout* out0) { for (int n = 0; n < m_shape.n; n++) { - auto in0_row = in0; - auto out0_row = out0; - for (int m = 0; m < m_shape.m; m++) - out0_row[m] = static_cast(executor(static_cast(in0_row[m]))); - in0 += m_shape.ldi; - out0 += m_shape.ldo; + auto in0_row = in0; + auto out0_row = out0; + for (int m = 0; m < m_shape.m; m++) + out0_row[m] = static_cast(executor(static_cast(in0_row[m]))); + in0 += m_shape.ldi; + out0 += m_shape.ldo; } } void evaluate_reference_impl(float* in0, float* out0) { for (int n = 0; n < m_shape.n; n++) { - auto in0_row = in0; - auto out0_row = out0; - for (int m = 0; m < m_shape.m; m++) - out0_row[m] = executor(in0_row[m]); - in0 += m_shape.ldi; - out0 += m_shape.ldo; + auto in0_row = in0; + auto out0_row = out0; + for (int m = 0; m < m_shape.m; m++) + out0_row[m] = executor(in0_row[m]); + in0 += m_shape.ldi; + out0 += m_shape.ldo; } } }; - -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.cpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.cpp index 1efa9d850e31de..365acc366840e0 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.cpp @@ -3,8 +3,9 @@ // #include "jit_equation_emitter.hpp" -#include "transformations/tpp/x64/op/equation.hpp" + #include "emitters/plugin/x64/utils.hpp" +#include "transformations/tpp/x64/op/equation.hpp" using namespace Xbyak; using namespace dnnl::impl; @@ -16,11 +17,12 @@ using jit_generator = dnnl::impl::cpu::x64::jit_generator; using cpu_isa_t = dnnl::impl::cpu::x64::cpu_isa_t; using ExpressionPtr = ov::snippets::lowered::ExpressionPtr; 
-EquationTppEmitter::EquationTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : - TppEmitter(h, isa, expr), m_num_inputs(expr->get_input_count()) { +EquationTppEmitter::EquationTppEmitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) + : TppEmitter(h, isa, expr), + m_num_inputs(expr->get_input_count()) { const auto& eq_tpp = ov::as_type_ptr(expr->get_node()); OV_CPU_JIT_EMITTER_ASSERT(eq_tpp, "Invalid TPP node type detected"); - auto get_MN = [this](int arg_idx){ + auto get_MN = [this](int arg_idx) { const auto& subtensor = get_projected_subtensor(io_port_descriptors[arg_idx]); OV_CPU_JIT_EMITTER_ASSERT(subtensor.size() == 2, "TPP supports only 2D subtensors"); return std::make_pair(static_cast(*++subtensor.rbegin()), @@ -29,39 +31,42 @@ EquationTppEmitter::EquationTppEmitter(jit_generator* h, cpu_isa_t isa, const Ex m_equation_id = libxsmm_meqn_create(); const auto op_metadata = libxsmm_create_meqn_op_metadata(m_equation_id, -1); - const auto sing_attr = libxsmm_create_matrix_arg_attributes(LIBXSMM_MATRIX_ARG_TYPE_SINGULAR, LIBXSMM_MATRIX_ARG_SET_TYPE_NONE, 0, 0); + const auto sing_attr = + libxsmm_create_matrix_arg_attributes(LIBXSMM_MATRIX_ARG_TYPE_SINGULAR, LIBXSMM_MATRIX_ARG_SET_TYPE_NONE, 0, 0); libxsmm_blasint M, N; for (const auto& op_desc : eq_tpp->get_op_descs()) { switch (op_desc.get_arity()) { - case tpp::op::OpDescTPP::ARITY::BINARY: { - auto flags = op_desc.get_flags(); - libxsmm_meqn_push_back_binary_op(op_metadata, op_desc, exec_dtype, flags); - break; - } case tpp::op::OpDescTPP::ARITY::UNARY: { - libxsmm_meqn_push_back_unary_op(op_metadata, op_desc, exec_dtype, LIBXSMM_MELTW_FLAG_UNARY_NONE); - break; - } case tpp::op::OpDescTPP::ARITY::ZERO: { - const auto arg_idx = static_cast(op_desc); - std::tie(M, N) = get_MN(arg_idx); - auto metadata = libxsmm_create_meqn_arg_metadata(m_equation_id, arg_idx); - auto shape = libxsmm_create_meqn_arg_shape(N, M, - static_cast(io_strides[arg_idx]), - io_dtypes[arg_idx]); - 
OV_CPU_JIT_EMITTER_ASSERT(libxsmm_meqn_push_back_arg(metadata, shape, sing_attr) == 0, - "Failed to push back arg to tpp equation"); - break; - } - default: - OV_CPU_JIT_EMITTER_THROW("Unhandled tpp::op::OpDescTPP::ARITY"); + case tpp::op::OpDescTPP::ARITY::BINARY: { + auto flags = op_desc.get_flags(); + libxsmm_meqn_push_back_binary_op(op_metadata, op_desc, exec_dtype, flags); + break; + } + case tpp::op::OpDescTPP::ARITY::UNARY: { + libxsmm_meqn_push_back_unary_op(op_metadata, op_desc, exec_dtype, LIBXSMM_MELTW_FLAG_UNARY_NONE); + break; + } + case tpp::op::OpDescTPP::ARITY::ZERO: { + const auto arg_idx = static_cast(op_desc); + std::tie(M, N) = get_MN(arg_idx); + auto metadata = libxsmm_create_meqn_arg_metadata(m_equation_id, arg_idx); + auto shape = libxsmm_create_meqn_arg_shape(N, + M, + static_cast(io_strides[arg_idx]), + io_dtypes[arg_idx]); + OV_CPU_JIT_EMITTER_ASSERT(libxsmm_meqn_push_back_arg(metadata, shape, sing_attr) == 0, + "Failed to push back arg to tpp equation"); + break; + } + default: + OV_CPU_JIT_EMITTER_THROW("Unhandled tpp::op::OpDescTPP::ARITY"); } } // Note: for debug purposes it might be useful to serialize the equations graph here // libxsmm_meqn_tree_print(m_equation_id); // libxsmm_meqn_rpn_print(m_equation_id); std::tie(M, N) = get_MN(static_cast(io_port_descriptors.size()) - 1); - m_out_shape = libxsmm_create_meqn_arg_shape(N, M, - static_cast(io_strides.back()), - io_dtypes.back()); + m_out_shape = + libxsmm_create_meqn_arg_shape(N, M, static_cast(io_strides.back()), io_dtypes.back()); } size_t EquationTppEmitter::get_inputs_num() const { @@ -72,8 +77,10 @@ const uintptr_t EquationTppEmitter::get_compiled_kernel_ptr() const { return COMPILE_TPP_KERNEL(libxsmm_dispatch_meqn(m_equation_id, m_out_shape)); } -std::set> EquationTppEmitter::get_supported_precisions(const std::shared_ptr& node) { - // Note: TPPs have build-in convert semantics, so the equations should support any input precision (specified when created) +std::set> 
EquationTppEmitter::get_supported_precisions( + const std::shared_ptr& node) { + // Note: TPPs have build-in convert semantics, so the equations should support any input precision (specified when + // created) OV_CPU_JIT_EMITTER_ASSERT(node && ov::is_type(node), "Invalid node ptr or type"); std::vector input_precs; for (const auto& in : node->inputs()) @@ -81,21 +88,23 @@ std::set> EquationTppEmitter::get_supported_precision return {input_precs}; } - -void EquationTppEmitter::validate_arguments(const std::vector &in, const std::vector &out) const { - OV_CPU_JIT_EMITTER_ASSERT(in.size() == m_num_inputs, "Expects " + std::to_string(m_num_inputs) + - " input registers, got " + std::to_string(in.size())); +void EquationTppEmitter::validate_arguments(const std::vector& in, const std::vector& out) const { + OV_CPU_JIT_EMITTER_ASSERT( + in.size() == m_num_inputs, + "Expects " + std::to_string(m_num_inputs) + " input registers, got " + std::to_string(in.size())); const auto num_outputs = num_kernel_args - m_num_inputs; - OV_CPU_JIT_EMITTER_ASSERT(out.size() == num_outputs, "Expects " + std::to_string(num_outputs) + - " output register, got " + std::to_string(out.size())); + OV_CPU_JIT_EMITTER_ASSERT( + out.size() == num_outputs, + "Expects " + std::to_string(num_outputs) + " output register, got " + std::to_string(out.size())); } void EquationTppEmitter::emit_impl(const std::vector& in, const std::vector& out) const { - EmitABIRegSpills spill(h); - spill.preamble(); + init_binary_call_regs(3, in, out); + const Xbyak::Reg64& aux_reg = get_call_address_reg(); + const Xbyak::Reg64& callee_saved_reg = get_callee_saved_reg(); - // save function address in gpr to pass in call instruction - h->mov(h->rbp, get_execute_function_ptr()); + EmitABIRegSpills spill(h); + spill.preamble(get_regs_to_spill()); // Reserve memory on the stack h->sub(h->rsp, num_kernel_args * sizeof(void*)); @@ -113,12 +122,13 @@ void EquationTppEmitter::emit_impl(const std::vector& in, const std::vec 
OV_CPU_JIT_EMITTER_ASSERT(compiled_kernel, "Failed to compile libxsmm_kernel"); // Pass arguments according to the execute signature + h->mov(aux_reg, get_execute_function_ptr()); h->mov(abi_param1, compiled_kernel); h->mov(abi_param2, num_kernel_args); h->mov(abi_param3, h->rsp); - spill.rsp_align(); - h->call(h->rbp); + spill.rsp_align(callee_saved_reg.getIdx()); + h->call(aux_reg); spill.rsp_restore(); // Free allocated memory on the stack @@ -126,14 +136,14 @@ void EquationTppEmitter::emit_impl(const std::vector& in, const std::vec spill.postamble(); } -void EquationTppEmitter::execute_kernel(libxsmm_meqn_function equation_kernel, int argc, void **argv) { +void EquationTppEmitter::execute_kernel(libxsmm_meqn_function equation_kernel, int argc, void** argv) { std::vector inputs(argc - 1); for (int i = 0; i < argc - 1; i++) inputs[i].primary = argv[i]; libxsmm_meqn_param param; param.ops_args = nullptr; param.inputs = inputs.data(); - param.output.primary = argv[argc-1]; + param.output.primary = argv[argc - 1]; equation_kernel(¶m); } diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.hpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.hpp index 8e606c3e9673b2..489ede7f689110 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_equation_emitter.hpp @@ -14,19 +14,21 @@ class EquationTppEmitter : public TppEmitter { dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); size_t get_inputs_num() const override; - static void execute_kernel(libxsmm_meqn_function equation_kernel, int argc, void **argv); - const uintptr_t get_execute_function_ptr() const override { return reinterpret_cast(execute_kernel); } + static void execute_kernel(libxsmm_meqn_function equation_kernel, int argc, void** argv); + const uintptr_t get_execute_function_ptr() const override { + return reinterpret_cast(execute_kernel); + } const 
uintptr_t get_compiled_kernel_ptr() const override; - static std::set> get_supported_precisions(const std::shared_ptr& node = nullptr); + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); protected: libxsmm_blasint m_equation_id; libxsmm_meqn_arg_shape m_out_shape; size_t m_num_inputs = 0; - void validate_arguments(const std::vector &in, const std::vector &out) const override; + void validate_arguments(const std::vector& in, const std::vector& out) const override; void emit_impl(const std::vector& in, const std::vector& out) const override; }; - -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_tpp_emitter.cpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_tpp_emitter.cpp index cb18f69082e1b2..a18b1616bb517c 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_tpp_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_tpp_emitter.cpp @@ -3,9 +3,10 @@ // #include "jit_tpp_emitter.hpp" + +#include "emitters/plugin/x64/utils.hpp" #include "snippets/lowered/port_descriptor.hpp" #include "transformations/tpp/x64/op/eltwise.hpp" -#include "emitters/plugin/x64/utils.hpp" using namespace Xbyak; using namespace dnnl::impl; @@ -34,7 +35,7 @@ VectorDims TppEmitter::get_projected_subtensor(const snippets::lowered::PortDesc TppEmitter::TppEmitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr) - : jit_emitter(h, isa) { + : jit_binary_call_emitter(h, isa, expr->get_live_regs()) { in_out_type_ = emitter_in_out_map::gpr_to_gpr; const auto& node = expr->get_node(); const auto& tpp_mod = std::dynamic_pointer_cast(node); @@ -57,8 +58,8 @@ TppEmitter::TppEmitter(dnnl::impl::cpu::x64::jit_generator* h, for (size_t i = 0; i < num_ins; i++) { io_dtypes[i] = ov_to_xsmm_dtype(node->get_input_element_type(i)); io_offsets[i] = tpp_mod->get_input_offset(i); - 
io_strides[i] = replace_full_dim(tpp_mod->get_input_stride(i), - expr->get_input_port_descriptor(i)->get_shape().back()); + io_strides[i] = + replace_full_dim(tpp_mod->get_input_stride(i), expr->get_input_port_descriptor(i)->get_shape().back()); io_port_descriptors[i] = expr->get_input_port_descriptor(i); } @@ -66,39 +67,43 @@ TppEmitter::TppEmitter(dnnl::impl::cpu::x64::jit_generator* h, const auto i_off = i + num_ins; io_dtypes[i_off] = ov_to_xsmm_dtype(node->get_output_element_type(i)); io_offsets[i_off] = tpp_mod->get_output_offset(i); - io_strides[i_off] = replace_full_dim(tpp_mod->get_output_stride(i), - expr->get_output_port_descriptor(i)->get_shape().back()); + io_strides[i_off] = + replace_full_dim(tpp_mod->get_output_stride(i), expr->get_output_port_descriptor(i)->get_shape().back()); io_port_descriptors[i_off] = expr->get_output_port_descriptor(i); } } -void TppEmitter::emit_code(const std::vector &in, const std::vector &out) const { +void TppEmitter::emit_code(const std::vector& in, const std::vector& out) const { validate_arguments(in, out); emit_impl(in, out); } void TppEmitter::emit_impl(const std::vector& in, const std::vector& out) const { - EmitABIRegSpills spill(h); - spill.preamble(); - // Note: 4 args is currently enough for unary and binary ops. 
// To enable ternary ops, we will have to pass extra regs on stack for Windows, std::array abi_params{abi_param1, abi_param2, abi_param3, abi_param4}; + init_binary_call_regs(abi_params.size(), in, out); + + const Xbyak::Reg64& aux_reg = get_call_address_reg(); + const Xbyak::Reg64& callee_saved_reg = get_callee_saved_reg(); + + EmitABIRegSpills spill(h); + spill.preamble(get_regs_to_spill()); - // save function address in gpr to pass in call instruction - h->mov(h->rbp, get_execute_function_ptr()); int aux_xmm_count = 0; for (auto reg_idx : in) - h->uni_vmovq(Xmm(aux_xmm_count++), Reg64(static_cast(reg_idx))); + h->uni_vmovq(Xmm(aux_xmm_count++), Reg64(static_cast(reg_idx))); for (auto reg_idx : out) - h->uni_vmovq(Xmm(aux_xmm_count++), Reg64(static_cast(reg_idx))); + h->uni_vmovq(Xmm(aux_xmm_count++), Reg64(static_cast(reg_idx))); OV_CPU_JIT_EMITTER_ASSERT(aux_xmm_count == num_kernel_args, "offsets for some inputs/outputs were not set"); - OV_CPU_JIT_EMITTER_ASSERT(aux_xmm_count < static_cast(abi_params.size()), "too many input/output arguments. More abi params required"); + OV_CPU_JIT_EMITTER_ASSERT(aux_xmm_count < static_cast(abi_params.size()), + "too many input/output arguments. 
More abi params required"); const auto data_ptr_reg = [&](Xmm xmm, Xbyak::Reg64 reg, size_t bytes_offset) { h->uni_vmovq(reg, xmm); - if (bytes_offset) h->add(reg, bytes_offset); + if (bytes_offset) + h->add(reg, bytes_offset); }; const auto& compiled_kernel = get_compiled_kernel_ptr(); OV_CPU_JIT_EMITTER_ASSERT(compiled_kernel, "Failed to compile libxsmm_kernel"); @@ -106,9 +111,11 @@ void TppEmitter::emit_impl(const std::vector& in, const std::vectormov(abi_params[0], compiled_kernel); for (int i = 0; i < num_kernel_args; i++) data_ptr_reg(Xmm(i), abi_params[i + 1], io_offsets[i]); + // save function address in gpr to pass in call instruction + h->mov(aux_reg, get_execute_function_ptr()); - spill.rsp_align(); - h->call(h->rbp); + spill.rsp_align(callee_saved_reg.getIdx()); + h->call(aux_reg); spill.rsp_restore(); spill.postamble(); @@ -116,13 +123,17 @@ void TppEmitter::emit_impl(const std::vector& in, const std::vector(__VA_ARGS__); \ - unsetenv("LIBXSMM_X86_HINT_USE_HIGH_PREC_ELTWISE_APPROX"); \ - unsetenv("LIBXSMM_GEMM_K_A_PF_DIST"); \ - return res; \ + setenv("LIBXSMM_GEMM_K_A_PF_DIST", "4", 1); \ + auto res = reinterpret_cast(__VA_ARGS__); \ + unsetenv("LIBXSMM_X86_HINT_USE_HIGH_PREC_ELTWISE_APPROX"); \ + unsetenv("LIBXSMM_GEMM_K_A_PF_DIST"); \ + return res; \ }() class DebugTppEmitter; -class TppEmitter : public jit_emitter { +class TppEmitter : public jit_binary_call_emitter { friend DebugTppEmitter; public: TppEmitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); - void emit_code(const std::vector &in, - const std::vector &out) const; + void emit_code(const std::vector& in, const std::vector& out) const; static libxsmm_datatype ov_to_xsmm_dtype(ov::element::Type_t elemet_type); protected: - void emit_impl(const std::vector& in, - const std::vector& out) const override; + void emit_impl(const std::vector& in, const std::vector& out) const override; static 
ov::snippets::VectorDims get_projected_subtensor(const snippets::lowered::PortDescriptorPtr& desc); /// Generate function pointer to the thin wrapper over the kernel that is called in runtime on every iteration @@ -49,14 +48,14 @@ class TppEmitter : public jit_emitter { std::vector io_offsets{}; std::vector io_dtypes{}; // Note: almost all emitters use fp32 for internal computations - libxsmm_datatype exec_dtype {LIBXSMM_DATATYPE_F32}; + libxsmm_datatype exec_dtype{LIBXSMM_DATATYPE_F32}; // aka leading dimensions std::vector io_strides{}; std::vector io_port_descriptors{}; // compile flags has the same type for all eltwises, so we keep them in the base class - libxsmm_bitfield m_compile_flags {0}; + libxsmm_bitfield m_compile_flags{0}; int num_kernel_args = 0; }; -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp index 58d8e5e260155a..208b5f7ee497ca 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.hpp @@ -20,7 +20,7 @@ namespace pass { */ class AdjustBrgemmCopyBLoopPorts : public snippets::lowered::pass::ConstPass { public: - OPENVINO_RTTI("AdjustBrgemmCopyBLoopPorts", "0", snippets::lowered::pass::ConstPass); + OPENVINO_RTTI("AdjustBrgemmCopyBLoopPorts", "", ConstPass) AdjustBrgemmCopyBLoopPorts() = default; bool run(const snippets::lowered::LinearIR& linear_ir) override; static bool update_loop_info(const snippets::lowered::UnifiedLoopInfoPtr& uni_loop_info); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_copy_b_loop_ports_adjuster.hpp 
b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_copy_b_loop_ports_adjuster.hpp index e6feb6526f41c3..6dbcbcbc623039 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_copy_b_loop_ports_adjuster.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_copy_b_loop_ports_adjuster.hpp @@ -18,6 +18,7 @@ namespace intel_cpu { */ class BrgemmCopyBLoopPortsAdjuster : public ov::snippets::lowered::pass::RuntimeOptimizer { public: + OPENVINO_RTTI("BrgemmCopyBLoopPortsAdjuster", "", RuntimeOptimizer) BrgemmCopyBLoopPortsAdjuster() = default; BrgemmCopyBLoopPortsAdjuster(const ov::snippets::lowered::LinearIRCPtr& linear_ir, const CPURuntimeConfigurator* configurator); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp index 595ac3f37aa337..2700a28b6d674c 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/brgemm_cpu_blocking.hpp @@ -18,7 +18,7 @@ namespace pass { */ class BrgemmCPUBlocking : public ov::snippets::lowered::pass::BrgemmBlocking { public: - OPENVINO_RTTI("BrgemmCPUBlocking", "BrgemmCPU", ov::snippets::lowered::pass::BrgemmBlocking); + OPENVINO_RTTI("BrgemmCPUBlocking", "", BrgemmBlocking) /** * @interface DummyPass @@ -29,7 +29,7 @@ class BrgemmCPUBlocking : public ov::snippets::lowered::pass::BrgemmBlocking Date: Thu, 16 Jan 2025 16:23:50 +0100 Subject: [PATCH 17/97] [CPU] Remove errorPrefix usage for error reporting (#28386) ### Details: Take a first step of unifying error handling infrastructure in CPU plugin: get rid of `OPENVINO_THROW` and `errorPrefix` class field usage in favor of `THROW_CPU_NODE_ERR` approach ### Tickets: - 160275 --- .../intel_cpu/src/nodes/adaptive_pooling.cpp | 31 ++++----- 
.../intel_cpu/src/nodes/adaptive_pooling.h | 2 - .../intel_cpu/src/nodes/batch_to_space.cpp | 14 ++-- .../intel_cpu/src/nodes/batch_to_space.h | 2 - src/plugins/intel_cpu/src/nodes/bin_conv.cpp | 11 ++-- src/plugins/intel_cpu/src/nodes/bin_conv.h | 2 - src/plugins/intel_cpu/src/nodes/broadcast.cpp | 9 ++- src/plugins/intel_cpu/src/nodes/broadcast.h | 2 - src/plugins/intel_cpu/src/nodes/bucketize.cpp | 9 ++- src/plugins/intel_cpu/src/nodes/bucketize.h | 1 - src/plugins/intel_cpu/src/nodes/convert.cpp | 14 ++-- src/plugins/intel_cpu/src/nodes/convert.h | 2 - .../src/nodes/ctc_greedy_decoder.cpp | 11 ++-- .../intel_cpu/src/nodes/ctc_greedy_decoder.h | 2 - .../src/nodes/ctc_greedy_decoder_seq_len.cpp | 15 ++--- .../src/nodes/ctc_greedy_decoder_seq_len.h | 2 - src/plugins/intel_cpu/src/nodes/ctc_loss.cpp | 8 +-- src/plugins/intel_cpu/src/nodes/ctc_loss.h | 2 - src/plugins/intel_cpu/src/nodes/cum_sum.cpp | 18 +++-- src/plugins/intel_cpu/src/nodes/cum_sum.h | 1 - src/plugins/intel_cpu/src/nodes/deconv.cpp | 13 ++-- src/plugins/intel_cpu/src/nodes/deconv.h | 2 - src/plugins/intel_cpu/src/nodes/def_conv.cpp | 29 ++++----- src/plugins/intel_cpu/src/nodes/def_conv.h | 1 - .../intel_cpu/src/nodes/detection_output.cpp | 26 ++++---- .../intel_cpu/src/nodes/detection_output.h | 2 - src/plugins/intel_cpu/src/nodes/dft.cpp | 17 ++--- src/plugins/intel_cpu/src/nodes/dft.h | 1 - src/plugins/intel_cpu/src/nodes/eltwise.cpp | 22 +++---- .../src/nodes/executors/interpolate.cpp | 6 +- ...erimental_detectron_priorgridgenerator.cpp | 3 +- ...xperimental_detectron_priorgridgenerator.h | 2 - .../nodes/experimental_detectron_topkrois.cpp | 5 +- .../nodes/experimental_detectron_topkrois.h | 2 - .../src/nodes/extract_image_patches.cpp | 22 +++---- .../src/nodes/extract_image_patches.h | 2 - src/plugins/intel_cpu/src/nodes/eye.cpp | 8 +-- src/plugins/intel_cpu/src/nodes/eye.h | 7 +- .../intel_cpu/src/nodes/fake_quantize.cpp | 21 +++--- .../intel_cpu/src/nodes/fake_quantize.h | 2 - 
.../intel_cpu/src/nodes/fullyconnected.cpp | 3 +- .../intel_cpu/src/nodes/fullyconnected.h | 1 - .../intel_cpu/src/nodes/gather_elements.cpp | 12 ++-- .../intel_cpu/src/nodes/gather_elements.h | 1 - .../intel_cpu/src/nodes/gather_tree.cpp | 27 ++++---- src/plugins/intel_cpu/src/nodes/gather_tree.h | 2 - src/plugins/intel_cpu/src/nodes/grn.cpp | 15 ++--- src/plugins/intel_cpu/src/nodes/grn.h | 2 - .../intel_cpu/src/nodes/interaction.cpp | 1 - src/plugins/intel_cpu/src/nodes/interaction.h | 1 - .../intel_cpu/src/nodes/interpolate.cpp | 45 +++++++------ src/plugins/intel_cpu/src/nodes/interpolate.h | 2 - .../intel_cpu/src/nodes/log_softmax.cpp | 5 +- src/plugins/intel_cpu/src/nodes/log_softmax.h | 2 - src/plugins/intel_cpu/src/nodes/lrn.cpp | 14 ++-- src/plugins/intel_cpu/src/nodes/lrn.h | 2 - src/plugins/intel_cpu/src/nodes/mathematics.h | 2 - src/plugins/intel_cpu/src/nodes/matmul.cpp | 21 +++--- src/plugins/intel_cpu/src/nodes/matmul.h | 2 - .../intel_cpu/src/nodes/matrix_nms.cpp | 16 ++--- src/plugins/intel_cpu/src/nodes/matrix_nms.h | 1 - .../intel_cpu/src/nodes/multiclass_nms.cpp | 65 ++++++++----------- .../intel_cpu/src/nodes/multiclass_nms.hpp | 2 - src/plugins/intel_cpu/src/nodes/non_zero.cpp | 10 ++- src/plugins/intel_cpu/src/nodes/non_zero.h | 1 - src/plugins/intel_cpu/src/nodes/one_hot.cpp | 7 +- src/plugins/intel_cpu/src/nodes/one_hot.h | 2 - src/plugins/intel_cpu/src/nodes/pad.cpp | 27 ++++---- src/plugins/intel_cpu/src/nodes/pad.h | 5 +- .../intel_cpu/src/nodes/psroi_pooling.cpp | 41 +++++------- .../intel_cpu/src/nodes/psroi_pooling.h | 2 - src/plugins/intel_cpu/src/nodes/range.cpp | 12 ++-- src/plugins/intel_cpu/src/nodes/range.h | 2 - src/plugins/intel_cpu/src/nodes/reduce.cpp | 37 +++++------ src/plugins/intel_cpu/src/nodes/reduce.h | 2 - .../intel_cpu/src/nodes/region_yolo.cpp | 3 +- src/plugins/intel_cpu/src/nodes/region_yolo.h | 2 - .../intel_cpu/src/nodes/reorg_yolo.cpp | 5 +- src/plugins/intel_cpu/src/nodes/reorg_yolo.h | 2 - 
src/plugins/intel_cpu/src/nodes/reshape.cpp | 2 - src/plugins/intel_cpu/src/nodes/reshape.h | 2 - .../intel_cpu/src/nodes/reverse_sequence.cpp | 27 ++++---- .../intel_cpu/src/nodes/reverse_sequence.h | 1 - src/plugins/intel_cpu/src/nodes/roi_align.cpp | 31 ++++----- src/plugins/intel_cpu/src/nodes/roi_align.h | 2 - .../intel_cpu/src/nodes/roi_pooling.cpp | 14 ++-- src/plugins/intel_cpu/src/nodes/roi_pooling.h | 2 - src/plugins/intel_cpu/src/nodes/roll.cpp | 35 +++++----- src/plugins/intel_cpu/src/nodes/roll.h | 2 - .../intel_cpu/src/nodes/scatter_update.cpp | 49 ++++++-------- .../intel_cpu/src/nodes/scatter_update.h | 2 - src/plugins/intel_cpu/src/nodes/shapeof.cpp | 9 ++- src/plugins/intel_cpu/src/nodes/shapeof.h | 3 - .../intel_cpu/src/nodes/space_to_batch.cpp | 10 ++- .../intel_cpu/src/nodes/space_to_batch.h | 2 - .../intel_cpu/src/nodes/strided_slice.cpp | 32 ++++----- .../intel_cpu/src/nodes/strided_slice.h | 12 +--- src/plugins/intel_cpu/src/nodes/tile.cpp | 40 +++++------- src/plugins/intel_cpu/src/nodes/tile.h | 2 - src/plugins/intel_cpu/src/nodes/topk.cpp | 26 ++++---- src/plugins/intel_cpu/src/nodes/topk.h | 2 - 101 files changed, 426 insertions(+), 636 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp index 12662400d8654a..7b6f64d30d1403 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp @@ -54,9 +54,7 @@ bool AdaptivePooling::isSupportedOperation(const std::shared_ptr AdaptivePooling::AdaptivePooling(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, AdaptivePoolingShapeInferFactory(op)) { std::string errorMessage; - if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "Adaptive Pooling layer with name '" + getName() + "' "; - } else { + if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if 
(one_of(op->get_type_info(), ov::op::v8::AdaptiveAvgPool::get_type_info_static())) { @@ -70,21 +68,21 @@ AdaptivePooling::AdaptivePooling(const std::shared_ptr& op, const Grap void AdaptivePooling::getSupportedDescriptors() { if (getParentEdges().size() != 2) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().size() < (algorithm == Algorithm::AdaptivePoolingMax ? 2 : 1)) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); auto srcRank = getInputShapeAtPort(0).getRank(); if (!one_of(spatialDimsCount, 1, 2, 3)) { - OPENVINO_THROW(errorPrefix, "doesn't support 0th input with rank: ", srcRank); + THROW_CPU_NODE_ERR("doesn't support 0th input with rank: ", srcRank); } if (getInputShapeAtPort(1).getRank() != 1) { - OPENVINO_THROW(errorPrefix, "doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); + THROW_CPU_NODE_ERR("doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); } if (getOutputShapeAtPort(0).getRank() != getInputShapeAtPort(0).getRank()) { - OPENVINO_THROW(errorPrefix, "must keep data rank"); + THROW_CPU_NODE_ERR("must keep data rank"); } } @@ -136,7 +134,7 @@ void AdaptivePooling::execute(dnnl::stream strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().getDataType(); auto outputPrec = getChildEdgeAt(0)->getMemory().getDataType(); if (!(inputPrec == dnnl_f32 && outputPrec == dnnl_f32)) - OPENVINO_THROW(errorPrefix, "doesn't support demanded precisions"); + THROW_CPU_NODE_ERR("doesn't support demanded precisions"); auto& srcMemory0 = getParentEdgeAt(0)->getMemory(); auto& srcMemory1 = getParentEdgeAt(1)->getMemory(); @@ -159,12 +157,11 @@ void AdaptivePooling::execute(dnnl::stream strm) { auto* dst = getDstDataAtPortAs(0); if 
(static_cast(srcMemory1.getShape().getElementsCount()) != spatialDimsCount) - OPENVINO_THROW(errorPrefix, - "has input spatial dimension (", - srcMemory1.getShape().getElementsCount(), - ") inconsistent with pooling vector size (", - spatialDimsCount, - ")"); + THROW_CPU_NODE_ERR("has input spatial dimension (", + srcMemory1.getShape().getElementsCount(), + ") inconsistent with pooling vector size (", + spatialDimsCount, + ")"); auto inputDimVector = srcMemory0.getStaticDims(); const int N = static_cast(inputDimVector[0]); @@ -185,7 +182,7 @@ void AdaptivePooling::execute(dnnl::stream strm) { const int blockCount = (isTailCFmt ? 1 : chPadding / blockSize); auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor(); if (!selectedPrimitiveDescriptor) - OPENVINO_THROW(errorPrefix, "doesn't have primitive descriptors."); + THROW_CPU_NODE_ERR("doesn't have primitive descriptors."); auto config = selectedPrimitiveDescriptor->getConfig(); auto srcStrides = srcBlockDesc->getStrides(); auto dstStrides = getChildEdgeAt(0)->getMemory().getDescWithType()->getStrides(); @@ -231,7 +228,7 @@ void AdaptivePooling::execute(dnnl::stream strm) { setBinBorders(&wStart, &wEnd, ow, IW, OW); auto binSize = (dEnd - dStart) * (hEnd - hStart) * (wEnd - wStart); if (binSize == 0) - OPENVINO_THROW(errorPrefix, "has empty bin"); + THROW_CPU_NODE_ERR("has empty bin"); float sum = 0; for (size_t pixD = dStart; pixD < dEnd; pixD++) { for (size_t pixH = hStart; pixH < hEnd; pixH++) { diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h index 7338c190121c4e..cc6969dd1b1793 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h @@ -33,8 +33,6 @@ class AdaptivePooling : public Node { ov::element::Type precision = ov::element::f32; inline void setBinBorders(size_t* startPtr, size_t* endPtr, size_t idx, size_t inputLength, size_t outputLength); - std::string 
errorPrefix; - protected: bool needShapeInfer() const override; bool needPrepareParams() const override { diff --git a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp index a621cf74e78668..7a255c95b63108 100644 --- a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp @@ -37,17 +37,15 @@ BatchToSpace::BatchToSpace(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "BatchToSpace layer with name '" + op->get_friendly_name() + "'"; - if (inputShapes.size() != 4 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input or output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input or output edges!"); const auto& inDims = getInputShapeAtPort(0).getDims(); const auto& outDims = getOutputShapeAtPort(0).getDims(); if (inDims.size() < 4 || inDims.size() > 5) - OPENVINO_THROW(errorPrefix, " has unsupported 'data' input rank: ", inDims.size()); + THROW_CPU_NODE_ERR("has unsupported 'data' input rank: ", inDims.size()); if (inDims.size() != outDims.size()) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); } void BatchToSpace::initSupportedPrimitiveDescriptors() { @@ -58,7 +56,7 @@ void BatchToSpace::initSupportedPrimitiveDescriptors() { const auto precision = getOriginalInputPrecisionAtPort(0); const std::set supported_precision_sizes = {1, 2, 4, 8}; if (supported_precision_sizes.find(precision.size()) == supported_precision_sizes.end()) - OPENVINO_THROW(errorPrefix, " has unsupported precision: ", precision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported precision: ", precision.get_type_name()); addSupportedPrimDesc({{LayoutType::nspc, precision}, {LayoutType::ncsp, ov::element::i32}, @@ -156,7 +154,9 @@ void BatchToSpace::batchToSpaceKernel() { size_t 
channels = (inShape5D[1] / blockSize); channels = channels == 0 ? 1 : channels; const size_t workAmount = inShape5D[0] * channels; - OPENVINO_ASSERT(workAmount > 0, errorPrefix, " has unsupported work amount == 0"); + if (workAmount == 0) { + THROW_CPU_NODE_ERR("has unsupported work amount == 0"); + } parallel_nt(0, [&](const int ithr, const int nthr) { size_t start(0lu), end(0lu); diff --git a/src/plugins/intel_cpu/src/nodes/batch_to_space.h b/src/plugins/intel_cpu/src/nodes/batch_to_space.h index 6e1620580a153f..9c863296cc3c1d 100644 --- a/src/plugins/intel_cpu/src/nodes/batch_to_space.h +++ b/src/plugins/intel_cpu/src/nodes/batch_to_space.h @@ -42,8 +42,6 @@ class BatchToSpace : public Node { private: std::vector blockShapeIn; std::vector cropsBeginIn; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp index 881ea56ef76dc6..0a1e255dd383f9 100644 --- a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp @@ -940,7 +940,6 @@ BinaryConvolution::BinaryConvolution(const std::shared_ptr& op, const : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "BinaryConvolution node with name '" + getName() + "' "; const auto binConv = ov::as_type_ptr(op); pad_value = binConv->get_pad_value(); @@ -980,21 +979,21 @@ void BinaryConvolution::getSupportedDescriptors() { } if (getParentEdges().size() != expectedInputEdgesNum) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); if (getInputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support 0th input with rank: ", 
getInputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support 0th input with rank: ", getInputShapeAtPort(0).getRank()); } if (getInputShapeAtPort(1).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); + THROW_CPU_NODE_ERR("doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); } if (getOutputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); } } diff --git a/src/plugins/intel_cpu/src/nodes/bin_conv.h b/src/plugins/intel_cpu/src/nodes/bin_conv.h index 9ec83365b63f55..825c264a5ba69b 100644 --- a/src/plugins/intel_cpu/src/nodes/bin_conv.h +++ b/src/plugins/intel_cpu/src/nodes/bin_conv.h @@ -129,8 +129,6 @@ class BinaryConvolution : public Node { const std::vector& s_str, const std::vector& w_str, const std::vector& d_str); - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/broadcast.cpp b/src/plugins/intel_cpu/src/nodes/broadcast.cpp index 727267e836c1f3..3c92c6e6e4f041 100644 --- a/src/plugins/intel_cpu/src/nodes/broadcast.cpp +++ b/src/plugins/intel_cpu/src/nodes/broadcast.cpp @@ -57,21 +57,20 @@ Broadcast::Broadcast(const std::shared_ptr& op, const GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Broadcast node with name '" + op->get_friendly_name() + "' "; if (op->get_input_size() != 2 && op->get_input_size() != 3) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (op->get_output_size() == 0) - OPENVINO_THROW(errorPrefix, "has no output edges."); + THROW_CPU_NODE_ERR("has no output edges."); auto broadcastOp = ov::as_type_ptr(op); if 
(broadcastOp->get_broadcast_spec().m_type == ov::op::AutoBroadcastType::NUMPY) { broadcastType = NUMPY; } else if (broadcastOp->get_broadcast_spec().m_type == ov::op::AutoBroadcastType::EXPLICIT) { if (op->get_input_size() <= AXES_MAPPING_IDX) - OPENVINO_THROW(errorPrefix, " and EXPLICIT mode must have tree input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("and EXPLICIT mode must have tree input edges: ", getParentEdges().size()); broadcastType = EXPLICIT; } else { - OPENVINO_THROW(errorPrefix, "has unexpected broadcast type: ", broadcastOp->get_broadcast_spec().m_type); + THROW_CPU_NODE_ERR("has unexpected broadcast type: ", broadcastOp->get_broadcast_spec().m_type); } if (ov::is_type(op->get_input_node_ptr(TARGET_SHAPE_IDX))) { diff --git a/src/plugins/intel_cpu/src/nodes/broadcast.h b/src/plugins/intel_cpu/src/nodes/broadcast.h index 859f4fb71dad04..5645ec70a1f707 100644 --- a/src/plugins/intel_cpu/src/nodes/broadcast.h +++ b/src/plugins/intel_cpu/src/nodes/broadcast.h @@ -44,8 +44,6 @@ class Broadcast : public Node, public TileBroadcastCommon { std::vector targetShape; std::vector axesMapping; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.cpp b/src/plugins/intel_cpu/src/nodes/bucketize.cpp index 03c6924d9a8883..115e397c4c990e 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.cpp +++ b/src/plugins/intel_cpu/src/nodes/bucketize.cpp @@ -36,7 +36,6 @@ Bucketize::Bucketize(const std::shared_ptr& op, const GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Bucketize layer with name '" + op->get_friendly_name() + "' "; const auto bucketsize = ov::as_type_ptr(op); if (bucketsize == nullptr) OPENVINO_THROW("Operation with name '", @@ -44,7 +43,7 @@ Bucketize::Bucketize(const std::shared_ptr& op, const GraphContext::CP "' is not an instance of Bucketize from opset3."); if (getOriginalInputsNumber() != 2 || getOriginalOutputsNumber() != 1) { - 
OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); } // check one attribute @@ -181,7 +180,7 @@ void Bucketize::execute(dnnl::stream strm) { element_type_traits::value_type>(); break; default: - OPENVINO_THROW(errorPrefix, " has unsupported precision: ", precision_mask); + THROW_CPU_NODE_ERR("has unsupported precision: ", precision_mask); } } @@ -201,11 +200,11 @@ void Bucketize::prepareParams() { // update with_bins/num_values/num_bin_values auto input_tensor_dims = inputTensorMemPtr->getStaticDims(); if (input_tensor_dims.size() < 1) { - OPENVINO_THROW(errorPrefix, " has incorrect dimensions of the input."); + THROW_CPU_NODE_ERR("has incorrect dimensions of the input."); } auto input_bin_dims = inputBinsMemPtr->getStaticDims(); if (input_bin_dims.size() != 1) { - OPENVINO_THROW(errorPrefix, " has incorrect dimensions of the boundaries tensor."); + THROW_CPU_NODE_ERR("has incorrect dimensions of the boundaries tensor."); } if (input_bin_dims[0] != 0) { with_bins = true; diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.h b/src/plugins/intel_cpu/src/nodes/bucketize.h index 59cb0bec95201b..5ad893ea0a9282 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.h +++ b/src/plugins/intel_cpu/src/nodes/bucketize.h @@ -43,7 +43,6 @@ class Bucketize : public Node { ov::element::Type input_precision; ov::element::Type boundaries_precision; ov::element::Type output_precision; - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/convert.cpp b/src/plugins/intel_cpu/src/nodes/convert.cpp index 7b050613739b2c..2684fe28d9b5f7 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/convert.cpp @@ -39,9 +39,7 @@ bool Convert::isSupportedOperation(const std::shared_ptr& op, st Convert::Convert(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, PassThroughShapeInferFactory()) 
{ std::string errorMessage; - if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "Convert node with name '" + getName() + "'"; - } else { + if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } @@ -61,8 +59,6 @@ Convert::Convert(const Shape& shape, if (isDynamicNode()) { shapeInference = std::make_shared(); } - - errorPrefix = "Convert node with name '" + getName() + "'"; } void Convert::getSupportedDescriptors() { @@ -73,9 +69,9 @@ void Convert::getSupportedDescriptors() { if (inputShapes.empty()) inputShapes.push_back(input->getShape()); if (getParentEdges().size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); } bool Convert::isSupportedDesc(const MemoryDesc& desc) { @@ -157,7 +153,7 @@ void Convert::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptorsBuilder(config); } } else { - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges"); } } @@ -185,7 +181,7 @@ void Convert::execute(dnnl::stream strm) { const auto childPaddElemCount = childMem.getDescWithType()->getPaddedElementsCount(); if (parentPaddElemCount != childPaddElemCount) - OPENVINO_THROW(errorPrefix, " has different elements number in input and output buffers"); + THROW_CPU_NODE_ERR("has different elements number in input and output buffers"); MemoryCPtr srcMemory = getSrcMemoryAtPort(0); MemoryPtr dstMemory = getDstMemoryAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/convert.h b/src/plugins/intel_cpu/src/nodes/convert.h index 537c67e39dc18e..528e5adef6f3e1 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.h +++ b/src/plugins/intel_cpu/src/nodes/convert.h @@ -60,8 +60,6 @@ class Convert : 
public Node { ConvertParams convertParams; std::shared_ptr execPtr = nullptr; NodeConfig config; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp index 05c96aa0647313..60224ff49a8781 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp @@ -35,17 +35,16 @@ CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr& op, const Gr OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "CTCGreedyDecoder layer with name '" + op->get_friendly_name() + "' "; if (getOriginalInputsNumber() != 2) - OPENVINO_THROW(errorPrefix, "has invalid number of input edges: ", getOriginalInputsNumber()); + THROW_CPU_NODE_ERR("has invalid number of input edges: ", getOriginalInputsNumber()); if (getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, "has invalid number of outputs edges: ", getOriginalOutputsNumber()); + THROW_CPU_NODE_ERR("has invalid number of outputs edges: ", getOriginalOutputsNumber()); const auto& dataDims = getInputShapeAtPort(DATA_INDEX).getDims(); const auto& seqDims = getInputShapeAtPort(SEQUENCE_LENGTH_INDEX).getDims(); if (!dimsEqualWeak(dataDims[0], seqDims[0]) || !dimsEqualWeak(dataDims[1], seqDims[1])) - OPENVINO_THROW(errorPrefix, "has invalid input shapes."); + THROW_CPU_NODE_ERR("has invalid input shapes."); auto greedyDecOp = ov::as_type_ptr(op); mergeRepeated = greedyDecOp->get_ctc_merge_repeated(); @@ -57,11 +56,11 @@ void CTCGreedyDecoder::initSupportedPrimitiveDescriptors() { ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); if (!one_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) - OPENVINO_THROW(errorPrefix, "has unsupported 'data' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision); ov::element::Type 
seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); if (!one_of(seqLenPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) - OPENVINO_THROW(errorPrefix, "has unsupported 'sequence_length' input precision: ", seqLenPrecision); + THROW_CPU_NODE_ERR("has unsupported 'sequence_length' input precision: ", seqLenPrecision); addSupportedPrimDesc({{LayoutType::ncsp, ov::element::f32}, {LayoutType::ncsp, ov::element::f32}}, {{LayoutType::ncsp, ov::element::f32}}, diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h index deb4f9fa484925..9121905eefdea7 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h @@ -27,8 +27,6 @@ class CTCGreedyDecoder : public Node { const size_t DATA_INDEX = 0lu; const size_t SEQUENCE_LENGTH_INDEX = 1lu; bool mergeRepeated; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp index 1f3e679bfcccd0..11c6efb8da30d1 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp @@ -35,16 +35,15 @@ CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr& OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "CTCGreedyDecoderSeqLen layer with name '" + op->get_friendly_name() + "' "; if (getOriginalInputsNumber() < 2 || getOriginalInputsNumber() > 3) - OPENVINO_THROW(errorPrefix, "has invalid number of input edges: ", getOriginalInputsNumber()); + THROW_CPU_NODE_ERR("has invalid number of input edges: ", getOriginalInputsNumber()); if (getOriginalOutputsNumber() != 2) - OPENVINO_THROW(errorPrefix, "has invalid number of outputs edges: ", getOriginalOutputsNumber()); + THROW_CPU_NODE_ERR("has invalid number of outputs edges: ", 
getOriginalOutputsNumber()); const auto& dataDims = getInputShapeAtPort(DATA_INDEX).getDims(); const auto& seqDims = getInputShapeAtPort(SEQUENCE_LENGTH_INDEX).getDims(); if (!dimsEqualWeak(dataDims[0], seqDims[0])) - OPENVINO_THROW(errorPrefix, "has invalid input shapes."); + THROW_CPU_NODE_ERR("has invalid input shapes."); auto greedyDecOp = ov::as_type_ptr(op); mergeRepeated = greedyDecOp->get_merge_repeated(); @@ -56,11 +55,11 @@ void CTCGreedyDecoderSeqLen::initSupportedPrimitiveDescriptors() { ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); if (!one_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) - OPENVINO_THROW(errorPrefix, "has unsupported 'data' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision); ov::element::Type seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); if (seqLenPrecision != ov::element::i32 && seqLenPrecision != ov::element::i64) - OPENVINO_THROW(errorPrefix, "has unsupported 'sequence_length' input precision: ", seqLenPrecision); + THROW_CPU_NODE_ERR("has unsupported 'sequence_length' input precision: ", seqLenPrecision); std::vector inDataConf; inDataConf.reserve(inputShapes.size()); @@ -95,10 +94,10 @@ void CTCGreedyDecoderSeqLen::execute(dnnl::stream strm) { for (size_t b = 0; b < B; b++) { if (sequenceLengths[b] > static_cast(T)) { std::string errorMsg = - errorPrefix + ". 
Sequence length " + std::to_string(sequenceLengths[b]) + + "Sequence length " + std::to_string(sequenceLengths[b]) + " cannot be greater than according decoded classes dimension size " + std::to_string(getChildEdgeAt(DECODED_CLASSES_INDEX)->getMemory().getStaticDims()[1]); - OPENVINO_THROW(errorMsg); + THROW_CPU_NODE_ERR(errorMsg); } workAmount += sequenceLengths[b]; } diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h index 93cbb66050d4fa..d730cebedac64f 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h @@ -30,8 +30,6 @@ class CTCGreedyDecoderSeqLen : public Node { const size_t DECODED_CLASSES_INDEX = 0lu; const size_t DECODED_CLASSES_LENGTH_INDEX = 1lu; bool mergeRepeated; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp index e8dc4e6c1c5792..20c080105af141 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp @@ -33,10 +33,8 @@ CTCLoss::CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr c OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = std::string("CTCLoss layer with name '") + op->get_friendly_name() + "'"; - if (getOriginalInputsNumber() != 4 && getOriginalInputsNumber() != 5) - OPENVINO_THROW(errorPrefix, " has invalid inputs number."); + THROW_CPU_NODE_ERR("has invalid inputs number."); auto ctcLossOp = ov::as_type_ptr(op); ctcMergeRepeated = ctcLossOp->get_ctc_merge_repeated(); @@ -95,7 +93,7 @@ void CTCLoss::execute(dnnl::stream strm) { for (size_t b = start; b < end; b++) { if (logitsLength[b] < 0 || labelsLength[b] < 0 || logitsLength[b] > static_cast(maxTime) || labelsLength[b] > logitsLength[b]) { - errorMsgB[ithr] = errorPrefix + ". Logit length cannot be greater than max sequence length. 
" + + errorMsgB[ithr] = std::string("Logit length cannot be greater than max sequence length. ") + "Label length cannot be greater than a logit length" + " and both cannot be negative.\nMaxSeqLen: " + std::to_string(maxTime) + "; Logit len: " + std::to_string(logitsLength[b]) + @@ -160,7 +158,7 @@ void CTCLoss::execute(dnnl::stream strm) { if (!err.empty()) resErr += err + "\n"; } - OPENVINO_THROW(resErr); + THROW_CPU_NODE_ERR(resErr); } const size_t TC = maxTime * classesNum; diff --git a/src/plugins/intel_cpu/src/nodes/ctc_loss.h b/src/plugins/intel_cpu/src/nodes/ctc_loss.h index 615a45abb12985..6c24b47b1bd457 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_loss.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_loss.h @@ -30,8 +30,6 @@ class CTCLoss : public Node { bool ctcMergeRepeated; bool preprocessCollapseRepeated; bool unique; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp index 72c5749b89b492..bbda16f94a8e4b 100644 --- a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp +++ b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp @@ -37,16 +37,14 @@ CumSum::CumSum(const std::shared_ptr& op, const GraphContext::CPtr con OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "CumSum layer with name '" + op->get_friendly_name() + "' "; - if ((getOriginalInputsNumber() != numOfInputs && getOriginalInputsNumber() != (numOfInputs - 1)) || getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto& dataShape = getInputShapeAtPort(CUM_SUM_DATA); numOfDims = dataShape.getRank(); if (numOfDims < 1) { - OPENVINO_THROW(errorPrefix, " doesn't support 'data' input tensor with rank: ", numOfDims); + THROW_CPU_NODE_ERR("doesn't support 'data' input tensor with rank: ", numOfDims); } const auto cumsum = ov::as_type_ptr(op); @@ -59,11 +57,11 @@ 
CumSum::CumSum(const std::shared_ptr& op, const GraphContext::CPtr con if (getOriginalInputsNumber() == numOfInputs) { const auto axis_shape = cumsum->get_input_partial_shape(AXIS); if (axis_shape.is_dynamic() || !ov::is_scalar(axis_shape.to_shape())) - OPENVINO_THROW(errorPrefix, " doesn't support 'axis' input tensor with non scalar rank"); + THROW_CPU_NODE_ERR("doesn't support 'axis' input tensor with non scalar rank"); } if (dataShape != getOutputShapeAtPort(0)) - OPENVINO_THROW(errorPrefix, " has different 'data' input and output dimensions"); + THROW_CPU_NODE_ERR("has different 'data' input and output dimensions"); } void CumSum::initSupportedPrimitiveDescriptors() { @@ -81,12 +79,12 @@ void CumSum::initSupportedPrimitiveDescriptors() { ov::element::bf16, ov::element::f16, ov::element::f32)) - OPENVINO_THROW(errorPrefix, " has unsupported 'data' input precision: ", dataPrecision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", dataPrecision.get_type_name()); if (inputShapes.size() == numOfInputs) { const auto& axisTensorPrec = getOriginalInputPrecisionAtPort(AXIS); if (axisTensorPrec != ov::element::i32 && axisTensorPrec != ov::element::i64) - OPENVINO_THROW(errorPrefix, " has unsupported 'axis' input precision: ", axisTensorPrec.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'axis' input precision: ", axisTensorPrec.get_type_name()); } std::vector inDataConf; @@ -255,11 +253,11 @@ size_t CumSum::getAxis(const IMemory& _axis, const IMemory& _data) const { break; } default: { - OPENVINO_THROW(errorPrefix, " doesn't support 'axis' input with precision: ", axisPrecision.get_type_name()); + THROW_CPU_NODE_ERR("doesn't support 'axis' input with precision: ", axisPrecision.get_type_name()); } } if (axisValueFromBlob < -dataShapeSize || axisValueFromBlob > dataShapeSize - 1) - OPENVINO_THROW(errorPrefix, " has axis with a value out of range: ", axisValueFromBlob); + THROW_CPU_NODE_ERR("has axis with a value out of range: ", 
axisValueFromBlob); return axisValueFromBlob >= 0 ? axisValueFromBlob : (axisValueFromBlob + dataShapeSize); } diff --git a/src/plugins/intel_cpu/src/nodes/cum_sum.h b/src/plugins/intel_cpu/src/nodes/cum_sum.h index 393359302570bc..1b5070699c2729 100644 --- a/src/plugins/intel_cpu/src/nodes/cum_sum.h +++ b/src/plugins/intel_cpu/src/nodes/cum_sum.h @@ -46,7 +46,6 @@ class CumSum : public Node { size_t axis = 0; ov::element::Type dataPrecision; - std::string errorPrefix; template struct CumSumExecute { diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index a911e00916c169..a996249dc52acc 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -167,9 +167,8 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr& Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, DeconfolutionShapeInferFactory(op)) { std::string errorMessage; - errorPrefix = "Deconvolution node with name '" + getName() + "' "; if (!isSupportedOperation(op, errorMessage)) - OPENVINO_THROW_NOT_IMPLEMENTED(errorPrefix + errorMessage); + OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); const auto& weightDims = getWeightDims(); @@ -233,9 +232,7 @@ Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphCon const auto spDimsNum = getInputShapeAtPort(0).getRank() - 2; if (getInputShapeAtPort(2).getStaticDims()[0] != spDimsNum || (isConstOutShape && lastOutputSpatialDims.size() != spDimsNum)) { - OPENVINO_THROW(errorPrefix, - "'output_shape' input has incorrect number of elements. Expected = ", - spDimsNum); + THROW_CPU_NODE_ERR("'output_shape' input has incorrect number of elements. 
Expected = ", spDimsNum); } } @@ -408,7 +405,7 @@ std::pair Deconvolution::makeDummyInOutShape() { auto upper_bound = deconvAttrs.stride[i] * static_cast(origInMaxDims[i + 2] - 1) - c1; if (upper_bound < 0) { - OPENVINO_THROW(errorPrefix, ": paddings for dummy shapes can't be computed"); + THROW_CPU_NODE_ERR("paddings for dummy shapes can't be computed"); } } @@ -506,10 +503,10 @@ void Deconvolution::getSupportedDescriptors() { fusedWith[fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(0)); } if (getParentEdges().size() != (withBiases ? (biasPort + 1) : biasPort)) { - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); } if (getChildEdges().empty()) { - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); } VectorDims inDims, outDims; std::tie(inDims, outDims) = makeDummyInOutShape(); diff --git a/src/plugins/intel_cpu/src/nodes/deconv.h b/src/plugins/intel_cpu/src/nodes/deconv.h index 6f833ae232bb5a..a9efed8806abde 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.h +++ b/src/plugins/intel_cpu/src/nodes/deconv.h @@ -106,8 +106,6 @@ class Deconvolution : public Node { bool withBiases = false; size_t biasPort; - std::string errorPrefix; - void createDnnlCompatibleWeights(); bool weightIsConst = false; bool asymmetricPaddingAnd1x1 = false; diff --git a/src/plugins/intel_cpu/src/nodes/def_conv.cpp b/src/plugins/intel_cpu/src/nodes/def_conv.cpp index 6165e960837882..4c44e8b5539e1b 100644 --- a/src/plugins/intel_cpu/src/nodes/def_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/def_conv.cpp @@ -774,10 +774,9 @@ DeformableConvolution::DeformableConvolution(const std::shared_ptr& op if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Deformable convolution with name '" + op->get_friendly_name() + "'"; auto defConvNodeBase = ov::as_type_ptr(op); if 
(defConvNodeBase == nullptr) - OPENVINO_THROW(errorPrefix, " is not an instance of DeformableConvolutionBase."); + THROW_CPU_NODE_ERR("is not an instance of DeformableConvolutionBase."); defConvAttr.group = defConvNodeBase->get_group(); defConvAttr.deformable_group = defConvNodeBase->get_deformable_group(); @@ -798,7 +797,7 @@ DeformableConvolution::DeformableConvolution(const std::shared_ptr& op if (op->get_type_info() == ov::op::v8::DeformableConvolution::get_type_info_static()) { auto defConvNode = ov::as_type_ptr(op); if (defConvNode == nullptr) - OPENVINO_THROW(errorPrefix, " is not an instance of DeformableConvolution from opset8."); + THROW_CPU_NODE_ERR("is not an instance of DeformableConvolution from opset8."); defConvAttr.with_bilinear_pad = defConvNode->get_bilinear_interpolation_pad(); } else { defConvAttr.with_bilinear_pad = false; @@ -807,20 +806,20 @@ DeformableConvolution::DeformableConvolution(const std::shared_ptr& op void DeformableConvolution::getSupportedDescriptors() { if (getParentEdges().size() != 3 && getParentEdges().size() != 4) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); if (getInputShapeAtPort(DATA_ID).getRank() != 4) { - OPENVINO_THROW(errorPrefix, " has unsupported mode. Only 4D blobs are supported as input."); + THROW_CPU_NODE_ERR("has unsupported mode. 
Only 4D blobs are supported as input."); } if (getInputShapeAtPort(OFF_ID).getRank() != 4) { - OPENVINO_THROW(errorPrefix, " doesn't support 1st input with rank: ", getInputShapeAtPort(OFF_ID).getRank()); + THROW_CPU_NODE_ERR("doesn't support 1st input with rank: ", getInputShapeAtPort(OFF_ID).getRank()); } if (getInputShapeAtPort(WEI_ID).getRank() != 4) { - OPENVINO_THROW(errorPrefix, " doesn't support 2nd input with rank: ", getInputShapeAtPort(WEI_ID).getRank()); + THROW_CPU_NODE_ERR("doesn't support 2nd input with rank: ", getInputShapeAtPort(WEI_ID).getRank()); } if (getOutputShapeAtPort(DATA_ID).getRank() != 4) { - OPENVINO_THROW(errorPrefix, " doesn't support output with rank: ", getOutputShapeAtPort(DATA_ID).getRank()); + THROW_CPU_NODE_ERR("doesn't support output with rank: ", getOutputShapeAtPort(DATA_ID).getRank()); } } @@ -1225,23 +1224,23 @@ void DeformableConvolution::prepareParams() { auto weiMemPtr = getSrcMemoryAtPort(WEI_ID); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined destination memory"); + THROW_CPU_NODE_ERR("has undefined destination memory"); if (!srcMemPtr || !srcMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); if (!offMemPtr || !offMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined offsets shape memory"); + THROW_CPU_NODE_ERR("has undefined offsets shape memory"); if (!weiMemPtr || !weiMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined weights memory"); + THROW_CPU_NODE_ERR("has undefined weights memory"); if (getOriginalInputsNumber() > 3) { auto modMemPtr = getSrcMemoryAtPort(MOD_ID); if (!modMemPtr || !modMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined modulations memory"); + THROW_CPU_NODE_ERR("has undefined modulations memory"); } auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor(); if (!selectedPrimitiveDescriptor) - 
OPENVINO_THROW(errorPrefix, "' doesn't have primitive descriptors."); + THROW_CPU_NODE_ERR("doesn't have primitive descriptors."); auto config = selectedPrimitiveDescriptor->getConfig(); bool withModulation = getParentEdges().size() > 3; diff --git a/src/plugins/intel_cpu/src/nodes/def_conv.h b/src/plugins/intel_cpu/src/nodes/def_conv.h index 5859c032fe6c2a..066ac7c0e61007 100644 --- a/src/plugins/intel_cpu/src/nodes/def_conv.h +++ b/src/plugins/intel_cpu/src/nodes/def_conv.h @@ -108,7 +108,6 @@ class DeformableConvolution : public Node { static constexpr size_t OFF_ID = 1; static constexpr size_t WEI_ID = 2; static constexpr size_t MOD_ID = 3; - std::string errorPrefix; class DefConvExecutor { public: DefConvExecutor(const DefConvAttr& defConvAttr, diff --git a/src/plugins/intel_cpu/src/nodes/detection_output.cpp b/src/plugins/intel_cpu/src/nodes/detection_output.cpp index 5f253367426af9..416f47a7c0f8a9 100644 --- a/src/plugins/intel_cpu/src/nodes/detection_output.cpp +++ b/src/plugins/intel_cpu/src/nodes/detection_output.cpp @@ -55,13 +55,11 @@ DetectionOutput::DetectionOutput(const std::shared_ptr& op, const Grap OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "DetectionOutput node with name '" + getName() + "' "; - if (getOriginalInputsNumber() != 3 && getOriginalInputsNumber() != 5) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges."); + THROW_CPU_NODE_ERR("has incorrect number of input edges."); if (getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges."); + THROW_CPU_NODE_ERR("has incorrect number of output edges."); auto doOp = ov::as_type_ptr(op); auto attributes = doOp->get_attrs(); @@ -101,19 +99,17 @@ void DetectionOutput::prepareParams() { const auto& idLocDims = getParentEdgeAt(ID_LOC)->getMemory().getShape().getStaticDims(); if (priorsNum * locNumForClasses * 4 != static_cast(idLocDims[1])) - OPENVINO_THROW(errorPrefix, - "has incorrect number of priors, which must match 
number of location predictions (", - priorsNum * locNumForClasses * 4, - " vs ", - idLocDims[1], - ")"); + THROW_CPU_NODE_ERR("has incorrect number of priors, which must match number of location predictions (", + priorsNum * locNumForClasses * 4, + " vs ", + idLocDims[1], + ")"); if (priorsNum * classesNum != static_cast(idConfDims.back())) - OPENVINO_THROW(errorPrefix, - "has incorrect number of priors, which must match number of confidence predictions."); + THROW_CPU_NODE_ERR("has incorrect number of priors, which must match number of confidence predictions."); if (decreaseClassId && backgroundClassId != 0) - OPENVINO_THROW(errorPrefix, "cannot use decrease_label_id and background_label_id parameter simultaneously."); + THROW_CPU_NODE_ERR("cannot use decrease_label_id and background_label_id parameter simultaneously."); imgNum = static_cast(idConfDims[0]); @@ -923,7 +919,7 @@ inline void DetectionOutput::generateOutput(float* reorderedConfData, const int numResults = outDims[2]; const int DETECTION_SIZE = outDims[3]; if (DETECTION_SIZE != 7) { - OPENVINO_THROW_NOT_IMPLEMENTED(errorPrefix); + THROW_CPU_NODE_ERR("has unsupported output layout."); } int dstDataSize = 0; @@ -935,7 +931,7 @@ inline void DetectionOutput::generateOutput(float* reorderedConfData, dstDataSize = imgNum * classesNum * priorsNum * DETECTION_SIZE * sizeof(float); if (static_cast(dstDataSize) > getChildEdgeAt(0)->getMemory().getSize()) { - OPENVINO_THROW(errorPrefix, ": OUT_OF_BOUNDS"); + THROW_CPU_NODE_ERR("has insufficient output buffer size."); } memset(dstData, 0, dstDataSize); diff --git a/src/plugins/intel_cpu/src/nodes/detection_output.h b/src/plugins/intel_cpu/src/nodes/detection_output.h index 80c2859146ea20..e5ed8951d63c7f 100644 --- a/src/plugins/intel_cpu/src/nodes/detection_output.h +++ b/src/plugins/intel_cpu/src/nodes/detection_output.h @@ -130,8 +130,6 @@ class DetectionOutput : public Node { std::vector bboxSizes; std::vector numPriorsActual; std::vector confInfoForPrior; - - 
std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/dft.cpp b/src/plugins/intel_cpu/src/nodes/dft.cpp index 4a675787a7ed52..5042a5e0abba7e 100644 --- a/src/plugins/intel_cpu/src/nodes/dft.cpp +++ b/src/plugins/intel_cpu/src/nodes/dft.cpp @@ -49,29 +49,28 @@ DFT::DFT(const std::shared_ptr& op, const GraphContext::CPtr context) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - layerErrorPrefix = "DFT layer with name '" + op->get_name() + "'"; const size_t inputsNumber = getOriginalInputsNumber(); if (inputsNumber != 2 && inputsNumber != 3) { - OPENVINO_THROW(layerErrorPrefix, " has invalid number of input/output edges: ", inputsNumber); + THROW_CPU_NODE_ERR("has invalid number of input/output edges: ", inputsNumber); } /* Data */ inputShape = inputShapes[DATA_INDEX].getStaticDims(); if (inputShape.size() < 2) { - OPENVINO_THROW(layerErrorPrefix, " has invalid 'data' input tensor with rank: ", inputShape.size()); + THROW_CPU_NODE_ERR("has invalid 'data' input tensor with rank: ", inputShape.size()); } /* Axes */ const auto axesRank = inputShapes[AXES_INDEX].getRank(); if (axesRank != 1) { - OPENVINO_THROW(layerErrorPrefix, " has invalid 'axes' input tensor with rank: ", axesRank); + THROW_CPU_NODE_ERR("has invalid 'axes' input tensor with rank: ", axesRank); } /* Signal size */ if (inputsNumber > SIGNAL_SIZE_INDEX) { const auto signalSizeRank = inputShapes[SIGNAL_SIZE_INDEX].getRank(); if (signalSizeRank != 1) { - OPENVINO_THROW(layerErrorPrefix, " has invalid 'signal_size' input tensor with rank: ", signalSizeRank); + THROW_CPU_NODE_ERR("has invalid 'signal_size' input tensor with rank: ", signalSizeRank); } } @@ -87,20 +86,18 @@ void DFT::initSupportedPrimitiveDescriptors() { const auto& dataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); if (!dataPrecision.is_real()) { - OPENVINO_THROW(layerErrorPrefix, " has unsupported 'data' input precision: ", dataPrecision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 
'data' input precision: ", dataPrecision.get_type_name()); } const auto& axesPrecision = getOriginalInputPrecisionAtPort(AXES_INDEX); if (axesPrecision != ov::element::i32 && axesPrecision != ov::element::i64) { - OPENVINO_THROW(layerErrorPrefix, " has unsupported 'axes' input precision: ", axesPrecision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'axes' input precision: ", axesPrecision.get_type_name()); } if (inputShapes.size() > SIGNAL_SIZE_INDEX) { const auto& signalSizeTensorPrec = getOriginalInputPrecisionAtPort(SIGNAL_SIZE_INDEX); if (signalSizeTensorPrec != ov::element::i32 && signalSizeTensorPrec != ov::element::i64) { - OPENVINO_THROW(layerErrorPrefix, - " has unsupported 'signal_size' input precision: ", - signalSizeTensorPrec.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'signal_size' input precision: ", signalSizeTensorPrec.get_type_name()); } } diff --git a/src/plugins/intel_cpu/src/nodes/dft.h b/src/plugins/intel_cpu/src/nodes/dft.h index 2789f22fa7ca94..7d7cabcf4585e0 100644 --- a/src/plugins/intel_cpu/src/nodes/dft.h +++ b/src/plugins/intel_cpu/src/nodes/dft.h @@ -53,7 +53,6 @@ class DFT : public Node { std::vector axes; std::vector inputShape; - std::string layerErrorPrefix; const size_t DATA_INDEX = 0; const size_t AXES_INDEX = 1; const size_t SIGNAL_SIZE_INDEX = 2; diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp index 12312eac3e8511..55e265a2008dcb 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp @@ -3065,8 +3065,6 @@ void Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, const VectorDims& postOpDims, std::vector& postOpsMem, const int channelAxis) { - const std::string errorPrefix = "Appending Eltwise node with name '" + getName() + "' "; - if (getOneDnnAlgorithm() != dnnl::algorithm::undef) { switch (getOneDnnAlgorithm()) { case dnnl::algorithm::eltwise_relu: @@ -3091,7 +3089,7 @@ void 
Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, ops.append_eltwise(getOneDnnAlgorithm(), getAlpha(), getBeta()); break; default: - OPENVINO_THROW(errorPrefix, "as post operation is not supported"); + THROW_CPU_NODE_ERR("Appending Eltwise node with name '", getName(), "' as post operation is not supported"); } } else { // per-tensor EltwisePowerStatic can be implemented with more well-supported eltwise postOps @@ -3119,7 +3117,9 @@ void Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, if (scales.size() == 1) { depthwiseData.resize(channelSize, depthwiseData.back()); } else if (scales.size() != channelSize) { - OPENVINO_THROW(errorPrefix, "failed due to scales data size inconsistency"); + OPENVINO_THROW("Appending Eltwise node with name '", + getName(), + "' failed due to scales data size inconsistency"); } depthwiseData.insert(depthwiseData.end(), shifts.begin(), shifts.end()); if (shifts.empty()) { @@ -3128,7 +3128,9 @@ void Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, } else if (shifts.size() == 1) { depthwiseData.resize(2 * channelSize, depthwiseData.back()); } else if (shifts.size() != channelSize) { - OPENVINO_THROW(errorPrefix, "failed due to shifts data size inconsistency"); + OPENVINO_THROW("Appending Eltwise node with name '", + getName(), + "' failed due to shifts data size inconsistency"); } depthwiseDataSize = 2 * channelSize; @@ -3139,7 +3141,7 @@ void Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, } if (depthwiseData.empty()) - OPENVINO_THROW(errorPrefix, "cannot be performed since buffers are not allocated"); + THROW_CPU_NODE_ERR("cannot be performed since buffers are not allocated"); std::array offsets = {0}; offsets[1] = offsets[0] + channelSize; @@ -3160,7 +3162,7 @@ void Eltwise::appendPostOpsImpl(dnnl::post_ops& ops, ops.append_depthwise(dnnl::algorithm::depthwise_prelu, offsets); break; default: - OPENVINO_THROW(errorPrefix, "as post operation is not supported"); + THROW_CPU_NODE_ERR("as post operation is not supported"); } 
appendMemory(depthwiseData, depthwiseMemory, postOpsMem); @@ -3192,8 +3194,6 @@ bool Eltwise::appendAttrPostOps(DnnlPostOpsComposerLegacy& dnnlpoc, bool isLastPostOp, dnnl::memory::data_type outDataType, bool allowBinary) { - const std::string errorPrefix = "Appending Eltwise node with name '" + getName() + "' as binary post op "; - if (getOneDnnAlgorithm() != dnnl::algorithm::undef) { switch (getOneDnnAlgorithm()) { case dnnl::algorithm::eltwise_relu: @@ -3221,7 +3221,7 @@ bool Eltwise::appendAttrPostOps(DnnlPostOpsComposerLegacy& dnnlpoc, dnnlpoc.appendLinear({getAlpha()}, {getBeta()}, isLastPostOp); break; default: - OPENVINO_THROW(errorPrefix, "as post operation is not supported"); + THROW_CPU_NODE_ERR("as post operation is not supported"); } } else { switch (getAlgorithm()) { @@ -3248,7 +3248,7 @@ bool Eltwise::appendAttrPostOps(DnnlPostOpsComposerLegacy& dnnlpoc, dnnlpoc.appendBinary(dnnl::algorithm::binary_prelu, scales); break; default: - OPENVINO_THROW(errorPrefix, "as post operation is not supported"); + THROW_CPU_NODE_ERR("as post operation is not supported"); } } return true; diff --git a/src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp index 8e50b133506b06..189ae24bac808d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/interpolate.cpp @@ -131,7 +131,7 @@ float ov::intel_cpu::InterpolateExecutor::coordTransToInput(int outCoord, break; } default: { - OPENVINO_THROW("errorPrefix", " does not support specified coordinate transformation mode"); + OPENVINO_THROW("Interpolate executor does not support specified coordinate transformation mode"); break; } } @@ -167,7 +167,7 @@ int ov::intel_cpu::InterpolateExecutor::nearestRound(float originCoord, return static_cast(originCoord); } default: { - OPENVINO_THROW("errorPrefix", " does not support specified nearest round mode"); + OPENVINO_THROW("Interpolate executor does not 
support specified nearest round mode"); break; } } @@ -547,7 +547,7 @@ const uint8_t* ov::intel_cpu::InterpolateExecutor::padPreprocess(const std::vect srcPadded.resize(eltsTotal * srcDataSize, 0x0); uint8_t* src_data_pad = static_cast(&srcPadded[0]); if ((srcDim5d[0] != srcDimPad5d[0]) || (srcDim5d[1] != srcDimPad5d[1])) { - OPENVINO_THROW("Interpolate layer with name does not support padding on batch and channel dimensions"); + OPENVINO_THROW("Interpolate executor does not support padding on batch and channel dimensions"); } parallel_for5d( srcDim5d[0], diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp index e9c7b2e0e0a5a4..873750b5a13d4a 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp @@ -35,10 +35,9 @@ ExperimentalDetectronPriorGridGenerator::ExperimentalDetectronPriorGridGenerator OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "ExperimentalDetectronPriorGridGenerator layer with name '" + op->get_friendly_name() + "'"; const auto priorGridGen = ov::as_type_ptr(op); if (getOriginalInputsNumber() != 3 || getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto& attr = priorGridGen->get_attrs(); grid_w_ = attr.w; diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h index 116187039b8f70..37b0d81300c483 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h @@ -43,8 +43,6 @@ class ExperimentalDetectronPriorGridGenerator : public Node { int grid_h_; 
float stride_w_; float stride_h_; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp index e6b385a1d28af7..096e5bebf2f5fb 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp @@ -38,7 +38,6 @@ ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_p OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "ExperimentalDetectronTopKROIs layer with name '" + op->get_friendly_name() + "'"; const auto topKROI = ov::as_type_ptr(op); if (topKROI == nullptr) OPENVINO_THROW("Operation with name '", @@ -46,10 +45,10 @@ ExperimentalDetectronTopKROIs::ExperimentalDetectronTopKROIs(const std::shared_p "' is not an instance of ExperimentalDetectronTopKROIs from opset6."); if (inputShapes.size() != 2 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); if (getInputShapeAtPort(INPUT_ROIS).getRank() != 2 || getInputShapeAtPort(INPUT_PROBS).getRank() != 1) - OPENVINO_THROW(errorPrefix, " has unsupported input shape"); + THROW_CPU_NODE_ERR("has unsupported input shape"); max_rois_num_ = topKROI->get_max_rois(); } diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h index 0c71974fdca8a8..5e328d224d458b 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h @@ -43,8 +43,6 @@ class ExperimentalDetectronTopKROIs : public Node { const int OUTPUT_ROIS{0}; int max_rois_num_; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp 
b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp index a60c3db28f35d2..534f5a518ad11e 100644 --- a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp @@ -364,22 +364,20 @@ ExtractImagePatches::ExtractImagePatches(const std::shared_ptr& op, co OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "ExtractImagePatches layer with name '" + op->get_friendly_name() + "' "; auto extImgPatcher = ov::as_type_ptr(op); if (inputShapes.size() != 1 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, - "has incorrect number of input or output edges!", - " Input: ", - inputShapes.size(), - "); Output: ", - outputShapes.size()); + THROW_CPU_NODE_ERR("has incorrect number of input or output edges!", + " Input: ", + inputShapes.size(), + "); Output: ", + outputShapes.size()); if (getInputShapeAtPort(0).getRank() != 4) - OPENVINO_THROW(errorPrefix, "must have 4D input tensor. Actual: ", getInputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("must have 4D input tensor. Actual: ", getInputShapeAtPort(0).getRank()); if (getOutputShapeAtPort(0).getRank() != 4) - OPENVINO_THROW(errorPrefix, "must have 4D output tensor. Actual: ", getOutputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("must have 4D output tensor. 
Actual: ", getOutputShapeAtPort(0).getRank()); if (extImgPatcher->get_auto_pad() == ov::op::PadType::VALID) { _auto_pad = ExtImgPatcherPadType::VALID; @@ -388,7 +386,7 @@ ExtractImagePatches::ExtractImagePatches(const std::shared_ptr& op, co } else if (extImgPatcher->get_auto_pad() == ov::op::PadType::SAME_UPPER) { _auto_pad = ExtImgPatcherPadType::SAME_UPPER; } else { - OPENVINO_THROW(errorPrefix, "has unsupported pad type: ", extImgPatcher->get_auto_pad()); + THROW_CPU_NODE_ERR("has unsupported pad type: ", extImgPatcher->get_auto_pad()); } _ksizes = extImgPatcher->get_sizes(); @@ -396,7 +394,7 @@ ExtractImagePatches::ExtractImagePatches(const std::shared_ptr& op, co _strides = extImgPatcher->get_strides(); _rates = extImgPatcher->get_rates(); if (_ksizes.size() != 2 || _strides.size() != 2 || _rates.size() != 2) - OPENVINO_THROW(errorPrefix, "must have the following attributes with shape {2}: sizes, strides, rates."); + THROW_CPU_NODE_ERR("must have the following attributes with shape {2}: sizes, strides, rates."); } void ExtractImagePatches::prepareParams() { @@ -444,7 +442,7 @@ void ExtractImagePatches::initSupportedPrimitiveDescriptors() { const auto precision = getOriginalInputPrecisionAtPort(0); if (_supported_precisions_sizes.find(precision.size()) == _supported_precisions_sizes.end()) - OPENVINO_THROW(errorPrefix, "has unsupported precision: ", precision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported precision: ", precision.get_type_name()); addSupportedPrimDesc({{LayoutType::ncsp, precision}}, {{LayoutType::ncsp, precision}}, impl_desc_type::ref_any); } diff --git a/src/plugins/intel_cpu/src/nodes/extract_image_patches.h b/src/plugins/intel_cpu/src/nodes/extract_image_patches.h index bb1a45a9f01d6d..f4f3ecc50901aa 100644 --- a/src/plugins/intel_cpu/src/nodes/extract_image_patches.h +++ b/src/plugins/intel_cpu/src/nodes/extract_image_patches.h @@ -63,8 +63,6 @@ class ExtractImagePatches : public Node { static const std::set 
_supported_precisions_sizes; ExtImgPatcherPadType _auto_pad; - std::string errorPrefix; - struct ExtractImagePatchesExecutor { ExtractImagePatchesExecutor() = default; virtual void exec(void* src, void* dst, const VectorDims& istrides, const VectorDims& ostrides) = 0; diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index deb47abdba2dee..57fbdd66d0308f 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -53,15 +53,15 @@ Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr context) outType = op->get_output_element_type(0); withBatchShape = (op->get_input_size() == 4); if (!one_of(outType, ov::element::f32, ov::element::bf16, ov::element::i32, ov::element::i8, ov::element::u8)) { - THROW_ERROR(errorPrefix, "doesn't support demanded output precision"); + THROW_CPU_NODE_ERR("doesn't support demanded output precision"); } } void Eye::getSupportedDescriptors() { if (!one_of(getParentEdges().size(), 3u, 4u)) - THROW_ERROR(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().empty()) - THROW_ERROR(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); } template @@ -106,7 +106,7 @@ void Eye::executeSpecified() { const int64_t shift = getDiagIndex(); auto outPtr = getDstMemoryAtPort(0); if (!outPtr || !outPtr->isDefined()) - THROW_ERROR(errorPrefix, "Destination memory is undefined."); + THROW_CPU_NODE_ERR("Destination memory is undefined."); T* dst = outPtr->getDataAs(); const size_t batchVolume = getBatchVolume(getBatchShape()); diff --git a/src/plugins/intel_cpu/src/nodes/eye.h b/src/plugins/intel_cpu/src/nodes/eye.h index fc2b42a18bdbe9..52f78e7cc33711 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.h +++ 
b/src/plugins/intel_cpu/src/nodes/eye.h @@ -43,7 +43,6 @@ class Eye : public Node { static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: - std::string errorPrefix = ""; ov::element::Type outType = ov::element::Type_t::undefined; template void executeSpecified(); @@ -52,7 +51,7 @@ class Eye : public Node { inline const size_t getRowNum() const { auto rowMem = getSrcMemoryAtPort(ROWS_NUM); if (rowMem == nullptr) - OPENVINO_THROW(errorPrefix, " doesn't contain row_count data"); + THROW_CPU_NODE_ERR("doesn't contain row_count data"); const int* rowPtr = rowMem->getDataAs(); return rowPtr[0]; @@ -60,7 +59,7 @@ class Eye : public Node { inline const size_t getColNum() const { auto colMem = getSrcMemoryAtPort(COLS_NUM); if (colMem == nullptr) - OPENVINO_THROW(errorPrefix, " doesn't contain col_count data"); + THROW_CPU_NODE_ERR("doesn't contain col_count data"); const int* colPtr = colMem->getDataAs(); return colPtr[0]; @@ -68,7 +67,7 @@ class Eye : public Node { inline const int getDiagIndex() const { auto diagIndMem = getSrcMemoryAtPort(DIAGONAL_INDEX); if (diagIndMem == nullptr) - OPENVINO_THROW(errorPrefix, " doesn't contain diag_index data"); + THROW_CPU_NODE_ERR("doesn't contain diag_index data"); const int* diagIndexPtr = diagIndMem->getDataAs(); return diagIndexPtr[0]; diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp index 431c6f133e15ae..e7105f4f016bc7 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp @@ -1063,15 +1063,14 @@ FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphConte algorithm = Algorithm::FQCommon; const auto fq = ov::as_type_ptr(op); - errorPrefix = "FakeQuantize node with name '" + getName() + "' "; levels = fq->get_levels(); if (levels <= 1) - OPENVINO_THROW(errorPrefix, "supports 'levels' attribute greater than or equal to 2"); + 
THROW_CPU_NODE_ERR("supports 'levels' attribute greater than or equal to 2"); if (inputShapes.size() != 5) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", inputShapes.size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", inputShapes.size()); if (outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", outputShapes.size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", outputShapes.size()); auto initAxisIdx = [&](const VectorDims& inputDims) { size_t axisIdx = 0; @@ -1126,7 +1125,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphConte auto outputHighAxisSize = ov::is_scalar(ohShape) ? 1 : ohShape[outputHighAxis]; if (axisSize != -1 && !dimsEqualWeak(axisSize, getInputShapeAtPort(0).getDims()[axis])) { - OPENVINO_THROW(errorPrefix, "has different quantization axis size on 'data' and 'range' inputs"); + THROW_CPU_NODE_ERR("has different quantization axis size on 'data' and 'range' inputs"); } const auto inputLowNode = ov::as_type_ptr(fq->get_input_node_shared_ptr(1)); @@ -1411,25 +1410,25 @@ void FakeQuantize::init() { void FakeQuantize::getSupportedDescriptors() { if (getParentEdges().size() != 5) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); if (getInputShapeAtPort(0).getRank() != getOutputShapeAtPort(0).getRank()) { - OPENVINO_THROW(errorPrefix, "has different ranks for input and output tensors"); + THROW_CPU_NODE_ERR("has different ranks for input and output tensors"); } if (isBinarization()) { if (getInputShapeAtPort(0).getRank() != 4ul) { - OPENVINO_THROW(errorPrefix, "doesn't support input/output 
rank != 4"); + THROW_CPU_NODE_ERR("doesn't support input/output rank != 4"); } } if (getAxis() != 1) { if (isBinarization()) - OPENVINO_THROW(errorPrefix, "doesn't support non per-tensor binarization for axis: ", getAxis()); + THROW_CPU_NODE_ERR("doesn't support non per-tensor binarization for axis: ", getAxis()); if (getAxis() != 0) - OPENVINO_THROW(errorPrefix, "doesn't support non per-tensor quantization for axis: ", getAxis()); + THROW_CPU_NODE_ERR("doesn't support non per-tensor quantization for axis: ", getAxis()); } } diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.h b/src/plugins/intel_cpu/src/nodes/fake_quantize.h index 13ee001a314530..4f985df4cdae41 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.h +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.h @@ -320,8 +320,6 @@ class FakeQuantize : public Node { ov::element::Type inputPrecision = ov::element::f32; ov::element::Type outputPrecision = ov::element::f32; - std::string errorPrefix; - BroadcastingPolicy broadcastingPolicy; }; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index 99d8ac35e34763..decbea94be8dd6 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -168,8 +168,7 @@ void FullyConnected::initTensorParallelConfig(const GraphContext::CPtr context) } FullyConnected::FullyConnected(const std::shared_ptr& op, const GraphContext::CPtr context) - : Node(op, context, FCShapeInferFactory(op)), - errorPrefix("FullyConnected node with name '" + getName() + "'") { + : Node(op, context, FCShapeInferFactory(op)) { std::string errorMessage; initTensorParallelConfig(context); if (!isSupportedOperation(op, errorMessage)) diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.h b/src/plugins/intel_cpu/src/nodes/fullyconnected.h index ff04bd01f857d2..0338b9906a59c1 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.h +++ 
b/src/plugins/intel_cpu/src/nodes/fullyconnected.h @@ -123,7 +123,6 @@ class FullyConnected : public Node { MemoryArgs memory; ExecutorFactoryPtr factory; ExecutorPtr executor = nullptr; - std::string errorPrefix; FCTensorParallelConfig tp_cfg; }; diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp index 34b70afde5f13c..61dbe2f635ed4b 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp @@ -37,22 +37,20 @@ GatherElements::GatherElements(const std::shared_ptr& op, const GraphC if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix_ = std::string("Layer GatherElements with name '") + op->get_friendly_name() + "'"; - if (inputShapes.size() != 2 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix_, " has invalid number of input/output edges."); + THROW_CPU_NODE_ERR(" has invalid number of input/output edges."); const auto dataRank = getInputShapeAtPort(dataIndex_).getRank(); const auto indicesRank = getInputShapeAtPort(indicesIndex_).getRank(); if (dataRank != indicesRank) - OPENVINO_THROW(errorPrefix_, " has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks."); + THROW_CPU_NODE_ERR(" has invalid input shapes. 
Inputs 'Data' and 'Indices' must have equal ranks."); auto gatherElementsOp = ov::as_type_ptr(op); auto axis = gatherElementsOp->get_axis(); if (axis < 0) axis += dataRank; if (axis < 0 || axis >= static_cast(dataRank)) - OPENVINO_THROW(errorPrefix_, " has invalid axis attribute: ", axis); + THROW_CPU_NODE_ERR(" has invalid axis attribute: ", axis); axis_ = axis; } @@ -80,12 +78,12 @@ void GatherElements::initSupportedPrimitiveDescriptors() { sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { - OPENVINO_THROW(errorPrefix_, " has unsupported 'inputData' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR(" has unsupported 'inputData' input precision: ", inDataPrecision); } ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_); if (!one_of(indicesPrecision, ov::element::i32, ov::element::i64)) { - OPENVINO_THROW(errorPrefix_, " has unsupported 'indices' input precision: ", indicesPrecision); + THROW_CPU_NODE_ERR(" has unsupported 'indices' input precision: ", indicesPrecision); } dataTypeSize_ = inDataPrecision.size(); diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.h b/src/plugins/intel_cpu/src/nodes/gather_elements.h index a7b237782db166..c627dfb4b364ad 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.h +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.h @@ -34,7 +34,6 @@ class GatherElements : public Node { int strideAxDst_ = 0; int dstAxDim_ = 0; int strideAx1Diff_ = 0; - std::string errorPrefix_; template void directExecution(); diff --git a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp index dc84a62d034543..da868ec9c78a34 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp @@ -36,20 +36,19 @@ GatherTree::GatherTree(const std::shared_ptr& op, const GraphContext:: OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } 
- errorPrefix = std::string("Node GatherTree with name '") + op->get_friendly_name() + "'"; if (inputShapes.size() != 4) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges."); + THROW_CPU_NODE_ERR("has incorrect number of input edges."); if (outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges."); + THROW_CPU_NODE_ERR("has incorrect number of output edges."); if (getInputShapeAtPort(GATHER_TREE_STEP_IDX).getRank() != 3) - OPENVINO_THROW(errorPrefix, " step_idx vector should be 3 dimension"); + THROW_CPU_NODE_ERR("step_idx vector should be 3 dimension"); if (getInputShapeAtPort(GATHER_TREE_PARENT_IDX).getRank() != 3) - OPENVINO_THROW(errorPrefix, " parent_idx vector should be 3 dimension"); + THROW_CPU_NODE_ERR("parent_idx vector should be 3 dimension"); if (getInputShapeAtPort(GATHER_TREE_MAX_SEQ_LEN).getRank() != 1) - OPENVINO_THROW(errorPrefix, " max_seq_len vector should be 1 dimension"); + THROW_CPU_NODE_ERR("max_seq_len vector should be 1 dimension"); if (!is_scalar(op->get_input_partial_shape(GATHER_TREE_END_TOKEN))) - OPENVINO_THROW(errorPrefix, " end_token should be scalar"); + THROW_CPU_NODE_ERR("end_token should be scalar"); } void GatherTree::initSupportedPrimitiveDescriptors() { @@ -64,7 +63,7 @@ void GatherTree::initSupportedPrimitiveDescriptors() { getOriginalInputPrecisionAtPort(GATHER_TREE_MAX_SEQ_LEN) != precision || getOriginalInputPrecisionAtPort(GATHER_TREE_END_TOKEN) != precision || getOriginalOutputPrecisionAtPort(0) != precision) { - OPENVINO_THROW(errorPrefix, " has incorrect input/output data precision. Must be the same."); + THROW_CPU_NODE_ERR("has incorrect input/output data precision. 
Must be the same."); } addSupportedPrimDesc({{LayoutType::ncsp, precision}, @@ -77,7 +76,7 @@ void GatherTree::initSupportedPrimitiveDescriptors() { void GatherTree::execute(dnnl::stream strm) { if (!execPtr) - OPENVINO_THROW(errorPrefix, " has not compiled executor."); + THROW_CPU_NODE_ERR("has not compiled executor."); if (precision == ov::element::f32) execPtr->exec(getSrcMemoryAtPort(GATHER_TREE_STEP_IDX), @@ -100,15 +99,15 @@ void GatherTree::prepareParams() { const auto& dstMemPtr = getDstMemoryAtPort(0); if (!stepIdxMemPtr || !stepIdxMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory of 'step_ids'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'step_ids'."); if (!parentIdxMemPtr || !parentIdxMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory of 'parent_ids'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'parent_ids'."); if (!maxSeqLenMemPtr || !maxSeqLenMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory of 'max_seq_len'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'max_seq_len'."); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined output memory."); + THROW_CPU_NODE_ERR("has undefined output memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(errorPrefix, " has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); const VectorDims& stepIdxDims = stepIdxMemPtr->getStaticDims(); const VectorDims& parentIdxDims = parentIdxMemPtr->getStaticDims(); diff --git a/src/plugins/intel_cpu/src/nodes/gather_tree.h b/src/plugins/intel_cpu/src/nodes/gather_tree.h index 79ecb41183b2e0..3a9368083e3d10 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_tree.h +++ b/src/plugins/intel_cpu/src/nodes/gather_tree.h @@ -56,8 +56,6 @@ class GatherTree : public Node { static const size_t GATHER_TREE_END_TOKEN = 3; ov::element::Type 
precision; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/grn.cpp b/src/plugins/intel_cpu/src/nodes/grn.cpp index d1275a153b4f2d..f3a22bd7f496e4 100644 --- a/src/plugins/intel_cpu/src/nodes/grn.cpp +++ b/src/plugins/intel_cpu/src/nodes/grn.cpp @@ -33,18 +33,17 @@ GRN::GRN(const std::shared_ptr& op, const GraphContext::CPtr context) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "GRN layer with name '" + op->get_friendly_name() + "'"; const auto grn = ov::as_type_ptr(op); if (grn == nullptr) - OPENVINO_THROW("Operation with name '", op->get_friendly_name(), "' is not an instance of GRN from opset1."); + THROW_CPU_NODE_ERR("is not an instance of GRN from opset1."); if (inputShapes.size() != 1 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto dataRank = getInputShapeAtPort(0).getRank(); if (dataRank != getOutputShapeAtPort(0).getRank()) - OPENVINO_THROW(errorPrefix, " has input/output rank mismatch"); + THROW_CPU_NODE_ERR("has input/output rank mismatch"); bias = grn->get_bias(); } @@ -63,18 +62,18 @@ void GRN::prepareParams() { const auto& dstMemPtr = getDstMemoryAtPort(0); if (!dataMemPtr || !dataMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined output memory"); + THROW_CPU_NODE_ERR("has undefined output memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(errorPrefix, " has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const VectorDims& dataDims = dataMemPtr->getStaticDims(); const VectorDims& dstDims = dstMemPtr->getStaticDims(); for (size_t i = 0; i < dataDims.size(); ++i) { if (dataDims[i] != 
dstDims[i]) - OPENVINO_THROW(errorPrefix, " hsd input/output tensors dimensions mismatch"); + THROW_CPU_NODE_ERR("hsd input/output tensors dimensions mismatch"); } if (dataDims.size() > 0) diff --git a/src/plugins/intel_cpu/src/nodes/grn.h b/src/plugins/intel_cpu/src/nodes/grn.h index 50686a8cfd0719..11ff05e5fb4910 100644 --- a/src/plugins/intel_cpu/src/nodes/grn.h +++ b/src/plugins/intel_cpu/src/nodes/grn.h @@ -30,8 +30,6 @@ class GRN : public Node { int C = 1; int H = 1; int W = 1; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/interaction.cpp b/src/plugins/intel_cpu/src/nodes/interaction.cpp index 55d5055ff45614..c674ac13cb773d 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.cpp +++ b/src/plugins/intel_cpu/src/nodes/interaction.cpp @@ -187,7 +187,6 @@ Interaction::Interaction(const std::shared_ptr& op, const GraphContext if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Interaction node with name '" + getName() + "'"; const auto interaction = ov::as_type_ptr(op); const std::vector& scales = interaction->get_output_scales(); if (!scales.empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/interaction.h b/src/plugins/intel_cpu/src/nodes/interaction.h index c4b02f0ef7fad0..8dd446bb595180 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.h +++ b/src/plugins/intel_cpu/src/nodes/interaction.h @@ -62,7 +62,6 @@ class Interaction : public Node { size_t inputSizes = 0; size_t outputFeaturesLen = 0; size_t interactFeatureSize = 0; - std::string errorPrefix; MemoryPtr inputMemPtr; MemoryPtr flatMemPtr; MemoryPtr outputMemPtr; diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/interpolate.cpp index b2c27b26914614..cfcf868cae8c31 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/interpolate.cpp @@ -1876,15 +1876,14 @@ Interpolate::Interpolate(const 
std::shared_ptr& op, const GraphContext : Node(op, context, InterpolateShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "Interpolate node with name '" + getName() + "'"; dataRank = getInputShapeAtPort(DATA_ID).getRank(); if (const auto interp = ov::as_type_ptr(op)) { is_version11 = false; const auto numInputs = inputShapes.size(); if (numInputs != 3 && numInputs != 4) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); isAxesSpecified = numInputs != 3; const auto& interpAttr = interp->get_attrs(); @@ -1903,7 +1902,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext } else if (interpMode == ngInterpMode::CUBIC) { interpAttrs.mode = InterpolateMode::cubic; } else { - OPENVINO_THROW(errorPrefix, " has unsupported interpolate mode"); + THROW_CPU_NODE_ERR("has unsupported interpolate mode"); } const auto& interpCoordTransMode = interpAttr.coordinate_transformation_mode; @@ -1918,7 +1917,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext } else if (interpCoordTransMode == ngInterpCoordTransf::ALIGN_CORNERS) { interpAttrs.coordTransMode = InterpolateCoordTransMode::align_corners; } else { - OPENVINO_THROW(errorPrefix, " has unsupported coordination transformation mode"); + THROW_CPU_NODE_ERR("has unsupported coordination transformation mode"); } if (interpAttrs.mode == InterpolateMode::nearest) { @@ -1934,7 +1933,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext } else if (interpNearestMode == ngInterpNearMode::SIMPLE) { interpAttrs.nearestMode = InterpolateNearestMode::simple; } else { - OPENVINO_THROW(errorPrefix, " has unsupported nearest mode"); + THROW_CPU_NODE_ERR("has unsupported nearest 
mode"); } } else if (interpAttrs.mode == InterpolateMode::cubic) { interpAttrs.cubeCoeff = static_cast(interpAttr.cube_coeff); @@ -1947,7 +1946,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext } else if (interpShapeCalcMode == ngInterpShapeCalcMode::SIZES) { interpAttrs.shapeCalcMode = InterpolateShapeCalcMode::sizes; } else { - OPENVINO_THROW(errorPrefix, " has unsupported shape calculation mode"); + THROW_CPU_NODE_ERR("has unsupported shape calculation mode"); } if (interpAttr.pads_begin.empty()) { @@ -1986,9 +1985,9 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext is_version11 = true; const auto numInputs = inputShapes.size(); if (numInputs != 2 && numInputs != 3) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); isAxesSpecified = numInputs != 2; const auto& interpAttr = interp->get_attrs(); @@ -1999,7 +1998,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext interpAttrs.mode = InterpolateMode::bicubic_pillow; interpAttrs.cubeCoeff = static_cast(interpAttr.cube_coeff); // fixed to be -0.5 } else { - OPENVINO_THROW(errorPrefix, " has unsupported interpolate mode"); + THROW_CPU_NODE_ERR("has unsupported interpolate mode"); } // pillow use fixed tf_half_pixel_for_nn style mode for coodinate transformation @@ -2018,7 +2017,7 @@ Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext } else if (interpShapeCalcMode == ngInterpShapeCalcMode::SIZES) { interpAttrs.shapeCalcMode = InterpolateShapeCalcMode::sizes; } else { - OPENVINO_THROW(errorPrefix, " has unsupported shape calculation mode"); + THROW_CPU_NODE_ERR("has unsupported shape calculation mode"); } if (interpAttr.pads_begin.empty()) { @@ -2061,9 +2060,9 @@ void 
Interpolate::getSupportedDescriptors() { if (getParentEdges().size() != 2 && getParentEdges().size() != 3 && getParentEdges().size() != 4) // v4: data, target_shape, scale, axis(optional). // v11: data, size_or_scale, axis(optional) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); // get pad for (size_t i = 0; i < interpAttrs.padBegin.size(); i++) { @@ -2360,31 +2359,31 @@ void Interpolate::prepareParams() { auto dstMemPtr = getDstMemoryAtPort(0); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined destination memory"); + THROW_CPU_NODE_ERR("has undefined destination memory"); auto srcMemPtr = getSrcMemoryAtPort(DATA_ID); if (!srcMemPtr || !srcMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); if (interpAttrs.shapeCalcMode == InterpolateShapeCalcMode::sizes) { auto tsMemPtr = getSrcMemoryAtPort(TARGET_SHAPE_ID); if (!tsMemPtr || !tsMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined target shape memory"); + THROW_CPU_NODE_ERR("has undefined target shape memory"); } else { auto scaleMemPtr = getSrcMemoryAtPort(get_scale_id()); if (!scaleMemPtr || !scaleMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined scales memory"); + THROW_CPU_NODE_ERR("has undefined scales memory"); } if (isAxesSpecified) { auto axesMemPtr = getSrcMemoryAtPort(get_axis_id()); if (!axesMemPtr || !axesMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined axes memory"); + THROW_CPU_NODE_ERR("has undefined axes memory"); } const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) - OPENVINO_THROW(errorPrefix, " did not set preferable primitive descriptor"); + 
THROW_CPU_NODE_ERR("did not set preferable primitive descriptor"); const auto& srcDimsOrign = srcMemPtr->getStaticDims(); const auto& dstDimsOrign = dstMemPtr->getStaticDims(); @@ -2479,9 +2478,9 @@ void Interpolate::createPrimitive() { auto srcMemPtr = getSrcMemoryAtPort(DATA_ID); auto dstMemPtr = getDstMemoryAtPort(0); if (!srcMemPtr) - OPENVINO_THROW(errorPrefix, " has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (!dstMemPtr) - OPENVINO_THROW(errorPrefix, " has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (dstMemPtr->getDesc().hasLayoutType(LayoutType::ncsp)) { interpAttrs.layout = InterpolateLayoutType::planar; @@ -3110,7 +3109,7 @@ float Interpolate::InterpolateExecutorBase::coordTransToInput(int outCoord, break; } default: { - OPENVINO_THROW("errorPrefix", " does not support specified coordinate transformation mode"); + OPENVINO_THROW("does not support specified coordinate transformation mode"); break; } } @@ -3146,7 +3145,7 @@ int Interpolate::InterpolateExecutorBase::nearestRound(float originCoord, return static_cast(originCoord); } default: { - OPENVINO_THROW("errorPrefix", " does not support specified nearest round mode"); + OPENVINO_THROW("does not support specified nearest round mode"); break; } } diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.h b/src/plugins/intel_cpu/src/nodes/interpolate.h index 77ea04296fed91..ab9b6688ac3c39 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.h +++ b/src/plugins/intel_cpu/src/nodes/interpolate.h @@ -347,8 +347,6 @@ class Interpolate : public Node { VectorDims lastOutputDims; - std::string errorPrefix; - bool canUseAclExecutor = false; std::shared_ptr aclExecPtr = nullptr; }; diff --git a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp index 57c84b7c822df0..172f4fd3ebe87a 100644 --- a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp @@ 
-33,7 +33,6 @@ LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext:: OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "LogSoftmax layer with name '" + op->get_friendly_name() + "'"; const auto logSoftMax = ov::as_type_ptr(op); if (logSoftMax == nullptr) OPENVINO_THROW("Operation with name '", @@ -41,7 +40,7 @@ LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext:: "' is not an instance of LogSoftmax from opset5."); if (inputShapes.size() != 1 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto dimsSize = getInputShapeAtPort(0).getDims().size(); if (dimsSize == 0) @@ -51,7 +50,7 @@ LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext:: axis += dimsSize; if (dimsSize < static_cast((size_t)(1) + axis)) - OPENVINO_THROW(errorPrefix, " has incorrect input parameters dimensions and axis number!"); + THROW_CPU_NODE_ERR("has incorrect input parameters dimensions and axis number!"); } void LogSoftmax::initSupportedPrimitiveDescriptors() { diff --git a/src/plugins/intel_cpu/src/nodes/log_softmax.h b/src/plugins/intel_cpu/src/nodes/log_softmax.h index fd7064a08f679d..e2f64e52449681 100644 --- a/src/plugins/intel_cpu/src/nodes/log_softmax.h +++ b/src/plugins/intel_cpu/src/nodes/log_softmax.h @@ -30,8 +30,6 @@ class LogSoftmax : public Node { size_t reducedAxisStride = 1; size_t axisStep = 1; bool isLastDim = false; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/lrn.cpp b/src/plugins/intel_cpu/src/nodes/lrn.cpp index 9b1746bd43c936..b913e831ddabe4 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.cpp +++ b/src/plugins/intel_cpu/src/nodes/lrn.cpp @@ -113,8 +113,6 @@ Lrn::Lrn(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if 
(isSupportedOperation(op, errorMessage)) { - errorPrefix = "LRN node with name '" + getName() + "'"; - auto lrn = ov::as_type_ptr(op); auto axes = ov::as_type_ptr(lrn->get_input_node_shared_ptr(1))->cast_vector(); @@ -134,9 +132,9 @@ void Lrn::getSupportedDescriptors() { return; if (getParentEdges().size() != 2) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); ov::element::Type precision = getOriginalOutputPrecisionAtPort(0); if (precision != ov::element::f32 && precision != ov::element::bf16) @@ -166,13 +164,13 @@ void Lrn::prepareParams() { auto srcMemPtr = getSrcMemoryAtPort(0); auto dstMemPtr = getDstMemoryAtPort(0); if (!srcMemPtr || !srcMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " input memory is undefined"); + THROW_CPU_NODE_ERR("input memory is undefined"); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, "destination memory is undefined"); + THROW_CPU_NODE_ERR("destination memory is undefined"); const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) - OPENVINO_THROW(errorPrefix, "preferable primitive descriptor did not set"); + THROW_CPU_NODE_ERR("preferable primitive descriptor did not set"); auto inpDesc = getParentEdgeAt(0)->getMemory().getDescWithType(); @@ -246,7 +244,7 @@ void Lrn::execute(dnnl::stream strm) { if (execPtr) { execPtr->exec(primArgs, strm); } else { - OPENVINO_THROW(errorPrefix, " doesn't have an initialized executor"); + THROW_CPU_NODE_ERR("doesn't have an initialized executor"); } } diff --git a/src/plugins/intel_cpu/src/nodes/lrn.h b/src/plugins/intel_cpu/src/nodes/lrn.h index 43fd0ec318f9b1..bb87e797538045 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.h +++ b/src/plugins/intel_cpu/src/nodes/lrn.h @@ -41,8 +41,6 @@ class 
Lrn : public Node { int k = 1; float alpha = 1.0f; float beta = 1.0f; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.h b/src/plugins/intel_cpu/src/nodes/mathematics.h index 99bb65cb4921f8..4849bceab4eaa6 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.h +++ b/src/plugins/intel_cpu/src/nodes/mathematics.h @@ -33,8 +33,6 @@ class Math : public Node { float alpha = 0.0f; float beta = 0.0f; float gamma = 0.0f; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp index b0853737040d14..a9020ea8798243 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.cpp +++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp @@ -116,7 +116,6 @@ MatMul::MatMul(const std::shared_ptr& op, const GraphContext::CPtr con : Node(op, context, MMShapeInferFactory(op)), withBiases(false) { std::string errorMessage; - errorPrefix = "MatMul node with name '" + getName() + "'"; if (!isSupportedOperation(op, errorMessage)) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); @@ -269,9 +268,9 @@ dnnl::memory::desc MatMul::getBiasDescFrom(const DnnlMemoryDescCPtr outMemDesc) void MatMul::getSupportedDescriptors() { if (getParentEdges().size() != getOriginalInputsNumber()) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges for layer ", getName()); + THROW_CPU_NODE_ERR("has incorrect number of input edges for layer ", getName()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has incorrect number of output edges for layer ", getName()); + THROW_CPU_NODE_ERR("has incorrect number of output edges for layer ", getName()); withBiases = getOriginalInputsNumber() == 3; @@ -317,7 +316,7 @@ void MatMul::getSupportedDescriptors() { auto outputShape = getOutputShapeAtPort(0); if (inputShape0.getRank() != inputShape1.getRank() || inputShape0.getRank() != outputShape.getRank()) - OPENVINO_THROW(errorPrefix, " has invalid dims 
count"); + THROW_CPU_NODE_ERR("has invalid dims count"); const int nDims = inputShape0.getRank(); const auto xAxis = nDims - 1; @@ -334,12 +333,12 @@ void MatMul::getSupportedDescriptors() { // coverity[copy_paste_error] if (!dimsEqualWeak(inDims0[xAxis0], inDims1[yAxis1]) || !dimsEqualWeak(inDims0[yAxis0], outDims[yAxis]) || !dimsEqualWeak(inDims1[xAxis1], outDims[xAxis])) - OPENVINO_THROW(errorPrefix, " has incorrect spatial input and output dimensions"); + THROW_CPU_NODE_ERR("has incorrect spatial input and output dimensions"); for (int dim_idx = nDims - 3; dim_idx >= 0; dim_idx--) { if ((!dimsEqualWeak(inDims0[dim_idx], outDims[dim_idx]) && !dimsEqualWeak(inDims0[dim_idx], 1)) || (!dimsEqualWeak(inDims1[dim_idx], outDims[dim_idx]) && !dimsEqualWeak(inDims1[dim_idx], 1))) { - OPENVINO_THROW(errorPrefix, " has incorrect input batch dimensions"); + THROW_CPU_NODE_ERR("has incorrect input batch dimensions"); } } @@ -565,9 +564,9 @@ void MatMul::prepareParams() { auto src0MemPtr = getSrcMemoryAtPort(0); auto src1MemPtr = getSrcMemoryAtPort(1); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined destination memory"); + THROW_CPU_NODE_ERR("has undefined destination memory"); if (!src0MemPtr || !src0MemPtr->isDefined() || !src1MemPtr || !src1MemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); // check for a degenerate case. 
In this context the degenerate case is a matrix multiplication where the // collapsing dimension is zero, e.g., AB=C, where A has the shape [10, 0] and B has the shape [0, 20], @@ -585,7 +584,7 @@ void MatMul::prepareParams() { const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) - OPENVINO_THROW(errorPrefix, " did not set preferable primitive descriptor"); + THROW_CPU_NODE_ERR("did not set preferable primitive descriptor"); DnnlMemoryDescPtr src0TransposedDesc; DnnlMemoryDescPtr src1TransposedDesc; @@ -617,7 +616,7 @@ void MatMul::prepareParams() { if (withBiases) { auto biasMemory = getSrcMemoryAtPort(2); if (!biasMemory || !biasMemory->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined bias memory"); + THROW_CPU_NODE_ERR("has undefined bias memory"); dnnlBiasMemDesc = biasMemory->getDescWithType(); } @@ -692,7 +691,7 @@ void MatMul::execute(dnnl::stream strm) { // this is a degenerate case, fill output with zeroes getDstMemoryAtPort(0)->nullify(); } else { - OPENVINO_THROW(errorPrefix, " doesn't have an initialized executor"); + THROW_CPU_NODE_ERR("doesn't have an initialized executor"); } } diff --git a/src/plugins/intel_cpu/src/nodes/matmul.h b/src/plugins/intel_cpu/src/nodes/matmul.h index 93dcb93fb44386..aea709970e2839 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.h +++ b/src/plugins/intel_cpu/src/nodes/matmul.h @@ -59,8 +59,6 @@ class MatMul : public Node { void setPostOps(dnnl::primitive_attr& attr, const VectorDims& dims, bool initWeights); - std::string errorPrefix; - /* whether to transpose input */ std::array transposeIn; diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp index 302f1574c4e677..2f9758fbc1b242 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp @@ -54,16 +54,14 @@ MatrixNms::MatrixNms(const std::shared_ptr& op, const GraphContext::CP 
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - m_errorPrefix = "MatrixNMS layer with name '" + getName() + "' "; - if (one_of(op->get_type_info(), ov::op::internal::NmsStaticShapeIE::get_type_info_static())) m_outStaticShape = true; if (getOriginalInputsNumber() != 2) - OPENVINO_THROW(m_errorPrefix, "has incorrect number of input edges: ", getOriginalInputsNumber()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getOriginalInputsNumber()); if (getOriginalOutputsNumber() != 3) - OPENVINO_THROW(m_errorPrefix, "has incorrect number of output edges: ", getOriginalOutputsNumber()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getOriginalOutputsNumber()); const auto matrix_nms = ov::as_type_ptr(op); @@ -101,12 +99,12 @@ MatrixNms::MatrixNms(const std::shared_ptr& op, const GraphContext::CP const auto& boxes_dims = getInputShapeAtPort(NMS_BOXES).getDims(); if (boxes_dims.size() != 3) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'boxes' input rank: ", boxes_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'boxes' input rank: ", boxes_dims.size()); if (boxes_dims[2] != 4) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'boxes' input 3rd dimension size: ", boxes_dims[2]); + THROW_CPU_NODE_ERR("has unsupported 'boxes' input 3rd dimension size: ", boxes_dims[2]); const auto& scores_dims = getInputShapeAtPort(NMS_SCORES).getDims(); if (scores_dims.size() != 3) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'scores' input rank: ", scores_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'scores' input rank: ", scores_dims.size()); } void MatrixNms::initSupportedPrimitiveDescriptors() { @@ -265,7 +263,7 @@ void MatrixNms::prepareParams() { const auto& boxes_dims = getParentEdgeAt(NMS_BOXES)->getMemory().getStaticDims(); const auto& scores_dims = getParentEdgeAt(NMS_SCORES)->getMemory().getStaticDims(); if (!(boxes_dims[0] == scores_dims[0] && boxes_dims[1] == scores_dims[2])) { - OPENVINO_THROW(m_errorPrefix, "has incompatible 
'boxes' and 'scores' input dmensions"); + THROW_CPU_NODE_ERR("has incompatible 'boxes' and 'scores' input dmensions"); } m_numBatches = boxes_dims[0]; @@ -450,7 +448,7 @@ void MatrixNms::checkPrecision(const ov::element::Type prec, const std::string name, const std::string type) { if (std::find(precList.begin(), precList.end(), prec) == precList.end()) - OPENVINO_THROW(m_errorPrefix, "has unsupported '", name, "' ", type, " precision: ", prec); + THROW_CPU_NODE_ERR("has unsupported '", name, "' ", type, " precision: ", prec); } } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.h b/src/plugins/intel_cpu/src/nodes/matrix_nms.h index 017ddba9e5418d..ad872dc3eeba2d 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.h +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.h @@ -96,7 +96,6 @@ class MatrixNms : public Node { int64_t classIndex = -1; float score = 0.0f; }; - std::string m_errorPrefix; const std::string m_inType = "input", m_outType = "output"; std::vector m_numPerBatch; std::vector> m_numPerBatchClass; diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp index 510dcc8678ab8a..67d840ebbc48ae 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp @@ -48,20 +48,19 @@ MultiClassNms::MultiClassNms(const std::shared_ptr& op, const GraphCon if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - m_errorPrefix = "MultiClassNms layer with name '" + getName() + "' "; if (one_of(op->get_type_info(), ov::op::internal::MulticlassNmsIEInternal::get_type_info_static())) m_outStaticShape = true; if (getOriginalInputsNumber() != 2 && getOriginalInputsNumber() != 3) - OPENVINO_THROW(m_errorPrefix, "has incorrect number of input edges: ", getOriginalInputsNumber()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getOriginalInputsNumber()); if 
(getOriginalOutputsNumber() != 3) - OPENVINO_THROW(m_errorPrefix, "has incorrect number of output edges: ", getOriginalOutputsNumber()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getOriginalOutputsNumber()); auto nmsBase = ov::as_type_ptr(op); if (nmsBase == nullptr) - OPENVINO_THROW(m_errorPrefix, " is not an instance of MulticlassNmsBase."); + THROW_CPU_NODE_ERR("is not an instance of MulticlassNmsBase."); auto& atrri = nmsBase->get_attrs(); m_sortResultAcrossBatch = atrri.sort_result_across_batch; m_nmsTopK = atrri.nms_top_k; @@ -85,32 +84,23 @@ MultiClassNms::MultiClassNms(const std::shared_ptr& op, const GraphCon auto boxes_ps = PartialShape(boxes_dims); auto scores_ps = PartialShape(scores_dims); if (boxes_dims.size() != 3) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'boxes' input rank: ", boxes_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'boxes' input rank: ", boxes_dims.size()); if (boxes_dims[2] != 4) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'boxes' input 3rd dimension size: ", boxes_dims[2]); + THROW_CPU_NODE_ERR("has unsupported 'boxes' input 3rd dimension size: ", boxes_dims[2]); if (scores_dims.size() == 3) { if (!boxes_ps[0].compatible(scores_ps[0]) || !boxes_ps[1].compatible(scores_ps[2])) - OPENVINO_THROW(m_errorPrefix, - "has incompatible 'boxes' and 'scores' shape ", - boxes_ps, - " v.s. ", - scores_ps); + THROW_CPU_NODE_ERR("has incompatible 'boxes' and 'scores' shape ", boxes_ps, " v.s. ", scores_ps); } else if (scores_dims.size() == 2) { if (op->get_type_info() == ov::op::v8::MulticlassNms::get_type_info_static()) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'scores' input rank: ", scores_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'scores' input rank: ", scores_dims.size()); if (!boxes_ps[0].compatible(scores_ps[0]) || !boxes_ps[1].compatible(scores_ps[1])) - OPENVINO_THROW(m_errorPrefix, - "has incompatible 'boxes' and 'scores' shape ", - boxes_ps, - " v.s. 
", - scores_ps); + THROW_CPU_NODE_ERR("has incompatible 'boxes' and 'scores' shape ", boxes_ps, " v.s. ", scores_ps); if (getOriginalInputsNumber() != 3) - OPENVINO_THROW(m_errorPrefix, - "has incorrect number of input edges: ", - getOriginalInputsNumber(), - " when input 'scores' is 2D."); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", + getOriginalInputsNumber(), + " when input 'scores' is 2D."); } else { - OPENVINO_THROW(m_errorPrefix, "has unsupported 'scores' input rank: ", scores_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'scores' input rank: ", scores_dims.size()); } } @@ -169,31 +159,28 @@ void MultiClassNms::prepareParams() { if (shared) { if (boxes_dims[0] != scores_dims[0] || boxes_dims[1] != scores_dims[2]) - OPENVINO_THROW(m_errorPrefix, - "has incompatible 'boxes' and 'scores' shape ", - PartialShape(boxes_dims), - " v.s. ", - PartialShape(scores_dims)); + THROW_CPU_NODE_ERR("has incompatible 'boxes' and 'scores' shape ", + PartialShape(boxes_dims), + " v.s. ", + PartialShape(scores_dims)); } else if (scores_dims.size() == 2) { if (boxes_dims[0] != scores_dims[0] || boxes_dims[1] != scores_dims[1]) - OPENVINO_THROW(m_errorPrefix, - "has incompatible 'boxes' and 'scores' shape ", - PartialShape(boxes_dims), - " v.s. ", - PartialShape(scores_dims)); + THROW_CPU_NODE_ERR("has incompatible 'boxes' and 'scores' shape ", + PartialShape(boxes_dims), + " v.s. 
", + PartialShape(scores_dims)); if (!has_roinum) - OPENVINO_THROW(m_errorPrefix, - "has incorrect number of input edges: ", - getOriginalInputsNumber(), - " when input 'scores' is 2D."); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", + getOriginalInputsNumber(), + " when input 'scores' is 2D."); } else { - OPENVINO_THROW(m_errorPrefix, "has unsupported 'scores' input rank: ", scores_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'scores' input rank: ", scores_dims.size()); } if (has_roinum) { const auto& roisnum_dims = getParentEdgeAt(NMS_ROISNUM)->getMemory().getStaticDims(); if (roisnum_dims.size() != 1) - OPENVINO_THROW(m_errorPrefix, "has unsupported 'roisnum' input rank: ", roisnum_dims.size()); + THROW_CPU_NODE_ERR("has unsupported 'roisnum' input rank: ", roisnum_dims.size()); m_numBatches = shared ? boxes_dims[0] : roisnum_dims[0]; } else { m_numBatches = boxes_dims[0]; @@ -657,7 +644,7 @@ void MultiClassNms::checkPrecision(const ov::element::Type prec, const std::string name, const std::string type) { if (std::find(precList.begin(), precList.end(), prec) == precList.end()) - OPENVINO_THROW(m_errorPrefix, "has unsupported '", name, "' ", type, " precision: ", prec); + THROW_CPU_NODE_ERR("has unsupported '", name, "' ", type, " precision: ", prec); } } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp index 2a6a3d9f344a8d..030a14e56b61df 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp @@ -66,8 +66,6 @@ class MultiClassNms : public Node { bool m_outStaticShape = false; - std::string m_errorPrefix; - std::vector> m_numFiltBox; // number of rois after nms for each class in each image std::vector m_numBoxOffset; const std::string m_inType = "input", m_outType = "output"; diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.cpp b/src/plugins/intel_cpu/src/nodes/non_zero.cpp index 
57d9c4395b1730..57eee8520d0ccf 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_zero.cpp @@ -34,21 +34,19 @@ bool NonZero::isSupportedOperation(const std::shared_ptr& op, st NonZero::NonZero(const std::shared_ptr& op, const GraphContext::CPtr context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; - if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "NonZero layer with name '" + getName() + "' "; - } else { + if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (op->get_output_element_type(0) != ov::element::i32) { - OPENVINO_THROW(errorPrefix, "doesn't support demanded output precision"); + THROW_CPU_NODE_ERR("doesn't support demanded output precision"); } } void NonZero::getSupportedDescriptors() { if (getParentEdges().size() != 1) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (!getChildEdges().size()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); } void NonZero::initSupportedPrimitiveDescriptors() { diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.h b/src/plugins/intel_cpu/src/nodes/non_zero.h index 58fa426398714c..7ba3552a45c846 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.h +++ b/src/plugins/intel_cpu/src/nodes/non_zero.h @@ -40,7 +40,6 @@ class NonZero : public Node { private: int threadsCount = 1; - std::string errorPrefix; template void executeSpecified(); template diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.cpp b/src/plugins/intel_cpu/src/nodes/one_hot.cpp index cfeed26f3ab65b..2a8f6aea669107 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.cpp +++ b/src/plugins/intel_cpu/src/nodes/one_hot.cpp @@ -48,7 +48,6 @@ 
OneHot::OneHot(const std::shared_ptr& op, const GraphContext::CPtr con OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "OneHot layer with name '" + op->get_friendly_name() + "'"; const auto oneHot = ov::as_type_ptr(op); const auto depthNode = ov::as_type_ptr(oneHot->get_input_node_shared_ptr(DEPTH_ID)); if (depthNode) { @@ -70,12 +69,12 @@ OneHot::OneHot(const std::shared_ptr& op, const GraphContext::CPtr con axis += output_dims_size; } if (axis < 0 || axis >= output_dims_size) { - OPENVINO_THROW(errorPrefix, " has unsupported 'axis' attribute: ", oneHot->get_axis()); + THROW_CPU_NODE_ERR("has unsupported 'axis' attribute: ", oneHot->get_axis()); } if (!(((1 + srcDims.size()) == dstDims.size()) || (depthNode && (srcDims.size() == 1 && dstDims.size() == 1 && dstDims[0] == depth && srcDims[0] == 1)))) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output dimensions!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions!"); } bool OneHot::needShapeInfer() const { @@ -95,7 +94,7 @@ void OneHot::initSupportedPrimitiveDescriptors() { // check a precision of the input tensor auto input_precision = getOriginalInputPrecisionAtPort(INDICES_ID); if (input_precision != ov::element::i32) { - OPENVINO_THROW(errorPrefix, " has incorrect input precision for the input. Only I32 is supported!"); + THROW_CPU_NODE_ERR("has incorrect input precision for the input. 
Only I32 is supported!"); } output_precision = getOriginalOutputPrecisionAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.h b/src/plugins/intel_cpu/src/nodes/one_hot.h index fd451e9eaca7a7..f33efaef39fc26 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.h +++ b/src/plugins/intel_cpu/src/nodes/one_hot.h @@ -49,8 +49,6 @@ class OneHot : public Node { ov::element::Type output_precision; - std::string errorPrefix; - static const size_t INDICES_ID = 0; static const size_t DEPTH_ID = 1; static const size_t ON_VALUE_ID = 2; diff --git a/src/plugins/intel_cpu/src/nodes/pad.cpp b/src/plugins/intel_cpu/src/nodes/pad.cpp index 28beb4bd9b24f1..efbf51b3e05b7f 100644 --- a/src/plugins/intel_cpu/src/nodes/pad.cpp +++ b/src/plugins/intel_cpu/src/nodes/pad.cpp @@ -44,20 +44,19 @@ Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr context) if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = NameFromType(getType()) + " node with name '" + getName() + "' "; if (inputShapes.size() != 3 && inputShapes.size() != 4) - OPENVINO_THROW(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, "Incorrect number of output edges"); + THROW_CPU_NODE_ERR("Incorrect number of output edges"); const size_t srcDimsRank = inputShapes[DATA_ID].getRank(); const size_t dstDimsRank = outputShapes[DATA_ID].getRank(); if (srcDimsRank != dstDimsRank) - OPENVINO_THROW(errorPrefix, "has incorrect number of input/output dimensions!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions!"); auto pad = ov::as_type(op.get()); if (!pad) { - OPENVINO_THROW(errorPrefix, "couldn't be casted to op of opset1"); + THROW_CPU_NODE_ERR("couldn't be casted to op of opset1"); } shapeHasDataDependency = !ov::is_type(op->get_input_node_shared_ptr(PADS_BEGIN_ID)) || @@ -74,7 +73,7 @@ Pad::Pad(const 
std::shared_ptr& op, const GraphContext::CPtr context) parameter.push_back(value); } if (parameter.size() != srcDimsRank) - OPENVINO_THROW(errorPrefix, "has incorrect number of input/output dimensions!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions!"); } }; @@ -88,7 +87,7 @@ Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr context) if (isPadValueSpecified && op->get_input_node_shared_ptr(PAD_VALUE_ID)->get_type_info() == ov::op::v0::Constant::get_type_info_static()) { if (!ov::is_scalar(pad->get_input_shape(PAD_VALUE_ID))) - OPENVINO_THROW(errorPrefix, "has non scalar 'pad_value' input"); + THROW_CPU_NODE_ERR("has non scalar 'pad_value' input"); attrs.padValue = ov::as_type_ptr(pad->get_input_node_shared_ptr(PAD_VALUE_ID)) ->cast_vector()[0]; attrs.constPadValue = true; @@ -100,7 +99,7 @@ Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr context) } else if (pad_mode == op::PadMode::SYMMETRIC) { attrs.padMode = SYMMETRIC; } else { - OPENVINO_THROW(errorPrefix, "has unsupported pad_mode: " + ov::as_string(pad_mode)); + THROW_CPU_NODE_ERR("has unsupported pad_mode: " + ov::as_string(pad_mode)); } } @@ -205,14 +204,12 @@ bool Pad::isExecutable() const { void Pad::prepareParams() { updateLastInputDims(); - execPtr = std::make_shared(attrs, srcMemory, dstMemory, errorPrefix); + execPtr = std::make_shared(attrs, srcMemory, dstMemory); } Pad::PadExecutor::PadExecutor(const PadAttrs& attrs, const std::vector& srcMemory, - const std::vector& dstMemory, - const std::string& errorPrefix) - : errorPrefix(errorPrefix) { + const std::vector& dstMemory) { paramsInitialization(attrs, srcMemory, dstMemory); workPartition(); innerParamsInitialization(); @@ -225,9 +222,9 @@ void Pad::PadExecutor::paramsInitialization(const PadAttrs& attrs, auto& srcMemPtr = srcMemory[DATA_ID]; auto& dstMemPtr = dstMemory[DATA_ID]; if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, "has undefined source memory."); + 
OPENVINO_THROW("Pad executor has undefined source memory."); if (!srcMemPtr || !srcMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, "has undefined destination memory."); + OPENVINO_THROW("Pad executor has undefined destination memory."); const auto srcBlockMemDesc = srcMemPtr->getDescWithType(); const auto dstBlockMemDesc = dstMemPtr->getDescWithType(); const auto& srcDims = srcBlockMemDesc->getBlockDims(); @@ -392,7 +389,7 @@ void Pad::PadExecutor::exec(const MemoryPtr& srcMemPtr, const MemoryPtr& dstMemP void Pad::execute(dnnl::stream strm) { if (!execPtr) - OPENVINO_THROW(errorPrefix, "has not compiled executor."); + THROW_CPU_NODE_ERR("has not compiled executor."); execPtr->exec(getSrcMemoryAtPort(0), getDstMemoryAtPort(0)); } diff --git a/src/plugins/intel_cpu/src/nodes/pad.h b/src/plugins/intel_cpu/src/nodes/pad.h index 4654bb714680d6..38fb57e4c9971a 100644 --- a/src/plugins/intel_cpu/src/nodes/pad.h +++ b/src/plugins/intel_cpu/src/nodes/pad.h @@ -48,8 +48,7 @@ class Pad : public Node { struct PadExecutor { PadExecutor(const PadAttrs& attrs, const std::vector& srcMemory, - const std::vector& dstMemory, - const std::string& errorPrefix); + const std::vector& dstMemory); void exec(const MemoryPtr& srcMemPtr, const MemoryPtr& dstMemPtr); ~PadExecutor() = default; @@ -106,7 +105,6 @@ class Pad : public Node { size_t innerEndPadCount = 0lu; PadMode padMode; } params; - const std::string errorPrefix; }; static constexpr size_t DATA_ID = 0lu; @@ -120,7 +118,6 @@ class Pad : public Node { executorPtr execPtr = nullptr; std::vector srcMemory; std::vector dstMemory; - std::string errorPrefix; bool shapeHasDataDependency = false; }; diff --git a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp index 1b42d0f0cbc2b0..3049e82dcd93ee 100644 --- a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp @@ -66,25 +66,20 @@ PSROIPooling::PSROIPooling(const std::shared_ptr& 
op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = std::string(op->get_type_name()) + " node with name '" + op->get_friendly_name() + "'"; - const auto psroi = ov::as_type_ptr(op); const auto defPsroi = ov::as_type_ptr(op); noTrans = op->get_input_size() == 2; if (op->get_input_shape(0).size() != 4) - OPENVINO_THROW(errorPrefix, - " has first input with incorrect rank: " + std::to_string(op->get_input_shape(0).size())); + THROW_CPU_NODE_ERR("has first input with incorrect rank: " + std::to_string(op->get_input_shape(0).size())); if (op->get_input_shape(1).size() != 2) - OPENVINO_THROW(errorPrefix, - " has second input with incorrect rank: " + std::to_string(op->get_input_shape(1).size())); + THROW_CPU_NODE_ERR("has second input with incorrect rank: " + std::to_string(op->get_input_shape(1).size())); if (!noTrans && op->get_input_shape(2).size() != 4) - OPENVINO_THROW(errorPrefix, - " has third input with incorrect rank: " + std::to_string(op->get_input_shape(2).size())); + THROW_CPU_NODE_ERR("has third input with incorrect rank: " + std::to_string(op->get_input_shape(2).size())); if (psroi) { if (psroi->get_input_size() != 2) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); mode = psroi->get_mode(); if (mode == "average") { @@ -104,7 +99,7 @@ PSROIPooling::PSROIPooling(const std::shared_ptr& op, const GraphConte } else if (defPsroi) { if (defPsroi->get_input_size() != 2 && defPsroi->get_input_size() != 3) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); algorithm = Algorithm::PSROIPoolingBilinearDeformable; @@ -211,19 +206,17 @@ void PSROIPooling::unpackParams(const BlockedMemoryDesc& srcDesc, auto inBlkDims = srcDesc.getBlockDims(); auto outBlkDims = dstDesc.getBlockDims(); if (inBlkDims.size() != expectedInBlockDimsSize) - 
OPENVINO_THROW(errorPrefix, - " has unexpected size of blocking dims in input (given ", - inBlkDims.size(), - ", expected ", - expectedInBlockDimsSize, - ")"); + THROW_CPU_NODE_ERR("has unexpected size of blocking dims in input (given ", + inBlkDims.size(), + ", expected ", + expectedInBlockDimsSize, + ")"); if (outBlkDims.size() != expectedOutBlockDimsSize) - OPENVINO_THROW(errorPrefix, - " has unexpected size of blocking dims in output (given ", - outBlkDims.size(), - ", expected ", - expectedOutBlockDimsSize, - ")"); + THROW_CPU_NODE_ERR("has unexpected size of blocking dims in output (given ", + outBlkDims.size(), + ", expected ", + expectedOutBlockDimsSize, + ")"); inBlockSize = (inpIsBlk ? srcDesc.getBlockDims()[4] : 1); outBlockSize = (outIsBlk ? dstDesc.getBlockDims()[4] : 1); @@ -614,8 +607,8 @@ void PSROIPooling::execute(dnnl::stream strm) { if (!((inputPrec == ov::element::bf16 && outputPrec == ov::element::bf16) || (inputPrec == ov::element::f32 && outputPrec == ov::element::f32))) { - OPENVINO_THROW(errorPrefix + " has different precisions on input: " + inputPrec.get_type_name() + - " and output: " + outputPrec.get_type_name()); + THROW_CPU_NODE_ERR("has different precisions on input: " + inputPrec.get_type_name() + + " and output: " + outputPrec.get_type_name()); } PSROIPoolingContext ctx = { diff --git a/src/plugins/intel_cpu/src/nodes/psroi_pooling.h b/src/plugins/intel_cpu/src/nodes/psroi_pooling.h index 3a39d6e072c12a..2a0f59de26430e 100644 --- a/src/plugins/intel_cpu/src/nodes/psroi_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/psroi_pooling.h @@ -46,8 +46,6 @@ class PSROIPooling : public Node { int partSize = 1; float transStd = 1.f; - std::string errorPrefix; - void unpackParams(const BlockedMemoryDesc& srcDesc, const BlockedMemoryDesc& dstDesc, int& hInputStride, diff --git a/src/plugins/intel_cpu/src/nodes/range.cpp b/src/plugins/intel_cpu/src/nodes/range.cpp index 64897085142392..02eb470d2f6d5b 100644 --- 
a/src/plugins/intel_cpu/src/nodes/range.cpp +++ b/src/plugins/intel_cpu/src/nodes/range.cpp @@ -36,26 +36,24 @@ Range::Range(const std::shared_ptr& op, const GraphContext::CPtr conte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Range layer with name '" + op->get_friendly_name() + "'"; - if (getOriginalInputsNumber() != 3 || getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto start_dims = op->get_input_shape(RANGE_START); if (ov::shape_size(start_dims) != 1) - OPENVINO_THROW(errorPrefix, " has start scalar with more than 1 value"); + THROW_CPU_NODE_ERR("has start scalar with more than 1 value"); auto limit_dims = op->get_input_shape(RANGE_LIMIT); if (ov::shape_size(limit_dims) != 1) - OPENVINO_THROW(errorPrefix, " has limit scalar with more than 1 value"); + THROW_CPU_NODE_ERR("has limit scalar with more than 1 value"); auto delta_dims = op->get_input_shape(RANGE_DELTA); if (ov::shape_size(delta_dims) != 1) - OPENVINO_THROW(errorPrefix, " has delta scalar with more than 1 value"); + THROW_CPU_NODE_ERR("has delta scalar with more than 1 value"); size_t dstRank = op->get_output_partial_shape(0).size(); if (dstRank > 1) - OPENVINO_THROW(errorPrefix, " has unsupported rank for output: ", dstRank); + THROW_CPU_NODE_ERR("has unsupported rank for output: ", dstRank); } void Range::initSupportedPrimitiveDescriptors() { diff --git a/src/plugins/intel_cpu/src/nodes/range.h b/src/plugins/intel_cpu/src/nodes/range.h index d5cbd79be1f4ee..bd21f12495f76b 100644 --- a/src/plugins/intel_cpu/src/nodes/range.h +++ b/src/plugins/intel_cpu/src/nodes/range.h @@ -41,8 +41,6 @@ class Range : public Node { static const size_t RANGE_START = 0; static const size_t RANGE_LIMIT = 1; static const size_t RANGE_DELTA = 2; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/reduce.cpp 
b/src/plugins/intel_cpu/src/nodes/reduce.cpp index a8165b0226b262..f320ed270c6d18 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.cpp +++ b/src/plugins/intel_cpu/src/nodes/reduce.cpp @@ -1963,21 +1963,20 @@ Reduce::Reduce(const std::shared_ptr& op, const GraphContext::CPtr con : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "Reduce node with name '" + getName() + "'"; getInitializers().at(op->get_type_info())(op, *this); if (const auto reduce = ov::as_type_ptr(op)) { keep_dims = reduce->get_keep_dims(); auto reduceConst = ov::as_type_ptr(reduce->get_input_node_shared_ptr(REDUCE_INDEXES)); if (!reduceConst) - OPENVINO_THROW(errorPrefix, " second tensor is not constant!"); + THROW_CPU_NODE_ERR("second tensor is not constant!"); raw_axes = reduceConst->cast_vector(); } else if (const auto reduce = ov::as_type_ptr(op)) { keep_dims = reduce->get_keep_dims(); auto reduceConst = ov::as_type_ptr(reduce->get_input_node_shared_ptr(REDUCE_INDEXES)); if (!reduceConst) - OPENVINO_THROW(errorPrefix, " second tensor is not constant!"); + THROW_CPU_NODE_ERR("second tensor is not constant!"); raw_axes = reduceConst->cast_vector(); } set_use_aux_kernel = false; @@ -1993,24 +1992,24 @@ Reduce::Reduce(const std::shared_ptr& op, const GraphContext::CPtr con void Reduce::getSupportedDescriptors() { if (getParentEdges().size() != 2) - OPENVINO_THROW(errorPrefix, " gets incorrect number of input edges!"); + THROW_CPU_NODE_ERR("gets incorrect number of input edges!"); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " gets incorrect number of output edges!"); + THROW_CPU_NODE_ERR("gets incorrect number of output edges!"); if (getInputShapeAtPort(REDUCE_INDEXES).getRank() != 1) { - OPENVINO_THROW(errorPrefix, " gets incorrect index vector dimension! Index vector should be 1 dimension."); + THROW_CPU_NODE_ERR("gets incorrect index vector dimension! 
Index vector should be 1 dimension."); } if (keep_dims) { if (getInputShapeAtPort(REDUCE_DATA).getRank() != getOutputShapeAtPort(0).getRank()) - OPENVINO_THROW(errorPrefix, " gets incorrect number of input/output dimensions!"); + THROW_CPU_NODE_ERR("gets incorrect number of input/output dimensions!"); } else { // In fact, after the Reduce operation, the shape must be a scalar if the previous one was 1d. // But for now, 0d tensor (scalar) is emulated as 1d tensor. Skip checking in such cases. bool is_emulated_0d_as_1d = getInputShapeAtPort(REDUCE_DATA).getRank() == 1 && getOutputShapeAtPort(0).getRank() == 1; if (getInputShapeAtPort(REDUCE_DATA).getRank() <= getOutputShapeAtPort(0).getRank() && !is_emulated_0d_as_1d) - OPENVINO_THROW(errorPrefix, "gets incorrect number of input/output dimensions!"); + THROW_CPU_NODE_ERR("gets incorrect number of input/output dimensions!"); } } @@ -2252,7 +2251,7 @@ void Reduce::prepareParams() { auto cache = context->getParamsCache(); auto result = cache->getOrCreate(key, builder); if (!result.first) { - OPENVINO_THROW(errorPrefix, " has not found jit_uni_reduce_post_kernel_f32."); + THROW_CPU_NODE_ERR("has not found jit_uni_reduce_post_kernel_f32."); } reduce_post_kernel = result.first; @@ -2271,11 +2270,11 @@ void Reduce::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(REDUCE_DATA); if (!dstMemPtr) - OPENVINO_THROW(errorPrefix, " has null destination memory."); + THROW_CPU_NODE_ERR("has null destination memory."); if (!srcMemPtr) - OPENVINO_THROW(errorPrefix, " has null input memory."); + THROW_CPU_NODE_ERR("has null input memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(errorPrefix, " has nullable preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has nullable preferable primitive descriptor"); if (srcMemPtr->getDesc().hasLayoutType(LayoutType::ncsp)) { layout = ReduceLayoutType::reduce_ncsp; @@ -2410,7 +2409,7 @@ void Reduce::execute(dnnl::stream 
strm) { auto out_ptr = reinterpret_cast(dst_data); reduce_ref(in_ptr, out_ptr); } else { - OPENVINO_THROW(errorPrefix, " supports only plain layout on machine w/o sse42."); + THROW_CPU_NODE_ERR("supports only plain layout on machine w/o sse42."); } } } @@ -3282,7 +3281,7 @@ inline void Reduce::init_dst_data(uint8_t* out_ptr, size_t dst_size) { } break; default: - OPENVINO_THROW(errorPrefix, " gets unsupported reduce mode."); + THROW_CPU_NODE_ERR("gets unsupported reduce mode."); } } @@ -3345,7 +3344,7 @@ inline void Reduce::calc_process_dst_dims(std::vector& reduce_axes, const V if (axis < 0) axis += src_dims.size(); if (static_cast(axis) > src_dims.size()) - OPENVINO_THROW(errorPrefix, " exceeds data tensor dimension on index to reduce"); + THROW_CPU_NODE_ERR("exceeds data tensor dimension on index to reduce"); axes.insert(static_cast(axis)); } for (size_t i = 0; i < src_dims.size(); i++) { @@ -3369,11 +3368,11 @@ inline void Reduce::calc_process_dst_dims(std::vector& reduce_axes, const V if (jit_mode && jit_beyond_5D) { if (std::accumulate(out_dims.begin(), out_dims.end(), size_t(1), std::multiplies()) != std::accumulate(dst_dims.begin(), dst_dims.end(), size_t(1), std::multiplies())) - OPENVINO_THROW(errorPrefix, "gets incorrect number of output dimensions!"); + THROW_CPU_NODE_ERR("gets incorrect number of output dimensions!"); } else { for (size_t i = 0; i < std::min(out_dims.size(), dst_dims.size()); i++) { if (out_dims[i] != dst_dims[i]) - OPENVINO_THROW(errorPrefix, "gets incorrect number of output dimensions!"); + THROW_CPU_NODE_ERR("gets incorrect number of output dimensions!"); } } } @@ -3518,7 +3517,7 @@ inline void Reduce::reduce_ref(const float* in_ptr, float* out_ptr) { }); break; default: - OPENVINO_THROW(errorPrefix, "gets unsupported reduce mode."); + THROW_CPU_NODE_ERR("gets unsupported reduce mode."); } } @@ -3609,7 +3608,7 @@ inline void Reduce::reduce_ref_map(float* out_ptr, size_t work_amount_dst, size_ }); break; default: - 
OPENVINO_THROW(errorPrefix, "gets unsupported reduce mode."); + THROW_CPU_NODE_ERR("gets unsupported reduce mode."); } } diff --git a/src/plugins/intel_cpu/src/nodes/reduce.h b/src/plugins/intel_cpu/src/nodes/reduce.h index 3f35fd8bc5a7c0..e0d68241365ef4 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.h +++ b/src/plugins/intel_cpu/src/nodes/reduce.h @@ -197,8 +197,6 @@ class Reduce : public Node { std::function& op, Reduce& node)>>& getInitializers(); - std::string errorPrefix; - #if defined(OV_CPU_WITH_ACL) ReduceAttrs reduceAttrs; bool canUseAclExecutor = false; diff --git a/src/plugins/intel_cpu/src/nodes/region_yolo.cpp b/src/plugins/intel_cpu/src/nodes/region_yolo.cpp index 43c281c669ada0..fc198bc0cc2d72 100644 --- a/src/plugins/intel_cpu/src/nodes/region_yolo.cpp +++ b/src/plugins/intel_cpu/src/nodes/region_yolo.cpp @@ -265,9 +265,8 @@ RegionYolo::RegionYolo(const std::shared_ptr& op, const GraphContext:: OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = std::string(op->get_type_name()) + " node with name '" + op->get_friendly_name() + "'"; if (op->get_input_size() != 1 || op->get_output_size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto regionYolo = ov::as_type_ptr(op); classes = regionYolo->get_num_classes(); diff --git a/src/plugins/intel_cpu/src/nodes/region_yolo.h b/src/plugins/intel_cpu/src/nodes/region_yolo.h index fbd88d98514271..1f34e8cb0431a6 100644 --- a/src/plugins/intel_cpu/src/nodes/region_yolo.h +++ b/src/plugins/intel_cpu/src/nodes/region_yolo.h @@ -64,8 +64,6 @@ class RegionYolo : public Node { std::vector mask; ov::element::Type input_prec, output_prec; - std::string errorPrefix; - int block_size; std::shared_ptr logistic_kernel = nullptr; std::shared_ptr softmax_kernel; diff --git a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp index 
63b4e2985ff12c..015bc38adea65b 100644 --- a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp @@ -33,14 +33,13 @@ ReorgYolo::ReorgYolo(const std::shared_ptr& op, const GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = std::string(op->get_type_name()) + " node with name '" + op->get_friendly_name() + "'"; if (getOriginalInputsNumber() != 1 || getOriginalOutputsNumber() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto reorgYolo = ov::as_type_ptr(op); const auto strides = reorgYolo->get_strides(); if (strides.empty()) - OPENVINO_THROW(errorPrefix, " has empty strides"); + THROW_CPU_NODE_ERR("has empty strides"); stride = strides[0]; } diff --git a/src/plugins/intel_cpu/src/nodes/reorg_yolo.h b/src/plugins/intel_cpu/src/nodes/reorg_yolo.h index bf7486f4f788b0..a2f341fee4aed2 100644 --- a/src/plugins/intel_cpu/src/nodes/reorg_yolo.h +++ b/src/plugins/intel_cpu/src/nodes/reorg_yolo.h @@ -27,8 +27,6 @@ class ReorgYolo : public Node { private: int stride; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/reshape.cpp b/src/plugins/intel_cpu/src/nodes/reshape.cpp index 6189a888153d9f..f4ee25aab83aa8 100644 --- a/src/plugins/intel_cpu/src/nodes/reshape.cpp +++ b/src/plugins/intel_cpu/src/nodes/reshape.cpp @@ -37,8 +37,6 @@ Reshape::Reshape(const std::shared_ptr& op, const GraphContext::CPtr c OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = std::string(op->get_type_name()) + " node with name '" + getName() + "'"; - if (isDynamicNode()) { auto checkSecondInput = [](const std::shared_ptr& op, const std::string opType) { if (op->get_input_partial_shape(1).is_dynamic()) { diff --git a/src/plugins/intel_cpu/src/nodes/reshape.h b/src/plugins/intel_cpu/src/nodes/reshape.h index 126e8d90c6b077..f64c6e2807b7e4 100644 --- 
a/src/plugins/intel_cpu/src/nodes/reshape.h +++ b/src/plugins/intel_cpu/src/nodes/reshape.h @@ -31,8 +31,6 @@ class Reshape : public Node { private: mutable std::vector lastSecondInputValues; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp index 41f7c88f69d0c1..9cdc9fa596b436 100644 --- a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp +++ b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp @@ -35,36 +35,33 @@ ReverseSequence::ReverseSequence(const std::shared_ptr& op, const Grap OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "ReverseSequence layer with name '" + op->get_friendly_name() + "'"; const auto revSeq = ov::as_type_ptr(op); if (revSeq == nullptr) - OPENVINO_THROW("Operation with name '", - op->get_friendly_name(), - "' is not an instance of ReverseSequence from opset1."); + THROW_CPU_NODE_ERR("is not an instance of ReverseSequence from opset1."); if (inputShapes.size() != 2 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); const auto dataRank = getInputShapeAtPort(REVERSESEQUENCE_DATA).getRank(); if (dataRank < 2) - OPENVINO_THROW(errorPrefix, " 'data' rank should be greater than or equal to 2"); + THROW_CPU_NODE_ERR("'data' rank should be greater than or equal to 2"); if (getInputShapeAtPort(REVERSESEQUENCE_LENGTHS).getRank() != 1) - OPENVINO_THROW(errorPrefix, " 'seq_lengths' should be 1D tensor"); + THROW_CPU_NODE_ERR("'seq_lengths' should be 1D tensor"); if (dataRank != getOutputShapeAtPort(0).getRank()) - OPENVINO_THROW(errorPrefix, " has input/output rank mismatch"); + THROW_CPU_NODE_ERR("has input/output rank mismatch"); seq_axis = revSeq->get_sequence_axis(); if (seq_axis < 0 || seq_axis >= static_cast(dataRank)) - OPENVINO_THROW(errorPrefix, " has incorrect 'seq_axis' 
parameters dimensions and axis number!"); + THROW_CPU_NODE_ERR("has incorrect 'seq_axis' parameters dimensions and axis number!"); batch_axis = revSeq->get_batch_axis(); if (batch_axis < 0 || batch_axis >= static_cast(dataRank)) - OPENVINO_THROW(errorPrefix, " has incorrect 'batch_axis' parameters dimensions and axis number!"); + THROW_CPU_NODE_ERR("has incorrect 'batch_axis' parameters dimensions and axis number!"); } void ReverseSequence::initSupportedPrimitiveDescriptors() { @@ -86,13 +83,13 @@ void ReverseSequence::prepareParams() { const auto& dstMemPtr = getDstMemoryAtPort(0); if (!dataMemPtr || !dataMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory of 'data'"); + THROW_CPU_NODE_ERR("has undefined input memory of 'data'"); if (!seqLengthsMemPtr || !seqLengthsMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory of 'seq_lengths'"); + THROW_CPU_NODE_ERR("has undefined input memory of 'seq_lengths'"); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined output memory"); + THROW_CPU_NODE_ERR("has undefined output memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(errorPrefix, " has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const VectorDims& dataDims = dataMemPtr->getStaticDims(); const VectorDims& seqLengthsDims = seqLengthsMemPtr->getStaticDims(); @@ -174,7 +171,7 @@ void ReverseSequence::ReverseSequenceExecutor::exec(const MemoryPtr& dataMemPtr, void ReverseSequence::execute(dnnl::stream strm) { if (!execPtr) - OPENVINO_THROW(errorPrefix, " has no compiled executor"); + THROW_CPU_NODE_ERR("has no compiled executor"); const auto precision = getParentEdgeAt(REVERSESEQUENCE_LENGTHS)->getMemory().getDesc().getPrecision(); if (!one_of(precision, ov::element::f32, ov::element::i32)) diff --git a/src/plugins/intel_cpu/src/nodes/reverse_sequence.h 
b/src/plugins/intel_cpu/src/nodes/reverse_sequence.h index c40d651db4f254..3e9fa7e352a234 100644 --- a/src/plugins/intel_cpu/src/nodes/reverse_sequence.h +++ b/src/plugins/intel_cpu/src/nodes/reverse_sequence.h @@ -53,7 +53,6 @@ class ReverseSequence : public Node { int batch_axis; ov::element::Type lengthsPrecision; - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/roi_align.cpp b/src/plugins/intel_cpu/src/nodes/roi_align.cpp index 8ef5dfff25b0f6..38bf4594c4d882 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_align.cpp @@ -698,8 +698,6 @@ ROIAlign::ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "ROIPooling layer with name '" + getName() + "' "; - auto roiAlign = ov::as_type_ptr(op); pooledH = roiAlign->get_pooled_h(); pooledW = roiAlign->get_pooled_w(); @@ -726,39 +724,38 @@ ROIAlign::ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr void ROIAlign::getSupportedDescriptors() { if (getParentEdges().size() != 3) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); if (getInputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support 0th input with rank: ", getInputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support 0th input with rank: ", getInputShapeAtPort(0).getRank()); } if (getInputShapeAtPort(1).getRank() != 2) { - OPENVINO_THROW(errorPrefix, "doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); + 
THROW_CPU_NODE_ERR("doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); } if (getInputShapeAtPort(2).getRank() != 1) { - OPENVINO_THROW(errorPrefix, "doesn't support 2nd input with rank: ", getInputShapeAtPort(2).getRank()); + THROW_CPU_NODE_ERR("doesn't support 2nd input with rank: ", getInputShapeAtPort(2).getRank()); } if (getOutputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); } const auto& proposalsDims = getInputShapeAtPort(1).getDims(); if (proposalsDims[1] != 4) { - OPENVINO_THROW(errorPrefix, "has invalid shape on 1st input: [", proposalsDims[0], ",", proposalsDims[1], "]"); + THROW_CPU_NODE_ERR("has invalid shape on 1st input: [", proposalsDims[0], ",", proposalsDims[1], "]"); } const auto& indexesDims = getInputShapeAtPort(2).getDims(); if (!dimsEqualWeak(proposalsDims[0], indexesDims[0])) { - OPENVINO_THROW(errorPrefix, - "has different sizes of inputs for proposals (", - proposalsDims[0], - ") and indexes (", - indexesDims[0], - ")"); + THROW_CPU_NODE_ERR("has different sizes of inputs for proposals (", + proposalsDims[0], + ") and indexes (", + indexesDims[0], + ")"); } } @@ -835,9 +832,9 @@ void ROIAlign::createPrimitive() { auto srcMemPtr = getSrcMemoryAtPort(0); auto dstMemPtr = getDstMemoryAtPort(0); if (!srcMemPtr) - OPENVINO_THROW(errorPrefix, " has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (!dstMemPtr) - OPENVINO_THROW(errorPrefix, " has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (!roi_align_kernel) { ROIAlignLayoutType selectedLayout = ROIAlignLayoutType::nspc; diff --git a/src/plugins/intel_cpu/src/nodes/roi_align.h b/src/plugins/intel_cpu/src/nodes/roi_align.h index 20437d51fa9cef..dc78a23b4c79d5 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align.h +++ 
b/src/plugins/intel_cpu/src/nodes/roi_align.h @@ -81,8 +81,6 @@ class ROIAlign : public Node { void createJitKernel(const ov::element::Type& dataPrec, const ROIAlignLayoutType& selectLayout); std::shared_ptr roi_align_kernel = nullptr; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp index a388a8369615ab..20013eef3c6b88 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp @@ -408,8 +408,6 @@ ROIPooling::ROIPooling(const std::shared_ptr& op, const GraphContext:: OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - std::string errorPrefix = "ROIPooling layer with name '" + getName() + "' "; - auto roiPooling = ov::as_type_ptr(op); refParams.pooled_h = roiPooling->get_output_roi()[0]; refParams.pooled_w = roiPooling->get_output_roi()[1]; @@ -424,25 +422,25 @@ ROIPooling::ROIPooling(const std::shared_ptr& op, const GraphContext:: void ROIPooling::getSupportedDescriptors() { if (getParentEdges().size() != 2) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); if (getInputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support 0th input with rank: ", getInputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support 0th input with rank: ", getInputShapeAtPort(0).getRank()); } if (getInputShapeAtPort(1).getRank() != 2) { - OPENVINO_THROW(errorPrefix, "doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); + THROW_CPU_NODE_ERR("doesn't support 1st input with rank: ", getInputShapeAtPort(1).getRank()); } if 
(getOutputShapeAtPort(0).getRank() != 4) { - OPENVINO_THROW(errorPrefix, "doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); + THROW_CPU_NODE_ERR("doesn't support output with rank: ", getOutputShapeAtPort(0).getRank()); } const auto& dims = getInputShapeAtPort(1).getDims(); if (dims[1] != 5) { - OPENVINO_THROW(errorPrefix, "has invalid shape on 1st input: [", dims[0], ",", dims[1], "]"); + THROW_CPU_NODE_ERR("has invalid shape on 1st input: [", dims[0], ",", dims[1], "]"); } } diff --git a/src/plugins/intel_cpu/src/nodes/roi_pooling.h b/src/plugins/intel_cpu/src/nodes/roi_pooling.h index 9fe79623b3c366..e02be525f0c34b 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/roi_pooling.h @@ -84,8 +84,6 @@ class ROIPooling : public Node { jit_roi_pooling_params refParams = {}; - std::string errorPrefix; - class ROIPoolingExecutor { public: ROIPoolingExecutor() = default; diff --git a/src/plugins/intel_cpu/src/nodes/roll.cpp b/src/plugins/intel_cpu/src/nodes/roll.cpp index 2547b945bb56c3..3d6a422b634abd 100644 --- a/src/plugins/intel_cpu/src/nodes/roll.cpp +++ b/src/plugins/intel_cpu/src/nodes/roll.cpp @@ -38,49 +38,44 @@ Roll::Roll(const std::shared_ptr& op, const GraphContext::CPtr context : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - layerErrorPrefix = "Roll layer with name '" + getName() + "'"; if (inputShapes.size() != 3 || outputShapes.size() != 1) { - OPENVINO_THROW(layerErrorPrefix, " has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); } const auto& dataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); if (std::find(supportedPrecisionSizes.begin(), supportedPrecisionSizes.end(), dataPrecision.size()) == supportedPrecisionSizes.end()) - OPENVINO_THROW(layerErrorPrefix, "has unsupported precision: ", dataPrecision.get_type_name()); + 
THROW_CPU_NODE_ERR("as unsupported precision: ", dataPrecision.get_type_name()); const auto dataRank = getInputShapeAtPort(DATA_INDEX).getRank(); if (dataRank < 1) { - OPENVINO_THROW(layerErrorPrefix, " doesn't support 'data' input tensor with rank: ", dataRank); + THROW_CPU_NODE_ERR("doesn't support 'data' input tensor with rank: ", dataRank); } if (dataRank != getOutputShapeAtPort(0).getRank()) - OPENVINO_THROW(layerErrorPrefix, " has input/output rank mismatch"); + THROW_CPU_NODE_ERR("has input/output rank mismatch"); /* Axes */ const auto& axesTensorPrec = getOriginalInputPrecisionAtPort(AXES_INDEX); if (axesTensorPrec != ov::element::i32 && axesTensorPrec != ov::element::i64) { - OPENVINO_THROW(layerErrorPrefix, - " has unsupported 'axes' input precision: ", - axesTensorPrec.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'axes' input precision: ", axesTensorPrec.get_type_name()); } const auto axesTensorRank = getInputShapeAtPort(AXES_INDEX).getRank(); if (axesTensorRank > 1) { - OPENVINO_THROW(layerErrorPrefix, " doesn't support 'axes' input tensor with rank: ", axesTensorRank); + THROW_CPU_NODE_ERR("doesn't support 'axes' input tensor with rank: ", axesTensorRank); } /* Shift */ const auto& shiftTensorPrec = getOriginalInputPrecisionAtPort(SHIFT_INDEX); if (shiftTensorPrec != ov::element::i32 && shiftTensorPrec != ov::element::i64) { - OPENVINO_THROW(layerErrorPrefix, - " has unsupported 'shift' input precision: ", - shiftTensorPrec.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported 'shift' input precision: ", shiftTensorPrec.get_type_name()); } const auto shiftTensorRank = getInputShapeAtPort(SHIFT_INDEX).getRank(); if (shiftTensorRank > 1) { - OPENVINO_THROW(layerErrorPrefix, " doesn't support 'shift' input tensor with rank: ", shiftTensorRank); + THROW_CPU_NODE_ERR("doesn't support 'shift' input tensor with rank: ", shiftTensorRank); } } else { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); @@ -108,15 +103,15 @@ void Roll::prepareParams() { 
const auto& dstMemPtr = getDstMemoryAtPort(0); if (!dataMemPtr || !dataMemPtr->isDefined()) - OPENVINO_THROW(layerErrorPrefix, " has undefined input memory of 'data'"); + THROW_CPU_NODE_ERR("has undefined input memory of 'data'"); if (!shiftMemPtr || !shiftMemPtr->isDefined()) - OPENVINO_THROW(layerErrorPrefix, " has undefined input memory of 'shift'"); + THROW_CPU_NODE_ERR("has undefined input memory of 'shift'"); if (!axesMemPtr || !axesMemPtr->isDefined()) - OPENVINO_THROW(layerErrorPrefix, " has undefined input memory of 'axes'"); + THROW_CPU_NODE_ERR("has undefined input memory of 'axes'"); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(layerErrorPrefix, " has undefined output memory"); + THROW_CPU_NODE_ERR("has undefined output memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(layerErrorPrefix, " has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const VectorDims& dataDims = dataMemPtr->getStaticDims(); const VectorDims& shiftDims = shiftMemPtr->getStaticDims(); @@ -132,7 +127,7 @@ void Roll::executeDynamicImpl(dnnl::stream strm) { void Roll::execute(dnnl::stream strm) { if (!execPtr) - OPENVINO_THROW(layerErrorPrefix, " has no compiled executor"); + THROW_CPU_NODE_ERR("has no compiled executor"); const auto dataPrecision = getParentEdgeAt(DATA_INDEX)->getMemory().getDesc().getPrecision(); const auto& dataTypeSize = dataPrecision.size(); @@ -159,7 +154,7 @@ void Roll::execute(dnnl::stream strm) { break; } default: - OPENVINO_THROW(layerErrorPrefix, "has unsupported 'data' input precision: ", dataPrecision.get_type_name()); + THROW_CPU_NODE_ERR("as unsupported 'data' input precision: ", dataPrecision.get_type_name()); } } diff --git a/src/plugins/intel_cpu/src/nodes/roll.h b/src/plugins/intel_cpu/src/nodes/roll.h index c32967068e0c97..35fe87994aa3e5 100644 --- a/src/plugins/intel_cpu/src/nodes/roll.h +++ b/src/plugins/intel_cpu/src/nodes/roll.h @@ 
-48,8 +48,6 @@ class Roll : public Node { using ExecutorPtr = std::shared_ptr; ExecutorPtr execPtr = nullptr; - std::string layerErrorPrefix; - static constexpr std::array supportedPrecisionSizes{1, 2, 4}; static constexpr size_t DATA_INDEX = 0ul; static constexpr size_t SHIFT_INDEX = 1ul; diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index 0a5c95ff18f78d..3225d9d8f8da96 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -82,9 +82,7 @@ ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphCon indicesPrec(ov::element::undefined), axisPrec(ov::element::undefined) { std::string errorMessage; - if (isSupportedOperation(op, errorMessage)) { - errorPrefix = std::string(op->get_type_name()) + " node with name '" + getName() + "'"; - } else { + if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } // In ov::PartialShape with rank 0 (scalars) is converted to ov::intel_cpu::Shape with rank 1. 
@@ -105,10 +103,10 @@ ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphCon axisRelaxed = false; isUpdateScalar = ov::is_scalar(op->get_input_partial_shape(2)); } else { - THROW_CPU_NODE_ERR(errorPrefix, " is not supported"); + THROW_CPU_NODE_ERR("is not supported"); } if (is_not_supported_input) { - THROW_CPU_NODE_ERR(errorPrefix, " do not support scalar input"); + THROW_CPU_NODE_ERR("do not support scalar input"); } reduction_type = ScatterUpdate::Reduction::NONE; @@ -168,9 +166,9 @@ ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphCon void ScatterUpdate::getSupportedDescriptors() { if ((getParentEdges().size() != 3) && (getParentEdges().size() != 4)) - THROW_CPU_NODE_ERR(errorPrefix, " has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); if (getChildEdges().empty()) - THROW_CPU_NODE_ERR(errorPrefix, " has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); } void ScatterUpdate::initSupportedPrimitiveDescriptors() { @@ -189,12 +187,11 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { // common check if (srcRank != dstRank) { - THROW_CPU_NODE_ERR(errorPrefix, " should have same rank for input and output tensor"); + THROW_CPU_NODE_ERR("should have same rank for input and output tensor"); } else { for (size_t r = 0; r < srcRank; r++) { if (!dimsEqualWeak(srcDataDim[r], dstDataDim[r])) { - THROW_CPU_NODE_ERR(errorPrefix, - " should have same shape for input and output tensor. The input shape is ", + THROW_CPU_NODE_ERR("should have same shape for input and output tensor. 
The input shape is ", srcDataDim[r], ", while output shape is ", dstDataDim[r], @@ -208,8 +205,7 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { switch (scatterUpdateMode) { case ScatterUpdateMode::ScatterUpdate: { if (updateRank != (srcRank + indicesRank - 1)) { - THROW_CPU_NODE_ERR(errorPrefix, - " do not have matched tensor rank relationship for input, indices and update"); + THROW_CPU_NODE_ERR("do not have matched tensor rank relationship for input, indices and update"); } break; } @@ -217,8 +213,7 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { if (indicesDim[indicesRank - 1] != Shape::UNDEFINED_DIM) { size_t k = indicesDim[indicesRank - 1]; if (k > srcRank) { - THROW_CPU_NODE_ERR(errorPrefix, - "' do not have an correct indices' last dimension value, ", + THROW_CPU_NODE_ERR("do not have an correct indices' last dimension value, ", "which should be smaller than or equal to input tensor rank"); } @@ -238,13 +233,11 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { updateAxisIter++; } if (expectUpdateShape.size() != updateRank) { - THROW_CPU_NODE_ERR(errorPrefix, - " do not have matched tensor rank relationship for input, indices and update"); + THROW_CPU_NODE_ERR("do not have matched tensor rank relationship for input, indices and update"); } for (size_t ru = 0; ru < updateRank; ru++) { if (!dimsEqualWeak(updateDim[ru], expectUpdateShape[ru])) { - THROW_CPU_NODE_ERR(errorPrefix, - " do not have matched tensor shape relationship for input, indices and update"); + THROW_CPU_NODE_ERR("do not have matched tensor shape relationship for input, indices and update"); } } } @@ -252,17 +245,17 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { } case ScatterUpdateMode::ScatterElementsUpdate: { if (srcRank != indicesRank || srcRank != updateRank) { - THROW_CPU_NODE_ERR(errorPrefix, " do not have the same tensor rank for input, indices and update"); + THROW_CPU_NODE_ERR("do not have the same tensor rank for input, indices and 
update"); } for (size_t ri = 0; ri < indicesRank; ri++) { if (!dimsEqualWeak(indicesDim[ri], updateDim[ri])) { - THROW_CPU_NODE_ERR(errorPrefix, " do not have the same tensor shape for indices and update"); + THROW_CPU_NODE_ERR("do not have the same tensor shape for indices and update"); } } break; } default: { - THROW_CPU_NODE_ERR(errorPrefix, " is not supported"); + THROW_CPU_NODE_ERR("is not supported"); } } @@ -881,8 +874,7 @@ void ScatterUpdate::execute(dnnl::stream strm) { } if (axis >= static_cast(srcRank) || axis < (static_cast(srcRank) * -1)) { - THROW_CPU_NODE_ERR(errorPrefix, - " should have axis value in range [-r, r - 1], where r is the rank of input data"); + THROW_CPU_NODE_ERR("should have axis value in range [-r, r - 1], where r is the rank of input data"); } axis = axis < 0 ? (axis + srcRank) : axis; @@ -895,8 +887,7 @@ void ScatterUpdate::execute(dnnl::stream strm) { int64_t idxValue = getIndicesValue(indicesPtr, i); if (idxValue >= static_cast(srcDimAxis) || (idxValue < 0 && scatterUpdateMode != ScatterUpdateMode::ScatterElementsUpdate)) { - THROW_CPU_NODE_ERR(errorPrefix, - " have indices value that points to non-existing output tensor element"); + THROW_CPU_NODE_ERR("have indices value that points to non-existing output tensor element"); } } }); @@ -920,15 +911,13 @@ void ScatterUpdate::execute(dnnl::stream strm) { } } if (updateRank > expectUpdateShape.size()) - THROW_CPU_NODE_ERR(errorPrefix, - " cannot update shape. New rank: ", + THROW_CPU_NODE_ERR("cannot update shape. 
New rank: ", updateRank, ", expected: ", expectUpdateShape.size()); for (size_t ru = 0; ru < updateRank; ru++) { if (updateDim[ru] != expectUpdateShape[ru]) { - THROW_CPU_NODE_ERR(errorPrefix, - " do not have matched tensor shape relationship for input, indices and update"); + THROW_CPU_NODE_ERR("do not have matched tensor shape relationship for input, indices and update"); } } } @@ -963,7 +952,7 @@ void ScatterUpdate::execute(dnnl::stream strm) { break; } default: { - THROW_CPU_NODE_ERR(errorPrefix, " is not supported"); + THROW_CPU_NODE_ERR("is not supported"); } } } diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.h b/src/plugins/intel_cpu/src/nodes/scatter_update.h index ae2c30a37cc6cd..4dc9ed1be59a63 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.h +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.h @@ -137,8 +137,6 @@ class ScatterUpdate : public Node { // In ov::PartialShape with rank 0 (scalars) is converted to ov::intel_cpu::Shape with rank 1. // Add flag set in constructor for workaround for ScatterNDUpdates bool isUpdateScalar = false; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.cpp b/src/plugins/intel_cpu/src/nodes/shapeof.cpp index 5426c0dda51976..43f30b680c880c 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.cpp +++ b/src/plugins/intel_cpu/src/nodes/shapeof.cpp @@ -29,9 +29,8 @@ ShapeOf::ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr c : Node(op, context, ShapeOfShapeInferFactory()) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "ShapeOf layer with name '" + getName() + "' "; if (op->get_input_partial_shape(0).size() == 0) - OPENVINO_THROW(errorPrefix, "gets unsupported input 0D tensor (scalar)"); + THROW_CPU_NODE_ERR("gets unsupported input 0D tensor (scalar)"); } else { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } @@ -39,9 +38,9 @@ ShapeOf::ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr 
c void ShapeOf::getSupportedDescriptors() { if (getParentEdges().size() != 1) - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges: ", getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges: ", getParentEdges().size()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges: ", getChildEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of output edges: ", getChildEdges().size()); } void ShapeOf::initSupportedPrimitiveDescriptors() { @@ -89,7 +88,7 @@ void ShapeOf::execute(dnnl::stream strm) { auto&& inDims = inPtr->getStaticDims(); size_t dimsCount = inDims.size(); if (outPtr->getStaticDims().size() != 1 || dimsCount != outPtr->getStaticDims()[0]) - OPENVINO_THROW(errorPrefix, "has inconsistent input shape and output size"); + THROW_CPU_NODE_ERR("has inconsistent input shape and output size"); auto* dst = outPtr->getDataAs(); diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.h b/src/plugins/intel_cpu/src/nodes/shapeof.h index 6dfd63946ec941..7b2ebb733e99a9 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.h +++ b/src/plugins/intel_cpu/src/nodes/shapeof.h @@ -35,9 +35,6 @@ class ShapeOf : public Node { bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; - -private: - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp b/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp index c2d28fa6eba5f9..c01cd65a9407bf 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp @@ -32,17 +32,15 @@ SpaceToBatch::SpaceToBatch(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "BatchToSpace layer with name '" + op->get_friendly_name() + "'"; - if (inputShapes.size() != 4 || outputShapes.size() != 1) - OPENVINO_THROW(errorPrefix, 
" has incorrect number of input or output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input or output edges!"); const size_t srcRank = getInputShapeAtPort(0).getRank(); const size_t dstRank = getOutputShapeAtPort(0).getRank(); if (srcRank < 4 || srcRank > 5) - OPENVINO_THROW(errorPrefix, " has unsupported 'data' input rank: ", srcRank); + THROW_CPU_NODE_ERR("has unsupported 'data' input rank: ", srcRank); if (srcRank != dstRank) - OPENVINO_THROW(errorPrefix, " has incorrect number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); } void SpaceToBatch::initSupportedPrimitiveDescriptors() { @@ -53,7 +51,7 @@ void SpaceToBatch::initSupportedPrimitiveDescriptors() { const auto precision = getOriginalInputPrecisionAtPort(0); const std::set supported_precision_sizes = {1, 2, 4, 8}; if (supported_precision_sizes.find(precision.size()) == supported_precision_sizes.end()) - OPENVINO_THROW(errorPrefix, " has unsupported precision: ", precision.get_type_name()); + THROW_CPU_NODE_ERR("has unsupported precision: ", precision.get_type_name()); addSupportedPrimDesc({{LayoutType::nspc, precision}, {LayoutType::ncsp, ov::element::i32}, diff --git a/src/plugins/intel_cpu/src/nodes/space_to_batch.h b/src/plugins/intel_cpu/src/nodes/space_to_batch.h index 82861bac602d7a..fec4423a91a1db 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_batch.h +++ b/src/plugins/intel_cpu/src/nodes/space_to_batch.h @@ -37,8 +37,6 @@ class SpaceToBatch : public Node { std::vector blockShapeIn; std::vector padsBeginIn; - std::string errorPrefix; - template void SpaceToBatchKernel(); }; diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 45067b9addc758..542f8897c2d625 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -39,7 +39,6 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte 
if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = NameFromType(getType()) + " node with name '" + getName() + "' "; attrs.isStridedSliceOp = ov::is_type(op); attrs.isSliceScatterOp = ov::is_type(op); @@ -58,10 +57,10 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte if ((attrs.isStridedSliceOp && (inputShapes.size() < 3 || inputShapes.size() > 4)) || (!attrs.isStridedSliceOp && (inputShapes.size() < (attrs.STRIDE_ID + 1) || inputShapes.size() > (attrs.AXES_ID + 1)))) { - OPENVINO_THROW(errorPrefix, "has incorrect number of input edges"); + THROW_CPU_NODE_ERR("has incorrect number of input edges"); } if (outputShapes.size() != 1) { - OPENVINO_THROW(errorPrefix, "has incorrect number of output edges"); + THROW_CPU_NODE_ERR("has incorrect number of output edges"); } if (inputShapes.size() > attrs.STRIDE_ID) { @@ -128,7 +127,7 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte attrs.ellipsisPos1 = attrs.ellipsisMask[i] == 1 && attrs.ellipsisPos1 == -1 ? i : attrs.ellipsisPos1; } if (attrs.ellipsisMaskCounter > 1) - OPENVINO_THROW(errorPrefix, "has incorrect 'Ellipsis_mask'. Only one non-zero bit is allowed"); + THROW_CPU_NODE_ERR("has incorrect 'Ellipsis_mask'. 
Only one non-zero bit is allowed"); int newAxis = std::accumulate(attrs.newAxisMask.begin(), attrs.newAxisMask.end(), 0); int shrinkAxis = std::accumulate(attrs.shrinkAxisMask.begin(), attrs.shrinkAxisMask.end(), 0); @@ -334,7 +333,7 @@ void StridedSlice::prepareParams() { dstMemory.push_back(getDstMemoryAtPort(i)); } } - execPtr = std::make_shared(attrs, srcMemory, dstMemory, errorPrefix); + execPtr = std::make_shared(attrs, srcMemory, dstMemory); } bool StridedSlice::needShapeInfer() const { @@ -343,7 +342,7 @@ bool StridedSlice::needShapeInfer() const { void StridedSlice::execute(dnnl::stream strm) { if (!execPtr) - OPENVINO_THROW(errorPrefix, "doesn't have compiled executor!"); + THROW_CPU_NODE_ERR("doesn't have compiled executor!"); execPtr->exec(srcMemory, dstMemory); } @@ -358,9 +357,8 @@ bool StridedSlice::created() const { StridedSlice::StridedSliceCommonExecutor::StridedSliceCommonExecutor(const StridedSliceAttributes& attrs, const std::vector& srcMemory, - const std::vector& dstMemory, - const std::string& errorPrefix) - : StridedSliceExecutor(attrs, srcMemory, dstMemory, errorPrefix) { + const std::vector& dstMemory) + : StridedSliceExecutor(attrs, srcMemory, dstMemory) { paramsInitialization(attrs, srcMemory, dstMemory); dimsNormalization(); dimsGluing(); @@ -442,11 +440,11 @@ void StridedSlice::StridedSliceCommonExecutor::paramsInitialization(const Stride params.attrs.beginDims = srcMemory[attrs.BEGIN_ID]->getShape().getStaticDims(); params.attrs.endDims = srcMemory[attrs.END_ID]->getShape().getStaticDims(); if (params.attrs.beginDims.size() != 1) - OPENVINO_THROW(errorPrefix, "should have begin vector with 1 dimension"); + OPENVINO_THROW("Strided slice common executor should have begin vector with 1 dimension"); if (params.attrs.endDims.size() != 1) - OPENVINO_THROW(errorPrefix, "should have end vector with 1 dimension"); + OPENVINO_THROW("Strided slice common executor should have end vector with 1 dimension"); if (params.attrs.beginDims[0] != 
params.attrs.endDims[0]) - OPENVINO_THROW(errorPrefix, "should have begin vector with size equal to end vector size"); + OPENVINO_THROW("Strided slice common executor should have begin vector with size equal to end vector size"); if (params.attrs.begin.empty()) fillingInParameters(params.attrs.begin, attrs.BEGIN_ID, params.attrs.beginDims[0], 0); @@ -456,9 +454,10 @@ void StridedSlice::StridedSliceCommonExecutor::paramsInitialization(const Stride if (srcMemory.size() > attrs.STRIDE_ID) { params.attrs.strideDims = srcMemory[attrs.STRIDE_ID]->getShape().getStaticDims(); if (params.attrs.strideDims.size() > 1) - OPENVINO_THROW(errorPrefix, "should have stride vector with 1 dimension"); + OPENVINO_THROW("Strided slice common executor should have stride vector with 1 dimension"); if (params.attrs.beginDims[0] != params.attrs.strideDims[0]) - OPENVINO_THROW(errorPrefix, "should have stride vector with size equal to begin vector size"); + OPENVINO_THROW( + "Strided slice common executor should have stride vector with size equal to begin vector size"); if (params.attrs.stride.empty()) fillingInParameters(params.attrs.stride, attrs.STRIDE_ID, params.attrs.strideDims[0], 1); @@ -467,9 +466,10 @@ void StridedSlice::StridedSliceCommonExecutor::paramsInitialization(const Stride if (srcMemory.size() > attrs.AXES_ID) { params.attrs.axesDims = srcMemory[attrs.AXES_ID]->getShape().getStaticDims(); if (params.attrs.axesDims.size() != 1) - OPENVINO_THROW(errorPrefix, "should have axes vector with 1 dimension."); + OPENVINO_THROW("Strided slice common executor should have axes vector with 1 dimension."); if (params.attrs.beginDims[0] != params.attrs.axesDims[0]) - OPENVINO_THROW(errorPrefix, "should have axes vector with size equal to begin vector size."); + OPENVINO_THROW( + "Strided slice common executor should have axes vector with size equal to begin vector size."); if (params.attrs.axes.empty()) fillingInParameters(params.attrs.axes, attrs.AXES_ID, params.attrs.axesDims[0], 0); 
diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.h b/src/plugins/intel_cpu/src/nodes/strided_slice.h index e85ad381edf006..b21e99c7efeb2e 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.h +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.h @@ -72,22 +72,16 @@ class StridedSlice : public Node { public: StridedSliceExecutor(const StridedSliceAttributes& attrs, const std::vector& srcMemory, - const std::vector& dstMemory, - const std::string& errorPrefix) - : errorPrefix(errorPrefix) {} + const std::vector& dstMemory) {} virtual void exec(const std::vector& srcMemory, const std::vector& dstMemory) = 0; virtual ~StridedSliceExecutor() = default; - - protected: - const std::string errorPrefix; }; class StridedSliceCommonExecutor : public StridedSliceExecutor { public: StridedSliceCommonExecutor(const StridedSliceAttributes& attrs, const std::vector& srcMemory, - const std::vector& dstMemory, - const std::string& errorPrefix); + const std::vector& dstMemory); void exec(const std::vector& srcMemory, const std::vector& dstMemory) override; void execSliceScatter(const std::vector& srcMemory, const std::vector& dstMemory); void execStridedSlice(const std::vector& srcMemory, const std::vector& dstMemory); @@ -134,8 +128,6 @@ class StridedSlice : public Node { std::vector srcMemory; std::vector dstMemory; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/tile.cpp b/src/plugins/intel_cpu/src/nodes/tile.cpp index 55ecea646813da..473d404a0b57d8 100644 --- a/src/plugins/intel_cpu/src/nodes/tile.cpp +++ b/src/plugins/intel_cpu/src/nodes/tile.cpp @@ -40,8 +40,6 @@ Tile::Tile(const std::shared_ptr& op, const GraphContext::CPtr context OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - errorPrefix = "Tile node with name '" + getName() + "'"; - if (ov::is_type(op->get_input_node_ptr(TILE_REPEATS))) { constMap[TILE_REPEATS] = true; repeats = originRepeats = @@ -63,38 +61,34 @@ void Tile::getSupportedDescriptors() { 
return result; }; if (getParentEdges().size() != 2) - OPENVINO_THROW(errorPrefix, - " has incorrect number of input edges. " - "Expected: 2, Actual: ", - getParentEdges().size()); + THROW_CPU_NODE_ERR("has incorrect number of input edges. " + "Expected: 2, Actual: ", + getParentEdges().size()); if (getChildEdges().empty()) - OPENVINO_THROW(errorPrefix, " has no output edges."); + THROW_CPU_NODE_ERR("has no output edges."); const auto& dstDims0 = getOutputShapeAtPort(0).getDims(); for (size_t i = 1lu; i < outputShapes.size(); i++) { const auto& dstDims = getOutputShapeAtPort(i).getDims(); if (dstDims.size() != dstDims0.size()) - OPENVINO_THROW(errorPrefix, - " has output edges 0 and ", - i, - " with different ranks: ", - dstDims0.size(), - " and ", - dstDims.size()); - for (size_t j = 0; j < dstDims0.size(); j++) { - if (dstDims0[j] != dstDims[j]) { - OPENVINO_THROW(errorPrefix, - " has output edges 0 and ", + THROW_CPU_NODE_ERR("has output edges 0 and ", i, - " with different dims: ", - vec_to_string(dstDims0), + " with different ranks: ", + dstDims0.size(), " and ", - vec_to_string(dstDims)); + dstDims.size()); + for (size_t j = 0; j < dstDims0.size(); j++) { + if (dstDims0[j] != dstDims[j]) { + THROW_CPU_NODE_ERR("has output edges 0 and ", + i, + " with different dims: ", + vec_to_string(dstDims0), + " and ", + vec_to_string(dstDims)); } } } if (constMap[TILE_REPEATS] && getInputShapeAtPort(TILE_INPUT).getRank() > getOutputShapeAtPort(0).getRank()) - OPENVINO_THROW( - errorPrefix, + THROW_CPU_NODE_ERR( " has incorrect input/output data shape rank. Input shape rank cannot be more than output shape rank. 
" "Actual input shape size: ", getInputShapeAtPort(TILE_INPUT).getRank(), diff --git a/src/plugins/intel_cpu/src/nodes/tile.h b/src/plugins/intel_cpu/src/nodes/tile.h index 809aff7b70470c..0bb43ac91e8fb5 100644 --- a/src/plugins/intel_cpu/src/nodes/tile.h +++ b/src/plugins/intel_cpu/src/nodes/tile.h @@ -39,8 +39,6 @@ class Tile : public Node, public TileBroadcastCommon { int tiles = 0; bool noTiling = false; VectorDims originRepeats; - - std::string errorPrefix; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/topk.cpp b/src/plugins/intel_cpu/src/nodes/topk.cpp index 673011c2c746c7..f20bfeb8f599cf 100644 --- a/src/plugins/intel_cpu/src/nodes/topk.cpp +++ b/src/plugins/intel_cpu/src/nodes/topk.cpp @@ -1891,8 +1891,6 @@ TopK::TopK(const std::shared_ptr& op, const GraphContext::CPtr context : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { - errorPrefix = "TopK layer with name '" + getName() + "'"; - auto topKOp = ov::as_type_ptr(op); auto in_dims = topKOp->get_input_partial_shape(TOPK_DATA); @@ -1903,7 +1901,7 @@ TopK::TopK(const std::shared_ptr& op, const GraphContext::CPtr context if (!isDynamicNgraphNode(op)) { auto topKConst = ov::as_type_ptr(topKOp->get_input_node_shared_ptr(TOPK_K)); if (!topKConst) { - OPENVINO_THROW(errorPrefix, "gets non-constant second tensor in static shape mode!"); + THROW_CPU_NODE_ERR("gets non-constant second tensor in static shape mode!"); } } @@ -1925,21 +1923,21 @@ TopK::TopK(const std::shared_ptr& op, const GraphContext::CPtr context vec_idx_block.clear(); if (inputShapes.size() != 2 || outputShapes.size() < 2) - OPENVINO_THROW(errorPrefix, " gets incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("gets incorrect number of input/output edges!"); if (getInputShapeAtPort(TOPK_DATA).getRank() != getOutputShapeAtPort(TOPK_DATA).getRank()) - OPENVINO_THROW(errorPrefix, " gets incorrect number of input/output dimensions!"); + 
THROW_CPU_NODE_ERR("gets incorrect number of input/output dimensions!"); if (getInputShapeAtPort(TOPK_K).getRank() != 1) - OPENVINO_THROW(errorPrefix, " gets incorrect index vector dimension! Index vector should be 1 dimension."); + THROW_CPU_NODE_ERR("gets incorrect index vector dimension! Index vector should be 1 dimension."); if (out_dims != out_idx_dims) - OPENVINO_THROW(errorPrefix, " gets incorrect output tensor dimension sizes!"); + THROW_CPU_NODE_ERR("gets incorrect output tensor dimension sizes!"); if (axis < 0) axis += in_dims_size; if (axis < 0 || axis >= static_cast(in_dims_size)) - OPENVINO_THROW(errorPrefix, " gets incorrect input parameters dimensions and axis number!"); + THROW_CPU_NODE_ERR("gets incorrect input parameters dimensions and axis number!"); } else { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } @@ -1976,7 +1974,7 @@ void TopK::initSupportedPrimitiveDescriptors() { ov::element::Type dataPrecision = getOriginalOutputPrecisionAtPort(TOPK_DATA); if (dataPrecision == ov::element::bf16 && !mayiuse(avx512_core)) - OPENVINO_THROW(errorPrefix, " gets incorrect isa for BF16! AVX512 must be supported!"); + THROW_CPU_NODE_ERR("gets incorrect isa for BF16! 
AVX512 must be supported!"); bool precisionSupported = std::find(std::begin(supportedPrecision), std::end(supportedPrecision), dataPrecision) != std::end(supportedPrecision); if (!precisionSupported) { @@ -2046,11 +2044,11 @@ void TopK::prepareParams() { auto dstMemPtr = getDstMemoryAtPort(TOPK_DATA); auto srcMemPtr = getSrcMemoryAtPort(TOPK_DATA); if (!dstMemPtr || !dstMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined destination memory."); + THROW_CPU_NODE_ERR("has undefined destination memory."); if (!srcMemPtr || !srcMemPtr->isDefined()) - OPENVINO_THROW(errorPrefix, " has undefined input memory."); + THROW_CPU_NODE_ERR("has undefined input memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - OPENVINO_THROW(errorPrefix, " has nullable preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has nullable preferable primitive descriptor"); src_dims = srcMemPtr->getDesc().getShape().getDims(); dst_dims = dstMemPtr->getDesc().getShape().getDims(); @@ -2058,7 +2056,7 @@ void TopK::prepareParams() { if (isDynamicNode()) { const int src_k = getSrcDataAtPortAs(TOPK_K)[0]; if (static_cast(src_k) > src_dims[axis]) - OPENVINO_THROW(errorPrefix, " gets top_k out of range!"); + THROW_CPU_NODE_ERR("gets top_k out of range!"); if (top_k != src_k) { top_k = src_k; } @@ -2219,7 +2217,7 @@ void TopK::execute(dnnl::stream strm) { auto out_idx_ptr = reinterpret_cast(dst_idx); topk_ref(in_ptr, out_ptr, out_idx_ptr); } else { - OPENVINO_THROW(errorPrefix, "only support plain layout on machine w/o sse42."); + THROW_CPU_NODE_ERR("only support plain layout on machine w/o sse42."); } } } diff --git a/src/plugins/intel_cpu/src/nodes/topk.h b/src/plugins/intel_cpu/src/nodes/topk.h index 62789bda509180..e0fbb4545ccf46 100644 --- a/src/plugins/intel_cpu/src/nodes/topk.h +++ b/src/plugins/intel_cpu/src/nodes/topk.h @@ -146,8 +146,6 @@ class TopK : public Node { std::vector vec_process_idx_ptr; std::shared_ptr topk_kernel = nullptr; - - std::string errorPrefix; }; } 
// namespace node From 9149f63ee2fddb25034ec5ce5efff59c501d52dd Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Thu, 16 Jan 2025 16:30:25 +0100 Subject: [PATCH 18/97] [Snippets][CPU] Mark serialized data flow graph as exec_graph (#28480) ### Details: Add property to serialized data flow graph to be treated as exec_graph in order to apply type substitution from runtime info ### Tickets: - 160593 --- src/common/snippets/src/lowered/pass/serialize_data_flow.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp b/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp index 4f75e254618d05..4136c637a037ee 100644 --- a/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp +++ b/src/common/snippets/src/lowered/pass/serialize_data_flow.cpp @@ -34,6 +34,7 @@ bool SerializeDataFlow::run(const LinearIR& linear_ir) { } if (ov::is_type(node)) { const auto parameter = std::make_shared(element::f32, Shape{}); + parameter->get_rt_info()["execTimeMcs"] = 0; ops_map[expr] = parameter; parameters.push_back(parameter); } else if (ov::is_type(node)) { From 514ad605b9efb20bebb6c36a64300255b1182430 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Thu, 16 Jan 2025 17:20:02 +0100 Subject: [PATCH 19/97] [REFERENCE] Fix Interpolate reference implementation (#28471) ### Details: - This PR fixes Interpolate reference implementation: missed padding resizing is added. This logic was transferred from [interpolate shape inference implemetation](https://github.com/openvinotoolkit/openvino/blob/master/src/core/shape_inference/include/interpolate_shape_inference.hpp) (for the details, please see `interpolate::resize_padding` implementation and usage). 
- [The specification](https://github.com/openvinotoolkit/openvino/blob/master/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/image/interpolate-4.rst) have no hard limitation that paddings size must be equal to input shape rank - LPT tests, that were affected by the issue, are reenabled ### Tickets: - *CVS-119648* --- .../reference/include/openvino/reference/interpolate.hpp | 6 +++++- .../functional/shared_tests_instances/skip_tests_config.cpp | 2 -- .../functional/shared_tests_instances/skip_tests_config.cpp | 2 -- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/core/reference/include/openvino/reference/interpolate.hpp b/src/core/reference/include/openvino/reference/interpolate.hpp index 90c29f1883137e..5696fc16e172ea 100644 --- a/src/core/reference/include/openvino/reference/interpolate.hpp +++ b/src/core/reference/include/openvino/reference/interpolate.hpp @@ -711,9 +711,13 @@ inline PartialShape get_padded_input_shape(const PartialShape& input_shape, PartialShape padded_input_shape = input_shape; + auto pads_begin = attrs.pads_begin; + auto pads_end = attrs.pads_end; + pads_begin.resize(input_rank); + pads_end.resize(input_rank); for (int64_t i = 0; i < input_rank; ++i) { if (input_shape[i].is_static()) { - auto new_length = attrs.pads_begin[i] + attrs.pads_end[i] + input_shape[i].get_length(); + auto new_length = pads_begin[i] + pads_end[i] + input_shape[i].get_length(); padded_input_shape[i] = Dimension(new_length); } } diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 14cd143aefa757..44bf87a9a37b82 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -185,8 +185,6 @@ std::vector disabledTestPatterns() { 
R"(^smoke_Multinomial(?:Static|Dynamic)+(?:Log)*.*seed_g=0_seed_o=0.*device=CPU.*)", // Issue: 129025 R"(.*smoke_CpuExecNetworkCheck.*StreamsHasHigherPriorityThanLatencyHint.*)", - // Issue: 119648 - R"(.*smoke_LPT/InterpolateTransformation.*)", // Issue: 129931 R"(smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[.*,3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ .*18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[6,1,1,1\]_\{ .*1.52806e.*39, .*0.2, .*0.3, .*0.3, .*0.2, .*0.1 \}_\{ 1.52806e.*39, 0.2, 0.3, 0.3, 0.2, 0.1 \}\})", // TODO: 141068 diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 67caa08318212a..1b914f20d75d94 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -58,8 +58,6 @@ std::vector disabledTestPatterns() { R"(smoke_MemoryTestV3.*)", // Issue: 90539 R"(.*CachingSupportCase.*LoadNetworkCacheTestBase.*CompareWithRefImpl.*)", - // Issue: 119648 - R"(.*smoke_LPT/InterpolateTransformation.*)", R"(.*CachingSupportCase.*GPU.*CompileModelCacheTestBase.*CompareWithRefImpl.*)", // Issue: 111437 R"(.*smoke_Deconv_2D_Dynamic_.*FP32/DeconvolutionLayerGPUTest.Inference.*)", From 271f992939e58f7127fecdc4c60a8f61c9b6a2c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:45:23 +0000 Subject: [PATCH 20/97] Update numpy requirement from <2.1.0,>=1.16.6 to >=1.16.6,<2.3.0 in /tests (#28402) Updates the requirements on [numpy](https://github.com/numpy/numpy) to permit the latest version.
Release notes

Sourced from numpy's releases.

2.2.1 (DEC 21, 2024)

NumPy 2.2.1 Release Notes

NumPy 2.2.1 is a patch release following 2.2.0. It fixes bugs found after the 2.2.0 release and has several maintenance pins to work around upstream changes.

There was some breakage in downstream projects following the 2.2.0 release due to updates to NumPy typing. Because of problems due to MyPy defects, we recommend using basedpyright for type checking, it can be installed from PyPI. The Pylance extension for Visual Studio Code is also based on Pyright. Problems that persist when using basedpyright should be reported as issues on the NumPy github site.

This release supports Python 3.10-3.13.

Contributors

A total of 9 people contributed to this release. People with a "+" by their names contributed a patch for the first time.

  • Charles Harris
  • Joren Hammudoglu
  • Matti Picus
  • Nathan Goldbaum
  • Peter Hawkins
  • Simon Altrogge
  • Thomas A Caswell
  • Warren Weckesser
  • Yang Wang +

Pull requests merged

A total of 12 pull requests were merged for this release.

  • #27935: MAINT: Prepare 2.2.x for further development
  • #27950: TEST: cleanups
  • #27958: BUG: fix use-after-free error in npy_hashtable.cpp (#27955)
  • #27959: BLD: add missing include
  • #27982: BUG:fix compile error libatomic link test to meson.build
  • #27990: TYP: Fix falsely rejected value types in ndarray.__setitem__
  • #27991: MAINT: Don't wrap #include <Python.h> with extern "C"
  • #27993: BUG: Fix segfault in stringdtype lexsort
  • #28006: MAINT: random: Tweak module code in mtrand.pyx to fix a Cython...
  • #28007: BUG: Cython API was missing NPY_UINTP.
  • #28021: CI: pin scipy-doctest to 1.5.1
  • #28044: TYP: allow None in operand sequence of nditer

Checksums

... (truncated)

Changelog

Sourced from numpy's changelog.

This is a walkthrough of the NumPy 2.1.0 release on Linux, modified for building with GitHub Actions and cibuildwheels and uploading to the anaconda.org staging repository for NumPy <https://anaconda.org/multibuild-wheels-staging/numpy>_. The commands can be copied into the command line, but be sure to replace 2.1.0 by the correct version. This should be read together with the :ref:general release guide <prepare_release>.

Facility preparation

Before beginning to make a release, use the requirements/*_requirements.txt files to ensure that you have the needed software. Most software can be installed with pip, but some will require apt-get, dnf, or whatever your system uses for software. You will also need a GitHub personal access token (PAT) to push the documentation. There are a few ways to streamline things:

  • Git can be set up to use a keyring to store your GitHub personal access token. Search online for the details.
  • You can use the keyring app to store the PyPI password for twine. See the online twine documentation for details.

Prior to release

Add/drop Python versions

When adding or dropping Python versions, three files need to be edited:

  • .github/workflows/wheels.yml # for github cibuildwheel
  • tools/ci/cirrus_wheels.yml # for cibuildwheel aarch64/arm64 builds
  • pyproject.toml # for classifier and minimum version check.

Make these changes in an ordinary PR against main and backport if necessary. Add [wheel build] at the end of the title line of the commit summary so that wheel builds will be run to test the changes. We currently release wheels for new Python versions after the first Python rc once manylinux and cibuildwheel support it. For Python 3.11 we were able to release within a week of the rc1 announcement.

Backport pull requests

Changes that have been marked for this release must be backported to the maintenance/2.1.x branch.

Update 2.1.0 milestones

... (truncated)

Commits
  • 7469245 Merge pull request #28047 from charris/prepare-2.2.1
  • acb051e REL: Prepare for the NumPy 2.2.1 release [wheel build]
  • 28a091a Merge pull request #28044 from charris/backport-28039
  • 723605b TST: Add test for allowing None in operand sequence passed to nditer
  • 554739e TYP: allow None in operand sequence of nditer
  • 31bc4c8 Merge pull request #28021 from charris/backport-28020
  • 32f52a3 CI: pin scipy-doctest to 1.5.1 (#28020)
  • 6219aeb Merge pull request #28007 from charris/backport-28005
  • eb7071c Merge pull request #28006 from charris/backport-28003
  • 4f82c32 BUG: Cython API was missing NPY_UINTP.
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index c8796aa56cd3e2..e77b48a9e38662 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -1,4 +1,4 @@ -numpy>=1.16.6,<2.1.0 +numpy>=1.16.6,<2.3.0 attrs==24.2.0 distro==1.9.0 h5py>=3.1.0,<3.12.0 From 03355545fffaca5fa54170a8b200081cf177fbe3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:12:22 +0000 Subject: [PATCH 21/97] Update setuptools requirement from <75.8,>=70.1 to >=70.1,<75.9 in /src/bindings/python (#28369) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updates the requirements on [setuptools](https://github.com/pypa/setuptools) to permit the latest version.
Changelog

Sourced from setuptools's changelog.

v75.8.0

Features

  • Implemented Dynamic field for core metadata (as introduced in PEP 643). The existing implementation is currently experimental and the exact approach may change in future releases. (#4698)

v75.7.0

Features

  • pypa/distutils#310 (#4478)
  • Synced with pypa/distutils@ff11eed0c including bugfix for duplicate CFLAGS and adaption to support Python 3.13 is_abs in the C compiler (#4669). (#4790)

v75.6.0

Features

  • Preserve original PKG-INFO into METADATA when creating wheel (instead of calling wheel.metadata.pkginfo_to_metadata). This helps to be more compliant with the flow specified in PEP 517. (#4701)
  • Changed the WindowsSdkVersion, FrameworkVersion32 and FrameworkVersion64 properties of setuptools.msvc.PlatformInfo to return an empty tuple instead of None as a fallthrough case -- by :user:Avasam (#4754)

v75.5.0

Features

  • Removed support for SETUPTOOLS_DANGEROUSLY_SKIP_PYPROJECT_VALIDATION, as it is deemed prone to errors. (#4746)

v75.4.0

Features

  • Added support for the environment variable

... (truncated)

Commits
  • 5c9d980 Bump version: 75.7.0 → 75.8.0
  • 72c4222 Avoid using Any in function
  • 1c61d47 Add news fragments for PEP 643
  • f285d01 Implement PEP 643 (Dynamic field for core metadata) (#4698)
  • a50f6e2 Fix _static.Dict.ior for Python 3.8
  • b055895 Add extra tests for static/dynamic metadata
  • 770b4fc Remove test workaround for unmarked static values from pyproject.toml
  • 8b22d73 Mark values from pyproject.toml as static
  • f699fd8 Fix spelling error
  • 8b4c8a3 Add tests for static 'attr' directive
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- src/bindings/python/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bindings/python/constraints.txt b/src/bindings/python/constraints.txt index c136c232391b00..a2af88e358c5b7 100644 --- a/src/bindings/python/constraints.txt +++ b/src/bindings/python/constraints.txt @@ -10,7 +10,7 @@ pytest-timeout==2.3.1 # Python bindings build<1.3 pygments>=2.8.1 -setuptools>=70.1,<75.8 +setuptools>=70.1,<75.9 sympy>=1.10 wheel>=0.38.1 patchelf<=0.17.2.1 From 4b1f8249295504ab16f3dd4df338c4ff1e5693aa Mon Sep 17 00:00:00 2001 From: Jade Cho Date: Fri, 17 Jan 2025 13:54:04 +0900 Subject: [PATCH 22/97] [GPU] Fix mvn unit test fails (#28372) ### Details: - *When shape flattening is required, change blocked format to plain format if reduction axis is not blocked axis.* ### Tickets: - *158306* --- .../graph_optimizer/add_required_reorders.cpp | 48 ++++++++++++++----- .../intel_gpu/src/graph/impls/ocl/mvn.cpp | 13 ++++- .../tests/unit/fusions/mvn_fusion_test.cpp | 6 ++- 3 files changed, 52 insertions(+), 15 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp index e59749c69d1f45..333afe18775e0b 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/add_required_reorders.cpp @@ -246,18 +246,42 @@ void add_required_reorders::run(program& p) { } // Remove padded-inputs in spatial axes not to use ref kernel which causes huge perf drop - if (usr->is_type() && usr->as().input().is_padded_spatial()) { - auto out_layout = usr->get_output_layout(); - // Check formats of implemented opt kernels without a spatial padding support - if (out_layout.format == format::b_fs_yx_fsv16 || out_layout.format == format::b_fs_zyx_fsv16 || - 
out_layout.format == format::bs_fs_yx_bsv32_fsv16 || out_layout.format == format::bs_fs_yx_bsv32_fsv32) { - auto& dep = usr->as().input(); - cldnn::layout layout_wo_padding = dep.get_output_layout(); - layout_wo_padding.data_padding = cldnn::padding{}; - auto new_reorder = std::make_shared(dep.id() + "_no_pad_reorder", dep.id(), layout_wo_padding); - auto& new_reorder_node = p.get_or_create(new_reorder); - p.add_intermediate(new_reorder_node, *usr, dep); - new_reorder_node.recalc_output_layout(false); + if (usr->is_type()) { + if (usr->as().input().is_padded_spatial()) { + auto out_layout = usr->get_output_layout(); + // Check formats of implemented opt kernels without a spatial padding support + if (out_layout.format == format::b_fs_yx_fsv16 || out_layout.format == format::b_fs_zyx_fsv16 || + out_layout.format == format::bs_fs_yx_bsv32_fsv16 || out_layout.format == format::bs_fs_yx_bsv32_fsv32) { + auto& dep = usr->as().input(); + cldnn::layout layout_wo_padding = dep.get_output_layout(); + layout_wo_padding.data_padding = cldnn::padding{}; + auto new_reorder = std::make_shared(dep.id() + "_no_pad_reorder", dep.id(), layout_wo_padding); + auto& new_reorder_node = p.get_or_create(new_reorder); + p.add_intermediate(new_reorder_node, *usr, dep); + new_reorder_node.recalc_output_layout(false); + } + } + + auto input_layout = usr->get_input_layout(); + auto input_pshape = input_layout.get_partial_shape(); + auto prim = usr->as().get_primitive(); + + if (prim->requires_alignment(input_pshape)) { + auto block_sizes = format::block_sizes(input_layout.format); + auto axes = prim->reduction_axes; + if (input_layout.is_dynamic() || block_sizes.size() > 1 + || (block_sizes.size() == 1 && + input_pshape[block_sizes[0].first].get_length() % block_sizes[0].second != 0 && + std::count(axes.begin(), axes.end(), block_sizes[0].first) == 0)) { + auto rank = input_pshape.size(); + input_layout.format = format::get_default_format(rank); + auto& dep = usr->as().input(); + auto 
new_reorder = std::make_shared(dep.id() + "_to_plain", dep.id(), input_layout); + auto& new_reorder_node = p.get_or_create(new_reorder); + p.add_intermediate(new_reorder_node, *usr, dep); + // Need to invalidate users because the output format of mvn follows input format. + new_reorder_node.recalc_output_layout(true); + } } } diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/mvn.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/mvn.cpp index 143b775008b3bd..a12980646cd79c 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/mvn.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/mvn.cpp @@ -63,7 +63,18 @@ struct mvn_impl : typed_primitive_impl_ocl { ov::PartialShape shape = ov::PartialShape::dynamic(new_rank); auto& output_layout = updated_impl_params.output_layouts[0]; + if (input_pshape.is_static()) { + size_t flatten_axis = 0; + // Change flatten axis if the format is single fsv. + auto block_sizes = format::block_sizes(input_layout.format); + if (block_sizes.size() == 1 + && (input_pshape[block_sizes[0].first].get_length() % block_sizes[0].second == 0) + && (std::count(axes.begin(), axes.end(), block_sizes[0].first) == 0) + && block_sizes[0].first == 1) { + flatten_axis = 1; + } + for (size_t i = 0; i < new_rank; i++) { shape[i] = 1; } @@ -72,7 +83,7 @@ struct mvn_impl : typed_primitive_impl_ocl { // 1. normalized dimensions which are flattened and written to the last dim // 2. not normalized dims which are flattened and written to the first dim for (size_t i = 0; i < input_rank; i++) { - shape[static_cast(i) < min ? 0 : (new_rank - 1)] *= input_pshape[i]; + shape[static_cast(i) < min ? 
flatten_axis : (new_rank - 1)] *= input_pshape[i]; } } diff --git a/src/plugins/intel_gpu/tests/unit/fusions/mvn_fusion_test.cpp b/src/plugins/intel_gpu/tests/unit/fusions/mvn_fusion_test.cpp index 59e535123b9f8c..da9b2c8011b337 100644 --- a/src/plugins/intel_gpu/tests/unit/fusions/mvn_fusion_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/fusions/mvn_fusion_test.cpp @@ -80,6 +80,7 @@ class MVNFusingTest : public ::BaseFusingTest { #define CASE_MVN_I8_6 { 2, 16, 8, 8 }, { 1, 1, 1, 1 }, data_types::i8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx #define CASE_MVN_I8_7 { 2, 16, 1, 8 }, { 1, 1, 8, 1 }, data_types::i8, format::b_fs_yx_fsv16, {1, 2, 3}, true, data_types::f32, format::bfyx #define CASE_MVN_I8_8 { 2, 16, 3, 8 }, { 1, 1, 3, 8 }, data_types::i8, format::b_fs_yx_fsv16, {3}, true, data_types::f32, format::bfyx +#define CASE_MVN_I8_8_NA { 2, 15, 3, 8 }, { 1, 1, 3, 8 }, data_types::i8, format::b_fs_yx_fsv16, {3}, true, data_types::f32, format::bfyx #define CASE_MVN_3D_I8_1 { 1, 16, 8, 8, 8 }, { 1, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, {2, 3, 4}, true, data_types::f32, format::bfzyx #define CASE_MVN_3D_I8_2 { 2, 16, 8, 8, 8 }, { 2, 16, 8, 8, 8 }, data_types::i8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx #define CASE_MVN_3D_I8_3 { 2, 16, 8, 8, 8 }, { 2, 1, 8, 8, 1 }, data_types::i8, format::bfzyx, {1, 2, 3, 4}, true, data_types::f32, format::bfzyx @@ -170,7 +171,8 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_scale_quantize_i8, ::testing::ValuesIn mvn_test_params{ CASE_MVN_I8_2, 2, 2, 4 }, mvn_test_params{ CASE_MVN_I8_3, 2, 2, 4 }, mvn_test_params{ CASE_MVN_I8_4, 2, 2, 4 }, - // mvn_test_params{ CASE_MVN_I8_8, 3, 3, 4 }, // TODO: It will be fix soon, test reference is wrong in new driver. 
+ mvn_test_params{ CASE_MVN_I8_8, 3, 3, 4 }, + mvn_test_params{ CASE_MVN_I8_8_NA, 3, 3, 4 }, mvn_test_params{ CASE_MVN_3D_I8_1, 2, 2, 4 }, mvn_test_params{ CASE_MVN_3D_I8_2, 2, 2, 4 }, mvn_test_params{ CASE_MVN_U8_1, 2, 2, 4 }, @@ -221,7 +223,7 @@ INSTANTIATE_TEST_SUITE_P(fusings_gpu, mvn_scale_activation_eltwise_fp32_quantize mvn_test_params{ CASE_MVN_I8_5, 2, 4, 6 }, mvn_test_params{ CASE_MVN_I8_6, 2, 4, 6 }, mvn_test_params{ CASE_MVN_I8_7, 3, 4, 6 }, - // mvn_test_params{ CASE_MVN_I8_8, 3, 5, 6 }, // TODO: It will be fix soon, test reference is wrong in new driver. + mvn_test_params{ CASE_MVN_I8_8, 3, 5, 6 }, mvn_test_params{ CASE_MVN_3D_I8_1, 2, 4, 6 }, mvn_test_params{ CASE_MVN_3D_I8_2, 2, 4, 6 }, mvn_test_params{ CASE_MVN_3D_I8_3, 2, 4, 6 }, From 08d87552d8e933ebd84b4534ac76028075ead107 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Fri, 17 Jan 2025 06:42:20 +0100 Subject: [PATCH 23/97] [CPU] Introduce clang-tidy (#28040) For now .clang-tidy file is placed into intel_cpu/src plugin folded, since if we put it into root src folder, this would show clang-tidy warnings in IDEs for every source file of the openvino project. From cmake configuration point of view clang-tidy check is implemented as a generic one. Regarding clang-tidy checks, for now only `performance-*` checks are enabled and fixed. Additionally, the following two checks were enabled, since they are conflicting with the `performance-unnecessary-value-param` check ``` modernize-pass-by-value, cppcoreguidelines-prefer-member-initializer, ``` The idea is to enable the check scopes one by one, since fixing them is quite time consuming. As for pre-commit check, the clang-tidy is enabled in scope of `linux_conditional_compilation` check to avoid an additional build execution. 
Only x64 arch is covered aarch64 will be enabled next --- .github/dockerfiles/docker_tag | 2 +- .../ov_build/ubuntu_22_04_x64_cc/Dockerfile | 10 ++- .../linux_conditional_compilation.yml | 5 +- .../OpenVINODeveloperScriptsConfig.cmake | 1 + .../clang_tidy/clang_tidy.cmake | 25 ++++++ cmake/developer_package/features.cmake | 2 + cmake/developer_package/plugins/plugins.cmake | 9 +- src/plugins/intel_cpu/CMakeLists.txt | 3 +- src/plugins/intel_cpu/src/.clang-tidy | 83 +++++++++++++++++ src/plugins/intel_cpu/src/compiled_model.cpp | 36 ++++---- src/plugins/intel_cpu/src/compiled_model.h | 4 +- .../intel_cpu/src/cpu_map_scheduling.cpp | 2 +- .../intel_cpu/src/cpu_map_scheduling.hpp | 2 +- src/plugins/intel_cpu/src/cpu_memory.cpp | 54 +++++------ src/plugins/intel_cpu/src/cpu_memory.h | 39 ++++---- src/plugins/intel_cpu/src/cpu_shape.h | 8 +- .../intel_cpu/src/cpu_streams_calculation.cpp | 6 +- .../intel_cpu/src/cpu_streams_calculation.hpp | 6 +- src/plugins/intel_cpu/src/cpu_tensor.cpp | 6 +- src/plugins/intel_cpu/src/cpu_tensor.h | 2 +- src/plugins/intel_cpu/src/dnnl_scratch_pad.h | 3 +- src/plugins/intel_cpu/src/edge.cpp | 10 +-- src/plugins/intel_cpu/src/edge.h | 2 +- .../aarch64/jit_conversion_emitters.cpp | 7 +- .../emitters/plugin/aarch64/jit_emitter.hpp | 6 +- .../emitters/plugin/x64/jit_bf16_emitters.hpp | 4 +- .../plugin/x64/jit_conversion_emitters.cpp | 7 +- .../emitters/plugin/x64/jit_dnnl_emitters.cpp | 9 +- .../src/emitters/plugin/x64/jit_emitter.hpp | 6 +- .../plugin/x64/jit_load_store_emitters.cpp | 14 +-- .../plugin/x64/jit_load_store_emitters.hpp | 6 +- .../snippets/aarch64/jit_loop_emitters.cpp | 2 +- .../emitters/snippets/x64/cpu_generator.cpp | 1 + .../snippets/x64/jit_debug_emitter.hpp | 6 +- .../snippets/x64/jit_loop_emitters.cpp | 2 +- .../x64/jit_segfault_detector_emitter.cpp | 4 +- .../x64/kernel_executors/brgemm_copy_b.cpp | 5 +- .../emitters/tpp/x64/jit_debug_emitter.hpp | 48 +++++----- src/plugins/intel_cpu/src/emitters/utils.cpp | 4 +- 
src/plugins/intel_cpu/src/graph.cpp | 46 +++++----- src/plugins/intel_cpu/src/graph.h | 21 +++-- src/plugins/intel_cpu/src/graph_context.cpp | 40 +++++---- src/plugins/intel_cpu/src/graph_context.h | 50 +++++------ src/plugins/intel_cpu/src/graph_dumper.cpp | 4 +- src/plugins/intel_cpu/src/graph_optimizer.cpp | 90 +++++++++---------- src/plugins/intel_cpu/src/infer_request.cpp | 4 +- src/plugins/intel_cpu/src/memory_control.cpp | 5 +- .../src/memory_desc/cpu_memory_desc_utils.cpp | 2 +- .../src/memory_desc/cpu_memory_desc_utils.h | 2 +- .../memory_desc/dnnl_blocked_memory_desc.cpp | 18 ++-- src/plugins/intel_cpu/src/memory_state.cpp | 28 +++--- src/plugins/intel_cpu/src/memory_state.h | 12 ++- src/plugins/intel_cpu/src/node.cpp | 32 +++---- src/plugins/intel_cpu/src/node.h | 32 +++---- .../intel_cpu/src/nodes/adaptive_pooling.cpp | 6 +- .../intel_cpu/src/nodes/adaptive_pooling.h | 6 +- .../intel_cpu/src/nodes/batch_to_space.cpp | 6 +- .../intel_cpu/src/nodes/batch_to_space.h | 6 +- src/plugins/intel_cpu/src/nodes/bin_conv.cpp | 4 +- src/plugins/intel_cpu/src/nodes/bin_conv.h | 4 +- src/plugins/intel_cpu/src/nodes/broadcast.cpp | 8 +- src/plugins/intel_cpu/src/nodes/broadcast.h | 8 +- src/plugins/intel_cpu/src/nodes/bucketize.cpp | 4 +- src/plugins/intel_cpu/src/nodes/bucketize.h | 6 +- .../src/nodes/causal_mask_preprocess.cpp | 6 +- .../src/nodes/causal_mask_preprocess.h | 8 +- src/plugins/intel_cpu/src/nodes/col2im.cpp | 6 +- src/plugins/intel_cpu/src/nodes/col2im.h | 6 +- .../intel_cpu/src/nodes/color_convert.cpp | 22 ++--- .../intel_cpu/src/nodes/color_convert.h | 8 +- .../src/nodes/common/dnnl_executor.cpp | 6 +- .../src/nodes/common/dnnl_executor.h | 6 +- .../intel_cpu/src/nodes/common/fp16_utils.h | 2 + .../src/nodes/common/permute_kernel.h | 4 +- .../src/nodes/common/reorder_prim.cpp | 2 +- .../intel_cpu/src/nodes/common/reorder_prim.h | 2 +- .../intel_cpu/src/nodes/common/softmax.h | 2 +- src/plugins/intel_cpu/src/nodes/composite.cpp | 6 +- 
src/plugins/intel_cpu/src/nodes/composite.h | 4 +- src/plugins/intel_cpu/src/nodes/concat.cpp | 4 +- src/plugins/intel_cpu/src/nodes/concat.h | 6 +- src/plugins/intel_cpu/src/nodes/conv.cpp | 16 ++-- src/plugins/intel_cpu/src/nodes/conv.h | 8 +- src/plugins/intel_cpu/src/nodes/convert.cpp | 8 +- src/plugins/intel_cpu/src/nodes/convert.h | 8 +- .../src/nodes/ctc_greedy_decoder.cpp | 6 +- .../intel_cpu/src/nodes/ctc_greedy_decoder.h | 6 +- .../src/nodes/ctc_greedy_decoder_seq_len.cpp | 6 +- .../src/nodes/ctc_greedy_decoder_seq_len.h | 6 +- src/plugins/intel_cpu/src/nodes/ctc_loss.cpp | 6 +- src/plugins/intel_cpu/src/nodes/ctc_loss.h | 6 +- src/plugins/intel_cpu/src/nodes/cum_sum.cpp | 6 +- src/plugins/intel_cpu/src/nodes/cum_sum.h | 6 +- src/plugins/intel_cpu/src/nodes/deconv.cpp | 4 +- src/plugins/intel_cpu/src/nodes/deconv.h | 6 +- src/plugins/intel_cpu/src/nodes/def_conv.cpp | 6 +- src/plugins/intel_cpu/src/nodes/def_conv.h | 6 +- .../intel_cpu/src/nodes/depth_to_space.cpp | 6 +- .../intel_cpu/src/nodes/depth_to_space.h | 6 +- .../intel_cpu/src/nodes/detection_output.cpp | 6 +- .../intel_cpu/src/nodes/detection_output.h | 6 +- src/plugins/intel_cpu/src/nodes/dft.cpp | 4 +- src/plugins/intel_cpu/src/nodes/dft.h | 4 +- src/plugins/intel_cpu/src/nodes/eltwise.cpp | 8 +- src/plugins/intel_cpu/src/nodes/eltwise.h | 9 +- .../intel_cpu/src/nodes/embedding_bag.cpp | 4 +- .../src/nodes/embedding_bag_offsets.cpp | 6 +- .../src/nodes/embedding_bag_offsets.h | 6 +- .../src/nodes/embedding_bag_packed.cpp | 6 +- .../src/nodes/embedding_bag_packed.h | 6 +- .../src/nodes/embedding_segments_sum.cpp | 6 +- .../src/nodes/embedding_segments_sum.h | 6 +- .../nodes/executors/aarch64/jit_eltwise.cpp | 3 +- .../src/nodes/executors/acl/acl_deconv.cpp | 2 +- .../src/nodes/executors/acl/acl_eltwise.cpp | 2 +- .../executors/acl/acl_fullyconnected.cpp | 2 +- .../executors/acl/acl_fullyconnected.hpp | 2 +- .../acl/acl_fullyconnected_utils.cpp | 34 +++---- .../acl/acl_fullyconnected_utils.hpp | 
22 +++-- .../executors/acl/acl_lowp_fullyconnected.cpp | 11 ++- .../src/nodes/executors/acl/acl_mvn.cpp | 2 +- .../src/nodes/executors/acl/acl_pooling.cpp | 2 +- .../src/nodes/executors/acl/acl_reduce.cpp | 2 +- .../nodes/executors/common/ref_transpose.cpp | 2 +- .../nodes/executors/common/ref_transpose.hpp | 2 +- .../intel_cpu/src/nodes/executors/convert.cpp | 6 +- .../intel_cpu/src/nodes/executors/convert.hpp | 2 +- .../src/nodes/executors/convert_list.hpp | 2 +- .../intel_cpu/src/nodes/executors/deconv.hpp | 4 +- .../src/nodes/executors/deconv_list.hpp | 2 +- .../dnnl/dnnl_convolution_primitive.cpp | 8 +- .../dnnl/dnnl_convolution_primitive.hpp | 6 +- .../executors/dnnl/dnnl_fullyconnected.hpp | 15 ++-- .../dnnl/dnnl_fullyconnected_primitive.cpp | 14 +-- .../dnnl/dnnl_fullyconnected_primitive.hpp | 6 +- .../executors/dnnl/dnnl_matmul_primitive.cpp | 10 +-- .../executors/dnnl/dnnl_matmul_primitive.hpp | 6 +- .../intel_cpu/src/nodes/executors/eltwise.cpp | 6 +- .../intel_cpu/src/nodes/executors/eltwise.hpp | 16 ++-- .../src/nodes/executors/eltwise_list.hpp | 2 +- .../src/nodes/executors/executor.hpp | 9 +- .../src/nodes/executors/executor_factory.hpp | 5 +- .../executors/executor_implementation.hpp | 2 +- .../fullyconnected_implementations.cpp | 25 +++--- .../src/nodes/executors/graph_emitter.hpp | 7 +- .../src/nodes/executors/interpolate.hpp | 4 +- .../src/nodes/executors/interpolate_list.hpp | 2 +- .../src/nodes/executors/mlas/mlas_gemm.cpp | 2 +- .../src/nodes/executors/mlas/mlas_gemm.hpp | 2 +- .../intel_cpu/src/nodes/executors/mvn.cpp | 6 +- .../intel_cpu/src/nodes/executors/mvn.hpp | 2 +- .../src/nodes/executors/mvn_list.hpp | 2 +- .../intel_cpu/src/nodes/executors/pooling.cpp | 6 +- .../intel_cpu/src/nodes/executors/pooling.hpp | 4 +- .../src/nodes/executors/pooling_list.hpp | 2 +- .../intel_cpu/src/nodes/executors/reduce.cpp | 6 +- .../intel_cpu/src/nodes/executors/reduce.hpp | 4 +- .../src/nodes/executors/reduce_list.hpp | 2 +- 
.../src/nodes/executors/subgraph.cpp | 10 ++- .../src/nodes/executors/subgraph.hpp | 12 +-- .../src/nodes/executors/transpose.cpp | 5 +- .../src/nodes/executors/transpose.hpp | 2 +- .../src/nodes/executors/transpose_list.hpp | 2 +- .../src/nodes/executors/variable_executor.hpp | 6 +- .../src/nodes/executors/x64/subgraph.cpp | 7 +- ...xperimental_detectron_detection_output.cpp | 14 +-- .../experimental_detectron_detection_output.h | 6 +- ...ectron_generate_proposals_single_image.cpp | 4 +- ...etectron_generate_proposals_single_image.h | 6 +- ...erimental_detectron_priorgridgenerator.cpp | 4 +- ...xperimental_detectron_priorgridgenerator.h | 6 +- ...rimental_detectron_roifeatureextractor.cpp | 13 +-- ...perimental_detectron_roifeatureextractor.h | 6 +- .../nodes/experimental_detectron_topkrois.cpp | 4 +- .../nodes/experimental_detectron_topkrois.h | 6 +- .../src/nodes/extract_image_patches.cpp | 6 +- .../src/nodes/extract_image_patches.h | 6 +- src/plugins/intel_cpu/src/nodes/eye.cpp | 7 +- src/plugins/intel_cpu/src/nodes/eye.h | 6 +- .../intel_cpu/src/nodes/fake_quantize.cpp | 6 +- .../intel_cpu/src/nodes/fake_quantize.h | 6 +- .../intel_cpu/src/nodes/fullyconnected.cpp | 18 ++-- .../intel_cpu/src/nodes/fullyconnected.h | 8 +- src/plugins/intel_cpu/src/nodes/gather.cpp | 6 +- src/plugins/intel_cpu/src/nodes/gather.h | 6 +- .../intel_cpu/src/nodes/gather_elements.cpp | 6 +- .../intel_cpu/src/nodes/gather_elements.h | 6 +- src/plugins/intel_cpu/src/nodes/gather_nd.cpp | 32 ++++--- src/plugins/intel_cpu/src/nodes/gather_nd.h | 12 +-- .../intel_cpu/src/nodes/gather_tree.cpp | 6 +- src/plugins/intel_cpu/src/nodes/gather_tree.h | 6 +- .../src/nodes/generate_proposals.cpp | 6 +- .../intel_cpu/src/nodes/generate_proposals.h | 6 +- .../intel_cpu/src/nodes/grid_sample.cpp | 6 +- .../intel_cpu/src/nodes/grid_sample.hpp | 6 +- src/plugins/intel_cpu/src/nodes/grn.cpp | 8 +- src/plugins/intel_cpu/src/nodes/grn.h | 6 +- src/plugins/intel_cpu/src/nodes/if.cpp | 19 ++-- 
src/plugins/intel_cpu/src/nodes/if.h | 10 +-- src/plugins/intel_cpu/src/nodes/input.cpp | 26 +++--- src/plugins/intel_cpu/src/nodes/input.h | 17 ++-- .../intel_cpu/src/nodes/interaction.cpp | 8 +- src/plugins/intel_cpu/src/nodes/interaction.h | 8 +- .../intel_cpu/src/nodes/interpolate.cpp | 24 ++--- src/plugins/intel_cpu/src/nodes/interpolate.h | 8 +- src/plugins/intel_cpu/src/nodes/inverse.cpp | 4 +- src/plugins/intel_cpu/src/nodes/inverse.hpp | 4 +- .../nodes/kernels/aarch64/brgemm_kernel.cpp | 32 ++++--- .../aarch64/jit_uni_eltwise_generic.cpp | 18 ++-- .../aarch64/jit_uni_eltwise_generic.hpp | 11 +-- .../kernels/scaled_attn/softmax_kernel.hpp | 4 +- .../src/nodes/kernels/x64/brgemm_kernel.cpp | 6 +- .../nodes/kernels/x64/gather_uni_kernel.cpp | 5 +- .../nodes/kernels/x64/gather_uni_kernel.hpp | 7 +- .../src/nodes/kernels/x64/grid_sample.cpp | 7 +- .../src/nodes/kernels/x64/grid_sample.hpp | 10 ++- .../src/nodes/kernels/x64/jit_kernel.cpp | 5 +- .../src/nodes/kernels/x64/jit_kernel.hpp | 22 ++--- .../src/nodes/kernels/x64/jit_kernel_base.cpp | 7 +- .../src/nodes/kernels/x64/mlp_kernel.hpp | 2 +- src/plugins/intel_cpu/src/nodes/llm_mlp.cpp | 12 +-- src/plugins/intel_cpu/src/nodes/llm_mlp.h | 6 +- .../intel_cpu/src/nodes/log_softmax.cpp | 6 +- src/plugins/intel_cpu/src/nodes/log_softmax.h | 6 +- src/plugins/intel_cpu/src/nodes/lora.cpp | 4 +- src/plugins/intel_cpu/src/nodes/lora.h | 4 +- src/plugins/intel_cpu/src/nodes/lrn.cpp | 6 +- src/plugins/intel_cpu/src/nodes/lrn.h | 6 +- .../intel_cpu/src/nodes/mathematics.cpp | 8 +- src/plugins/intel_cpu/src/nodes/mathematics.h | 6 +- src/plugins/intel_cpu/src/nodes/matmul.cpp | 8 +- src/plugins/intel_cpu/src/nodes/matmul.h | 8 +- .../intel_cpu/src/nodes/matrix_nms.cpp | 12 +-- src/plugins/intel_cpu/src/nodes/matrix_nms.h | 12 +-- src/plugins/intel_cpu/src/nodes/memory.cpp | 60 ++++++------- src/plugins/intel_cpu/src/nodes/memory.hpp | 38 ++++---- .../intel_cpu/src/nodes/memory_state_base.h | 4 +- 
src/plugins/intel_cpu/src/nodes/mha.cpp | 19 ++-- src/plugins/intel_cpu/src/nodes/mha.h | 11 ++- .../intel_cpu/src/nodes/multiclass_nms.cpp | 12 +-- .../intel_cpu/src/nodes/multiclass_nms.hpp | 12 +-- .../intel_cpu/src/nodes/multinomial.cpp | 4 +- .../intel_cpu/src/nodes/multinomial.hpp | 4 +- src/plugins/intel_cpu/src/nodes/mvn.cpp | 14 +-- src/plugins/intel_cpu/src/nodes/mvn.h | 6 +- src/plugins/intel_cpu/src/nodes/ngram.cpp | 4 +- src/plugins/intel_cpu/src/nodes/ngram.h | 4 +- src/plugins/intel_cpu/src/nodes/node_config.h | 15 ++-- .../src/nodes/non_max_suppression.cpp | 4 +- .../intel_cpu/src/nodes/non_max_suppression.h | 4 +- src/plugins/intel_cpu/src/nodes/non_zero.cpp | 6 +- src/plugins/intel_cpu/src/nodes/non_zero.h | 6 +- src/plugins/intel_cpu/src/nodes/normalize.cpp | 16 ++-- src/plugins/intel_cpu/src/nodes/normalize.h | 6 +- src/plugins/intel_cpu/src/nodes/one_hot.cpp | 6 +- src/plugins/intel_cpu/src/nodes/one_hot.h | 6 +- src/plugins/intel_cpu/src/nodes/pad.cpp | 6 +- src/plugins/intel_cpu/src/nodes/pad.h | 6 +- .../intel_cpu/src/nodes/paged_attn.cpp | 4 +- src/plugins/intel_cpu/src/nodes/paged_attn.h | 6 +- src/plugins/intel_cpu/src/nodes/pooling.cpp | 9 +- src/plugins/intel_cpu/src/nodes/pooling.h | 6 +- src/plugins/intel_cpu/src/nodes/priorbox.cpp | 6 +- src/plugins/intel_cpu/src/nodes/priorbox.h | 6 +- .../src/nodes/priorbox_clustered.cpp | 4 +- .../intel_cpu/src/nodes/priorbox_clustered.h | 6 +- src/plugins/intel_cpu/src/nodes/proposal.cpp | 6 +- src/plugins/intel_cpu/src/nodes/proposal.h | 6 +- .../intel_cpu/src/nodes/psroi_pooling.cpp | 8 +- .../intel_cpu/src/nodes/psroi_pooling.h | 4 +- src/plugins/intel_cpu/src/nodes/qkv_proj.cpp | 7 +- src/plugins/intel_cpu/src/nodes/qkv_proj.h | 6 +- .../intel_cpu/src/nodes/random_uniform.cpp | 6 +- .../intel_cpu/src/nodes/random_uniform.hpp | 4 +- src/plugins/intel_cpu/src/nodes/range.cpp | 6 +- src/plugins/intel_cpu/src/nodes/range.h | 6 +- src/plugins/intel_cpu/src/nodes/rdft.cpp | 6 +- 
src/plugins/intel_cpu/src/nodes/rdft.h | 6 +- src/plugins/intel_cpu/src/nodes/reduce.cpp | 6 +- src/plugins/intel_cpu/src/nodes/reduce.h | 6 +- src/plugins/intel_cpu/src/nodes/reference.cpp | 12 +-- src/plugins/intel_cpu/src/nodes/reference.h | 6 +- .../intel_cpu/src/nodes/region_yolo.cpp | 4 +- src/plugins/intel_cpu/src/nodes/region_yolo.h | 6 +- src/plugins/intel_cpu/src/nodes/reorder.cpp | 14 +-- src/plugins/intel_cpu/src/nodes/reorder.h | 12 +-- .../intel_cpu/src/nodes/reorg_yolo.cpp | 6 +- src/plugins/intel_cpu/src/nodes/reorg_yolo.h | 6 +- src/plugins/intel_cpu/src/nodes/reshape.cpp | 8 +- src/plugins/intel_cpu/src/nodes/reshape.h | 6 +- .../intel_cpu/src/nodes/reverse_sequence.cpp | 8 +- .../intel_cpu/src/nodes/reverse_sequence.h | 6 +- src/plugins/intel_cpu/src/nodes/rms_norm.cpp | 4 +- src/plugins/intel_cpu/src/nodes/rms_norm.h | 8 +- src/plugins/intel_cpu/src/nodes/rnn.cpp | 16 ++-- src/plugins/intel_cpu/src/nodes/rnn.h | 11 +-- src/plugins/intel_cpu/src/nodes/roi_align.cpp | 6 +- src/plugins/intel_cpu/src/nodes/roi_align.h | 6 +- .../intel_cpu/src/nodes/roi_align_rotated.cpp | 6 +- .../intel_cpu/src/nodes/roi_align_rotated.h | 6 +- .../intel_cpu/src/nodes/roi_pooling.cpp | 6 +- src/plugins/intel_cpu/src/nodes/roi_pooling.h | 6 +- src/plugins/intel_cpu/src/nodes/roll.cpp | 8 +- src/plugins/intel_cpu/src/nodes/roll.h | 6 +- src/plugins/intel_cpu/src/nodes/rope.cpp | 12 +-- src/plugins/intel_cpu/src/nodes/rope.h | 8 +- .../intel_cpu/src/nodes/scaled_attn.cpp | 65 +++++++------- src/plugins/intel_cpu/src/nodes/scaled_attn.h | 8 +- .../intel_cpu/src/nodes/scatter_update.cpp | 11 +-- .../intel_cpu/src/nodes/scatter_update.h | 6 +- .../intel_cpu/src/nodes/search_sorted.cpp | 6 +- .../intel_cpu/src/nodes/search_sorted.h | 6 +- src/plugins/intel_cpu/src/nodes/shapeof.cpp | 4 +- src/plugins/intel_cpu/src/nodes/shapeof.h | 6 +- .../intel_cpu/src/nodes/shuffle_channels.cpp | 6 +- .../intel_cpu/src/nodes/shuffle_channels.h | 6 +- 
src/plugins/intel_cpu/src/nodes/softmax.cpp | 6 +- src/plugins/intel_cpu/src/nodes/softmax.h | 6 +- .../intel_cpu/src/nodes/space_to_batch.cpp | 6 +- .../intel_cpu/src/nodes/space_to_batch.h | 6 +- .../intel_cpu/src/nodes/space_to_depth.cpp | 6 +- .../intel_cpu/src/nodes/space_to_depth.h | 6 +- src/plugins/intel_cpu/src/nodes/split.cpp | 6 +- src/plugins/intel_cpu/src/nodes/split.h | 8 +- src/plugins/intel_cpu/src/nodes/stft.cpp | 4 +- src/plugins/intel_cpu/src/nodes/stft.h | 4 +- .../intel_cpu/src/nodes/strided_slice.cpp | 13 +-- .../intel_cpu/src/nodes/strided_slice.h | 6 +- .../src/nodes/string_tensor_pack.cpp | 8 +- .../intel_cpu/src/nodes/string_tensor_pack.h | 6 +- .../src/nodes/string_tensor_unpack.cpp | 8 +- .../src/nodes/string_tensor_unpack.h | 6 +- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 31 ++++--- src/plugins/intel_cpu/src/nodes/subgraph.h | 10 ++- .../intel_cpu/src/nodes/tensoriterator.cpp | 34 +++---- .../intel_cpu/src/nodes/tensoriterator.h | 12 +-- src/plugins/intel_cpu/src/nodes/tile.cpp | 8 +- src/plugins/intel_cpu/src/nodes/tile.h | 8 +- src/plugins/intel_cpu/src/nodes/topk.cpp | 6 +- src/plugins/intel_cpu/src/nodes/topk.h | 6 +- src/plugins/intel_cpu/src/nodes/transpose.cpp | 8 +- src/plugins/intel_cpu/src/nodes/transpose.h | 6 +- src/plugins/intel_cpu/src/nodes/unique.cpp | 7 +- src/plugins/intel_cpu/src/nodes/unique.hpp | 6 +- .../intel_cpu/src/partitioned_mem_blk.h | 2 + src/plugins/intel_cpu/src/plugin.cpp | 2 +- src/plugins/intel_cpu/src/post_ops.cpp | 6 +- src/plugins/intel_cpu/src/post_ops.hpp | 4 +- src/plugins/intel_cpu/src/proxy_mem_blk.cpp | 2 +- src/plugins/intel_cpu/src/proxy_mem_blk.h | 6 +- .../custom/adaptive_pooling.hpp | 4 +- .../shape_inference/custom/color_convert.hpp | 4 +- .../shape_inference/custom/fullyconnected.hpp | 2 +- .../src/shape_inference/custom/gather.hpp | 4 +- .../src/shape_inference/custom/matmul.hpp | 4 +- .../src/shape_inference/custom/ngram.hpp | 4 +- .../src/shape_inference/custom/one_hot.hpp | 4 +- 
.../src/shape_inference/custom/priorbox.hpp | 4 +- .../custom/priorbox_clustered.hpp | 4 +- .../src/shape_inference/custom/reshape.cpp | 6 +- .../src/shape_inference/custom/reshape.hpp | 4 +- .../src/shape_inference/custom/rms_norm.hpp | 2 + .../shape_inference/custom/scaled_attn.cpp | 4 +- .../shape_inference/custom/scaled_attn.hpp | 4 +- .../shape_inference/custom/strided_slice.hpp | 4 +- .../src/shape_inference/custom/subgraph.hpp | 5 +- .../src/shape_inference/custom/transpose.hpp | 4 +- .../src/shape_inference/static_shape.hpp | 4 - .../common/op/causal_mask_preprocess.cpp | 5 +- .../common/op/causal_mask_preprocess.hpp | 2 +- .../common/op/read_value_with_subgraph.cpp | 6 +- .../common/op/read_value_with_subgraph.hpp | 5 +- .../cpu_opset/common/op/sdpa.cpp | 13 +-- .../cpu_opset/common/op/sdpa.hpp | 6 +- .../pass/causal_mask_preprocess_fusion.cpp | 2 +- .../common/pass/convert_matmul_to_fc.cpp | 2 +- .../pass/move_fc_reshape_to_weights.cpp | 2 +- .../move_readvalue_inputs_to_subgraph.cpp | 8 +- .../cpu_opset/common/pass/ngram_fusion.cpp | 16 ++-- .../common/pass/stateful_sdpa_fusion.cpp | 2 +- .../convert_to_cpu_specific_opset.hpp | 2 +- .../transformations/cpu_opset/x64/op/mha.cpp | 46 +++++----- .../transformations/cpu_opset/x64/op/mha.hpp | 12 +-- .../cpu_opset/x64/pass/mlp_fusion.cpp | 2 +- .../intel_cpu/src/transformations/itt.hpp | 4 +- .../snippets/x64/op/brgemm_copy_b.cpp | 12 +-- .../snippets/x64/op/brgemm_copy_b.hpp | 6 +- .../snippets/x64/op/brgemm_cpu.cpp | 38 ++++---- .../snippets/x64/op/brgemm_cpu.hpp | 30 +++---- .../snippets/x64/op/brgemm_utils.cpp | 4 +- .../snippets/x64/pass/enforce_precision.cpp | 2 +- .../snippets/x64/pass/enforce_precision.hpp | 2 +- .../lowered/insert_brgemm_copy_buffers.cpp | 2 +- .../x64/pass/snippets_mark_skipped.cpp | 4 +- .../transformations/transformation_pipeline.h | 5 +- src/plugins/intel_cpu/src/utils/blob_dump.h | 3 +- src/plugins/intel_cpu/src/utils/codec_xor.hpp | 5 +- .../src/utils/debug_capabilities.cpp | 2 
+- .../intel_cpu/src/utils/debug_capabilities.h | 17 ++-- .../intel_cpu/src/utils/ngraph_utils.hpp | 2 +- .../intel_cpu/src/utils/node_dumper.cpp | 18 +++- .../intel_cpu/src/utils/plain_tensor.hpp | 11 +-- .../rt_info/memory_formats_attribute.hpp | 3 +- src/plugins/intel_cpu/src/utils/serialize.cpp | 4 +- src/plugins/intel_cpu/src/weights_cache.cpp | 9 +- src/plugins/intel_cpu/src/weights_cache.hpp | 8 +- .../intel_cpu/tests/unit/graph/dummy_node.hpp | 2 +- 407 files changed, 1899 insertions(+), 1636 deletions(-) create mode 100644 cmake/developer_package/clang_tidy/clang_tidy.cmake create mode 100644 src/plugins/intel_cpu/src/.clang-tidy diff --git a/.github/dockerfiles/docker_tag b/.github/dockerfiles/docker_tag index dc8abab3366b79..0d2fd1b31c0f73 100644 --- a/.github/dockerfiles/docker_tag +++ b/.github/dockerfiles/docker_tag @@ -1 +1 @@ -pr-28380 \ No newline at end of file +pr-28040 diff --git a/.github/dockerfiles/ov_build/ubuntu_22_04_x64_cc/Dockerfile b/.github/dockerfiles/ov_build/ubuntu_22_04_x64_cc/Dockerfile index 4e044eab71ef84..07f0941fb441fa 100644 --- a/.github/dockerfiles/ov_build/ubuntu_22_04_x64_cc/Dockerfile +++ b/.github/dockerfiles/ov_build/ubuntu_22_04_x64_cc/Dockerfile @@ -36,7 +36,11 @@ RUN apt-get update && \ # For Java API default-jdk \ # Compiler \ - clang \ + clang-15 \ + # Static analyzer + clang-tidy-15 \ + # clang-tidy uses clang-format as a dependency + clang-format-15 \ && \ rm -rf /var/lib/apt/lists/* @@ -47,8 +51,8 @@ RUN chmod +x /install_build_dependencies.sh && \ rm -rf /var/lib/apt/lists/* # Set clang as a default compiler -RUN update-alternatives --install /usr/bin/cc cc /usr/bin/clang 100 && \ - update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++ 100 +RUN update-alternatives --install /usr/bin/cc cc /usr/bin/clang-15 100 && \ + update-alternatives --install /usr/bin/c++ c++ /usr/bin/clang++-15 100 # Install sscache ARG SCCACHE_VERSION="v0.7.5" diff --git a/.github/workflows/linux_conditional_compilation.yml 
b/.github/workflows/linux_conditional_compilation.yml index f198e64f7ad2ed..5e906e89541b30 100644 --- a/.github/workflows/linux_conditional_compilation.yml +++ b/.github/workflows/linux_conditional_compilation.yml @@ -151,7 +151,9 @@ jobs: # Build # - - name: CMake configure - CC COLLECT + - name: CMake configure - CC COLLECT with clang-tidy + # clang-tidy static analysis check is enabled as part of collection + # to avoid an additional separate build execution run: | cmake \ -G "${{ env.CMAKE_GENERATOR }}" \ @@ -159,6 +161,7 @@ jobs: -DBUILD_SHARED_LIBS=OFF \ -DENABLE_TESTS=ON \ -DENABLE_CPPLINT=OFF \ + -DENABLE_CLANG_TIDY=ON \ -DENABLE_NCC_STYLE=OFF \ -DCMAKE_COMPILE_WARNING_AS_ERROR=ON \ -DENABLE_PROFILING_ITT=ON \ diff --git a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake index d953a2970b33da..ecb52325d7223b 100644 --- a/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake +++ b/cmake/developer_package/OpenVINODeveloperScriptsConfig.cmake @@ -305,6 +305,7 @@ include(python_requirements) include(cpplint/cpplint) include(clang_format/clang_format) +include(clang_tidy/clang_tidy) include(ncc_naming_style/ncc_naming_style) # Restore state diff --git a/cmake/developer_package/clang_tidy/clang_tidy.cmake b/cmake/developer_package/clang_tidy/clang_tidy.cmake new file mode 100644 index 00000000000000..f3b3a6e697e0cd --- /dev/null +++ b/cmake/developer_package/clang_tidy/clang_tidy.cmake @@ -0,0 +1,25 @@ +# Copyright (C) 2018-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +if(ENABLE_CLANG_TIDY) + set(CLANG_TIDY_REQUIRED_VERSION 15 CACHE STRING "clang-tidy version to use") + set(CLANG_TIDY_FILENAME clang-tidy-${CLANG_TIDY_REQUIRED_VERSION} clang-tidy) + find_host_program(CLANG_TIDY NAMES ${CLANG_TIDY_FILENAME} PATHS ENV PATH) + if(CLANG_TIDY) + execute_process(COMMAND ${CLANG_TIDY} ${CMAKE_CURRENT_SOURCE_DIR} ARGS --version OUTPUT_VARIABLE CLANG_VERSION) + if(NOT 
CLANG_VERSION) + message(WARNING "Supported clang-tidy version is ${CLANG_TIDY_REQUIRED_VERSION}!") + set(ENABLE_CLANG_TIDY OFF) + else() + string(REGEX REPLACE "[^0-9]+([0-9]+)\\..*" "\\1" CLANG_TIDY_MAJOR_VERSION ${CLANG_VERSION}) + if(NOT CLANG_TIDY_MAJOR_VERSION EQUAL CLANG_TIDY_REQUIRED_VERSION) + message(WARNING "Supported clang-tidy version is ${CLANG_TIDY_REQUIRED_VERSION}! Provided version ${CLANG_TIDY_MAJOR_VERSION}") + set(ENABLE_CLANG_TIDY OFF) + endif() + endif() + else() + message(WARNING "Supported clang-tidy-${CLANG_TIDY_REQUIRED_VERSION} is not found!") + set(ENABLE_CLANG_TIDY OFF) + endif() +endif() diff --git a/cmake/developer_package/features.cmake b/cmake/developer_package/features.cmake index 387e6f25fd2a14..d4dacf28039516 100644 --- a/cmake/developer_package/features.cmake +++ b/cmake/developer_package/features.cmake @@ -78,6 +78,8 @@ ov_dependent_option (ENABLE_CPPLINT_REPORT "Build cpplint report instead of fail ov_option (ENABLE_CLANG_FORMAT "Enable clang-format checks during the build" ${STYLE_CHECKS_DEFAULT}) +ov_option (ENABLE_CLANG_TIDY "Enable clang-tidy checks during the build" ${STYLE_CHECKS_DEFAULT}) + ov_option (ENABLE_NCC_STYLE "Enable ncc style check" ${STYLE_CHECKS_DEFAULT}) ov_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF) diff --git a/cmake/developer_package/plugins/plugins.cmake b/cmake/developer_package/plugins/plugins.cmake index 144d2e3ceab705..b7664557cec3d8 100644 --- a/cmake/developer_package/plugins/plugins.cmake +++ b/cmake/developer_package/plugins/plugins.cmake @@ -34,10 +34,11 @@ endif() # [SKIP_INSTALL] # [SKIP_REGISTRATION] Skip creation of .xml # [ADD_CLANG_FORMAT] +# [ADD_CLANG_TIDY] # ) # function(ov_add_plugin) - set(options SKIP_INSTALL PSEUDO_DEVICE ADD_CLANG_FORMAT AS_EXTENSION SKIP_REGISTRATION) + set(options SKIP_INSTALL PSEUDO_DEVICE ADD_CLANG_FORMAT ADD_CLANG_TIDY AS_EXTENSION SKIP_REGISTRATION) set(oneValueArgs NAME DEVICE_NAME VERSION_DEFINES_FOR PSEUDO_PLUGIN_FOR) 
set(multiValueArgs DEFAULT_CONFIG SOURCES OBJECT_LIBRARIES CPPLINT_FILTERS) cmake_parse_arguments(OV_PLUGIN "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) @@ -105,6 +106,12 @@ function(ov_add_plugin) string(CONCAT custom_filter "${custom_filter}" "," "${filter}") endforeach() + if (OV_PLUGIN_ADD_CLANG_TIDY) + if (ENABLE_CLANG_TIDY) + set_target_properties(${OV_PLUGIN_NAME} PROPERTIES CXX_CLANG_TIDY clang-tidy-${CLANG_TIDY_REQUIRED_VERSION}) + endif() + endif() + if (OV_PLUGIN_ADD_CLANG_FORMAT) ov_add_clang_format_target(${OV_PLUGIN_NAME}_clang FOR_SOURCES ${OV_PLUGIN_SOURCES}) else() diff --git a/src/plugins/intel_cpu/CMakeLists.txt b/src/plugins/intel_cpu/CMakeLists.txt index 5d66a4db8b6253..f47bc0b5d86fc2 100644 --- a/src/plugins/intel_cpu/CMakeLists.txt +++ b/src/plugins/intel_cpu/CMakeLists.txt @@ -239,7 +239,8 @@ ov_add_plugin(NAME ${TARGET_NAME} AS_EXTENSION VERSION_DEFINES_FOR src/plugin.cpp SOURCES ${SOURCES} ${HEADERS} - ADD_CLANG_FORMAT) + ADD_CLANG_FORMAT + ADD_CLANG_TIDY) # give a different file name depending on target platform architecture if(ARM OR AARCH64) diff --git a/src/plugins/intel_cpu/src/.clang-tidy b/src/plugins/intel_cpu/src/.clang-tidy new file mode 100644 index 00000000000000..b86cc0e063da84 --- /dev/null +++ b/src/plugins/intel_cpu/src/.clang-tidy @@ -0,0 +1,83 @@ +--- + +### NOTE: +# The 'Checks: >' is a multiline string here. Comment must not be moved into the string. +# +### Scopes to be enabled: +# +# cppcoreguidelines-*, +# google-*, +# readability-*, +# modernize-*, +# bugprone-*, +# misc-*, +# +### Checks that are turned off for a reason: +# +# -cppcoreguidelines-pro-bounds-pointer-arithmetic +# -google-readability-todo. No big reason to enforce +# -modernize-use-trailing-return-type. Just stylistic preference +# -readability-identifier-length. A lot of code use short names for readability, i.e. 'B' for batch +# -readability-uppercase-literal-suffix. 
+# +### Checks that are turned off but better be enabled later: +# -bugprone-narrowing-conversions +# -bugprone-easily-swappable-parameters +# -bugprone-fold-init-type +# -bugprone-implicit-widening-of-multiplication-result +# -cppcoreguidelines-narrowing-conversions +# -google-readability-braces-around-statements +# -readability-implicit-bool-conversion, +# -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers +# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions +# -modernize-concat-nested-namespaces. More compact way when C++17 is available + +Checks: > + -*, + performance-*, + modernize-pass-by-value, + cppcoreguidelines-prefer-member-initializer, + -bugprone-easily-swappable-parameters, + -bugprone-fold-init-type, + -bugprone-implicit-widening-of-multiplication-result, + -bugprone-narrowing-conversions, + -cppcoreguidelines-narrowing-conversions, + -cppcoreguidelines-pro-bounds-pointer-arithmetic, + -google-build-using-namespace, + -google-readability-todo, + -readability-braces-around-statements, + -google-readability-braces-around-statements, + -modernize-use-trailing-return-type, + -readability-identifier-length, + -readability-implicit-bool-conversion, + -readability-magic-numbers, + -cppcoreguidelines-avoid-magic-numbers, + -readability-uppercase-literal-suffix, + -readability-function-cognitive-complexity, + -modernize-concat-nested-namespaces, +# Treat warnings as errors +WarningsAsErrors: '*' +# Use clang-format for applied fixes +FormatStyle: file +HeaderFilterRegex: '' +CheckOptions: + - key: cppcoreguidelines-avoid-do-while.IgnoreMacros + value: true + # matches with corresponding cpplink check + - key: google-readability-namespace-comments.ShortNamespaceLines + value: "10" + # matches with corresponding cpplink check + - key: google-readability-namespace-comments.SpacesBeforeComments + value: "2" + - key: modernize-loop-convert.MinConfidence + value: reasonable + - key: 
modernize-pass-by-value.IncludeStyle + value: google +### To be considered to enable: +# # Unifies the usage of the statements +# - key: readability-braces-around-statements.ShortStatementLines +# value: "1" +# Reasonable way to enforce splitting complex code into simple functions +# - key: google-readability-function-size.StatementThreshold +# value: "800" +--- diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp b/src/plugins/intel_cpu/src/compiled_model.cpp index 3b560cf5518ba4..c9b7f45222d155 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -8,6 +8,7 @@ #include #include "async_infer_request.h" +#include "config.h" #include "cpu/x64/cpu_isa_traits.hpp" #include "infer_request.h" #include "itt.h" @@ -44,34 +45,34 @@ struct ImmediateSerialExecutor : public ov::threading::ITaskExecutor { CompiledModel::CompiledModel(const std::shared_ptr& model, const std::shared_ptr& plugin, - const Config& cfg, + Config cfg, const bool loaded_from_cache, - const std::shared_ptr sub_memory_manager) + std::shared_ptr sub_memory_manager) : ov::ICompiledModel::ICompiledModel(model, plugin), m_model(model), m_plugin(plugin), - m_cfg{cfg}, + m_cfg{std::move(cfg)}, m_name{model->get_name()}, m_loaded_from_cache(loaded_from_cache), - m_sub_memory_manager(sub_memory_manager) { + m_sub_memory_manager(std::move(sub_memory_manager)) { m_mutex = std::make_shared(); const auto& core = m_plugin->get_core(); if (!core) OPENVINO_THROW("Unable to get API version. Core is unavailable"); - IStreamsExecutor::Config executor_confg; - if (cfg.exclusiveAsyncRequests) { + IStreamsExecutor::Config executor_config; + if (m_cfg.exclusiveAsyncRequests) { // special case when all InferRequests are muxed into a single queue m_task_executor = m_plugin->get_executor_manager()->get_executor("CPU"); } else { - executor_confg = m_cfg.numSubStreams > 0 ? 
IStreamsExecutor::Config{"CPUMainStreamExecutor", - 1, - 1, - ov::hint::SchedulingCoreType::ANY_CORE, - false, - true} - : m_cfg.streamExecutorConfig; - m_task_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor(executor_confg); + executor_config = m_cfg.numSubStreams > 0 ? IStreamsExecutor::Config{"CPUMainStreamExecutor", + 1, + 1, + ov::hint::SchedulingCoreType::ANY_CORE, + false, + true} + : m_cfg.streamExecutorConfig; + m_task_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor(executor_config); } if (0 != m_cfg.streamExecutorConfig.get_streams()) { m_callback_executor = m_plugin->get_executor_manager()->get_idle_cpu_streams_executor( @@ -85,11 +86,11 @@ CompiledModel::CompiledModel(const std::shared_ptr& model, if (m_callback_executor) set_callback_executor(m_callback_executor); - int streams = std::max(1, executor_confg.get_streams()); + int streams = std::max(1, executor_config.get_streams()); std::vector tasks; tasks.resize(streams); m_graphs.resize(streams); - if (executor_confg.get_streams() != 0) { + if (executor_config.get_streams() != 0) { auto all_graphs_ready = [&] { return std::all_of(m_graphs.begin(), m_graphs.end(), [&](Graph& graph) { return graph.IsReady(); @@ -196,7 +197,8 @@ std::shared_ptr CompiledModel::create_infer_request() co get_callback_executor()); if (m_has_sub_compiled_models) { std::vector> requests; - for (auto model : m_sub_compiled_models) { + requests.reserve(m_sub_compiled_models.size()); + for (const auto& model : m_sub_compiled_models) { requests.push_back(model->create_infer_request()); } async_infer_request->setSubInferRequest(requests); diff --git a/src/plugins/intel_cpu/src/compiled_model.h b/src/plugins/intel_cpu/src/compiled_model.h index 68e82bee77ae38..1f9cc3c0fdb590 100644 --- a/src/plugins/intel_cpu/src/compiled_model.h +++ b/src/plugins/intel_cpu/src/compiled_model.h @@ -34,9 +34,9 @@ class CompiledModel : public ov::ICompiledModel { CompiledModel(const std::shared_ptr& 
model, const std::shared_ptr& plugin, - const Config& cfg, + Config cfg, const bool loaded_from_cache, - const std::shared_ptr sub_memory_manager = nullptr); + std::shared_ptr sub_memory_manager = nullptr); ~CompiledModel() { if (m_has_sub_compiled_models) { diff --git a/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp b/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp index e9855a01309564..e0d4fa5fa44f9d 100644 --- a/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp +++ b/src/plugins/intel_cpu/src/cpu_map_scheduling.cpp @@ -48,7 +48,7 @@ std::vector> apply_scheduling_core_type(ov::hint::SchedulingCor std::vector> apply_hyper_threading(bool& input_ht_hint, const bool input_ht_changed, - const std::string input_pm_hint, + const std::string& input_pm_hint, const std::vector>& proc_type_table) { std::vector> result_table = proc_type_table; diff --git a/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp b/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp index 2d6a29f38bf595..ae0c95ea8ed4b0 100644 --- a/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp +++ b/src/plugins/intel_cpu/src/cpu_map_scheduling.hpp @@ -37,7 +37,7 @@ std::vector> apply_scheduling_core_type(ov::hint::SchedulingCor */ std::vector> apply_hyper_threading(bool& input_ht_hint, const bool input_ht_changed, - const std::string input_pm_hint, + const std::string& input_pm_hint, const std::vector>& proc_type_table); /** diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp index 754003c9ce0c0b..5e749121ecda51 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.cpp +++ b/src/plugins/intel_cpu/src/cpu_memory.cpp @@ -15,6 +15,7 @@ # include # include /* strerror(errno) */ +# include #endif namespace ov { @@ -67,35 +68,35 @@ void transferData(const IMemory& src, const IMemory& dst, bool ftz) { } // namespace -Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data, bool pads_zeroing) - : m_eng(eng), - m_pMemDesc(desc), +Memory::Memory(dnnl::engine eng, 
MemoryDescPtr desc, const void* data, bool pads_zeroing) + : m_eng(std::move(eng)), + m_pMemDesc(std::move(desc)), m_blockHandle(std::make_shared(make_unique()), this), dnnlMemHandle(this) { - if (desc->getPrecision() == element::string) { + if (m_pMemDesc->getPrecision() == element::string) { OPENVINO_THROW("[CPU] Memory object cannot be created for string data."); } create(m_pMemDesc, data, pads_zeroing); } -Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data, bool pads_zeroing) - : Memory::Memory(eng, desc.clone(), data, pads_zeroing) {} +Memory::Memory(dnnl::engine eng, const MemoryDesc& desc, const void* data, bool pads_zeroing) + : Memory::Memory(std::move(eng), desc.clone(), data, pads_zeroing) {} -Memory::Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block) - : m_eng(eng), - m_pMemDesc(desc), +Memory::Memory(dnnl::engine eng, MemoryDescPtr desc, MemoryBlockPtr block) + : m_eng(std::move(eng)), + m_pMemDesc(std::move(desc)), m_blockHandle(std::move(block), this), dnnlMemHandle(this) { - if (desc->getPrecision() == element::string) { + if (m_pMemDesc->getPrecision() == element::string) { OPENVINO_THROW("[CPU] Memory object can't be created for string data."); } bool memAllocated = m_blockHandle->getRawPtr(); - create(desc, nullptr, !memAllocated); + create(m_pMemDesc, nullptr, !memAllocated); } -Memory::Memory(const dnnl::engine& eng, const MemoryDesc& desc, MemoryBlockPtr block) - : Memory::Memory(eng, desc.clone(), std::move(block)) {} +Memory::Memory(dnnl::engine eng, const MemoryDesc& desc, MemoryBlockPtr block) + : Memory::Memory(std::move(eng), desc.clone(), std::move(block)) {} size_t Memory::getSize() const { auto size = getDesc().getCurrentMemSize(); @@ -110,7 +111,7 @@ void Memory::create(const MemoryDesc& desc, const void* data, bool pads_zeroing) } void Memory::create(MemoryDescPtr desc, const void* data, bool pads_zeroing) { - m_pMemDesc = desc; + m_pMemDesc = std::move(desc); m_padsZeroing = 
pads_zeroing; dnnlMemHandle.resetDnnlPrim(); @@ -248,9 +249,9 @@ void MemoryBlockWithReuse::destroy(void* ptr) { /////////////// StringMemory /////////////// -StringMemory::StringMemory(const dnnl::engine& engine, const MemoryDescPtr& desc, const void* data) - : m_engine(engine), - m_mem_desc(desc) { +StringMemory::StringMemory(dnnl::engine engine, MemoryDescPtr desc, const void* data) + : m_engine(std::move(engine)), + m_mem_desc(std::move(desc)) { if (m_mem_desc->getPrecision() != element::string) { OPENVINO_THROW("[CPU] StringMemory supports String type only."); } @@ -407,10 +408,10 @@ void DnnlMemoryBlock::notifyUpdate() { } } -StaticMemory::StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data, bool pads_zeroing) - : m_eng(eng), - m_pMemDesc(desc) { - if (desc->getPrecision() == element::string) { +StaticMemory::StaticMemory(dnnl::engine eng, MemoryDescPtr desc, const void* data, bool pads_zeroing) + : m_eng(std::move(eng)), + m_pMemDesc(std::move(desc)) { + if (m_pMemDesc->getPrecision() == element::string) { OPENVINO_THROW("[CPU] StaticMemory object cannot be created for string data."); } if (!m_pMemDesc->isDefined()) { @@ -439,8 +440,8 @@ StaticMemory::StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const vo } } -StaticMemory::StaticMemory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data, bool pads_zeroing) - : StaticMemory::StaticMemory(eng, desc.clone(), data, pads_zeroing) {} +StaticMemory::StaticMemory(dnnl::engine eng, const MemoryDesc& desc, const void* data, bool pads_zeroing) + : StaticMemory::StaticMemory(std::move(eng), desc.clone(), data, pads_zeroing) {} const MemoryDesc& StaticMemory::getDesc() const { return *m_pMemDesc; @@ -553,7 +554,8 @@ bool mbind_move(void* data, size_t size, int targetNode) { int realNode = ov::get_org_numa_id(targetNode); auto pagesize = getpagesize(); auto page_count = (size + pagesize - 1) / pagesize; - char* pages = reinterpret_cast((((uintptr_t)data) & 
~((uintptr_t)(pagesize - 1)))); + char* pages = reinterpret_cast( // NOLINT(performance-no-int-to-ptr) + (((uintptr_t)data) & ~((uintptr_t)(pagesize - 1)))); unsigned long mask = 0; unsigned flags = 0; if (realNode < 0) { @@ -578,13 +580,13 @@ bool mbind_move(void* data, size_t size, int targetNode) { } #endif -bool mbind_move(const MemoryCPtr mem, int numaNodeID) { +bool mbind_move(const MemoryCPtr& mem, int numaNodeID) { void* data = mem->getData(); auto size = mem->getSize(); return mbind_move(data, size, numaNodeID); } -bool mbind_move(const dnnl::memory mem, int numaNodeID) { +bool mbind_move(const dnnl::memory& mem, int numaNodeID) { void* data = mem.get_data_handle(); auto desc = mem.get_desc(); auto size = desc.get_size(); diff --git a/src/plugins/intel_cpu/src/cpu_memory.h b/src/plugins/intel_cpu/src/cpu_memory.h index 8776511873ac5d..1b1b4debe4fcc4 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.h +++ b/src/plugins/intel_cpu/src/cpu_memory.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "dnnl_extension_utils.h" #include "memory_desc/cpu_memory_desc.h" @@ -131,11 +132,11 @@ class DnnlMemBlockHandle { DnnlMemBlockHandle(const DnnlMemBlockHandle&) = delete; DnnlMemBlockHandle& operator=(const DnnlMemBlockHandle&) = delete; - DnnlMemBlockHandle(DnnlMemBlockHandle&& source) { + DnnlMemBlockHandle(DnnlMemBlockHandle&& source) noexcept { std::swap(m_pMemBlock, source.m_pMemBlock); std::swap(m_pMem, source.m_pMem); } - DnnlMemBlockHandle& operator=(DnnlMemBlockHandle&& rhs) { + DnnlMemBlockHandle& operator=(DnnlMemBlockHandle&& rhs) noexcept { std::swap(m_pMemBlock, rhs.m_pMemBlock); std::swap(m_pMem, rhs.m_pMem); return *this; @@ -238,8 +239,8 @@ class StaticMemory final : public IMemory { using MemBlockPtr = std::shared_ptr; public: - StaticMemory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data = nullptr, bool pads_zeroing = true); - StaticMemory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data = nullptr, bool 
pads_zeroing = true); + StaticMemory(dnnl::engine eng, MemoryDescPtr desc, const void* data = nullptr, bool pads_zeroing = true); + StaticMemory(dnnl::engine eng, const MemoryDesc& desc, const void* data = nullptr, bool pads_zeroing = true); StaticMemory(const StaticMemory&) = delete; StaticMemory& operator=(const StaticMemory&) = delete; @@ -279,10 +280,10 @@ class StaticMemory final : public IMemory { class Memory : public IMemory { public: - Memory(const dnnl::engine& eng, MemoryDescPtr desc, const void* data = nullptr, bool pads_zeroing = true); - Memory(const dnnl::engine& eng, const MemoryDesc& desc, const void* data = nullptr, bool pads_zeroing = true); - Memory(const dnnl::engine& eng, MemoryDescPtr desc, MemoryBlockPtr block); - Memory(const dnnl::engine& eng, const MemoryDesc& desc, MemoryBlockPtr block); + Memory(dnnl::engine eng, MemoryDescPtr desc, const void* data = nullptr, bool pads_zeroing = true); + Memory(dnnl::engine eng, const MemoryDesc& desc, const void* data = nullptr, bool pads_zeroing = true); + Memory(dnnl::engine eng, MemoryDescPtr desc, MemoryBlockPtr block); + Memory(dnnl::engine eng, const MemoryDesc& desc, MemoryBlockPtr block); Memory(const Memory&) = delete; Memory& operator=(const Memory&) = delete; @@ -385,18 +386,18 @@ class StringMemory : public IMemory { using StringMemoryBlockPtr = std::shared_ptr; - StringMemory(const dnnl::engine& engine, const MemoryDescPtr& desc, const void* data = nullptr); + StringMemory(dnnl::engine engine, MemoryDescPtr desc, const void* data = nullptr); - StringMemory(const dnnl::engine& engine, const MemoryDesc& desc, const void* data = nullptr) - : StringMemory(engine, desc.clone(), data) {} + StringMemory(dnnl::engine engine, const MemoryDesc& desc, const void* data = nullptr) + : StringMemory(std::move(engine), desc.clone(), data) {} - StringMemory(const dnnl::engine& engine, const MemoryDescPtr& desc, const StringMemoryBlockPtr& block) - : m_engine(engine), - m_mem_desc(desc), - 
m_memoryBlock(block) {} + StringMemory(dnnl::engine engine, MemoryDescPtr desc, StringMemoryBlockPtr block) + : m_engine(std::move(engine)), + m_mem_desc(std::move(desc)), + m_memoryBlock(std::move(block)) {} - StringMemory(const dnnl::engine& engine, const MemoryDesc& desc, const StringMemoryBlockPtr& block) - : StringMemory(engine, desc.clone(), block) {} + StringMemory(dnnl::engine engine, const MemoryDesc& desc, StringMemoryBlockPtr block) + : StringMemory(std::move(engine), desc.clone(), std::move(block)) {} const MemoryDesc& getDesc() const override { return *m_mem_desc; @@ -443,8 +444,8 @@ using MemoryCPtr = std::shared_ptr; using StringMemoryPtr = std::shared_ptr; bool mbind_move(void* data, size_t size, int numaNodeID); -bool mbind_move(const MemoryCPtr mem, int numaNodeID); -bool mbind_move(const dnnl::memory mem, int numaNodeID); +bool mbind_move(const MemoryCPtr& mem, int numaNodeID); +bool mbind_move(const dnnl::memory& mem, int numaNodeID); MemoryPtr split_horizontal(const dnnl::engine& eng, const MemoryPtr& src, diff --git a/src/plugins/intel_cpu/src/cpu_shape.h b/src/plugins/intel_cpu/src/cpu_shape.h index 3c8d6d093220b9..f60ba585df89bf 100644 --- a/src/plugins/intel_cpu/src/cpu_shape.h +++ b/src/plugins/intel_cpu/src/cpu_shape.h @@ -36,9 +36,7 @@ class Shape { }); } - explicit Shape(const VectorDims& shape) { - dims = minDims = maxDims = shape; - type = ShapeType::Static; + explicit Shape(const VectorDims& shape) : type(ShapeType::Static), dims(minDims = maxDims = shape) { hasZeroDimensions = std::any_of(dims.begin(), dims.end(), [](size_t dim) { return dim == 0; }); @@ -66,10 +64,10 @@ class Shape { }); } - Shape(const std::initializer_list& shape) { + Shape(const std::initializer_list& shape) : type(ShapeType::Static) { minDims.reserve(shape.size()); maxDims.reserve(shape.size()); - type = ShapeType::Static; + for (auto dim : shape) { minDims.push_back(dim); maxDims.push_back(dim); diff --git 
a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index a88ec8e4a1da4a..6b68afffa711e7 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -49,8 +49,8 @@ std::vector> get_streams_info_table( const int input_threads, const int input_infer_requests, const int model_prefer_threads, - const std::string input_perf_hint, - const std::set hint_model_distribution_policy, + const std::string& input_perf_hint, + const std::set& hint_model_distribution_policy, const std::vector>& proc_type_table) { std::vector stream_info(CPU_STREAMS_TABLE_SIZE, INIT_VAL); std::vector> streams_info_table; @@ -562,7 +562,7 @@ std::vector> get_streams_rank_table(const std::vector> proc_type_table, + const std::vector>& proc_type_table, const std::shared_ptr& model, Config& config) { const int sockets = get_num_sockets(); diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp index 2af57e05bfd53d..b3a9694d7cb336 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp @@ -48,8 +48,8 @@ std::vector> get_streams_info_table( const int input_threads, const int input_infer_requests, const int model_prefer_threads, - const std::string input_perf_hint, - const std::set hint_llm_distribution_policy, + const std::string& input_perf_hint, + const std::set& hint_llm_distribution_policy, const std::vector>& proc_type_table); /** @@ -75,7 +75,7 @@ std::vector> get_streams_rank_table(const std::vector> proc_type_table, + const std::vector>& proc_type_table, const std::shared_ptr& model, Config& config); diff --git a/src/plugins/intel_cpu/src/cpu_tensor.cpp b/src/plugins/intel_cpu/src/cpu_tensor.cpp index 1a31fa063b306f..14378592c576e0 100644 --- a/src/plugins/intel_cpu/src/cpu_tensor.cpp +++ b/src/plugins/intel_cpu/src/cpu_tensor.cpp @@ 
-4,6 +4,8 @@ #include "cpu_tensor.h" +#include + #include "memory_desc/blocked_memory_desc.h" #include "utils/debug_capabilities.h" #include "utils/general_utils.h" @@ -11,7 +13,7 @@ namespace ov { namespace intel_cpu { -Tensor::Tensor(MemoryPtr memptr) : m_memptr{memptr} { +Tensor::Tensor(MemoryPtr memptr) : m_memptr{std::move(memptr)} { OPENVINO_ASSERT(m_memptr != nullptr); // only support plain data format ncsp. @@ -100,7 +102,7 @@ void* Tensor::data(const element::Type& element_type) const { * @return Shared pointer to tensor interface */ std::shared_ptr make_tensor(MemoryPtr mem) { - return std::make_shared(mem); + return std::make_shared(std::move(mem)); } } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/cpu_tensor.h b/src/plugins/intel_cpu/src/cpu_tensor.h index bcf738f321984a..4885d76358b496 100644 --- a/src/plugins/intel_cpu/src/cpu_tensor.h +++ b/src/plugins/intel_cpu/src/cpu_tensor.h @@ -47,4 +47,4 @@ class Tensor : public ITensor { std::shared_ptr make_tensor(MemoryPtr mem); } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/dnnl_scratch_pad.h b/src/plugins/intel_cpu/src/dnnl_scratch_pad.h index 17b366e8acd54d..198a18809e4064 100644 --- a/src/plugins/intel_cpu/src/dnnl_scratch_pad.h +++ b/src/plugins/intel_cpu/src/dnnl_scratch_pad.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include "cpu_memory.h" #include "utils/general_utils.h" @@ -17,7 +18,7 @@ class DnnlScratchPad { dnnl::engine eng; public: - DnnlScratchPad(const dnnl::engine& eng, int numa_node = -1) : eng(eng) { + DnnlScratchPad(dnnl::engine eng, int numa_node = -1) : eng(std::move(eng)) { blockPtr = std::make_shared(make_unique(numa_node)); } diff --git a/src/plugins/intel_cpu/src/edge.cpp b/src/plugins/intel_cpu/src/edge.cpp index 7819a9a4c1efd2..2ee0a42f4cae3b 100644 --- a/src/plugins/intel_cpu/src/edge.cpp +++ b/src/plugins/intel_cpu/src/edge.cpp @@ -77,7 +77,7 @@ void 
Edge::collectConsumers(std::vector& result) const { auto peerOutputNum = this->getOutputNum(); auto peerInPlacePort = peerChildSPD->getConfig().inConfs[peerOutputNum].inPlace(); auto vecChildEdges = getChild()->getChildEdgesAtPort(peerInPlacePort); - for (auto childEdge : vecChildEdges) { + for (const auto& childEdge : vecChildEdges) { childEdge->collectConsumers(result); } } @@ -256,7 +256,7 @@ Edge::ReorderStatus Edge::needReorder() { void Edge::reuse(MemoryPtr ptr) { OPENVINO_ASSERT(ptr != nullptr, "Attempt to reuse initialized memory in ", *this); - memoryPtr = ptr; + memoryPtr = std::move(ptr); changeStatus(Status::Allocated); DEBUG_LOG(*this, " memoryPtr=", memoryPtr); @@ -298,9 +298,9 @@ void Edge::allocate(MemoryBlockPtr memBlock) { OPENVINO_THROW("Unexpected: Memory block ptr is NULL"); } - auto allocateFunc = [OV_CAPTURE_CPY_AND_THIS](const MemoryDesc& inputDesc) -> MemoryPtr { + auto allocateFunc = [this, block = std::move(memBlock)](const MemoryDesc& inputDesc) mutable -> MemoryPtr { auto parentPtr = getParent(); - return std::make_shared(parentPtr->getEngine(), inputDesc, std::move(memBlock)); + return std::make_shared(parentPtr->getEngine(), inputDesc, std::move(block)); }; allocateCommon(allocateFunc); @@ -316,7 +316,7 @@ std::string Edge::hash() const { std::to_string(child_port); } -void Edge::externalAllocate(WeightsSharing::Ptr weightsCache) { +void Edge::externalAllocate(const WeightsSharing::Ptr& weightsCache) { if (status != Status::NeedAllocation) return; diff --git a/src/plugins/intel_cpu/src/edge.h b/src/plugins/intel_cpu/src/edge.h index 4b485419ff8edb..2f05bf1c92282d 100644 --- a/src/plugins/intel_cpu/src/edge.h +++ b/src/plugins/intel_cpu/src/edge.h @@ -57,7 +57,7 @@ class Edge { void init(); void allocate(const void* mem_ptr = nullptr); void allocate(MemoryBlockPtr memBlock); - void externalAllocate(WeightsSharing::Ptr weightsCache); + void externalAllocate(const WeightsSharing::Ptr& weightsCache); void reuse(MemoryPtr ptr); void 
validate(); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp index 43417942e8bc53..99de56b3c136eb 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp @@ -201,10 +201,9 @@ jit_convert_emitter::jit_convert_emitter(jit_generator* host, cpu_isa_t host_isa, const std::shared_ptr& node, ov::element::Type exec_prc) - : jit_emitter(host, host_isa, exec_prc) { - input_type = node->get_input_element_type(0); - output_type = node->get_output_element_type(0); -} + : jit_emitter(host, host_isa, exec_prc), + input_type(node->get_input_element_type(0)), + output_type(node->get_output_element_type(0)) {} void jit_convert_emitter::validate_types() const { OV_CPU_JIT_EMITTER_ASSERT( diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp index c0bfb4114f9c17..d305b59f51a8d5 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_emitter.hpp @@ -126,18 +126,18 @@ class jit_emitter : public ov::snippets::Emitter { mapped_table_t entry_map_; - Xbyak_aarch64::AdrImm table_val(std::string key, size_t key_off_val_shift = 0) const { + Xbyak_aarch64::AdrImm table_val(const std::string& key, size_t key_off_val_shift = 0) const { const int32_t off = table_off(key, key_off_val_shift); return Xbyak_aarch64::ptr(p_table, off); } - Xbyak_aarch64::AdrNoOfs table_val2(std::string key, size_t key_off_val_shift = 0) const { + Xbyak_aarch64::AdrNoOfs table_val2(const std::string& key, size_t key_off_val_shift = 0) const { const int32_t off = table_off(key, key_off_val_shift); h->add_imm(h->X_DEFAULT_ADDR, p_table, off, h->X_TMP_0); return Xbyak_aarch64::ptr(h->X_DEFAULT_ADDR); } - void 
push_arg_entry_of(const std::string key, const table_entry_val_t val, const bool broadcast) { + void push_arg_entry_of(const std::string& key, const table_entry_val_t val, const bool broadcast) { mapped_table_entry_t te{0, val, broadcast}; entry_map_.insert(std::make_pair(key, te)); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp index 6bca11ffe39f47..78ad3b04aa06b1 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp @@ -16,9 +16,9 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter { dnnl::impl::cpu::x64::cpu_isa_t host_isa, ov::element::Type exec_prc = ov::element::bf16, conversion_mode mode = conversion_mode::default_mode) - : jit_emitter(host, host_isa, exec_prc) { + : jit_emitter(host, host_isa, exec_prc), + mode_(mode) { prepare_table(); - mode_ = mode; } size_t get_inputs_num() const override { diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp index b1ef291e45e111..e508b428c5506b 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp @@ -18,10 +18,9 @@ jit_convert_emitter::jit_convert_emitter(jit_generator* host, cpu_isa_t host_isa, const std::shared_ptr& node, ov::element::Type exec_prc) - : jit_emitter(host, host_isa, exec_prc) { - input_type = node->get_input_element_type(0); - output_type = node->get_output_element_type(0); - + : jit_emitter(host, host_isa, exec_prc), + input_type(node->get_input_element_type(0)), + output_type(node->get_output_element_type(0)) { if (output_type == ov::element::bf16) uni_vcvtneps2bf16.reset(new jit_uni_vcvtneps2bf16(host, host_isa)); } diff --git 
a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_emitters.cpp index b1c0f80242a847..e8d2f9d0936f14 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_emitters.cpp @@ -22,11 +22,10 @@ jit_dnnl_emitter::jit_dnnl_emitter(jit_generator* host, cpu_isa_t host_isa, const std::shared_ptr& node, ov::element::Type exec_prc) - : jit_emitter(host, host_isa, exec_prc) { - kind = dnnl_eltwise_tanh; - alpha = 0.f; - beta = 0.f; - + : jit_emitter(host, host_isa, exec_prc), + kind(dnnl_eltwise_tanh), + alpha(0.f), + beta(0.f) { set_injector(); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp index 5f7ef8d06b3a12..4c5821bc5a0aae 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.hpp @@ -135,7 +135,7 @@ class jit_emitter : public ov::snippets::Emitter { static constexpr int k_mask_num = 8; static constexpr int gpr_size = 8; - Xbyak::Address table_val(std::string key, size_t key_off_val_shift = 0) const { + Xbyak::Address table_val(const std::string& key, size_t key_off_val_shift = 0) const { auto off = table_off(key, key_off_val_shift); return h->ptr[p_table + off]; } @@ -145,7 +145,7 @@ class jit_emitter : public ov::snippets::Emitter { mapped_table_t entry_map_; - void push_arg_entry_of(const std::string key, const table_entry_val_t val, const bool broadcast) { + void push_arg_entry_of(const std::string& key, const table_entry_val_t val, const bool broadcast) { mapped_table_entry_t te{0, val, broadcast}; entry_map_.insert(std::make_pair(key, te)); } @@ -172,7 +172,7 @@ class jit_emitter : public ov::snippets::Emitter { void push_vec(const Xbyak::Address& addr, size_t vec_idx) const; void pop_vec(size_t vec_idx, const Xbyak::Address& addr) 
const; - size_t table_off(std::string& key, size_t key_off_val_shift = 0) const { + size_t table_off(const std::string& key, size_t key_off_val_shift = 0) const { // assumption: all table entries sharing the same key also // share their broadcast property // TODO: enforce through data structure diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp index 7d8f0c9eb42a7b..72384674edd97e 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp @@ -4,6 +4,8 @@ #include "jit_load_store_emitters.hpp" +#include + #include "utils/bfloat16.hpp" using namespace dnnl::impl; @@ -102,14 +104,14 @@ jit_load_emitter::jit_load_emitter(dnnl::impl::cpu::x64::jit_generator* host, emitter_in_out_map in_out_type) : jit_emitter(host, host_isa, exec_prc, in_out_type), name_("unknown"), + v_len_elt_(get_vec_length() / exec_prc.size()), load_num_(load_num), + load_size_(load_num * src_prc.size()), src_prc_(src_prc), dst_prc_(dst_prc), is_fill_(is_fill), - fill_value_(fill_value) { + fill_value_(std::move(fill_value)) { prepare_table(); - load_size_ = load_num * src_prc.size(); - v_len_elt_ = get_vec_length() / exec_prc.size(); } size_t jit_load_emitter::get_inputs_num() const { @@ -630,7 +632,7 @@ void jit_load_emitter::load_words_to_dword_extension(const Vmm& vmm, } template -void jit_load_emitter::fill_with_default(const Vmm& vmm, std::string fill_value, const int& load_num) const { +void jit_load_emitter::fill_with_default(const Vmm& vmm, const std::string& fill_value, const int& load_num) const { constexpr bool is_xmm = std::is_same::value; constexpr bool is_ymm = std::is_same::value; constexpr bool is_zmm = std::is_same::value; @@ -671,13 +673,13 @@ jit_store_emitter::jit_store_emitter(dnnl::impl::cpu::x64::jit_generator* host, emitter_in_out_map in_out_type) : 
jit_emitter(host, host_isa, exec_prc, in_out_type), name_("unknown"), + v_len_elt_(get_vec_length() / exec_prc.size()), store_num_(store_num), + store_size_(store_num * dst_prc.size()), src_prc_(src_prc), dst_prc_(dst_prc), mode_(mode) { prepare_table(); - v_len_elt_ = get_vec_length() / exec_prc.size(); - store_size_ = store_num * dst_prc.size(); uni_vcvtneps2bf16_.reset(new jit_uni_vcvtneps2bf16(host, host_isa)); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.hpp index 54f0948035ee6d..ee8104e290da9f 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.hpp @@ -4,6 +4,8 @@ #pragma once +#include + #include "jit_bf16_emitters.hpp" #include "jit_emitter.hpp" @@ -20,7 +22,7 @@ struct load_emitter_params : public emitter_params { dst_prc_(dst_prc), load_num_(load_num), is_fill_(is_fill), - fill_value_(fill_value) {} + fill_value_(std::move(fill_value)) {} size_t hash() const override; @@ -99,7 +101,7 @@ class jit_load_emitter : public jit_emitter { int load_size) const; template - void fill_with_default(const Vmm& vmm, std::string fill_value, const int& load_num) const; + void fill_with_default(const Vmm& vmm, const std::string& fill_value, const int& load_num) const; void register_table_entries() override; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_loop_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_loop_emitters.cpp index 0666505a6d31ab..9e3dc9674079b1 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_loop_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_loop_emitters.cpp @@ -88,7 +88,7 @@ jit_loop_end_emitter::jit_loop_end_emitter(dnnl::impl::cpu::aarch64::jit_generat ov::snippets::lowered::ExpressionPtr jit_loop_end_emitter::get_loop_begin_expr( const 
ov::snippets::lowered::ExpressionPtr& expr) { - const auto begin_expr = expr->get_input_port_connectors().back()->get_source().get_expr(); + auto begin_expr = expr->get_input_port_connectors().back()->get_source().get_expr(); OV_CPU_JIT_EMITTER_ASSERT(ov::is_type(begin_expr->get_node()), "LoopEnd expression must have th last port connector to LoopBegin"); return begin_expr; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp index de2160e0053808..5e4a8992aa7165 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp @@ -373,6 +373,7 @@ std::vector intel_cpu::CPUTargetMachine::get_vec_reg_pool() const } }(); std::vector reg_pool; + reg_pool.reserve(num_vec_regs); for (int i = 0; i < num_vec_regs; i++) reg_pool.emplace_back(snippets::RegType::vec, static_cast(i)); return reg_pool; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp index 4bf2e55baeb05f..2c92b23e85fb46 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.hpp @@ -6,6 +6,8 @@ # pragma once +# include + # include "emitters/plugin/x64/jit_emitter.hpp" namespace ov { @@ -15,14 +17,14 @@ class jit_debug_emitter : public jit_emitter { public: enum class EmissionLocation { preamble, postamble, both }; jit_debug_emitter(const std::shared_ptr& target_emitter, - const std::shared_ptr& decorator_emitter, + std::shared_ptr decorator_emitter, const EmissionLocation& loc) : jit_emitter(target_emitter->h, target_emitter->host_isa_, target_emitter->exec_prc_, target_emitter->in_out_type_), m_target_emitter(target_emitter), - m_decorator_emitter(decorator_emitter), + m_decorator_emitter(std::move(decorator_emitter)), 
m_decorator_emit_loc(loc) { prepare_table(); } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp index 86421678a29011..19eba960b2d79a 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_emitters.cpp @@ -160,7 +160,7 @@ jit_loop_end_emitter::jit_loop_end_emitter(dnnl::impl::cpu::x64::jit_generator* ov::snippets::lowered::ExpressionPtr jit_loop_end_emitter::get_loop_begin_expr( const ov::snippets::lowered::ExpressionPtr& expr) { - const auto begin_expr = expr->get_input_port_connectors().back()->get_source().get_expr(); + auto begin_expr = expr->get_input_port_connectors().back()->get_source().get_expr(); OV_CPU_JIT_EMITTER_ASSERT(ov::is_type(begin_expr->get_node()), "LoopEnd expression must have th last port connector to LoopBegin"); return begin_expr; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp index e9744ae5098c5f..298733a70843b3 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_segfault_detector_emitter.cpp @@ -6,6 +6,8 @@ # include "jit_segfault_detector_emitter.hpp" +# include + # include "emitters/plugin/x64/utils.hpp" using namespace dnnl::impl::utils; @@ -29,7 +31,7 @@ jit_uni_segfault_detector_emitter::jit_uni_segfault_detector_emitter(dnnl::impl: m_target_emitter(target_emitter), is_target_use_load_emitter(is_load), is_target_use_store_emitter(is_store), - m_target_node_name(target_node_name) {} + m_target_node_name(std::move(target_node_name)) {} size_t jit_uni_segfault_detector_emitter::get_inputs_num() const { return 1; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp 
b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp index 6d4fdf738ab355..30d95ed6a2bf7a 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp @@ -22,9 +22,8 @@ BrgemmCopyBKernelConfig::BrgemmCopyBKernelConfig(const element::Type& src_dt, bool is_with_comp, bool is_transposed_B, dnnl_dim_t wei_N_blk) - : m_static_params(std::make_shared(src_dt, wei_dt, isa, is_with_comp, is_transposed_B, wei_N_blk)) { - m_hash = compute_hash(); -} + : m_static_params(std::make_shared(src_dt, wei_dt, isa, is_with_comp, is_transposed_B, wei_N_blk)), + m_hash(compute_hash()) {} bool BrgemmCopyBKernelConfig::is_completed() const { return !utils::one_of(0, m_N, m_K, m_copy_B_wei_stride, m_LDB) || is_empty(); diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_debug_emitter.hpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_debug_emitter.hpp index 4afc6ee1d7c5d7..5d6759e22c757e 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_debug_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_debug_emitter.hpp @@ -3,6 +3,8 @@ // #pragma once +#include + #include "jit_tpp_emitter.hpp" namespace ov { @@ -15,28 +17,29 @@ namespace intel_cpu { */ class DebugTppEmitter : public TppEmitter { public: - DebugTppEmitter(const ov::snippets::lowered::ExpressionPtr& expr, const std::shared_ptr& original) - : TppEmitter(*original), - m_original(original), - m_compiled_kernel(m_original->get_compiled_kernel_ptr()), - m_execute_function(m_original->get_execute_function_ptr()), - m_source_expr(expr) { - } + DebugTppEmitter(ov::snippets::lowered::ExpressionPtr expr, const std::shared_ptr& original) + : TppEmitter(*original), + m_original(original), + m_compiled_kernel(m_original->get_compiled_kernel_ptr()), + m_execute_function(m_original->get_execute_function_ptr()), + m_source_expr(std::move(expr)) {} - void 
validate_arguments(const std::vector &in, const std::vector &out) const override { + void validate_arguments(const std::vector& in, const std::vector& out) const override { m_original->validate_arguments(in, out); }; - size_t get_inputs_num() const override { return num_kernel_args - 1; } + size_t get_inputs_num() const override { + return num_kernel_args - 1; + } protected: - static void execute_kernel_unary(const DebugTppEmitter* emitter, void *in0, void *out0) { + static void execute_kernel_unary(const DebugTppEmitter* emitter, void* in0, void* out0) { OV_CPU_JIT_EMITTER_ASSERT(emitter && emitter->m_execute_function && emitter->m_compiled_kernel, "Unable to execute unary kernel"); // Note: put a breakpoint here and analyze all the necessary debug info in runtime std::cout << "Running unary DebugTPPEmitter for node with name " << emitter->m_source_expr->get_node()->get_friendly_name() << std::endl; - auto f = reinterpret_cast(emitter->m_execute_function); + auto f = reinterpret_cast(emitter->m_execute_function); f(emitter->m_compiled_kernel, in0, out0); } @@ -46,16 +49,19 @@ class DebugTppEmitter : public TppEmitter { // Note: put a breakpoint here and analyze all the necessary debug info in runtime std::cout << "Running binary DebugTPPEmitter for node with name " << emitter->m_source_expr->get_node()->get_friendly_name() << std::endl; - auto f = reinterpret_cast(emitter->m_execute_function); + auto f = reinterpret_cast(emitter->m_execute_function); f(emitter->m_compiled_kernel, in0, in1, out0); } const uintptr_t get_execute_function_ptr() const override { // Note: num_kernel_args accounts for both input and output args switch (num_kernel_args) { - case 2: return reinterpret_cast(execute_kernel_unary); - case 3: return reinterpret_cast(execute_kernel_binary); - default: OV_CPU_JIT_EMITTER_THROW("More than two arguments are not supported"); + case 2: + return reinterpret_cast(execute_kernel_unary); + case 3: + return reinterpret_cast(execute_kernel_binary); + 
default: + OV_CPU_JIT_EMITTER_THROW("More than two arguments are not supported"); } } @@ -64,11 +70,11 @@ class DebugTppEmitter : public TppEmitter { } private: - std::shared_ptr m_original {nullptr}; - uintptr_t m_compiled_kernel {0}; - uintptr_t m_execute_function {0}; - snippets::lowered::ExpressionPtr m_source_expr {nullptr}; + std::shared_ptr m_original{nullptr}; + uintptr_t m_compiled_kernel{0}; + uintptr_t m_execute_function{0}; + snippets::lowered::ExpressionPtr m_source_expr{nullptr}; }; -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/emitters/utils.cpp b/src/plugins/intel_cpu/src/emitters/utils.cpp index 9743ac4638cca4..81066b9ed48ccb 100644 --- a/src/plugins/intel_cpu/src/emitters/utils.cpp +++ b/src/plugins/intel_cpu/src/emitters/utils.cpp @@ -20,7 +20,7 @@ std::string jit_emitter_pretty_name(const std::string& pretty_func) { // GCC: void foo() [with T = {type}] // clang: void foo() [T = {type}] // MSVC: void __cdecl foo<{type}>(void) - SAFE_SYMBOL_FINDING(parenthesis, pretty_func.find("(")) + SAFE_SYMBOL_FINDING(parenthesis, pretty_func.find('(')) if (pretty_func[parenthesis - 1] == '>') { // To cover template on MSVC parenthesis--; size_t counter = 1; @@ -33,7 +33,7 @@ std::string jit_emitter_pretty_name(const std::string& pretty_func) { } } SAFE_SYMBOL_FINDING(end, pretty_func.substr(0, parenthesis).rfind("::")) - SAFE_SYMBOL_FINDING(begin, pretty_func.substr(0, end).rfind(" ")) + SAFE_SYMBOL_FINDING(begin, pretty_func.substr(0, end).rfind(' ')) begin++; #undef SAFE_SYMBOL_FINDING return end > begin ? 
pretty_func.substr(begin, end - begin) : pretty_func; diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index cd7944b7246ef9..01371d64b779a0 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -60,7 +60,7 @@ Graph::~Graph() { } template -void Graph::CreateGraph(NET& model, const GraphContext::CPtr context) { +void Graph::CreateGraph(NET& model, const GraphContext::CPtr& context) { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::intel_cpu_LT, "CreateGraph"); Init(model, context); @@ -70,7 +70,7 @@ void Graph::CreateGraph(NET& model, const GraphContext::CPtr context) { void Graph::CreateGraph(const std::vector& graphNodes, const std::vector& graphEdges, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, std::string name) { if (IsReady()) ForgetGraphData(); @@ -85,7 +85,7 @@ void Graph::CreateGraph(const std::vector& graphNodes, std::size_t parameter_index = 0; std::size_t result_index = 0; - for (auto node : graphNodes) { + for (const auto& node : graphNodes) { if ("Parameter" == node->getTypeStr()) { inputNodesMap[parameter_index] = node; parameter_index++; @@ -100,7 +100,7 @@ void Graph::CreateGraph(const std::vector& graphNodes, Activate(); } -template void Graph::CreateGraph(const std::shared_ptr&, const GraphContext::CPtr); +template void Graph::CreateGraph(const std::shared_ptr&, const GraphContext::CPtr&); void Graph::Replicate(const std::shared_ptr& model, const std::vector& inputConfigs, @@ -116,8 +116,8 @@ void Graph::Replicate(const std::shared_ptr& model, // Will be stored as fake output separately. 
std::deque> unusedOutputs; - auto getParentOutputPort = [](const std::shared_ptr childOp, - const std::shared_ptr parentOp, + auto getParentOutputPort = [](const std::shared_ptr& childOp, + const std::shared_ptr& parentOp, const size_t childInputPort) -> int { for (size_t parentPort = 0; parentPort < parentOp->get_output_size(); parentPort++) { if (childOp->input(childInputPort).get_tensor_ptr() == parentOp->output(parentPort).get_tensor_ptr()) { @@ -128,7 +128,7 @@ void Graph::Replicate(const std::shared_ptr& model, return -1; }; - auto createNode = [&](std::shared_ptr op) -> NodePtr { + auto createNode = [&](const std::shared_ptr& op) -> NodePtr { // special handling for Parameters and Results if (op->get_type_info() == op::v0::Parameter::get_type_info_static()) { auto input_index = model->get_parameter_index(ov::as_type_ptr(op)); @@ -313,7 +313,7 @@ static std::tuple, std::vector> ExtractExecutableNo } void Graph::Init(const std::shared_ptr& model, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const std::vector& inputConfigs, const std::vector& outputConfigs) { if (IsReady()) @@ -640,7 +640,7 @@ static std::unordered_set getUniqueLayerNames(const std::vector uniqueLayerNames; uniqueLayerNames.reserve(graphNodes.size()); - for (auto node : graphNodes) { + for (const auto& node : graphNodes) { uniqueLayerNames.insert(node->getName()); } @@ -704,13 +704,13 @@ void Graph::ResolveComplexInplaceConflicts() { if (portChildEdges.size() > 1) { if (auto modifyingNode = edge->modifiedInPlace()) { auto execIndex = modifyingNode->getExecIndex(); - for (auto pEdgePeer : portChildEdges) { + for (const auto& pEdgePeer : portChildEdges) { if (pEdgePeer == edge) continue; std::vector vecConsumers; pEdgePeer->collectConsumers(vecConsumers); - for (auto node : vecConsumers) { + for (const auto& node : vecConsumers) { if (node->getExecIndex() >= execIndex || one_of(node->getType(), Type::MemoryOutput, Type::Output)) { return true; @@ -731,7 +731,7 @@ void 
Graph::ResolveComplexInplaceConflicts() { } } -static inline bool isConstOutput(EdgePtr edge) { +static inline bool isConstOutput(const EdgePtr& edge) { return edge->getParent()->isConstant() && !edge->getChild()->isConstant(); } @@ -1506,7 +1506,7 @@ void Graph::SortTopologically() { int execIndexCnt = -1; std::function visit; - visit = [&execIndexCnt, &sorted, &visit](const NodePtr node) { + visit = [&execIndexCnt, &sorted, &visit](const NodePtr& node) { if (node->execIndex >= 0) return; // already visited @@ -1563,7 +1563,6 @@ void Graph::GetPerfData(std::vector& perfMap) const { [&](std::vector& perfMap, const NodePtr& node) { ov::ProfilingInfo pc; pc.node_name = node->getName(); - // pc.execution_index = i++; uint64_t avg_time = node->PerfCounter().avg(); pc.cpu_time = pc.real_time = std::chrono::microseconds(avg_time); pc.status = avg_time > 0 ? ov::ProfilingInfo::Status::EXECUTED : ov::ProfilingInfo::Status::NOT_RUN; @@ -1571,11 +1570,11 @@ void Graph::GetPerfData(std::vector& perfMap) const { pc.node_type = node->typeStr; perfMap.emplace_back(pc); - for (auto& fusedNode : node->fusedWith) { + for (const auto& fusedNode : node->fusedWith) { getPerfMapFor(perfMap, fusedNode); } - for (auto& mergedWith : node->mergedWith) { + for (const auto& mergedWith : node->mergedWith) { getPerfMapFor(perfMap, mergedWith); } }; @@ -1604,7 +1603,7 @@ void Graph::RemoveEdge(const EdgePtr& edge) { graphEdges.erase(std::remove(graphEdges.begin(), graphEdges.end(), edge), graphEdges.end()); } -void Graph::AddNode(NodePtr node) { +void Graph::AddNode(const NodePtr& node) { assert(node); assert(std::find(graphNodes.begin(), graphNodes.end(), node) == graphNodes.end()); @@ -1716,8 +1715,8 @@ void Graph::RemoveDroppedEdges() { graphEdges.end()); } -NodePtr Graph::InsertReorder(EdgePtr edge, - std::string layerName, +NodePtr Graph::InsertReorder(const EdgePtr& edge, + const std::string& layerName, const MemoryDesc& inDesc, const MemoryDesc& outDesc, bool isOptimized, @@ -1751,7 
+1750,7 @@ NodePtr Graph::InsertReorder(EdgePtr edge, return reorder; } -bool Graph::InsertNode(EdgePtr edge, NodePtr node, bool initNode) { +bool Graph::InsertNode(const EdgePtr& edge, const NodePtr& node, bool initNode) { auto oIndex = edge->getOutputNum(); auto iIndex = edge->getInputNum(); if (iIndex < 0 || oIndex < 0) @@ -1768,7 +1767,12 @@ bool Graph::InsertNode(EdgePtr edge, NodePtr node, bool initNode) { return InsertNode(edge->getParent(), edge->getChild(), node, iIndex, oIndex, initNode); } -bool Graph::InsertNode(NodePtr parent, NodePtr child, NodePtr node, int parentPort, int childPort, bool initNode) { +bool Graph::InsertNode(const NodePtr& parent, + const NodePtr& child, + const NodePtr& node, + int parentPort, + int childPort, + bool initNode) { CreateEdge(parent, node, parentPort, 0); CreateEdge(node, child, 0, childPort); AddNode(node); diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h index 244995203ded2a..b28d2983104682 100644 --- a/src/plugins/intel_cpu/src/graph.h +++ b/src/plugins/intel_cpu/src/graph.h @@ -65,11 +65,11 @@ class Graph { } template - void CreateGraph(NET& model, const GraphContext::CPtr context); + void CreateGraph(NET& model, const GraphContext::CPtr& context); void CreateGraph(const std::vector& graphNodes, const std::vector& graphEdges, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, std::string name); void PushInputData(const std::size_t& index, const ov::SoPtr& input); @@ -141,7 +141,7 @@ class Graph { void RemoveEdge(const EdgePtr& edge); void RemoveDroppedNodes(); void RemoveDroppedEdges(); - void AddNode(NodePtr node); + void AddNode(const NodePtr& node); void DropNode(const NodePtr& node); void DropDWConvNode(const NodePtr& node); @@ -166,8 +166,8 @@ class Graph { * pointer to the blob containing scales * @return pointer to the new Reorder node. 
*/ - NodePtr InsertReorder(EdgePtr edge, - std::string layerName, + NodePtr InsertReorder(const EdgePtr& edge, + const std::string& layerName, const MemoryDesc& inDesc, const MemoryDesc& outDesc, bool isOptimized = false, @@ -186,7 +186,7 @@ class Graph { * parameter that determines whether the node needs to be initialized * @return true in case of success, false otherwise. */ - bool InsertNode(EdgePtr edge, NodePtr node, bool initNode = false); + bool InsertNode(const EdgePtr& edge, const NodePtr& node, bool initNode = false); /** * @brief Insert Node between two specified nodes. @@ -206,7 +206,12 @@ class Graph { * parameter that determines whether the node needs to be initialized * @return true in case of success, false otherwise. */ - bool InsertNode(NodePtr parent, NodePtr child, NodePtr node, int parentPort, int childPort, bool initNode = false); + bool InsertNode(const NodePtr& parent, + const NodePtr& child, + const NodePtr& node, + int parentPort, + int childPort, + bool initNode = false); std::shared_ptr dump() const; @@ -224,7 +229,7 @@ class Graph { * Init graph using \p model, \p context, \p inputConfigs and \p outputConfigs */ void Init(const std::shared_ptr& model, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const std::vector& inputConfigs = {}, const std::vector& outputConfigs = {}); diff --git a/src/plugins/intel_cpu/src/graph_context.cpp b/src/plugins/intel_cpu/src/graph_context.cpp index 0112392cb54b45..e7dd513fa2f790 100644 --- a/src/plugins/intel_cpu/src/graph_context.cpp +++ b/src/plugins/intel_cpu/src/graph_context.cpp @@ -3,38 +3,40 @@ // #include "graph_context.h" -#include "dnnl_types.h" +#include + +#include "config.h" #include "memory_control.hpp" #include "nodes/memory.hpp" namespace ov { namespace intel_cpu { -GraphContext::GraphContext(const Config& config, +GraphContext::GraphContext(Config config, WeightsSharing::Ptr w_cache, bool isGraphQuantized, ov::threading::IStreamsExecutor::Ptr streamExecutor, 
std::shared_ptr sub_memory_manager) - : config(config), - weightsCache(std::move(w_cache)), - isGraphQuantizedFlag(isGraphQuantized), - streamExecutor(streamExecutor), - subMemoryManager(std::move(sub_memory_manager)), - memoryStatesRegister(std::make_shared()), - networkMemoryControl(std::make_shared()) { - rtParamsCache = std::make_shared(config.rtCacheCapacity); - // primitive/executors can be shared across sub-stream - // but scratch pad cannot be shared. - numNumaNodes = 1; + : m_config(std::move(config)), + m_weightsCache(std::move(w_cache)), + m_rtParamsCache(std::make_shared(m_config.rtCacheCapacity)), + m_isGraphQuantizedFlag(isGraphQuantized), + m_streamExecutor(std::move(streamExecutor)), + m_subMemoryManager(std::move(sub_memory_manager)), + m_numNumaNodes(1), + m_memoryStatesRegister(std::make_shared()), + m_networkMemoryControl(std::make_shared()) { if (streamExecutor) { - cpuStreamExecutor = std::dynamic_pointer_cast(streamExecutor); - numaNodeId = cpuStreamExecutor ? cpuStreamExecutor->get_numa_node_id() : 0; + m_cpuStreamExecutor = std::dynamic_pointer_cast(streamExecutor); + m_numaNodeId = m_cpuStreamExecutor ? m_cpuStreamExecutor->get_numa_node_id() : 0; auto nNumaNodes = get_num_numa_nodes(); - if (numNumaNodes < nNumaNodes) - numNumaNodes = nNumaNodes; + if (m_numNumaNodes < nNumaNodes) + m_numNumaNodes = nNumaNodes; } - for (int i = 0; i < numNumaNodes; i++) { - rtScratchPads.push_back(std::make_shared(getEngine(), i)); + // primitive/executors can be shared across sub-stream + // but scratch pad cannot be shared. 
+ for (int i = 0; i < m_numNumaNodes; i++) { + m_rtScratchPads.push_back(std::make_shared(getEngine(), i)); } } diff --git a/src/plugins/intel_cpu/src/graph_context.h b/src/plugins/intel_cpu/src/graph_context.h index 1b7a0f0578734d..8389bc389505fe 100644 --- a/src/plugins/intel_cpu/src/graph_context.h +++ b/src/plugins/intel_cpu/src/graph_context.h @@ -25,81 +25,81 @@ class GraphContext { typedef std::shared_ptr Ptr; typedef std::shared_ptr CPtr; - GraphContext(const Config& config, + GraphContext(Config config, WeightsSharing::Ptr w_cache, bool isGraphQuantized, ov::threading::IStreamsExecutor::Ptr streamExecutor = nullptr, std::shared_ptr sub_memory_manager = nullptr); const Config& getConfig() const { - return config; + return m_config; } WeightsSharing::Ptr getWeightsCache() const { - return weightsCache; + return m_weightsCache; } MultiCachePtr getParamsCache() const { - return rtParamsCache; + return m_rtParamsCache; } DnnlScratchPadPtr getScratchPad() const { - return rtScratchPads[numaNodeId]; + return m_rtScratchPads[m_numaNodeId]; } const std::vector& getScratchPads() const { - return rtScratchPads; + return m_rtScratchPads; } static const dnnl::engine& getEngine(); bool isGraphQuantized() const { - return isGraphQuantizedFlag; + return m_isGraphQuantizedFlag; } ov::threading::CPUStreamsExecutor::Ptr getCPUStreamExecutor() const { - return cpuStreamExecutor; + return m_cpuStreamExecutor; } std::shared_ptr getSubMemory() const { - return subMemoryManager; + return m_subMemoryManager; } int getNumNumaNodes() const { - return numNumaNodes; + return m_numNumaNodes; } const std::shared_ptr& getMemoryStatesRegister() const { - return memoryStatesRegister; + return m_memoryStatesRegister; } const std::shared_ptr& getNetworkMemoryControl() const { - return networkMemoryControl; + return m_networkMemoryControl; } private: - Config config; // network-level config + Config m_config; // network-level config - WeightsSharing::Ptr weightsCache; // per NUMA node caches 
for sharing weights data + WeightsSharing::Ptr m_weightsCache; // per NUMA node caches for sharing weights data - MultiCachePtr rtParamsCache; // primitive cache - DnnlScratchPadPtr rtScratchPad; // scratch pad + MultiCachePtr m_rtParamsCache; // primitive cache + DnnlScratchPadPtr m_rtScratchPad; // scratch pad - bool isGraphQuantizedFlag = false; + bool m_isGraphQuantizedFlag = false; - std::vector rtScratchPads; // scratch pad (each sub-stream has its own copy) + std::vector m_rtScratchPads; // scratch pad (each sub-stream has its own copy) - ov::threading::IStreamsExecutor::Ptr streamExecutor; // stream executor for current graph + ov::threading::IStreamsExecutor::Ptr m_streamExecutor; // stream executor for current graph - ov::threading::CPUStreamsExecutor::Ptr cpuStreamExecutor; // cpu stream executor for current graph + ov::threading::CPUStreamsExecutor::Ptr m_cpuStreamExecutor; // cpu stream executor for current graph - std::shared_ptr subMemoryManager; + std::shared_ptr m_subMemoryManager; - int numNumaNodes = 1; - int numaNodeId = 0; + int m_numNumaNodes = 1; + int m_numaNodeId = 0; - std::shared_ptr memoryStatesRegister; - std::shared_ptr networkMemoryControl; + std::shared_ptr m_memoryStatesRegister; + std::shared_ptr m_networkMemoryControl; }; } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/graph_dumper.cpp b/src/plugins/intel_cpu/src/graph_dumper.cpp index 6efa78f38e1eb5..73c99a52156ad6 100644 --- a/src/plugins/intel_cpu/src/graph_dumper.cpp +++ b/src/plugins/intel_cpu/src/graph_dumper.cpp @@ -307,6 +307,7 @@ void summary_perf(const Graph& graph) { { std::cout << " perf_by_type:" << std::endl; std::vector> A; + A.reserve(perf_by_type.size()); for (auto& it : perf_by_type) A.push_back(it); sort(A.begin(), A.end(), [](std::pair& a, std::pair& b) { @@ -326,6 +327,7 @@ void summary_perf(const Graph& graph) { { std::cout << " perf_by_node:" << std::endl; std::vector> A; + A.reserve(perf_by_node.size()); for (auto& it : perf_by_node) 
A.push_back(it); sort(A.begin(), A.end(), [](std::pair& a, std::pair& b) { @@ -383,7 +385,7 @@ void average_counters(const Graph& graph) { return std::chrono::microseconds(value).count() / 1000.0; }; - auto printAverageCounter = [&toMs, &file](NodePtr node) { + auto printAverageCounter = [&toMs, &file](const NodePtr& node) { const uint64_t avg = node->PerfCounter().avg(); const std::string status = avg > 0 ? "EXECUTED" : "NOT_RUN"; const auto cpuTime = toMs(avg); diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp index cb1324e7435703..95ba27f3fa0828 100644 --- a/src/plugins/intel_cpu/src/graph_optimizer.cpp +++ b/src/plugins/intel_cpu/src/graph_optimizer.cpp @@ -208,7 +208,7 @@ void GraphOptimizer::ApplyImplSpecificGraphOptimizations(Graph& graph) { void GraphOptimizer::FuseConvMatmulFCDeconvAndDQScales(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isDQScaleGraphPattern = [](NodePtr node) { + auto isDQScaleGraphPattern = [](const NodePtr& node) { if (node->getType() != Type::Eltwise || node->getAlgorithm() != Algorithm::EltwiseMultiply) { return false; } @@ -225,7 +225,7 @@ void GraphOptimizer::FuseConvMatmulFCDeconvAndDQScales(Graph& graph) { return (parentNode->getParentEdges().size() == 2); }; - auto scaleDimsCheck = [](NodePtr node, NodePtr scales) { + auto scaleDimsCheck = [](const NodePtr& node, const NodePtr& scales) { const auto nodeOutDims = node->getOutputShapeAtPort(0).getDims(); const auto channelAxis = node->getFusingAxis(); auto OC = nodeOutDims[channelAxis]; @@ -249,7 +249,7 @@ void GraphOptimizer::FuseConvMatmulFCDeconvAndDQScales(Graph& graph) { return true; }; - auto initializeDeQuantizedScales = [](NodePtr node, NodePtr scales) { + auto initializeDeQuantizedScales = [](const NodePtr& node, const NodePtr& scales) { auto scalesConstant = dynamic_cast(scales.get()); if (scalesConstant == nullptr) OPENVINO_THROW("Cannot cast to Input node"); @@ -458,7 +458,7 @@ void 
GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph& graph) { void GraphOptimizer::FuseDeconvolutionAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { if (node->getType() != Type::Deconvolution || node->getChildEdges().size() != 1) return false; const auto deconv = std::dynamic_pointer_cast(node); @@ -771,13 +771,13 @@ void GraphOptimizer::FuseFCAndTransposeOnWeights(Graph& graph) { // reordering in FC node auto& graphNodes = graph.GetNodes(); - auto isSuitablePattern = [](NodePtr parent) { + auto isSuitablePattern = [](const NodePtr& parent) { bool res = true && parent->getType() == Type::Transpose && parent->getChildEdges().size() == 1 && parent->getChildEdgeAt(0)->getChild()->getType() == Type::FullyConnected && parent->isConstant(); return res; }; - for (auto parent : graphNodes) { + for (const auto& parent : graphNodes) { if (isSuitablePattern(parent)) { CPU_GRAPH_OPTIMIZER_SCOPE(FuseFCAndTransposeOnWeights); auto fcNode = std::dynamic_pointer_cast(parent->getChildEdgeAt(0)->getChild()); @@ -791,7 +791,7 @@ void GraphOptimizer::FuseFCAndTransposeOnWeights(Graph& graph) { void GraphOptimizer::FuseConvolutionAndZeroPoints(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableConvNode = [](NodePtr node) { + auto isSuitableConvNode = [](const NodePtr& node) { bool retVal = false; if (node->getType() == Type::Convolution) { if (auto convNode = std::dynamic_pointer_cast(node)) { @@ -805,7 +805,7 @@ void GraphOptimizer::FuseConvolutionAndZeroPoints(Graph& graph) { return retVal; }; - auto initializeInputZeroPoints = [](NodePtr node, NodePtr parent0, NodePtr parent1) { + auto initializeInputZeroPoints = [](const NodePtr& node, const NodePtr& parent0, const NodePtr& parent1) { auto* convNode = dynamic_cast(node.get()); if (convNode == nullptr) OPENVINO_THROW("Cannot get convolution node ", node->getName()); @@ -880,7 +880,7 
@@ void GraphOptimizer::FuseConvolutionAndZeroPoints(Graph& graph) { return true; }; - auto initializeOutputCompensation = [](NodePtr node) { + auto initializeOutputCompensation = [](const NodePtr& node) { auto* convNode = dynamic_cast(node.get()); if (convNode == nullptr) OPENVINO_THROW("Cannot get convolution node ", node->getName()); @@ -969,7 +969,7 @@ void GraphOptimizer::FuseConvolutionAndZeroPoints(Graph& graph) { void GraphOptimizer::FuseFullyConnectedAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return node->getType() == Type::FullyConnected && node->getChildEdges().size() == 1; }; @@ -1058,7 +1058,7 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { return conv->getWeightDims()[weightRank - 1] == 1 && conv->getWeightDims()[weightRank - 2] == 1; }; - auto isSuitableParentConvolution = [&](NodePtr node) { + auto isSuitableParentConvolution = [&](const NodePtr& node) { if (node->isDropped()) return false; @@ -1214,7 +1214,7 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { void GraphOptimizer::FuseConvolutionAndSimpleOperationThroughMaxPool(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return (node->getType() == Type::Convolution || node->getType() == Type::BinaryConvolution) && node->getChildEdges().size() == 1 && node->getOriginalOutputPrecisionAtPort(0) == ov::element::f32; }; @@ -1269,7 +1269,7 @@ void GraphOptimizer::FuseConvolutionAndSimpleOperationThroughMaxPool(Graph& grap void GraphOptimizer::FuseConvolutionAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return (node->getType() == Type::Convolution || node->getType() == 
Type::BinaryConvolution) && node->getChildEdges().size() == 1; }; @@ -1312,7 +1312,7 @@ void GraphOptimizer::FuseConvolutionAndSimpleOperation(Graph& graph) { void GraphOptimizer::FusePoolingAndFakeQuantize(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { if (node->getType() == Type::Pooling) { if (!one_of(node->getOriginalInputPrecisionAtPort(0), ov::element::u8, ov::element::i8)) return false; @@ -1321,7 +1321,7 @@ void GraphOptimizer::FusePoolingAndFakeQuantize(Graph& graph) { return false; }; - auto isSuitableChildNode = [](NodePtr node) { + auto isSuitableChildNode = [](const NodePtr& node) { return node->getType() == Type::FakeQuantize && node->getAlgorithm() != Algorithm::FQBinarization; }; @@ -1426,7 +1426,7 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) auto& graphNodes = graph.GetNodes(); - auto isFusingSupported = [&](NodePtr conv, NodePtr child) { + auto isFusingSupported = [&](const NodePtr& conv, const NodePtr& child) { return child->getType() == Type::Eltwise && DnnlExtensionUtils::isUnarySupportedAsPostOp(child->getAlgorithm()); }; @@ -1449,7 +1449,7 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) bool isSuitableParent2 = parent2->getType() == Type::Convolution || parent2->getType() == Type::BinaryConvolution; - auto canFuseSum = [](node::BinaryConvolution* binConv, NodePtr fuseCandidate) { + auto canFuseSum = [](node::BinaryConvolution* binConv, const NodePtr& fuseCandidate) { if (binConv->getImplType() == impl_desc_type::ref) return false; @@ -1549,7 +1549,7 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) } if (peerNode->isConstant()) continue; - auto sum = graphNode; + const auto& sum = graphNode; if (mergedConv->isConstant() && !sum->isConstant()) continue; @@ -1661,7 +1661,7 @@ void 
GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) void GraphOptimizer::FuseMVNAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return (node->getType() == Type::MVN) && (node->getChildEdges().size() == 1); }; @@ -1701,11 +1701,11 @@ void GraphOptimizer::FuseMVNAndSimpleOperation(Graph& graph) { void GraphOptimizer::FuseInterpolateAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return node->getType() == Type::Interpolate && node->getChildEdges().size() == 1; }; - auto isSuitableChildNode = [&](NodePtr parentNode, NodePtr childNode) { + auto isSuitableChildNode = [&](const NodePtr& parentNode, const NodePtr& childNode) { // Avoid cycle dependencies for (auto& childParentEdge : childNode->getParentEdges()) { for (auto& parentParentEdge : parentNode->getParentEdges()) { @@ -1760,7 +1760,7 @@ void GraphOptimizer::FuseInterpolateAndSimpleOperation(Graph& graph) { void GraphOptimizer::FuseNormalizeL2AndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return node->getType() == Type::NormalizeL2 && node->getChildEdges().size() == 1; }; @@ -1800,7 +1800,7 @@ void GraphOptimizer::FuseNormalizeL2AndSimpleOperation(Graph& graph) { void GraphOptimizer::FuseReduceAndSimpleOperation(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return node->getType() == Type::Reduce && node->getChildEdges().size() == 1; }; @@ -1842,11 +1842,11 @@ void GraphOptimizer::FuseReduceAndSimpleOperation(Graph& graph) { void GraphOptimizer::FuseEltwiseAndSimple(Graph& graph) { auto& graphNodes = 
graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { return node->getType() == Type::Eltwise && node->getChildEdges().size() == 1; }; - auto isSuitableChildNode = [&](NodePtr parentNode, NodePtr childNode) { + auto isSuitableChildNode = [&](const NodePtr& parentNode, const NodePtr& childNode) { if (parentNode->isConstant() && !childNode->isConstant()) return false; for (auto& childParentEdge : childNode->getParentEdges()) { @@ -1972,7 +1972,7 @@ void GraphOptimizer::FuseEltwiseAndSimple(Graph& graph) { } void GraphOptimizer::ShareReorders(Graph& graph) { - auto getSuitableReorder = [](NodePtr node) -> Reorder* { + auto getSuitableReorder = [](const NodePtr& node) -> Reorder* { if (node->getType() != Type::Reorder) return nullptr; Reorder* reorder = dynamic_cast(node.get()); @@ -1981,7 +1981,7 @@ void GraphOptimizer::ShareReorders(Graph& graph) { // inplace children cannot be safely shared with each other auto reorderConsumers = reorder->getChildEdgesAtPort(0); - if (std::any_of(reorderConsumers.begin(), reorderConsumers.end(), [](EdgePtr e) { + if (std::any_of(reorderConsumers.begin(), reorderConsumers.end(), [](const EdgePtr& e) { return e->inPlace(Edge::LOOK_DOWN); })) return nullptr; @@ -2014,13 +2014,13 @@ void GraphOptimizer::ShareReorders(Graph& graph) { DEBUG_LOG(node->getName(), " is shared by ", siblingNode->getName()); // siblingReorder can share output with current reorder - for (auto pwEdge : siblingReorder->getParentEdges()) { + for (const auto& pwEdge : siblingReorder->getParentEdges()) { auto pEdge = pwEdge.lock(); if (pEdge) graph.RemoveEdge(pEdge); } - for (auto pwEdge : siblingReorder->getChildEdges()) { + for (const auto& pwEdge : siblingReorder->getChildEdges()) { auto pEdge = pwEdge.lock(); if (pEdge) { graph.RemoveEdge(pEdge); @@ -2079,16 +2079,16 @@ void GraphOptimizer::DropDoubleReorders(Graph& graph) { void GraphOptimizer::FuseClampAndFakeQuantize(Graph& graph) { auto& 
graphNodes = graph.GetNodes(); - auto isSuitableClampNode = [](NodePtr node) { + auto isSuitableClampNode = [](const NodePtr& node) { return node->getType() == Type::Eltwise && node->getChildEdges().size() == 1 && node->getAlgorithm() == Algorithm::EltwiseClamp; }; - auto isSuitableFakeQuantizeNode = [](NodePtr node) { + auto isSuitableFakeQuantizeNode = [](const NodePtr& node) { return node->getType() == Type::FakeQuantize && node->getAlgorithm() != Algorithm::FQBinarization; }; - auto fuseClampAndFakeQuantizeNodes = [](NodePtr parent, NodePtr child) { + auto fuseClampAndFakeQuantizeNodes = [](const NodePtr& parent, const NodePtr& child) { auto* eltwiseNode = dynamic_cast(parent.get()); if (eltwiseNode == nullptr) OPENVINO_THROW("Cannot cast ", parent->getName(), " to Eltwise node"); @@ -2393,7 +2393,7 @@ void GraphOptimizer::mergeTransposeReshapeReorder(Graph& graph, // hold references to all children before dropping reorder_node std::vector> reorderChildren; - for (auto ccEdge : childNode->getChildEdgesAtPort(0)) + for (const auto& ccEdge : childNode->getChildEdgesAtPort(0)) reorderChildren.emplace_back(ccEdge->getChild(), ccEdge->getOutputNum()); // detach nodes from graph by remove all of their edges @@ -2503,9 +2503,9 @@ void GraphOptimizer::mergeTransposeReshapeReorder(Graph& graph, void GraphOptimizer::MergeTransposeAndReorder(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableTranspose = [](NodePtr node) { + auto isSuitableTranspose = [](const NodePtr& node) { // WA: to avoid broken memory pointer for conv + sum - auto prevNodeIsConvSum = [](NodePtr node) -> bool { + auto prevNodeIsConvSum = [](const NodePtr& node) -> bool { const auto parent = node->getParentEdgeAt(0)->getParent(); if (parent->getType() == Type::Convolution) { for (const auto& fusedNode : parent->getFusedWith()) { @@ -2526,7 +2526,7 @@ void GraphOptimizer::MergeTransposeAndReorder(Graph& graph) { && !prevNodeIsConvSum(node); }; - auto isSuitableReshape = [](NodePtr 
node) { + auto isSuitableReshape = [](const NodePtr& node) { if (node->getChildEdges().size() != 1 || node->getOutputShapeAtPort(0).isDynamic() || node->getInputShapeAtPort(0).isDynamic()) return false; @@ -2547,13 +2547,13 @@ void GraphOptimizer::MergeTransposeAndReorder(Graph& graph) { return mismatchCount == 1; }; - auto isSuitableReorder = [](NodePtr node) { + auto isSuitableReorder = [](const NodePtr& node) { return node->getType() == Type::Reorder && !node->isDynamicNode(); // TODO [DS]: enable for dynamic shapes when inPlace in the dynamic case is // available (CVS-74863) }; - auto updateOrder = [](const VectorDims& originalOrder, NodePtr reshape) { + auto updateOrder = [](const VectorDims& originalOrder, const NodePtr& reshape) { if (!reshape) return originalOrder; @@ -2653,11 +2653,11 @@ void GraphOptimizer::MergeTransposeAndReorder(Graph& graph) { void GraphOptimizer::MergeReorderAndTranspose(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableTranspose = [](NodePtr node) { + auto isSuitableTranspose = [](const NodePtr& node) { return node->getType() == Type::Transpose && node->getChildEdges().size() == 1 && !node->isDynamicNode(); }; - auto isSuitableReshape = [](NodePtr node) { + auto isSuitableReshape = [](const NodePtr& node) { if (node->getChildEdges().size() != 1 || node->getOutputShapeAtPort(0).isDynamic() || node->getInputShapeAtPort(0).isDynamic()) return false; @@ -2678,11 +2678,11 @@ void GraphOptimizer::MergeReorderAndTranspose(Graph& graph) { return mismatchCount == 1; }; - auto isSuitableReorder = [](NodePtr node) { + auto isSuitableReorder = [](const NodePtr& node) { return node->getType() == Type::Reorder && node->getChildEdges().size() == 1 && !node->isDynamicNode(); }; - auto updateOrder = [](const VectorDims& originalOrder, NodePtr reshape) { + auto updateOrder = [](const VectorDims& originalOrder, const NodePtr& reshape) { if (!reshape) return originalOrder; @@ -2780,7 +2780,7 @@ void 
GraphOptimizer::MergeReorderAndTranspose(Graph& graph) { void GraphOptimizer::reshapeRnnSeq(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr node) { + auto isSuitableParentNode = [](const NodePtr& node) { if (node->type != Type::RNNSeq) return false; auto rnnNode = std::dynamic_pointer_cast(node); @@ -2838,7 +2838,7 @@ So Convert is redundant." void GraphOptimizer::RemoveSameConvert(Graph& graph) { auto& graphNodes = graph.GetNodes(); - auto isSuitableParentNode = [](NodePtr parentNode) { + auto isSuitableParentNode = [](const NodePtr& parentNode) { return parentNode->getType() == Type::Convert && (parentNode->getOriginalOutputPrecisionAtPort(0) == parentNode->getOriginalInputPrecisionAtPort(0)); }; @@ -3148,7 +3148,7 @@ void GraphOptimizer::DropRedundantMemoryOutput(Graph& graph) { parentEdges.push_back(parentEdge); graph.CreateEdge(parent, memInputSingle, inputNum, parentEdge->getOutputNum()); } - for (auto parentEdge : parentEdges) { + for (const auto& parentEdge : parentEdges) { graph.RemoveEdge(parentEdge); } } diff --git a/src/plugins/intel_cpu/src/infer_request.cpp b/src/plugins/intel_cpu/src/infer_request.cpp index 023312ab5256d3..d3b0b4c534be2a 100644 --- a/src/plugins/intel_cpu/src/infer_request.cpp +++ b/src/plugins/intel_cpu/src/infer_request.cpp @@ -66,7 +66,7 @@ void SyncInferRequest::redefine_memory_for_input_nodes(Graph& graph) { void SyncInferRequest::update_external_tensor_ptrs() { // Update it due to batched_tensors case will update input tensor - for (auto input : m_input_ports_map) { + for (const auto& input : m_input_ports_map) { if (m_input_external_ptr.find(input.first) != m_input_external_ptr.end()) { auto tensor = get_tensor(input.second); m_input_external_ptr[input.first] = tensor; @@ -294,7 +294,7 @@ std::vector> SyncInferRequest::query_state() const if (m_asyncRequest->m_has_sub_infers) { auto requests = m_asyncRequest->getSubInferRequest(); std::vector> states; - for (auto request : 
requests) { + for (const auto& request : requests) { auto cur = request->query_state(); states.insert(states.end(), cur.begin(), cur.end()); } diff --git a/src/plugins/intel_cpu/src/memory_control.cpp b/src/plugins/intel_cpu/src/memory_control.cpp index 7b4e5e7ca0b973..757e3659c076d4 100644 --- a/src/plugins/intel_cpu/src/memory_control.cpp +++ b/src/plugins/intel_cpu/src/memory_control.cpp @@ -5,6 +5,7 @@ #include "memory_control.hpp" #include +#include #include "node.h" #include "openvino/runtime/memory_solver.hpp" @@ -188,7 +189,7 @@ class MemoryManageNonOverlapingSets : public IMemoryManager { } } } - m_boxes.emplace_back(std::move(box)); + m_boxes.emplace_back(box); } const MemoryControl::MemoryBlockMap& lastSolution() override { @@ -429,4 +430,4 @@ void NetworkMemoryControl::releaseMemory() { } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp index 914a52b1b9f8d3..f946282b9ad194 100644 --- a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.cpp @@ -115,7 +115,7 @@ std::shared_ptr MemoryDescUtils::makeEmptyDesc() { return emptyDesc; } -std::shared_ptr MemoryDescUtils::makeEmptyMemory(const GraphContext::CPtr context) { +std::shared_ptr MemoryDescUtils::makeEmptyMemory(const GraphContext::CPtr& context) { return std::make_shared(context->getEngine(), makeEmptyDesc(), nullptr); } diff --git a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h index 8bdd46e1d909b6..e548b779fe2f3f 100644 --- a/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h +++ b/src/plugins/intel_cpu/src/memory_desc/cpu_memory_desc_utils.h @@ -73,7 +73,7 @@ class MemoryDescUtils { * @return empty memory descriptor */ static std::shared_ptr 
makeEmptyDesc(); - static std::shared_ptr makeEmptyMemory(const GraphContext::CPtr context); + static std::shared_ptr makeEmptyMemory(const GraphContext::CPtr& context); /** * @brief Makes a static dummy shape where all undefined values are replaced with the smallest value between the diff --git a/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp b/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp index b9d8a1c555dd3f..69b070e165df8b 100644 --- a/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp @@ -235,7 +235,7 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(const Shape& shape, if (format == memory::format_tag::any || format == memory::format_tag::undef) OPENVINO_THROW("Unexpected: Can't create dnnl::desc with any or undef format"); - const auto dims = shape.getDims(); + const auto& dims = shape.getDims(); if (format == memory::format_tag::x && shape.getRank() == 0) { desc = dnnl::memory::desc(dnnl::memory::dims(1, 1), dataType, format); } else { @@ -456,22 +456,22 @@ static dnnl::memory::desc cloneDescWithNewDims(const dnnl::memory::desc& desc, auto mklDims = DnnlExtensionUtils::convertToDnnlDims(dims); const auto offsetPadding = desc.get()->offset0; - dnnl::memory::desc newMklDesc = desc; + dnnl::memory::desc clonedDesc(DnnlExtensionUtils::clone_desc(desc.get())); - array_copy(newMklDesc.get()->dims, mklDims.data(), mklDims.size()); + array_copy(clonedDesc.get()->dims, mklDims.data(), mklDims.size()); dnnl::memory::dims perm(convert_to_vector(order.data(), mklDims.size())); - auto innerBlks = newMklDesc.get_inner_blks(); - auto innerIdxs = newMklDesc.get_inner_idxs(); + auto innerBlks = clonedDesc.get_inner_blks(); + auto innerIdxs = clonedDesc.get_inner_idxs(); - dnnl::impl::memory_desc_t& newCdesc = *newMklDesc.get(); - auto retCode = dnnl::impl::fill_blocked(newCdesc, perm, innerBlks, innerIdxs); + auto retCode = 
dnnl::impl::fill_blocked(*clonedDesc.get(), perm, innerBlks, innerIdxs); if (retCode != dnnl::impl::status::success) { OPENVINO_THROW("Can not clone DnnlBlockedMemoryDesc with dims: ", dims2str(dims)); } // dnnl::impl::fill_blocked always set offset0 to 0 // so we need to restore actual value - newCdesc.offset0 = offsetPadding; - return newMklDesc; + clonedDesc.get()->offset0 = offsetPadding; + + return clonedDesc; } MemoryDescPtr DnnlBlockedMemoryDesc::cloneWithNewDimsImp(const VectorDims& dims) const { diff --git a/src/plugins/intel_cpu/src/memory_state.cpp b/src/plugins/intel_cpu/src/memory_state.cpp index 6db8c1705108cc..570cddffc7daa1 100644 --- a/src/plugins/intel_cpu/src/memory_state.cpp +++ b/src/plugins/intel_cpu/src/memory_state.cpp @@ -6,6 +6,8 @@ #include +#include + #include "cpu_memory.h" #include "cpu_tensor.h" #include "dnnl_extension_utils.h" @@ -21,9 +23,9 @@ using namespace ov::Extensions::Cpu::XARCH; namespace ov { namespace intel_cpu { -VariableStateBase::VariableStateBase(const std::string& name, const MemoryDescPtr& external_desc) +VariableStateBase::VariableStateBase(const std::string& name, MemoryDescPtr external_desc) : IVariableState{name}, - m_external_desc{external_desc} {} + m_external_desc{std::move(external_desc)} {} MemoryDescPtr VariableStateBase::to_static(const MemoryDescPtr& desc) { if (!desc->isDefined()) { @@ -165,12 +167,12 @@ MemoryPtr VariableStateDoubleBuffer::internal_state_mem() const { } VariableStateSingleBuffer::VariableStateSingleBuffer(const std::string& name, - const MemoryPtr& external_buffer, - const MemoryDescPtr& external_desc) - : VariableStateBase(name, external_desc) { - OPENVINO_ASSERT(external_buffer); - m_internal_mem = external_buffer; - m_internal_desc = m_internal_mem->getDescPtr(); + MemoryPtr external_buffer, + MemoryDescPtr external_desc) + : VariableStateBase(name, std::move(external_desc)), + m_internal_mem(std::move(external_buffer)), + m_internal_desc(m_internal_mem->getDescPtr()) { + 
OPENVINO_ASSERT(m_internal_mem); auto&& shape = m_internal_desc->getShape(); if (shape.isStatic()) { @@ -208,11 +210,11 @@ void VariableStateSingleBuffer::commit_impl() { } VariableStateKVcache::VariableStateKVcache(const std::string& name, - const MemoryDescPtr& external_desc, - const BlockedMemoryDescPtr& dense_internal_desc) - : VariableStateBase(name, external_desc), - m_dense_internal_desc(dense_internal_desc) { - auto&& shape = external_desc->getShape(); + MemoryDescPtr external_desc, + BlockedMemoryDescPtr dense_internal_desc) + : VariableStateBase(name, std::move(external_desc)), + m_dense_internal_desc(std::move(dense_internal_desc)) { + auto&& shape = get_external_desc()->getShape(); OPENVINO_ASSERT(shape.isDynamic(), "VariableStateKVcache is unexpectedly initalized with a static tensor"); } diff --git a/src/plugins/intel_cpu/src/memory_state.h b/src/plugins/intel_cpu/src/memory_state.h index 5af05f486650a7..d8de35618952d6 100644 --- a/src/plugins/intel_cpu/src/memory_state.h +++ b/src/plugins/intel_cpu/src/memory_state.h @@ -27,7 +27,7 @@ class IVariableState : public ov::IVariableState { class VariableStateBase : public IVariableState { public: - VariableStateBase(const std::string& name, const MemoryDescPtr& external_desc); + VariableStateBase(const std::string& name, MemoryDescPtr external_desc); // ov::IVariableState void set_state(const ov::SoPtr& state) override final; // NOLINT @@ -96,9 +96,7 @@ class VariableStateDoubleBuffer : public VariableStateBase { class VariableStateSingleBuffer : public VariableStateBase { public: - VariableStateSingleBuffer(const std::string& name, - const MemoryPtr& external_buffer, - const MemoryDescPtr& external_desc); + VariableStateSingleBuffer(const std::string& name, MemoryPtr external_buffer, MemoryDescPtr external_desc); MemoryPtr input_mem() override; MemoryPtr output_mem() override; @@ -111,15 +109,15 @@ class VariableStateSingleBuffer : public VariableStateBase { MemoryPtr internal_state_mem() const override; 
private: - MemoryDescPtr m_internal_desc; // mem desc required by the graph internal tensor MemoryPtr m_internal_mem; + MemoryDescPtr m_internal_desc; // mem desc required by the graph internal tensor }; class VariableStateKVcache : public VariableStateBase { public: VariableStateKVcache(const std::string& name, - const MemoryDescPtr& external_desc, - const BlockedMemoryDescPtr& dense_internal_desc); + MemoryDescPtr external_desc, + BlockedMemoryDescPtr dense_internal_desc); // ov::IVariableState ov::SoPtr get_state() const override; diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index d8616d879a2d43..13250bfabd2e10 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "cpu_types.h" @@ -48,15 +49,13 @@ Node::NodesFactory& Node::factory() { return factoryInstance; } -Node::Node(const std::shared_ptr& op, - const GraphContext::CPtr ctx, - const ShapeInferFactory& shapeInferFactory) +Node::Node(const std::shared_ptr& op, GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory) : selectedPrimitiveDescriptorIndex(-1), constant(ConstantType::NoConst), - context(ctx), + context(std::move(ctx)), algorithm(Algorithm::Default), fusingPort(-1), - engine(ctx->getEngine()), + engine(context->getEngine()), name(op->get_friendly_name()), typeStr(op->get_type_name()), type(TypeFromName(op->get_type_name())), @@ -177,7 +176,7 @@ Node::Node(const std::string& type, std::vector inputPrecisions, std::vector outputPrecisions, const std::string& name, - const GraphContext::CPtr ctx) + const GraphContext::CPtr& ctx) : inputShapes(std::move(inShapes)), outputShapes(std::move(outShapes)), selectedPrimitiveDescriptorIndex(-1), @@ -205,7 +204,7 @@ void Node::addEdge(const EdgePtr& edge) { } void Node::remove() { - auto drop = [](std::vector edges) { + auto drop = [](const std::vector& edges) { for (auto& edge : edges) { auto edgePtr = 
edge.lock(); if (!edgePtr) @@ -594,7 +593,7 @@ std::string Node::getPrimitiveDescriptorType() const { std::string str_type; - auto add_type = [&](std::string t) { + auto add_type = [&](const std::string& t) { if (!str_type.empty() && t.c_str()[0] != '_') str_type += "_"; str_type += t; @@ -798,7 +797,7 @@ void Node::updateDynamicParams() { } } -void Node::execute(const dnnl::stream strm, int numaId) { +void Node::execute(const dnnl::stream& strm, int numaId) { if (isDynamicNode()) { return executeDynamic(strm, numaId); } else { @@ -806,12 +805,12 @@ void Node::execute(const dnnl::stream strm, int numaId) { } } -void Node::executeStatic(const dnnl::stream strm, int numaId) { +void Node::executeStatic(const dnnl::stream& strm, int numaId) { toNumaNode(numaId); execute(strm); } -void Node::executeDynamic(dnnl::stream strm, int numaId) { +void Node::executeDynamic(const dnnl::stream& strm, int numaId) { if (isExecutable()) { toNumaNode(numaId); executeDynamicImpl(strm); @@ -1039,9 +1038,11 @@ void Node::initDescriptor(const NodeConfig& config) { descs.clear(); std::vector inDescs; + inDescs.reserve(config.inConfs.size()); for (const auto& inConf : config.inConfs) inDescs.emplace_back(inConf.getMemDesc()); std::vector outDescs; + outDescs.reserve(config.outConfs.size()); for (const auto& outConf : config.outConfs) outDescs.emplace_back(outConf.getMemDesc()); createDescriptor(inDescs, outDescs); @@ -1112,6 +1113,7 @@ void Node::prepareMemory(const std::vector& intDescs) { void Node::prepareMemory(dnnl::primitive_desc_iterator& itpd) { std::vector intDescs; + intDescs.reserve(internalBlobDesc.size()); for (auto& it : internalBlobDesc) intDescs.push_back(it(itpd, 0)); @@ -1255,11 +1257,11 @@ void Node::addOriginalLayer(const std::string& layerName) { void Node::cleanup() { internalBlobs.clear(); - for (auto it : fusedWith) { + for (const auto& it : fusedWith) { it->cleanup(); } - for (auto it : mergedWith) { + for (const auto& it : mergedWith) { it->cleanup(); } } @@ 
-1452,7 +1454,7 @@ void Node::appendPostOpArgs(const dnnl::primitive_attr& attr, } bool Node::isFusedWith(Type fusedNodeType) const { - for (auto fusedNode : fusedWith) { + for (const auto& fusedNode : fusedWith) { if (fusedNode->type == fusedNodeType) return true; } @@ -1532,7 +1534,7 @@ ov::element::Type Node::getRuntimePrecision() const { return runtimePrecision; } -Node* Node::NodesFactory::create(const std::shared_ptr& op, const GraphContext::CPtr context) { +Node* Node::NodesFactory::create(const std::shared_ptr& op, const GraphContext::CPtr& context) { // getExceptionDescWithoutStatus removes redundant information from the exception message. For instance, the // NotImplemented exception is generated in the form: full_path_to_src_file:line_number [ NOT_IMPLEMENTED ] reason. // An example for gather node: diff --git a/src/plugins/intel_cpu/src/node.h b/src/plugins/intel_cpu/src/node.h index 0fe0222a673045..60b6568562ec5c 100644 --- a/src/plugins/intel_cpu/src/node.h +++ b/src/plugins/intel_cpu/src/node.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include "cpu_memory.h" @@ -48,12 +49,12 @@ class PortConfigurator { public: PortConfigurator(ov::intel_cpu::LayoutType blockedDescType, ov::element::Type prc, - const Shape& shape, + Shape shape, bool constant = false, int inPlace = -1) : blockedDescCreator(getBlockedDescCreator(blockedDescType)), prc(prc), - shape(shape), + shape(std::move(shape)), constant(constant), inPlace(inPlace) {} @@ -93,7 +94,7 @@ class NodeDesc { NodeDesc(NodeConfig conf, impl_desc_type type, ExecutorFactoryLegacyPtr factory) : config(std::move(conf)), implementationType(type), - executorFactory(factory) {} + executorFactory(std::move(factory)) {} const NodeConfig& getConfig() const { return config; @@ -126,7 +127,7 @@ class NodeDesc { } void setExecutorFactory(ExecutorFactoryLegacyPtr factory) { - executorFactory = factory; + executorFactory = std::move(factory); } private: @@ -213,12 +214,12 @@ class Node { 
childEdges.push_back(edge); } - void removeParentEdge(const EdgePtr edge) { + void removeParentEdge(const EdgePtr& edge) { removeEdge(edge, parentEdges); updateConstantType(); } - void removeChildEdge(const EdgePtr edge) { + void removeChildEdge(const EdgePtr& edge) { removeEdge(edge, childEdges); } @@ -474,10 +475,10 @@ class Node { // @todo this supposed to be 'execute + executeImpl' instead of 'executeStatic + execute' // but this requires changes in all the nodes. Since moving to a numa node right before an execute // is a temprorary solution, do it this way for now. - void executeStatic(const dnnl::stream strm, int numaId = -1); + void executeStatic(const dnnl::stream& strm, int numaId = -1); void updateShapes(); void updateDynamicParams(); - void executeDynamic(dnnl::stream strm, int numaId = -1); + void executeDynamic(const dnnl::stream& strm, int numaId = -1); virtual void redefineOutputMemory(const std::vector& newShapes); void redefineOutputMemory(const size_t port, const VectorDims& new_output_shape); bool outputShapeDataDependency() const; @@ -720,14 +721,15 @@ class Node { std::string originalLayers; // contains names of the original layers separated by comma std::string parallelDomain; - Node(const std::shared_ptr& op, const GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory); + Node(const std::shared_ptr& op, GraphContext::CPtr ctx, const ShapeInferFactory& shapeInferFactory); + Node(const std::string& type, std::vector inputShapes, std::vector outputShapes, std::vector originalInputPrecisions, std::vector originalOutputPrecisions, const std::string& name, - const GraphContext::CPtr ctx); + const GraphContext::CPtr& ctx); int selectedPrimitiveDescriptorIndex = -1; @@ -808,10 +810,10 @@ class Node { std::vector shapeInferGeneric(const std::vector& inputDims) const; virtual IShapeInfer::Result shapeInfer() const; - void execute(dnnl::stream stream, int numaId); - virtual void execute(dnnl::stream strm) = 0; + void execute(const 
dnnl::stream& stream, int numaId); + virtual void execute(const dnnl::stream& strm) = 0; // TODO [DS] : make pure after all nodes support dynamic shapes - virtual void executeDynamicImpl(dnnl::stream strm) { + virtual void executeDynamicImpl(const dnnl::stream& strm) { OPENVINO_THROW_NOT_IMPLEMENTED("[DS] executeDynamicImpl not implemented for node with type: ", getTypeStr()); } @@ -847,7 +849,7 @@ class Node { static void removeEdge(const EdgePtr edge, std::vector& edges) { edges.erase(std::remove_if(edges.begin(), edges.end(), - [&edge](EdgeWeakPtr _edge) { + [&edge](const EdgeWeakPtr& _edge) { return _edge.lock() == edge; }), edges.end()); @@ -899,7 +901,7 @@ class Node::NodesFactory public: NodesFactory(); - Node* create(const std::shared_ptr& op, const GraphContext::CPtr context); + Node* create(const std::shared_ptr& op, const GraphContext::CPtr& context); }; template diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp index 7b6f64d30d1403..274259a4e279ef 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp @@ -51,7 +51,7 @@ bool AdaptivePooling::isSupportedOperation(const std::shared_ptr return true; } -AdaptivePooling::AdaptivePooling(const std::shared_ptr& op, const GraphContext::CPtr context) +AdaptivePooling::AdaptivePooling(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, AdaptivePoolingShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -126,11 +126,11 @@ void AdaptivePooling::initSupportedPrimitiveDescriptors() { } } -void AdaptivePooling::executeDynamicImpl(dnnl::stream strm) { +void AdaptivePooling::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void AdaptivePooling::execute(dnnl::stream strm) { +void AdaptivePooling::execute(const dnnl::stream& strm) { auto inputPrec = 
getParentEdgeAt(0)->getMemory().getDataType(); auto outputPrec = getChildEdgeAt(0)->getMemory().getDataType(); if (!(inputPrec == dnnl_f32 && outputPrec == dnnl_f32)) diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h index cc6969dd1b1793..0bae3a03b5c1ed 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.h @@ -18,11 +18,11 @@ namespace node { class AdaptivePooling : public Node { public: - AdaptivePooling(const std::shared_ptr& op, const GraphContext::CPtr context); + AdaptivePooling(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; @@ -38,7 +38,7 @@ class AdaptivePooling : public Node { bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp index 7a255c95b63108..deffe60668de01 100644 --- a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp @@ -30,7 +30,7 @@ bool BatchToSpace::isSupportedOperation(const std::shared_ptr& o return true; } -BatchToSpace::BatchToSpace(const std::shared_ptr& op, const GraphContext::CPtr context) +BatchToSpace::BatchToSpace(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -237,11 +237,11 @@ void BatchToSpace::batchToSpaceKernel() { }); 
} -void BatchToSpace::executeDynamicImpl(dnnl::stream strm) { +void BatchToSpace::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void BatchToSpace::execute(dnnl::stream strm) { +void BatchToSpace::execute(const dnnl::stream& strm) { switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision().size()) { case 1: batchToSpaceKernel::value_type>(); diff --git a/src/plugins/intel_cpu/src/nodes/batch_to_space.h b/src/plugins/intel_cpu/src/nodes/batch_to_space.h index 9c863296cc3c1d..4d13ad27789a5d 100644 --- a/src/plugins/intel_cpu/src/nodes/batch_to_space.h +++ b/src/plugins/intel_cpu/src/nodes/batch_to_space.h @@ -12,7 +12,7 @@ namespace node { class BatchToSpace : public Node { public: - BatchToSpace(const std::shared_ptr& op, const GraphContext::CPtr context); + BatchToSpace(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; @@ -22,7 +22,7 @@ class BatchToSpace : public Node { return !hasEmptyInputTensors() && !hasEmptyOutputTensors(); } - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { @@ -31,7 +31,7 @@ class BatchToSpace : public Node { bool needShapeInfer() const override { return true; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp index 0a1e255dd383f9..dab539575723f7 100644 --- a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp @@ -936,7 +936,7 @@ bool BinaryConvolution::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +BinaryConvolution::BinaryConvolution(const 
std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -1358,7 +1358,7 @@ void BinaryConvolution::executeReference(const uint8_t* src, }); } -void BinaryConvolution::execute(dnnl::stream strm) { +void BinaryConvolution::execute(const dnnl::stream& strm) { auto srcMemory = getSrcMemoryAtPort(0); auto weightsMemory = getSrcMemoryAtPort(1); auto dstMemory = getDstMemoryAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/bin_conv.h b/src/plugins/intel_cpu/src/nodes/bin_conv.h index 825c264a5ba69b..8a03b9de292682 100644 --- a/src/plugins/intel_cpu/src/nodes/bin_conv.h +++ b/src/plugins/intel_cpu/src/nodes/bin_conv.h @@ -78,12 +78,12 @@ struct jit_uni_bin_conv_kernel { class BinaryConvolution : public Node { public: - BinaryConvolution(const std::shared_ptr& op, const GraphContext::CPtr context); + BinaryConvolution(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void createPrimitive() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { return false; diff --git a/src/plugins/intel_cpu/src/nodes/broadcast.cpp b/src/plugins/intel_cpu/src/nodes/broadcast.cpp index 3c92c6e6e4f041..5d6dc9ebea5bbc 100644 --- a/src/plugins/intel_cpu/src/nodes/broadcast.cpp +++ b/src/plugins/intel_cpu/src/nodes/broadcast.cpp @@ -50,7 +50,7 @@ bool Broadcast::isSupportedOperation(const std::shared_ptr& op, return true; } -Broadcast::Broadcast(const std::shared_ptr& op, const GraphContext::CPtr context) +Broadcast::Broadcast(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -189,11 +189,11 @@ bool 
Broadcast::isExecutable() const { return !isInputTensorAtPortEmpty(0); } -void Broadcast::executeDynamicImpl(dnnl::stream strm) { +void Broadcast::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Broadcast::execute(dnnl::stream strm) { +void Broadcast::execute(const dnnl::stream& strm) { if (optimizedCase) { optimizedExecute(getSrcMemoryAtPort(INPUT_DATA_IDX), getDstMemoryAtPort(0)); } else { @@ -201,7 +201,7 @@ void Broadcast::execute(dnnl::stream strm) { } } -void Broadcast::plainExecute(dnnl::stream strm) { +void Broadcast::plainExecute(const dnnl::stream& strm) { VectorDims srcDims = getParentEdgeAt(INPUT_DATA_IDX)->getMemory().getStaticDims(); const auto& dstDims = getChildEdgeAt(0)->getMemory().getStaticDims(); const auto& dataSrcRank = getParentEdgeAt(INPUT_DATA_IDX)->getMemory().getShape().getRank(); diff --git a/src/plugins/intel_cpu/src/nodes/broadcast.h b/src/plugins/intel_cpu/src/nodes/broadcast.h index 5645ec70a1f707..c6063ebd89fbf4 100644 --- a/src/plugins/intel_cpu/src/nodes/broadcast.h +++ b/src/plugins/intel_cpu/src/nodes/broadcast.h @@ -16,12 +16,12 @@ namespace node { class Broadcast : public Node, public TileBroadcastCommon { public: - Broadcast(const std::shared_ptr& op, const GraphContext::CPtr context); + Broadcast(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool created() const override; bool isExecutable() const override; @@ -33,7 +33,7 @@ class Broadcast : public Node, public TileBroadcastCommon { bool needShapeInfer() const override; private: - void plainExecute(dnnl::stream strm); + void plainExecute(const dnnl::stream& strm); enum AutoBroadcastType { NUMPY, EXPLICIT }; AutoBroadcastType 
broadcastType = NUMPY; diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.cpp b/src/plugins/intel_cpu/src/nodes/bucketize.cpp index 115e397c4c990e..67f1c3ff482405 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.cpp +++ b/src/plugins/intel_cpu/src/nodes/bucketize.cpp @@ -29,7 +29,7 @@ bool Bucketize::isSupportedOperation(const std::shared_ptr& op, return true; } -Bucketize::Bucketize(const std::shared_ptr& op, const GraphContext::CPtr context) +Bucketize::Bucketize(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -85,7 +85,7 @@ inline constexpr uint32_t getElementsMask(ov::element::Type precision1, (static_cast(ov::element::Type_t(precision4)) << 24); } -void Bucketize::execute(dnnl::stream strm) { +void Bucketize::execute(const dnnl::stream& strm) { auto precision_mask = getElementsMask(input_precision, boundaries_precision, output_precision); switch (precision_mask) { diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.h b/src/plugins/intel_cpu/src/nodes/bucketize.h index 5ad893ea0a9282..3481941aa0a405 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.h +++ b/src/plugins/intel_cpu/src/nodes/bucketize.h @@ -12,13 +12,13 @@ namespace node { class Bucketize : public Node { public: - Bucketize(const std::shared_ptr& op, const GraphContext::CPtr context); + Bucketize(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp 
index 1803dc5c10b45e..93f5278b06a4a8 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp @@ -47,7 +47,7 @@ The functionality is equivalent to following python code: */ template struct CausalMaskPreprocess::ExecutorCausalMaskPreprocess : public CausalMaskPreprocess::Executor { - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, intel_cpu::Node* pnode, const intel_cpu::CausalMaskPreprocessNode::Config& config) override { ov::intel_cpu::PlainTensor t_attention_mask(pnode->getSrcMemoryAtPort(0)); @@ -99,7 +99,7 @@ struct CausalMaskPreprocess::ExecutorCausalMaskPreprocess : public CausalMaskPre } }; -CausalMaskPreprocess::CausalMaskPreprocess(const std::shared_ptr& op, const GraphContext::CPtr context) +CausalMaskPreprocess::CausalMaskPreprocess(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -158,7 +158,7 @@ void CausalMaskPreprocess::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inPortConfigs, outPortConfigs, impl_desc_type::ref_any); } -void CausalMaskPreprocess::execute(dnnl::stream strm) { +void CausalMaskPreprocess::execute(const dnnl::stream& strm) { m_executor->execute(strm, this, m_config); } diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h index 444f242b0597a7..b35de8e25fcae9 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h @@ -13,7 +13,7 @@ namespace node { class CausalMaskPreprocess : public Node { public: - CausalMaskPreprocess(const std::shared_ptr& op, const GraphContext::CPtr context); + CausalMaskPreprocess(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const 
override { @@ -22,16 +22,16 @@ class CausalMaskPreprocess : public Node { bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: struct Executor { - virtual void execute(dnnl::stream strm, + virtual void execute(const dnnl::stream& strm, intel_cpu::Node* pnode, const intel_cpu::CausalMaskPreprocessNode::Config& config) = 0; virtual ~Executor() = default; diff --git a/src/plugins/intel_cpu/src/nodes/col2im.cpp b/src/plugins/intel_cpu/src/nodes/col2im.cpp index 99fc3ed7d671df..58c1e36a9e308a 100644 --- a/src/plugins/intel_cpu/src/nodes/col2im.cpp +++ b/src/plugins/intel_cpu/src/nodes/col2im.cpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { namespace node { -Col2Im::Col2Im(const std::shared_ptr& op, const GraphContext::CPtr context) +Col2Im::Col2Im(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -57,7 +57,7 @@ bool Col2Im::needPrepareParams() const { return false; } -void Col2Im::executeDynamicImpl(dnnl::stream strm) { +void Col2Im::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -89,7 +89,7 @@ struct Col2Im::Col2ImExecute { ctx.node.executeImpl(); } }; -void Col2Im::execute(dnnl::stream strm) { +void Col2Im::execute(const dnnl::stream& strm) { auto dataPrecision = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); auto indexPrecision = getParentEdgeAt(1)->getMemory().getDesc().getPrecision(); diff --git a/src/plugins/intel_cpu/src/nodes/col2im.h b/src/plugins/intel_cpu/src/nodes/col2im.h index 
63a20a0cf74252..e841d4567d4b65 100644 --- a/src/plugins/intel_cpu/src/nodes/col2im.h +++ b/src/plugins/intel_cpu/src/nodes/col2im.h @@ -12,15 +12,15 @@ namespace node { class Col2Im : public Node { public: - Col2Im(const std::shared_ptr& op, const GraphContext::CPtr context); + Col2Im(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: template diff --git a/src/plugins/intel_cpu/src/nodes/color_convert.cpp b/src/plugins/intel_cpu/src/nodes/color_convert.cpp index 24e24c61280c61..dd6d5ede57b2bd 100644 --- a/src/plugins/intel_cpu/src/nodes/color_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/color_convert.cpp @@ -352,7 +352,7 @@ class SinglePlaneConvert : public RefConverter { public: using RefConverter::RefConverter; - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& dims = inputDims(0); const size_t batch_size = dims[N_DIM]; @@ -372,7 +372,7 @@ class TwoPlaneConvert : public RefConverter { public: using RefConverter::RefConverter; - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& dims = inputDims(0); const T* y = static_cast(input(0)); @@ -535,7 +535,7 @@ class SinglePlaneConvert : public Converter { jit_converter_create(); } - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& kernel = jit_converter_get(); const auto& dims = inputDims(0); @@ -569,7 +569,7 @@ class TwoPlaneConvert : public Converter { 
jit_converter_create(); } - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& kernel = jit_converter_get(); const auto& dims = inputDims(0); @@ -681,7 +681,7 @@ class SinglePlaneConvert : public RefConverter { public: using RefConverter::RefConverter; - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& dims = inputDims(0); const size_t batch_size = dims[N_DIM]; @@ -702,7 +702,7 @@ class ThreePlaneConvert : public RefConverter { public: using RefConverter::RefConverter; - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& dims = inputDims(0); const T* y = static_cast(input(0)); @@ -865,7 +865,7 @@ class SinglePlaneConvert : public Converter { jit_converter_create(); } - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& kernel = jit_converter_get(); const auto& dims = inputDims(0); @@ -901,7 +901,7 @@ class ThreePlaneConvert : public Converter { jit_converter_create(); } - void execute(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override { const auto& kernel = jit_converter_get(); const auto& dims = inputDims(0); @@ -964,7 +964,7 @@ bool ColorConvert::isSupportedOperation(const std::shared_ptr& o return alg != Algorithm::Default; } -ColorConvert::ColorConvert(const std::shared_ptr& op, const GraphContext::CPtr context) +ColorConvert::ColorConvert(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, ColorConvertShapeInferFactory(op)) { std::string errorMessage; std::tie(algorithm, errorMessage) = getAlgorithmFor(op); @@ -1078,7 +1078,7 @@ void ColorConvert::createPrimitive() { } } -void ColorConvert::execute(dnnl::stream strm) { +void ColorConvert::execute(const dnnl::stream& strm) { if (!_impl) OPENVINO_THROW(getTypeStr() + " node with name '" + getName() + "' ", "has no any implemented 
converter"); _impl->execute(strm); @@ -1092,7 +1092,7 @@ bool ColorConvert::needPrepareParams() const { return false; } -void ColorConvert::executeDynamicImpl(dnnl::stream strm) { +void ColorConvert::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/color_convert.h b/src/plugins/intel_cpu/src/nodes/color_convert.h index c28c4c86b922ce..b3db883ec8ce18 100644 --- a/src/plugins/intel_cpu/src/nodes/color_convert.h +++ b/src/plugins/intel_cpu/src/nodes/color_convert.h @@ -17,17 +17,17 @@ namespace node { class ColorConvert : public Node { public: - ColorConvert(const std::shared_ptr& op, const GraphContext::CPtr context); + ColorConvert(const std::shared_ptr& op, const GraphContext::CPtr& context); class Converter; public: void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; @@ -70,7 +70,7 @@ class ColorConvert::Converter { const void* input(size_t idx) const; void* output(size_t idx) const; const VectorDims& inputDims(size_t idx) const; - virtual void execute(dnnl::stream strm) = 0; + virtual void execute(const dnnl::stream& strm) = 0; protected: Node* _node; diff --git a/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.cpp b/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.cpp index 7e18fd305f9fae..867f79d62a990b 100644 --- a/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.cpp @@ -25,11 +25,11 @@ DnnlExecutor::IntermReorder::IntermReorder(const dnnl::memory::desc& descSrc, m_reorder = 
dnnl::reorder(reorderPd); } -void DnnlExecutor::IntermReorder::exec(dnnl::memory& memSrc, dnnl::memory& memDst, dnnl::stream strm) { +void DnnlExecutor::IntermReorder::exec(dnnl::memory& memSrc, dnnl::memory& memDst, const dnnl::stream& strm) { m_reorder.execute(strm, memSrc, memDst); } -void DnnlExecutor::exec(const std::unordered_map& primArgs, dnnl::stream strm) { +void DnnlExecutor::exec(const std::unordered_map& primArgs, const dnnl::stream& strm) { if (inputReorders.empty() && outputReorders.empty()) { execPrim.execute(strm, primArgs); } else { @@ -37,7 +37,7 @@ void DnnlExecutor::exec(const std::unordered_map& primArgs, d } } -void DnnlExecutor::reorder_exec(std::unordered_map primArgs, dnnl::stream strm) { +void DnnlExecutor::reorder_exec(std::unordered_map primArgs, const dnnl::stream& strm) { for (auto& inReorder : inputReorders) { if (primArgs.count(inReorder.first)) { dnnl::memory memDst(inReorder.second.getDstDesc(), strm.get_engine()); diff --git a/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.h b/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.h index 780df7b86a1161..86e8ab526fa7fd 100644 --- a/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.h +++ b/src/plugins/intel_cpu/src/nodes/common/dnnl_executor.h @@ -17,7 +17,7 @@ class DnnlExecutor { class IntermReorder { public: IntermReorder(const dnnl::memory::desc& descSrc, const dnnl::memory::desc& descDst, const dnnl::engine& engine); - void exec(dnnl::memory& memSrc, dnnl::memory& memDst, dnnl::stream strm); + void exec(dnnl::memory& memSrc, dnnl::memory& memDst, const dnnl::stream& strm); const dnnl::memory::desc& getSrcDesc() const { return m_descSrc; } @@ -33,7 +33,7 @@ class DnnlExecutor { public: explicit DnnlExecutor(const dnnl::primitive_desc& pd); - void exec(const std::unordered_map& primArgs, dnnl::stream strm); + void exec(const std::unordered_map& primArgs, const dnnl::stream& strm); bool needReordering() const; virtual ~DnnlExecutor() = default; dnnl::primitive 
getExecPrim() const; @@ -67,7 +67,7 @@ class DnnlExecutor { } protected: - virtual void reorder_exec(std::unordered_map primArgs, dnnl::stream strm); + virtual void reorder_exec(std::unordered_map primArgs, const dnnl::stream& strm); protected: dnnl::primitive execPrim; diff --git a/src/plugins/intel_cpu/src/nodes/common/fp16_utils.h b/src/plugins/intel_cpu/src/nodes/common/fp16_utils.h index e09330b239ccab..371d86da9a7b64 100644 --- a/src/plugins/intel_cpu/src/nodes/common/fp16_utils.h +++ b/src/plugins/intel_cpu/src/nodes/common/fp16_utils.h @@ -4,6 +4,8 @@ #pragma once +#include + namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/nodes/common/permute_kernel.h b/src/plugins/intel_cpu/src/nodes/common/permute_kernel.h index 986fd1cee5d794..ef4fc80f7dbb87 100644 --- a/src/plugins/intel_cpu/src/nodes/common/permute_kernel.h +++ b/src/plugins/intel_cpu/src/nodes/common/permute_kernel.h @@ -4,6 +4,8 @@ #pragma once +#include + #include "node.h" namespace ov { @@ -45,7 +47,7 @@ struct jit_uni_permute_kernel { ker_(args); } - explicit jit_uni_permute_kernel(jit_permute_config_params jcp_) : ker_(nullptr), jcp(jcp_) {} + explicit jit_uni_permute_kernel(jit_permute_config_params jcp_) : ker_(nullptr), jcp(std::move(jcp_)) {} virtual ~jit_uni_permute_kernel() {} virtual void create_ker() = 0; diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp index dd07a721260aac..2ee4c0a23bbdab 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp @@ -41,7 +41,7 @@ bool ReorderKey::operator==(const ReorderKey& rhs) const { return retVal; } -dnnl::reorder getReorderPrim(MultiCachePtr cache, +dnnl::reorder getReorderPrim(const MultiCachePtr& cache, const dnnl::engine& engine, const dnnl::memory::desc& src, const dnnl::memory::desc& dest) { diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h 
b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h index 77beda029d3976..33e12b4045abf9 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h @@ -9,7 +9,7 @@ namespace ov { namespace intel_cpu { -dnnl::reorder getReorderPrim(MultiCachePtr cache, +dnnl::reorder getReorderPrim(const MultiCachePtr& cache, const dnnl::engine& engine, const dnnl::memory::desc& src, const dnnl::memory::desc& dest); diff --git a/src/plugins/intel_cpu/src/nodes/common/softmax.h b/src/plugins/intel_cpu/src/nodes/common/softmax.h index 8e7b7dcbca264d..f6376ea642060e 100644 --- a/src/plugins/intel_cpu/src/nodes/common/softmax.h +++ b/src/plugins/intel_cpu/src/nodes/common/softmax.h @@ -30,7 +30,7 @@ static inline void softmax_many_batches(const float* src_data, float* dst_data, float expSum = 0; for (int c = 0; c < C; c++) { - pdst[c * H * W + i] = exp(psrc[c * H * W + i] - max); + pdst[c * H * W + i] = std::exp(psrc[c * H * W + i] - max); expSum += pdst[c * H * W + i]; } diff --git a/src/plugins/intel_cpu/src/nodes/composite.cpp b/src/plugins/intel_cpu/src/nodes/composite.cpp index 4c86eb43eb3fdc..488275e0ac564c 100644 --- a/src/plugins/intel_cpu/src/nodes/composite.cpp +++ b/src/plugins/intel_cpu/src/nodes/composite.cpp @@ -94,12 +94,12 @@ void Composite::createPrimitive() { m_graph.Activate(inputMemory, outputMemory); } -void Composite::execute(dnnl::stream) { +void Composite::execute(const dnnl::stream&) { m_graph.Infer(); } -void Composite::executeDynamicImpl(dnnl::stream strm) { - execute(std::move(strm)); +void Composite::executeDynamicImpl(const dnnl::stream& strm) { + execute(strm); // since the shape inference is not performed for the composite node // a memory of the extra child edges, attached to the output ports diff --git a/src/plugins/intel_cpu/src/nodes/composite.h b/src/plugins/intel_cpu/src/nodes/composite.h index 9f18a2ba68b769..59fc3705497a18 100644 --- 
a/src/plugins/intel_cpu/src/nodes/composite.h +++ b/src/plugins/intel_cpu/src/nodes/composite.h @@ -38,8 +38,8 @@ class Composite : public Node { void getSupportedDescriptors() override{}; void selectOptimalPrimitiveDescriptor() override; void createPrimitive() override; - void execute(dnnl::stream) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream&) override; + void executeDynamicImpl(const dnnl::stream& strm) override; const Graph& graph() const { return m_graph; diff --git a/src/plugins/intel_cpu/src/nodes/concat.cpp b/src/plugins/intel_cpu/src/nodes/concat.cpp index fe6cb224a9d6cb..c82a187fdfdbbd 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.cpp +++ b/src/plugins/intel_cpu/src/nodes/concat.cpp @@ -49,7 +49,7 @@ bool Concat::isSupportedOperation(const std::shared_ptr& op, std return true; } -Concat::Concat(const std::shared_ptr& op, const GraphContext::CPtr context) +Concat::Concat(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -493,7 +493,7 @@ void Concat::initOptimalPrimitiveDescriptor() { getSelectedPrimitiveDescriptor()->getConfig().outConfs.front().getMemDesc()->hasLayoutType(LayoutType::nspc); } -void Concat::execute(dnnl::stream strm) { +void Concat::execute(const dnnl::stream& strm) { if (isInPlace()) { return; } diff --git a/src/plugins/intel_cpu/src/nodes/concat.h b/src/plugins/intel_cpu/src/nodes/concat.h index eb24c753033153..6bbc215d80aad7 100644 --- a/src/plugins/intel_cpu/src/nodes/concat.h +++ b/src/plugins/intel_cpu/src/nodes/concat.h @@ -13,7 +13,7 @@ namespace node { class Concat : public Node { public: - Concat(const std::shared_ptr& op, const GraphContext::CPtr context); + Concat(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void 
getSupportedDescriptors() override; @@ -21,8 +21,8 @@ class Concat : public Node { void initOptimalPrimitiveDescriptor() override; void selectOptimalPrimitiveDescriptor() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void resolveInPlaceEdges(Edge::LOOK look) override; diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp index 770b53032791c8..3240599d00c819 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/conv.cpp @@ -116,7 +116,7 @@ bool ConvKey::operator==(const ConvKey& rhs) const { class Convolution::FusedSubgraph { public: - FusedSubgraph(const std::vector& opList, const Convolution& conv, const GraphContext::CPtr context) { + FusedSubgraph(const std::vector& opList, const Convolution& conv, const GraphContext::CPtr& context) { _graph = std::unique_ptr(new Graph()); std::unordered_set nodesSet; @@ -239,7 +239,7 @@ bool Convolution::isSupportedOperation(const std::shared_ptr& op return true; } -Convolution::Convolution(const std::shared_ptr& op, const GraphContext::CPtr context) +Convolution::Convolution(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), withBiases(false), withSum(false), @@ -1398,17 +1398,17 @@ void Convolution::prepareParams() { dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(key.inp1->getShape().getStaticDims()), deriveWeightDataType(key.inp0->getDataType()), memory::format_tag::any); - auto createDnnlConvDesc = [](const dnnl::engine engine, + auto createDnnlConvDesc = [](const dnnl::engine& engine, const dnnl::memory::desc& srcDesc, const dnnl::memory::desc& wghDesc, const dnnl::memory::desc& dstDesc, - DnnlMemoryDescCPtr biasDescPtr, + const DnnlMemoryDescCPtr& 
biasDescPtr, const std::vector& stride, const std::vector& dilation, const std::vector& paddingL, const std::vector& paddingR, dnnl::algorithm alg, - const dnnl::primitive_attr attr) -> dnnl::primitive_desc { + const dnnl::primitive_attr& attr) -> dnnl::primitive_desc { dnnl::memory::desc dnnlBiasDesc; if (biasDescPtr) { dnnlBiasDesc = biasDescPtr->getDnnlDesc(); @@ -1585,7 +1585,7 @@ Convolution::ConvolutionSumExecutor::ConvolutionSumExecutor(const dnnl::primitiv } void Convolution::ConvolutionSumExecutor::reorder_exec(std::unordered_map primArgs, - dnnl::stream strm) { + const dnnl::stream& strm) { auto outputMem = primArgs.at(DNNL_ARG_DST); for (auto& inReorder : inputReorders) { if (primArgs.count(inReorder.first)) { @@ -1602,7 +1602,7 @@ void Convolution::ConvolutionSumExecutor::reorder_exec(std::unordered_mapexec(primArgs, strm); } -void Convolution::executeDynamicImpl(dnnl::stream strm) { +void Convolution::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); if (withSumBroadcast) { if (!subgraph) { diff --git a/src/plugins/intel_cpu/src/nodes/conv.h b/src/plugins/intel_cpu/src/nodes/conv.h index f40177abff89a1..80c98b2a7bca07 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.h +++ b/src/plugins/intel_cpu/src/nodes/conv.h @@ -15,7 +15,7 @@ class Eltwise; class Convolution : public Node { public: - Convolution(const std::shared_ptr& op, const GraphContext::CPtr context); + Convolution(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; @@ -108,12 +108,12 @@ class Convolution : public Node { bool constWeight); private: - void reorder_exec(std::unordered_map primArgs, dnnl::stream strm) override; + void reorder_exec(std::unordered_map primArgs, const dnnl::stream& strm) override; }; void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) 
override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void addLegacyZeroPoints(dnnl::primitive_attr& attr); void addZeroPoints(dnnl::primitive_attr& attr); void setPostOps(dnnl::primitive_attr& attr, diff --git a/src/plugins/intel_cpu/src/nodes/convert.cpp b/src/plugins/intel_cpu/src/nodes/convert.cpp index 2684fe28d9b5f7..f82e6da109672b 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/convert.cpp @@ -36,7 +36,7 @@ bool Convert::isSupportedOperation(const std::shared_ptr& op, st return true; } -Convert::Convert(const std::shared_ptr& op, const GraphContext::CPtr context) +Convert::Convert(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -51,7 +51,7 @@ Convert::Convert(const Shape& shape, const ov::element::Type& inPrc, const ov::element::Type& outPrc, const std::string& nodeName, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node("Convert", {shape}, {shape}, {inPrc}, {outPrc}, nodeName, context) { convertParams.origPrc = outPrc; @@ -169,11 +169,11 @@ void Convert::prepareParams() { selectedPD->setImplementationType(execPtr->implType()); } -void Convert::executeDynamicImpl(dnnl::stream strm) { +void Convert::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Convert::execute(dnnl::stream strm) { +void Convert::execute(const dnnl::stream& strm) { auto& parentMem = getParentEdgeAt(0)->getMemory(); auto& childMem = getChildEdgeAt(0)->getMemory(); diff --git a/src/plugins/intel_cpu/src/nodes/convert.h b/src/plugins/intel_cpu/src/nodes/convert.h index 528e5adef6f3e1..52f56b7acd65a2 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.h +++ b/src/plugins/intel_cpu/src/nodes/convert.h @@ -13,18 +13,18 @@ namespace node { class Convert : public Node { public: - 
Convert(const std::shared_ptr& op, const GraphContext::CPtr context); + Convert(const std::shared_ptr& op, const GraphContext::CPtr& context); Convert(const Shape& shape, const ov::element::Type& inPrc, const ov::element::Type& outPrc, const std::string& nodeName, - const GraphContext::CPtr context); + const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { return false; diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp index 60224ff49a8781..445309466b2125 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp @@ -28,7 +28,7 @@ bool CTCGreedyDecoder::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +CTCGreedyDecoder::CTCGreedyDecoder(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -67,7 +67,7 @@ void CTCGreedyDecoder::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void CTCGreedyDecoder::execute(dnnl::stream strm) { +void CTCGreedyDecoder::execute(const dnnl::stream& strm) { const float* probabilities = getSrcDataAtPortAs(DATA_INDEX); const float* sequenceMask = getSrcDataAtPortAs(SEQUENCE_LENGTH_INDEX); float* outputSequences = getDstDataAtPortAs(0); @@ -165,7 +165,7 @@ bool CTCGreedyDecoder::created() const { return getType() == Type::CTCGreedyDecoder; } -void CTCGreedyDecoder::executeDynamicImpl(dnnl::stream strm) { +void 
CTCGreedyDecoder::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h index 9121905eefdea7..dd7950f85a4205 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.h @@ -12,13 +12,13 @@ namespace node { class CTCGreedyDecoder : public Node { public: - CTCGreedyDecoder(const std::shared_ptr& op, const GraphContext::CPtr context); + CTCGreedyDecoder(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needPrepareParams() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp index 11c6efb8da30d1..5f8713fd71ef3b 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp @@ -28,7 +28,7 @@ bool CTCGreedyDecoderSeqLen::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +CTCGreedyDecoderSeqLen::CTCGreedyDecoderSeqLen(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -72,7 +72,7 @@ void CTCGreedyDecoderSeqLen::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void CTCGreedyDecoderSeqLen::execute(dnnl::stream strm) { +void CTCGreedyDecoderSeqLen::execute(const 
dnnl::stream& strm) { const float* probabilities = getSrcDataAtPortAs(DATA_INDEX); const int* sequenceLengths = getSrcDataAtPortAs(SEQUENCE_LENGTH_INDEX); int* decodedClasses = getDstDataAtPortAs(DECODED_CLASSES_INDEX); @@ -171,7 +171,7 @@ bool CTCGreedyDecoderSeqLen::created() const { return getType() == Type::CTCGreedyDecoderSeqLen; } -void CTCGreedyDecoderSeqLen::executeDynamicImpl(dnnl::stream strm) { +void CTCGreedyDecoderSeqLen::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h index d730cebedac64f..d87de7b5973913 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.h @@ -12,13 +12,13 @@ namespace node { class CTCGreedyDecoderSeqLen : public Node { public: - CTCGreedyDecoderSeqLen(const std::shared_ptr& op, const GraphContext::CPtr context); + CTCGreedyDecoderSeqLen(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needPrepareParams() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp index 20c080105af141..67a80745743528 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp @@ -26,7 +26,7 @@ bool CTCLoss::isSupportedOperation(const std::shared_ptr& op, st return true; } -CTCLoss::CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr context) 
+CTCLoss::CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -55,11 +55,11 @@ void CTCLoss::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inDataConf, {{LayoutType::ncsp, ov::element::f32}}, impl_desc_type::ref_any); } -void CTCLoss::executeDynamicImpl(dnnl::stream strm) { +void CTCLoss::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void CTCLoss::execute(dnnl::stream strm) { +void CTCLoss::execute(const dnnl::stream& strm) { int32_t returnCode = 0; const float* logits = getSrcDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/ctc_loss.h b/src/plugins/intel_cpu/src/nodes/ctc_loss.h index 6c24b47b1bd457..ffe3703a975219 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_loss.h +++ b/src/plugins/intel_cpu/src/nodes/ctc_loss.h @@ -12,16 +12,16 @@ namespace node { class CTCLoss : public Node { public: - CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr context); + CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needPrepareParams() const override { return false; }; diff --git a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp index bbda16f94a8e4b..bcedab54caeaeb 100644 --- a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp +++ b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp @@ -30,7 +30,7 @@ bool CumSum::isSupportedOperation(const std::shared_ptr& op, std return true; } -CumSum::CumSum(const 
std::shared_ptr& op, const GraphContext::CPtr context) +CumSum::CumSum(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -96,7 +96,7 @@ void CumSum::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inDataConf, {{LayoutType::ncsp, dataPrecision}}, impl_desc_type::ref_any); } -void CumSum::execute(dnnl::stream strm) { +void CumSum::execute(const dnnl::stream& strm) { if (inputShapes.size() == numOfInputs) axis = getAxis(getParentEdgeAt(AXIS)->getMemory(), getParentEdgeAt(CUM_SUM_DATA)->getMemory()); @@ -269,7 +269,7 @@ bool CumSum::needPrepareParams() const { return false; } -void CumSum::executeDynamicImpl(dnnl::stream strm) { +void CumSum::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/cum_sum.h b/src/plugins/intel_cpu/src/nodes/cum_sum.h index 1b5070699c2729..1bf49f3d7a3861 100644 --- a/src/plugins/intel_cpu/src/nodes/cum_sum.h +++ b/src/plugins/intel_cpu/src/nodes/cum_sum.h @@ -12,15 +12,15 @@ namespace node { class CumSum : public Node { public: - CumSum(const std::shared_ptr& op, const GraphContext::CPtr context); + CumSum(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index a996249dc52acc..886497bd57cc29 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -164,7 +164,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr& return true; } -Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr context) +Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, DeconfolutionShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) @@ -706,7 +706,7 @@ VectorDims Deconvolution::shapeInferInternal(const VectorDims& inDims, std::vect return std::move(result.dims.back()); } -void Deconvolution::execute(dnnl::stream strm) { +void Deconvolution::execute(const dnnl::stream& strm) { if (useACL) { std::vector srcMemory; for (size_t i = 0; i < getOriginalInputsNumber(); i++) { diff --git a/src/plugins/intel_cpu/src/nodes/deconv.h b/src/plugins/intel_cpu/src/nodes/deconv.h index a9efed8806abde..4a045f86e6d281 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.h +++ b/src/plugins/intel_cpu/src/nodes/deconv.h @@ -14,7 +14,7 @@ namespace node { class Deconvolution : public Node { public: - Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr context); + Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; @@ -45,8 +45,8 @@ class Deconvolution : public Node { } void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override { + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } bool needShapeInfer() const override; diff --git a/src/plugins/intel_cpu/src/nodes/def_conv.cpp b/src/plugins/intel_cpu/src/nodes/def_conv.cpp index 4c44e8b5539e1b..df2c08a5b9e75b 100644 --- a/src/plugins/intel_cpu/src/nodes/def_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/def_conv.cpp @@ -768,7 +768,7 @@ bool 
DefConvKey::operator==(const DefConvKey& rhs) const { } // namespace -DeformableConvolution::DeformableConvolution(const std::shared_ptr& op, const GraphContext::CPtr context) +DeformableConvolution::DeformableConvolution(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -1288,7 +1288,7 @@ void DeformableConvolution::prepareParams() { } } -void DeformableConvolution::executeDynamicImpl(dnnl::stream strm) { +void DeformableConvolution::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -1329,7 +1329,7 @@ void DeformableConvolution::DefConvJitExecutor::exec(const float* src, }); } -void DeformableConvolution::execute(dnnl::stream strm) { +void DeformableConvolution::execute(const dnnl::stream& strm) { const size_t inputsNumber = getOriginalInputsNumber(); auto& srcMemory0 = getParentEdgeAt(0)->getMemory(); diff --git a/src/plugins/intel_cpu/src/nodes/def_conv.h b/src/plugins/intel_cpu/src/nodes/def_conv.h index 066ac7c0e61007..2cd120a83d67bd 100644 --- a/src/plugins/intel_cpu/src/nodes/def_conv.h +++ b/src/plugins/intel_cpu/src/nodes/def_conv.h @@ -72,12 +72,12 @@ struct jit_uni_def_conv_kernel { class DeformableConvolution : public Node { public: - DeformableConvolution(const std::shared_ptr& op, const GraphContext::CPtr context); + DeformableConvolution(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { return false; @@ -103,7 +103,7 @@ class DeformableConvolution : public Node { void prepareParams() override; void updatePadding(); - void 
executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static constexpr size_t DATA_ID = 0; static constexpr size_t OFF_ID = 1; static constexpr size_t WEI_ID = 2; diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp index bb9e3eac5f03db..bf0823885ebc71 100644 --- a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp @@ -66,7 +66,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr& o return true; } -DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphContext::CPtr context) +DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -303,7 +303,7 @@ void DepthToSpace::DepthToSpaceExecutor::exec(const MemoryPtr& srcMemPtr, const permuteKernel->execute(srcData, dstData, MB); } -void DepthToSpace::execute(dnnl::stream strm) { +void DepthToSpace::execute(const dnnl::stream& strm) { if (!execPtr) { THROW_ERROR("doesn't have a compiled executor."); } @@ -312,7 +312,7 @@ void DepthToSpace::execute(dnnl::stream strm) { execPtr->exec(getSrcMemoryAtPort(0), getDstMemoryAtPort(0), MB); } -void DepthToSpace::executeDynamicImpl(dnnl::stream strm) { +void DepthToSpace::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.h b/src/plugins/intel_cpu/src/nodes/depth_to_space.h index 0e12218f1a5e7f..151dbd7d3cc29f 100644 --- a/src/plugins/intel_cpu/src/nodes/depth_to_space.h +++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.h @@ -13,13 +13,13 @@ namespace node { class DepthToSpace : public Node { public: - DepthToSpace(const std::shared_ptr& op, const GraphContext::CPtr context); + DepthToSpace(const std::shared_ptr& op, const GraphContext::CPtr& context); 
static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; @@ -38,7 +38,7 @@ class DepthToSpace : public Node { }; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: DepthToSpaceAttrs attrs; diff --git a/src/plugins/intel_cpu/src/nodes/detection_output.cpp b/src/plugins/intel_cpu/src/nodes/detection_output.cpp index 416f47a7c0f8a9..a730bd2943dc61 100644 --- a/src/plugins/intel_cpu/src/nodes/detection_output.cpp +++ b/src/plugins/intel_cpu/src/nodes/detection_output.cpp @@ -48,7 +48,7 @@ bool DetectionOutput::isSupportedOperation(const std::shared_ptr return true; } -DetectionOutput::DetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr context) +DetectionOutput::DetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -161,11 +161,11 @@ struct ConfidenceComparatorDO { const float* confData; }; -void DetectionOutput::executeDynamicImpl(dnnl::stream strm) { +void DetectionOutput::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void DetectionOutput::execute(dnnl::stream strm) { +void DetectionOutput::execute(const dnnl::stream& strm) { float* dstData = getDstDataAtPortAs(0); const float* locData = getSrcDataAtPortAs(ID_LOC); diff --git a/src/plugins/intel_cpu/src/nodes/detection_output.h b/src/plugins/intel_cpu/src/nodes/detection_output.h index e5ed8951d63c7f..3ce09c8935dd9d 100644 --- a/src/plugins/intel_cpu/src/nodes/detection_output.h +++ b/src/plugins/intel_cpu/src/nodes/detection_output.h 
@@ -13,18 +13,18 @@ namespace node { class DetectionOutput : public Node { public: - DetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr context); + DetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: static const int ID_LOC = 0; diff --git a/src/plugins/intel_cpu/src/nodes/dft.cpp b/src/plugins/intel_cpu/src/nodes/dft.cpp index 5042a5e0abba7e..1d18e1b64d28a1 100644 --- a/src/plugins/intel_cpu/src/nodes/dft.cpp +++ b/src/plugins/intel_cpu/src/nodes/dft.cpp @@ -42,7 +42,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -DFT::DFT(const std::shared_ptr& op, const GraphContext::CPtr context) +DFT::DFT(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -245,7 +245,7 @@ void copyDataToOutputWithSignalSize(const float* input, } // namespace -void DFT::execute(dnnl::stream strm) { +void DFT::execute(const dnnl::stream& strm) { const auto& outputShape = getChildEdgeAt(0)->getMemory().getStaticDims(); const auto inputDataEdge = getParentEdgeAt(DATA_INDEX); diff --git a/src/plugins/intel_cpu/src/nodes/dft.h b/src/plugins/intel_cpu/src/nodes/dft.h index 7d7cabcf4585e0..072d59995d5881 100644 --- a/src/plugins/intel_cpu/src/nodes/dft.h +++ b/src/plugins/intel_cpu/src/nodes/dft.h @@ -13,12 +13,12 @@ namespace node { class DFT : public Node { public: - DFT(const std::shared_ptr& op, const 
GraphContext::CPtr context); + DFT(const std::shared_ptr& op, const GraphContext::CPtr& context); ~DFT() override = default; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp index 55e265a2008dcb..c13f22b0d9b76a 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp @@ -1771,7 +1771,7 @@ class EltwiseRefBaseExecutor : public Eltwise::IEltwiseExecutor { EltwiseRefBaseExecutor(const EltwiseData& opData, const VectorDims& outBlkDims, const std::vector& inpDims) - : _opData(std::move(opData)), + : _opData(opData), _inpDims(inpDims) { if (inpDims.empty()) { OPENVINO_THROW("Can not make Eltwise executor from empty input dims array"); @@ -2219,7 +2219,7 @@ bool Eltwise::isSupportedOperation(const std::shared_ptr& op, st return true; } -Eltwise::Eltwise(const std::shared_ptr& op, const GraphContext::CPtr context) +Eltwise::Eltwise(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, EltwiseShapeInferFactory()), broadcastingPolicy(Undefined) { std::string errorMessage; @@ -2969,7 +2969,7 @@ void Eltwise::selectOptimalPrimitiveDescriptor() { selectPreferPrimitiveDescriptor(getImplPriority(), true); } -void Eltwise::execute(dnnl::stream strm) { +void Eltwise::execute(const dnnl::stream& strm) { if (execPtr) { jit_eltwise_call_args_ptrs args_ptrs = {}; VectorDims dims_out = @@ -3003,7 +3003,7 @@ void Eltwise::execute(dnnl::stream strm) { } } -void Eltwise::executeDynamicImpl(dnnl::stream strm) { +void Eltwise::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.h b/src/plugins/intel_cpu/src/nodes/eltwise.h index 
21333c7b0aa91a..b8e544eb49fd88 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.h +++ b/src/plugins/intel_cpu/src/nodes/eltwise.h @@ -8,6 +8,7 @@ #include #include +#include #include #include "dnnl_postops_composer_legacy.h" @@ -60,7 +61,7 @@ struct jit_uni_eltwise_kernel { ker_(const_args, indexes); } - explicit jit_uni_eltwise_kernel(const jit_eltwise_params& jep) : ker_(nullptr), jep_(jep) {} + explicit jit_uni_eltwise_kernel(jit_eltwise_params jep) : ker_(nullptr), jep_(std::move(jep)) {} virtual ~jit_uni_eltwise_kernel() {} virtual void create_ker() = 0; @@ -86,12 +87,12 @@ class Eltwise : public Node { using executorPtr = std::shared_ptr; public: - Eltwise(const std::shared_ptr& op, const GraphContext::CPtr context); + Eltwise(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void selectOptimalPrimitiveDescriptor() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override; bool canFuseParent(const NodePtr& parentNode) const; @@ -140,7 +141,7 @@ class Eltwise : public Node { void prepareParams() override; void createPrimitive() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; enum BroadcastingPolicy { PerChannel, diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag.cpp b/src/plugins/intel_cpu/src/nodes/embedding_bag.cpp index 5ac1330d1630bf..7727df6a32e5c4 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag.cpp @@ -24,8 +24,8 @@ EmbeddingBag::EmbeddingBag(const std::shared_ptr& op, size_t defaultIndexIdx) : INDICES_IDX(indicesIdx), PER_SAMPLE_WEIGHTS_IDX(perSampleWeightsIdx), - DEFAULT_INDEX_IDX(defaultIndexIdx) { - _layerName = op->get_friendly_name(); + DEFAULT_INDEX_IDX(defaultIndexIdx), + 
_layerName(op->get_friendly_name()) { std::string logPrefix = std::string("Layer EmbeddingBag with name '") + _layerName + "' "; if (op->get_input_size() < requiredInputNum || op->get_output_size() != 1) OPENVINO_THROW(logPrefix, "has incorrect number of input or output edges!"); diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp index 202af505d4784b..00be05c6cb43b8 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp @@ -31,7 +31,7 @@ bool EmbeddingBagOffset::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +EmbeddingBagOffset::EmbeddingBagOffset(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), EmbeddingBag(op, 3lu, 1lu, 4lu, 3lu) { std::string errorMessage; @@ -148,7 +148,7 @@ void EmbeddingBagOffset::getIndices(size_t embIndex, weightsIdx = offsetsData_[embIndex]; } -void EmbeddingBagOffset::executeDynamicImpl(dnnl::stream strm) { +void EmbeddingBagOffset::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -156,7 +156,7 @@ bool EmbeddingBagOffset::isExecutable() const { return !isInputTensorAtPortEmpty(0); } -void EmbeddingBagOffset::execute(dnnl::stream strm) { +void EmbeddingBagOffset::execute(const dnnl::stream& strm) { const auto* srcData = getSrcDataAtPortAs(0); const uint8_t* weightsData = nullptr; if (_withWeights) diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.h b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.h index b8a5891209dd44..c9bef8a9e28eab 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.h +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.h @@ -13,11 +13,11 @@ namespace node { class EmbeddingBagOffset : public Node, public EmbeddingBag { public: - EmbeddingBagOffset(const std::shared_ptr& op, const GraphContext::CPtr 
context); + EmbeddingBagOffset(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool isExecutable() const override; @@ -25,7 +25,7 @@ class EmbeddingBagOffset : public Node, public EmbeddingBag { protected: void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: void initFromInputs() override; diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp index 2e4725f8c28c8d..2f56e2f7b3c3a4 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp @@ -31,7 +31,7 @@ bool EmbeddingBagPacked::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +EmbeddingBagPacked::EmbeddingBagPacked(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), EmbeddingBag(op, 2lu, 1lu, 2lu, 3lu) { std::string errorMessage; @@ -116,7 +116,7 @@ void EmbeddingBagPacked::getIndices(size_t embIndex, weightsIdx = embIndex * _indicesPerBag; } -void EmbeddingBagPacked::executeDynamicImpl(dnnl::stream strm) { +void EmbeddingBagPacked::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -124,7 +124,7 @@ bool EmbeddingBagPacked::isExecutable() const { return !isInputTensorAtPortEmpty(0); } -void EmbeddingBagPacked::execute(dnnl::stream strm) { +void EmbeddingBagPacked::execute(const dnnl::stream& strm) { const auto* srcData = getSrcDataAtPortAs(0); const uint8_t* weightsData = nullptr; if (_withWeights) diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.h b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.h index 
4a3c2288c36748..a979917f2570c5 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.h +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.h @@ -13,11 +13,11 @@ namespace node { class EmbeddingBagPacked : public Node, public EmbeddingBag { public: - EmbeddingBagPacked(const std::shared_ptr& op, const GraphContext::CPtr context); + EmbeddingBagPacked(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool isExecutable() const override; @@ -25,7 +25,7 @@ class EmbeddingBagPacked : public Node, public EmbeddingBag { protected: void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: void initFromInputs() override; diff --git a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp index ad233da95ae51e..4d9ff3af48b163 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp @@ -28,7 +28,7 @@ bool EmbeddingSegmentsSum::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +EmbeddingSegmentsSum::EmbeddingSegmentsSum(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), EmbeddingBag(op, 4lu, 1lu, 5lu, 4lu) { std::string errorMessage; @@ -145,7 +145,7 @@ bool EmbeddingSegmentsSum::needShapeInfer() const { return false; } -void EmbeddingSegmentsSum::executeDynamicImpl(dnnl::stream strm) { +void EmbeddingSegmentsSum::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -153,7 +153,7 @@ bool EmbeddingSegmentsSum::isExecutable() const { return !isInputTensorAtPortEmpty(0); } -void 
EmbeddingSegmentsSum::execute(dnnl::stream strm) { +void EmbeddingSegmentsSum::execute(const dnnl::stream& strm) { const auto* srcData = getSrcDataAtPortAs(0); const uint8_t* weightsData = nullptr; if (_withWeights) diff --git a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.h b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.h index 5a6ec7d38785f8..4e172655ef5472 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.h +++ b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.h @@ -13,11 +13,11 @@ namespace node { class EmbeddingSegmentsSum : public Node, public EmbeddingBag { public: - EmbeddingSegmentsSum(const std::shared_ptr& op, const GraphContext::CPtr context); + EmbeddingSegmentsSum(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool isExecutable() const override; @@ -26,7 +26,7 @@ class EmbeddingSegmentsSum : public Node, public EmbeddingBag { protected: void prepareParams() override; bool needShapeInfer() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: void initFromInputs() override; diff --git a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp index e3df2b83e574e2..d5b893b67bf2b1 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp @@ -4,6 +4,7 @@ #include "jit_eltwise.hpp" +#include #include namespace ov { @@ -107,7 +108,7 @@ bool JitEltwiseExecutor::isSupported(const Algorithm& algorithm, return true; } -JitEltwiseExecutor::JitEltwiseExecutor(const ExecutorContext::CPtr context) : EltwiseExecutor(context) {} 
+JitEltwiseExecutor::JitEltwiseExecutor(ExecutorContext::CPtr context) : EltwiseExecutor(std::move(context)) {} bool JitEltwiseExecutor::init(const EltwiseAttrs& eltwiseAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp index cd5e935b41a2d5..ff818830ba8c5a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp @@ -91,7 +91,7 @@ ACLDeconvTensorInfo getACLDeconvTensorInfo(const DeconvAttrs& deconvAttrs, return ACLDeconvTensorInfo{srcTensorInfo, weiTensorInfo, biasTensorInfo, dstTensorInfo, deconv_info}; } -AclDeconvExecutor::AclDeconvExecutor(const ExecutorContext::CPtr context) : DeconvExecutor(context) {} +AclDeconvExecutor::AclDeconvExecutor(ExecutorContext::CPtr context) : DeconvExecutor(std::move(context)) {} bool AclDeconvExecutor::init(const DeconvAttrs& deconvAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp index 26d387c7659dc5..7a8e431b606227 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp @@ -196,7 +196,7 @@ bool AclEltwiseExecutorBuilder::isSupported(const EltwiseAttrs& eltwiseAttrs, return true; } -AclEltwiseExecutor::AclEltwiseExecutor(const ExecutorContext::CPtr context) : EltwiseExecutor(context) {} +AclEltwiseExecutor::AclEltwiseExecutor(ExecutorContext::CPtr context) : EltwiseExecutor(std::move(context)) {} bool AclEltwiseExecutor::init(const EltwiseAttrs& eltwiseAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp index e4dbb1a3a37940..5878d41609e2f4 100644 --- 
a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp @@ -67,7 +67,7 @@ static void initFCAttrs(const FCAttrs& attrs, ACLFullyConnectedExecutor::ACLFullyConnectedExecutor(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { initFCAttrs(attrs, aclTensorAttrs, aclfcAttrs, memory, fullyConnectedLayerInfo, postOps); packedWeights = acl_fc_executor::prepareWeightMemory(memory, context, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp index 6c1a2f0576e283..fc475eac12d4f4 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.hpp @@ -16,7 +16,7 @@ class ACLFullyConnectedExecutor : public ACLCommonExecutor { ACLFullyConnectedExecutor(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context); + const ExecutorContext::CPtr& context); static bool supports(const FCConfig& config); diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp index 0c3e208381497f..9237b231d8fa5b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.cpp @@ -52,8 +52,8 @@ VectorDims acl_fc_executor::makeDummyOutputDims(const VectorDims& inShape, return outputShape; } -DnnlMemoryDescPtr acl_fc_executor::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc) { +DnnlMemoryDescPtr acl_fc_executor::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc) { const 
auto& weiDesc = srcDesc->getDnnlDesc(); dnnl::memory::dims wgtDims2D = reshapeDownToRank<2>(weiDesc.get_dims()); const auto reorderedWeiDesc = dnnl::memory::desc{wgtDims2D, weiDesc.get_data_type(), dnnl::memory::format_tag::ba}; @@ -62,8 +62,8 @@ DnnlMemoryDescPtr acl_fc_executor::makeTransposedWeightDescriptor(const DnnlMemo return DnnlExtensionUtils::makeDescriptor(transposedWeiDesc); } -ov::optional acl_fc_executor::convertWeightPrecision(MemoryPtr input, - MemoryPtr output, +ov::optional acl_fc_executor::convertWeightPrecision(const MemoryPtr& input, + const MemoryPtr& output, ov::element::Type weightPrecision) { MemoryArgs memoryArgs; memoryArgs[ARG_SRC] = input; @@ -93,9 +93,9 @@ ov::optional acl_fc_executor::convertWeightPrecision(MemoryPtr input, tmpBuff.data())); } -ov::optional acl_fc_executor::reorderDataFallback(MemoryPtr input, - MemoryPtr output, - ExecutorContext::CPtr context) { +ov::optional acl_fc_executor::reorderDataFallback(const MemoryPtr& input, + const MemoryPtr& output, + const ExecutorContext::CPtr& context) { if (output->getDataType() == input->getDataType()) { return {}; } @@ -109,27 +109,27 @@ ov::optional acl_fc_executor::reorderDataFallback(MemoryPtr input, if (reorderWithoutConvert && parse_impl_name(reorderWithoutConvert.get_primitive_desc()->impl()->name()) != ref_any) { - auto convertOutput = convertWeightPrecision(input, output, inPrc); - if (!convertOutput) { + auto convertOutputOpt = convertWeightPrecision(input, output, inPrc); + if (!convertOutputOpt) { return {}; } - input = *convertOutput; + auto convertOutput = *convertOutputOpt; if (reorderWithoutConvert) { dnnl::stream loc_stream(output->getPrimitive().get_engine(), dnnl::stream::flags::in_order); reorderWithoutConvert.execute( loc_stream, - {{DNNL_ARG_FROM, input->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); + {{DNNL_ARG_FROM, convertOutput->getPrimitive()}, {DNNL_ARG_TO, output->getPrimitive()}}); return ov::optional(output); } } return {}; } -MemoryPtr 
acl_fc_executor::reorderData(DnnlMemoryDescPtr srcWeightDesc, - DnnlMemoryDescPtr dstWeightDesc, - MemoryCPtr weightsMem, - ExecutorContext::CPtr context) { +MemoryPtr acl_fc_executor::reorderData(const DnnlMemoryDescPtr& srcWeightDesc, + const DnnlMemoryDescPtr& dstWeightDesc, + const MemoryCPtr& weightsMem, + const ExecutorContext::CPtr& context) { MemoryPtr input = std::make_shared(context->getEngine(), srcWeightDesc, weightsMem->getData()); MemoryPtr output = std::make_shared(context->getEngine(), dstWeightDesc); if (!input->getDesc().isDefined() || !output->getDesc().isDefined()) @@ -203,7 +203,7 @@ MemoryPtr acl_fc_executor::reorderWeights(const MemoryArgs& memory, } MemoryPtr acl_fc_executor::prepareWeightMemory(const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const FCAttrs& attrs, ACLFCAttrs& aclfcAttrs, const PostOps& postOps, @@ -364,4 +364,4 @@ ACLFunction acl_fc_executor::ACLWeightFormatGenerator::configureFunction(const A } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp index 686042f6067433..7573a4af1e80ff 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected_utils.hpp @@ -22,16 +22,20 @@ VectorDims makeDummyInputDims(const Shape& inShape, const Shape& wShape); VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDims& wShape, const size_t out_rank); -DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, const DnnlMemoryDescPtr dstDesc); +DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, const DnnlMemoryDescPtr& dstDesc); -ov::optional convertWeightPrecision(MemoryPtr input, MemoryPtr output, ov::element::Type 
weightPrecision); +ov::optional convertWeightPrecision(const MemoryPtr& input, + const MemoryPtr& output, + ov::element::Type weightPrecision); -ov::optional reorderDataFallback(MemoryPtr input, MemoryPtr output, ExecutorContext::CPtr context); +ov::optional reorderDataFallback(const MemoryPtr& input, + const MemoryPtr& output, + const ExecutorContext::CPtr& context); -MemoryPtr reorderData(DnnlMemoryDescPtr srcWeightDesc, - DnnlMemoryDescPtr dstWeightDesc, - MemoryCPtr weightsMem, - ExecutorContext::CPtr context); +MemoryPtr reorderData(const DnnlMemoryDescPtr& srcWeightDesc, + const DnnlMemoryDescPtr& dstWeightDesc, + const MemoryCPtr& weightsMem, + const ExecutorContext::CPtr& context); MemoryPtr reorderWeights(const MemoryArgs& memory, const ExecutorContext::CPtr context, @@ -40,7 +44,7 @@ MemoryPtr reorderWeights(const MemoryArgs& memory, DnnlMemoryDescPtr dnnlDstDesc); MemoryPtr prepareWeightMemory(const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const FCAttrs& attrs, ACLFCAttrs& aclfcAttrs, const PostOps& postOps, @@ -78,4 +82,4 @@ class ACLWeightFormatGenerator : public ACLCommonExecutor { } // namespace acl_fc_executor } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp index 1604c4fff2f585..63a486985e3a51 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp @@ -100,12 +100,11 @@ arm_compute::Status ACLLowpFullyConnectedExecutor::validateTensorsInfo(const ACL auto& tensor_info_weights = aclMemoryInfos[ACLArgs::ACL_WEI]; tensor_info_weights->set_quantization_info(arm_compute::QuantizationInfo(1.f)); - const auto matMulValid = - 
arm_compute::NEGEMMLowpMatrixMultiplyCore::validate(aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), - aclMemoryInfos[ACLArgs::ACL_WEI].get(), - aclMemoryInfos[ACLArgs::ACL_BIAS].get(), - aclMemoryInfos[ACLArgs::ACL_DST].get(), - gemmInfo); + auto matMulValid = arm_compute::NEGEMMLowpMatrixMultiplyCore::validate(aclMemoryInfos[ACLArgs::ACL_SRC_0].get(), + aclMemoryInfos[ACLArgs::ACL_WEI].get(), + aclMemoryInfos[ACLArgs::ACL_BIAS].get(), + aclMemoryInfos[ACLArgs::ACL_DST].get(), + gemmInfo); return matMulValid; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_mvn.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_mvn.cpp index 290cd3c9dbcce9..7c4223bfcf63fd 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_mvn.cpp @@ -9,7 +9,7 @@ namespace intel_cpu { using namespace arm_compute; -AclMVNExecutor::AclMVNExecutor(const ExecutorContext::CPtr context) : MVNExecutor(context) {} +AclMVNExecutor::AclMVNExecutor(ExecutorContext::CPtr context) : MVNExecutor(std::move(context)) {} bool AclMVNExecutor::init(const MVNAttrs& mvnAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp index 2e4aed30d7b33e..b581d696817eb2 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.cpp @@ -11,7 +11,7 @@ namespace intel_cpu { using namespace arm_compute; -AclPoolingExecutor::AclPoolingExecutor(const ExecutorContext::CPtr context) : PoolingExecutor(context) {} +AclPoolingExecutor::AclPoolingExecutor(ExecutorContext::CPtr context) : PoolingExecutor(std::move(context)) {} bool AclPoolingExecutor::isSupported(const TensorInfo& srcTensorInfo, const TensorInfo& dstTensorInfo, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.cpp 
b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.cpp index 5973027a0376cb..29b08f54409a38 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_reduce.cpp @@ -24,7 +24,7 @@ static arm_compute::ReductionOperation getAclReductionOperationByAlgorithm(Algor } } -AclReduceExecutor::AclReduceExecutor(const ExecutorContext::CPtr context) : ReduceExecutor(context) {} +AclReduceExecutor::AclReduceExecutor(ExecutorContext::CPtr context) : ReduceExecutor(std::move(context)) {} bool AclReduceExecutor::init(const ReduceAttrs& reduceAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.cpp b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.cpp index 10a440a1b4a01e..7281f3db98f4c0 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.cpp @@ -30,7 +30,7 @@ static inline void parallel_step(size_t nDims, const VectorDims& dims, VectorDim void RefTransposeExecutor::referenceExecute(const uint8_t* src_data, uint8_t* dst_data, - jit_permute_config_params jcp, + const jit_permute_config_params& jcp, const int mb) { VectorDims dst_dims = jcp.dst_block_dims; const VectorDims dst_strides = jcp.dst_strides; diff --git a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp index c67906f2443e6c..0513aa473494fc 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/common/ref_transpose.hpp @@ -13,7 +13,7 @@ class RefTransposeExecutor : public TransposeExecutor { using TransposeExecutor::TransposeExecutor; static void referenceExecute(const uint8_t* src_data, uint8_t* dst_data, - jit_permute_config_params jcp, + const jit_permute_config_params& jcp, const int mb); bool init(const 
TransposeParams& transposeParams, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp index 32141d53b10ee5..cf11633e662e07 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp @@ -4,5 +4,7 @@ #include "convert.hpp" -ov::intel_cpu::ConvertExecutor::ConvertExecutor(const ov::intel_cpu::ExecutorContext::CPtr context) - : convertContext(context) {} \ No newline at end of file +#include + +ov::intel_cpu::ConvertExecutor::ConvertExecutor(ov::intel_cpu::ExecutorContext::CPtr context) + : convertContext(std::move(context)) {} diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp index dcb0bdde2ce219..85cd64e26c643c 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp @@ -20,7 +20,7 @@ struct ConvertParams { class ConvertExecutor : public Executor { public: - explicit ConvertExecutor(const ExecutorContext::CPtr context); + explicit ConvertExecutor(ExecutorContext::CPtr context); virtual bool init(const ConvertParams& convertParams, const MemoryDescPtr& srcDesc, const MemoryDescPtr& dstDesc, diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/convert_list.hpp index 9ea47f916d859f..8f91b939816db8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convert_list.hpp @@ -29,7 +29,7 @@ class ConvertExecutorFactory : public ExecutorFactoryLegacy { ConvertExecutorFactory(const ConvertParams& convertParams, const MemoryDescPtr& srcDesc, const MemoryDescPtr& dstDesc, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getConvertExecutorsList()) { if 
(desc.builder->isSupported(convertParams, srcDesc, dstDesc)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp index 11920c0ab35b49..44731d0648d039 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp @@ -4,6 +4,8 @@ #pragma once +#include + #include "common/primitive_hashing_utils.hpp" #include "cpu_memory.h" #include "executor.hpp" @@ -29,7 +31,7 @@ struct DeconvAttrs { class DeconvExecutor { public: - explicit DeconvExecutor(const ExecutorContext::CPtr context) : context(context) {} + explicit DeconvExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} virtual bool init(const DeconvAttrs& deconvAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp index fd114094303808..45e71acd476bb6 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp @@ -28,7 +28,7 @@ class DeconvExecutorFactory : public ExecutorFactoryLegacy { DeconvExecutorFactory(const DeconvAttrs& deconvAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getDeconvExecutorsList()) { if (desc.builder->isSupported(deconvAttrs, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp index 61aca683a37687..4aef57ac484926 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp @@ -145,7 +145,7 @@ static primitive_desc createPrimitiveDesc(const dnnl::engine& engine, 
static DnnlPrimitiveAttrs createPrimitiveAttrs(const ConvAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { const auto& srcDesc = memory.at(ARG_SRC)->getDescPtr(); const auto& weiDesc = memory.at(ARG_WEI)->getDescPtr(); const auto& dstDesc = memory.at(ARG_DST)->getDescPtr(); @@ -165,7 +165,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const ConvAttrs& attrs, DnnlShapeAgnosticDataPtr DnnlConvolutionPrimitive::createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights) { DEBUG_LOG("Creating shape agnostic data"); ConvAttrs convAttrs{attrs.withBias}; @@ -203,8 +203,8 @@ std::shared_ptr DnnlConvolutionPrimitive::create( return primitive; } -DnnlMemoryDescPtr DnnlConvolutionPrimitive::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, +DnnlMemoryDescPtr DnnlConvolutionPrimitive::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc, bool weightsNonTransposed) { return DnnlFCPrimitive::makeTransposedWeightDescriptor(srcDesc, dstDesc, weightsNonTransposed); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp index 79501782922fd5..c342f5106c221d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp @@ -61,15 +61,15 @@ class DnnlConvolutionPrimitive { return m_implType; } - static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, + static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& 
dstDesc, bool weightsNonTransposed); // create shape agnostic data using FC attributes (1x1 Convolution as FC executor) static DnnlShapeAgnosticDataPtr createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights); static std::shared_ptr create(const MemoryArgs& memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp index db5c8bed2e43e1..a4aeac36a4eedb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected.hpp @@ -6,6 +6,7 @@ #include #include +#include #include "cpu_memory.h" #include "memory_desc/cpu_memory_desc_utils.h" @@ -24,7 +25,7 @@ class DefaultInstantiator { std::shared_ptr operator()(const MemoryArgs& memory, const Attrs& attrs, const ExecutorContext::CPtr context, - const std::shared_ptr shapeAgnosticData) { + const std::shared_ptr& shapeAgnosticData) { return ExecutorT::create(memory, attrs, context, shapeAgnosticData); } }; @@ -39,10 +40,10 @@ class DnnlFCExecutor : public Executor { DnnlFCExecutor(const Attrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + ExecutorContext::CPtr context, const bool cacheWeights) : m_attrs(attrs), - m_context(context), + m_context(std::move(context)), m_shapeAgnosticData(Primitive::createShapeAgnosticData(m_attrs, postOps, memory, m_context, cacheWeights)), m_primArgs(m_shapeAgnosticData->primAttrs.dnnlArgs) {} bool update(const MemoryArgs& memory) override { @@ -91,7 +92,7 @@ class DnnlFCExecutor : public Executor { } private: - void updateSrcMemory(const DnnlMemoryDescPtr& memDesc, const PrimitivePtr primitive, const MemoryPtr memory) { + void updateSrcMemory(const DnnlMemoryDescPtr& memDesc, const PrimitivePtr primitive, 
const MemoryPtr& memory) { const auto& primMemDesc = primitive->srcDesc(); if (memDesc->isCompatible(*primMemDesc)) { m_primArgs[DNNL_ARG_SRC] = memory->getPrimitive(); @@ -103,7 +104,7 @@ class DnnlFCExecutor : public Executor { } } - void updateDstMemory(const DnnlMemoryDescPtr& memDesc, const PrimitivePtr primitive, const MemoryPtr memory) { + void updateDstMemory(const DnnlMemoryDescPtr& memDesc, const PrimitivePtr primitive, const MemoryPtr& memory) { const auto& primMemDesc = primitive->dstDesc(); if (memDesc->isCompatible(*primMemDesc)) { m_primArgs[DNNL_ARG_DST] = memory->getPrimitive(); @@ -118,7 +119,7 @@ class DnnlFCExecutor : public Executor { void updateWeightsMemory(DnnlMemoryDescPtr originalMemDesc, const PrimitivePtr currentPrimitive, const PrimitivePtr newPrimitive, - const MemoryPtr memory) { + const MemoryPtr& memory) { const auto newPrimMemDesc = newPrimitive->weightsDesc(); if (currentPrimitive && currentPrimitive->weightsDesc()->isCompatible(*newPrimMemDesc)) return; @@ -130,7 +131,7 @@ class DnnlFCExecutor : public Executor { m_primArgs[DNNL_ARG_WEIGHTS] = weiMemory->getPrimitive(); } - void updateBiasMemory(const MemoryPtr memory) { + void updateBiasMemory(const MemoryPtr& memory) { m_primArgs[DNNL_ARG_BIAS] = memory->getPrimitive(); } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index 8ae2d2784193af..8c1894f43552f1 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -109,8 +109,8 @@ std::shared_ptr DnnlFCPrimitive::create(const MemoryArgs& memor return primitive; } -DnnlMemoryDescPtr DnnlFCPrimitive::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, +DnnlMemoryDescPtr DnnlFCPrimitive::makeTransposedWeightDescriptor(const 
DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc, bool weightsNonTransposed) { if (!weightsNonTransposed) return srcDesc; @@ -143,8 +143,8 @@ bool DnnlFCPrimitive::useWeightsDecompressionImpl(const ov::element::Type inputT } static bool useDynamicQuantizationImpl(size_t dqGroupSize, - const MemoryDescPtr srcDesc, - const MemoryDescPtr weightsDesc, + const MemoryDescPtr& srcDesc, + const MemoryDescPtr& weightsDesc, const MemoryArgs& memory, bool needTranspose) { if (dqGroupSize == 0) @@ -207,7 +207,7 @@ static bool useDynamicQuantizationImpl(size_t dqGroupSize, static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, bool useDynamicQuantization) { const auto& srcDesc = memory.at(ARG_SRC)->getDescPtr(); const auto& weiDesc = memory.at(ARG_WEI)->getDescPtr(); @@ -376,7 +376,7 @@ static VectorDims makeDummyOutputDims(const VectorDims& inShape, const VectorDim DnnlShapeAgnosticDataPtr DnnlFCPrimitive::createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights) { DEBUG_LOG("Creating shape agnostic data"); auto srcDesc = memory.at(ARG_SRC)->getDescPtr(); @@ -439,7 +439,7 @@ DnnlShapeAgnosticDataPtr DnnlFCPrimitive::createShapeAgnosticData(const FCAttrs& return std::make_shared(postOpData); } -static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc primDesc) { +static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc& primDesc) { const auto implType = parse_impl_name(primDesc.impl_info_str()); if (implType == ov::intel_cpu::brgemm_avx512_amx && primDesc.weights_desc().get_format_kind() == memory::format_kind::sparsed) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp 
b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp index 21247f149ca69f..9afcfac56b14e9 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp @@ -58,15 +58,15 @@ class DnnlFCPrimitive { static DnnlShapeAgnosticDataPtr createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights); static bool useWeightsDecompressionImpl(const ov::element::Type inputType, const ov::element::Type weightsType, const Config::ModelType modelType); - static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, + static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc, bool weightsNonTransposed); static std::shared_ptr create(const MemoryArgs& memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp index 9ffe4731689d43..1b51487fb4cebf 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp @@ -117,8 +117,8 @@ std::shared_ptr DnnlMatMulPrimitive::create(const MemoryArg return primitive; } -DnnlMemoryDescPtr DnnlMatMulPrimitive::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, +DnnlMemoryDescPtr DnnlMatMulPrimitive::makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc, bool weightsNonTransposed) { const auto& weiDesc = srcDesc->getDnnlDesc(); auto wDims = weiDesc.get_dims(); @@ -136,7 +136,7 @@ DnnlMemoryDescPtr 
DnnlMatMulPrimitive::makeTransposedWeightDescriptor(const Dnnl static DnnlPrimitiveAttrs createPrimitiveAttrs(const MatMulAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, bool useWeightsDecompression, bool weightsNonTransposed) { const auto& srcDesc = memory.at(ARG_SRC)->getDescPtr(); @@ -288,7 +288,7 @@ bool DnnlMatMulPrimitive::useWeightsDecompressionImpl(const ov::element::Type in DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights) { DEBUG_LOG("Creating shape agnostic data"); auto srcDesc = memory.at(ARG_SRC)->getDescPtr(); @@ -339,7 +339,7 @@ DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAt return std::make_shared(postOpData); } -static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc primDesc) { +static impl_desc_type implTypeFromPrimDesc(const dnnl::primitive_desc& primDesc) { const auto implType = parse_impl_name(primDesc.impl_info_str()); if (implType == ov::intel_cpu::brgemm_avx512_amx && primDesc.weights_desc().get_format_kind() == memory::format_kind::sparsed) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp index 5491b62a154687..38ceb9922eff70 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp @@ -58,11 +58,11 @@ class DnnlMatMulPrimitive { static DnnlShapeAgnosticDataPtr createShapeAgnosticData(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + const ExecutorContext::CPtr& context, const bool cacheWeights); - static DnnlMemoryDescPtr 
makeTransposedWeightDescriptor(const DnnlMemoryDescPtr srcDesc, - const DnnlMemoryDescPtr dstDesc, + static DnnlMemoryDescPtr makeTransposedWeightDescriptor(const DnnlMemoryDescPtr& srcDesc, + const DnnlMemoryDescPtr& dstDesc, bool weightsNonTransposed); static std::shared_ptr create(const MemoryArgs& memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp index 8e7c470984b4f2..31ffd979662f8c 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp @@ -4,10 +4,12 @@ #include "eltwise.hpp" +#include + namespace ov { namespace intel_cpu { -EltwiseExecutor::EltwiseExecutor(const ExecutorContext::CPtr context) : context(context) {} +EltwiseExecutor::EltwiseExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp index b33c0eca10dae7..95ff85bb8bf851 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp @@ -4,6 +4,8 @@ #pragma once +#include + #include "cpu_memory.h" #include "executor.hpp" #include "onednn/iml_type_mapper.h" @@ -49,15 +51,9 @@ enum class EltwisePostOpType { Undefined, Eltwise, Dnnl }; class EltwisePostOp { public: - EltwisePostOp(EltwiseAttrs eltwise) { - type = EltwisePostOpType::Eltwise; - this->eltwise = eltwise; - } + EltwisePostOp(EltwiseAttrs eltwise) : eltwise(eltwise), type(EltwisePostOpType::Eltwise) {} - EltwisePostOp(dnnl::post_ops dnnlPostOps) { - type = EltwisePostOpType::Dnnl; - this->dnnlPostOps = dnnlPostOps; - } + EltwisePostOp(dnnl::post_ops dnnlPostOps) : dnnlPostOps(std::move(dnnlPostOps)), type(EltwisePostOpType::Dnnl) {} ~EltwisePostOp() = default; @@ -87,7 +83,7 @@ class EltwisePostOp { 
class EltwiseExecutor { public: - EltwiseExecutor(const ExecutorContext::CPtr context); + EltwiseExecutor(ExecutorContext::CPtr context); virtual bool init(const EltwiseAttrs& eltwiseAttrs, const std::vector& srcDescs, const std::vector& dstDescs, @@ -121,4 +117,4 @@ using EltwiseExecutorBuilderPtr = std::shared_ptr; using EltwiseExecutorBuilderCPtr = std::shared_ptr; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp index ac5c27c0ad36dc..f970d79c3ed1b2 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp @@ -32,7 +32,7 @@ class EltwiseExecutorFactory : public ExecutorFactoryLegacy { EltwiseExecutorFactory(const EltwiseAttrs& eltwiseAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getEltwiseExecutorsList()) { if (desc.builder->isSupported(eltwiseAttrs, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp index 16a419c95d5efc..1d0e4c877ff8e5 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include "cache/multi_cache.h" #include "cpu_memory.h" @@ -86,14 +87,14 @@ class ExecutorContext { using Ptr = std::shared_ptr; using CPtr = std::shared_ptr; - ExecutorContext(const GraphContext::CPtr graphContext, - const std::vector& implPriorities, + ExecutorContext(const GraphContext::CPtr& graphContext, + std::vector implPriorities, std::shared_ptr> privateWeighCache = nullptr) : runtimeCache(graphContext->getParamsCache()), 
scratchPads(graphContext->getScratchPads()), weightsCache(graphContext->getWeightsCache()), engine(graphContext->getEngine()), - implPriorities(implPriorities), + implPriorities(std::move(implPriorities)), privateWeighCache(std::move(privateWeighCache)), numNumaNodes(graphContext->getNumNumaNodes()) { auto cpuStreamsExecutor = graphContext->getCPUStreamExecutor(); @@ -142,7 +143,7 @@ class ExecutorContext { class ExecutorFactoryLegacy { public: - ExecutorFactoryLegacy(const ExecutorContext::CPtr context) : context(context) {} + ExecutorFactoryLegacy(ExecutorContext::CPtr context) : context(std::move(context)) {} virtual ~ExecutorFactoryLegacy() = default; const ExecutorContext::CPtr context; diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor_factory.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor_factory.hpp index 3130bc2d646ff3..0b44a870ddb692 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor_factory.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor_factory.hpp @@ -6,6 +6,7 @@ #include #include +#include #include "executor.hpp" #include "nodes/executors/executor_config.hpp" @@ -27,12 +28,12 @@ class ExecutorFactory { ExecutorFactory(const Attrs& attrs, const PostOps& postOps, - const ExecutorContext::CPtr context, + ExecutorContext::CPtr context, const MemoryDescArgs& descriptors, const std::string& implementationPriority = {}) : m_attrs(attrs), m_postOps(postOps), - m_context(context), + m_context(std::move(context)), m_suitableImplementations(filter(m_attrs, m_postOps, descriptors, implementationPriority)) {} /** diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp index 07a58b0fa6cfa7..375016038f2b68 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp @@ -25,7 +25,7 @@ class ExecutorImplementation { using 
CreateFunction = std::function; + const ExecutorContext::CPtr& context)>; ExecutorImplementation(const char* name, const ExecutorType type, diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index f2cf5a7c9102b7..792aacf54a118a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -254,7 +254,7 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { return std::make_shared(attrs, postOps, memory, context); }) OV_CPU_INSTANCE_X64( @@ -322,13 +322,13 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - ExecutorContext::CPtr context) -> std::shared_ptr { + const ExecutorContext::CPtr& context) -> std::shared_ptr { struct ConvolutionInstantiator { std::shared_ptr operator()( const MemoryArgs& memory, const FCAttrs& attrs, - const ExecutorContext::CPtr context, - std::shared_ptr shareAgnosticData) const { + const ExecutorContext::CPtr& context, + const std::shared_ptr& shareAgnosticData) const { ConvAttrs convAttrs{attrs.withBias}; auto primitive = DefaultInstantiator{}( @@ -380,7 +380,7 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { return std::make_shared(attrs, postOps, memory, context); }) OV_CPU_INSTANCE_ACL( @@ -412,7 +412,7 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { return std::make_shared(attrs, 
postOps, memory, context); }) OV_CPU_INSTANCE_SHL( @@ -441,7 +441,7 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { return std::make_shared(attrs, postOps, memory, context); } ) @@ -475,13 +475,13 @@ const std::vector>& getImplementations() { [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - ExecutorContext::CPtr context) -> std::shared_ptr { + const ExecutorContext::CPtr& context) -> std::shared_ptr { struct MatMulInstantiator { std::shared_ptr operator()( const MemoryArgs& memory, const FCAttrs& attrs, - const ExecutorContext::CPtr context, - std::shared_ptr shareAgnosticData) const { + const ExecutorContext::CPtr& context, + const std::shared_ptr& shareAgnosticData) const { MatMulAttrs matMulAttrs{false, false}; auto primitive = @@ -523,7 +523,10 @@ const std::vector>& getImplementations() { return true; }, // create - [](const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, ExecutorContext::CPtr context) { + [](const FCAttrs& attrs, + const PostOps& postOps, + const MemoryArgs& memory, + const ExecutorContext::CPtr& context) { return std::make_shared>(attrs, postOps, memory, diff --git a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp index 347ac4c981f4f1..2f1ca6600bbd14 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include #include "graph.h" #include "node.h" @@ -26,13 +27,13 @@ class GraphEmitter { const Attrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context, + ExecutorContext::CPtr context, const std::string& name, ensureAttrsStrategy ensureAttrs = {}) : descs(descs), attrs(attrs), postOps(postOps), - 
context(context), + context(std::move(context)), name(name), ensureAttrs(std::move(ensureAttrs)) { OPENVINO_THROW("Graph emitter is not implemented yet!"); @@ -41,7 +42,7 @@ class GraphEmitter { GraphEmitter& createGraph(const MemoryDescArgs& descs, const Attrs& attrs, const PostOps& postOps, - const ExecutorContext::CPtr context) { + const ExecutorContext::CPtr& context) { OPENVINO_THROW("Not implemented yet!"); return *this; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/interpolate.hpp b/src/plugins/intel_cpu/src/nodes/executors/interpolate.hpp index 152f8aa9c42dc8..b8b89fdf717996 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/interpolate.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/interpolate.hpp @@ -4,6 +4,8 @@ #pragma once +#include + #include "node.h" #define MAX_INPUT_INTERPOLATE 8 @@ -102,7 +104,7 @@ class InterpolateExecutor { static constexpr size_t SCALES_ID = 2; static constexpr size_t AXES_ID = 3; static constexpr int CUBIC_GRID_LEN = 4; - InterpolateExecutor(const ExecutorContext::CPtr context) : _context(context) {} + InterpolateExecutor(ExecutorContext::CPtr context) : _context(std::move(context)) {} virtual bool init(const InterpolateAttrs& interpolateAttrs, const std::vector& srcDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.hpp index a0c1fc240731fb..360dae8e16c432 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.hpp @@ -28,7 +28,7 @@ class InterpolateExecutorFactory : public ExecutorFactoryLegacy { InterpolateExecutorFactory(const InterpolateAttrs& InterpolateAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getInterpolateExecutorsList()) { if (desc.builder->isSupported(InterpolateAttrs, 
srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.cpp b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.cpp index 7e50c8086789a0..03daeba24c65d8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.cpp @@ -99,7 +99,7 @@ bool MlasGemmExecutor::supports(const FCConfig& config) { MlasGemmExecutor::MlasGemmExecutor(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : m_attrs(attrs), m_memoryArgs(memory), packedWeights(prepareWeightMemory(memory.at(ARG_WEI), context, !attrs.weightsNonTransposed)), diff --git a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp index 4fe48b66c7c1ff..42be857ba9dead 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp @@ -18,7 +18,7 @@ class MlasGemmExecutor : public Executor { MlasGemmExecutor(const FCAttrs& attrs, const PostOps& postOps, const MemoryArgs& memory, - const ExecutorContext::CPtr context); + const ExecutorContext::CPtr& context); void execute(const MemoryArgs& memory) override; diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp index eec9d2a8947975..2c66b9ce56af14 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp @@ -4,10 +4,12 @@ #include "mvn.hpp" +#include + namespace ov { namespace intel_cpu { -MVNExecutor::MVNExecutor(const ExecutorContext::CPtr context) : context(context) {} +MVNExecutor::MVNExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} VectorDims MVNExecutor::transformTo5DCase(const VectorDims& shape, bool initAcrossChannels) { switch (shape.size()) { @@ -41,4 +43,4 @@ 
VectorDims MVNExecutor::transformTo5DCase(const VectorDims& shape, bool initAcro } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp index da51b5d1ef67e9..2b08dc2a320b5d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp @@ -29,7 +29,7 @@ struct MVNAttrs { class MVNExecutor { public: - MVNExecutor(const ExecutorContext::CPtr context); + MVNExecutor(ExecutorContext::CPtr context); virtual bool init(const MVNAttrs& mvnAttrs, const std::vector& srcDescs, const std::vector& dstDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp index 82f8e868ac2d81..59d0447965a803 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp @@ -28,7 +28,7 @@ class MVNExecutorFactory : public ExecutorFactoryLegacy { MVNExecutorFactory(const MVNAttrs& mvnAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getMVNExecutorsList()) { if (desc.builder->isSupported(mvnAttrs, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling.cpp b/src/plugins/intel_cpu/src/nodes/executors/pooling.cpp index 95448640e3b125..cd45dbc375d8e9 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling.cpp @@ -4,10 +4,12 @@ #include "pooling.hpp" +#include + namespace ov { namespace intel_cpu { -PoolingExecutor::PoolingExecutor(const ExecutorContext::CPtr context) : context(context) {} +PoolingExecutor::PoolingExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} } // namespace intel_cpu -} // 
namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp index e826d3a37250db..325ae17f161c93 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp @@ -40,7 +40,7 @@ struct PoolingAttrs { class PoolingExecutor { public: - PoolingExecutor(const ExecutorContext::CPtr context); + PoolingExecutor(ExecutorContext::CPtr context); virtual bool init(const PoolingAttrs& poolingAttrs, const std::vector& srcDescs, const std::vector& dstDescs, @@ -74,4 +74,4 @@ using PoolingExecutorBuilderPtr = std::shared_ptr; using PoolingExecutorBuilderCPtr = std::shared_ptr; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp index 1c051ae7d2959d..1cf34912e2293f 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp @@ -25,7 +25,7 @@ class PoolingExecutorFactory : public ExecutorFactoryLegacy { PoolingExecutorFactory(const PoolingAttrs& poolingAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getPoolingExecutorsList()) { if (desc.builder->isSupported(poolingAttrs, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/reduce.cpp b/src/plugins/intel_cpu/src/nodes/executors/reduce.cpp index 6039813d8fdd28..54b70fb46d03cd 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/reduce.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/reduce.cpp @@ -4,10 +4,12 @@ #include "reduce.hpp" +#include + namespace ov { namespace intel_cpu { -ReduceExecutor::ReduceExecutor(const 
ExecutorContext::CPtr context) : context(context) {} +ReduceExecutor::ReduceExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/reduce.hpp b/src/plugins/intel_cpu/src/nodes/executors/reduce.hpp index 21b730a197df3a..52b4b1921ec0c1 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/reduce.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/reduce.hpp @@ -20,7 +20,7 @@ struct ReduceAttrs { class ReduceExecutor { public: - ReduceExecutor(const ExecutorContext::CPtr context); + ReduceExecutor(ExecutorContext::CPtr context); virtual bool init(const ReduceAttrs& reduceAttrs, const std::vector& srcDescs, const std::vector& dstDescs, @@ -54,4 +54,4 @@ using ReduceExecutorBuilderPtr = std::shared_ptr; using ReduceExecutorBuilderCPtr = std::shared_ptr; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/reduce_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/reduce_list.hpp index faffdebc947c02..8246d497e684ee 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/reduce_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/reduce_list.hpp @@ -28,7 +28,7 @@ class ReduceExecutorFactory : public ExecutorFactoryLegacy { ReduceExecutorFactory(const ReduceAttrs& reduceAttrs, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getReduceExecutorsList()) { if (desc.builder->isSupported(reduceAttrs, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/executors/subgraph.cpp index 002561158041c4..14b929633db8c6 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/subgraph.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/executors/subgraph.cpp @@ -4,6 +4,8 @@ #include "nodes/executors/subgraph.hpp" +#include + #include "common/primitive_hashing_utils.hpp" #include "openvino/core/parallel.hpp" @@ -59,13 +61,13 @@ SubgraphCodeGenerator::SubgraphCodeGenerator(const std::shared_ptr& snippet_config, const std::shared_ptr& snippet_attrs, const std::shared_ptr& snippet, - const std::vector& start_offset_in, - const std::vector& start_offset_out, + std::vector start_offset_in, + std::vector start_offset_out, const BufferScratchpadAllocator& allocator, const ov::intel_cpu::MultiCacheWeakPtr& kernel_cache) : m_schedule(snippet->get()), - m_start_offset_in(start_offset_in), - m_start_offset_out(start_offset_out) { + m_start_offset_in(std::move(start_offset_in)), + m_start_offset_out(std::move(start_offset_out)) { OPENVINO_ASSERT(m_schedule, "Schedule is empty!"); OPENVINO_ASSERT(snippet_config, "Runtime Config is empty!"); init_parallel_domain(snippet_config, m_parallel_exec_domain); diff --git a/src/plugins/intel_cpu/src/nodes/executors/subgraph.hpp b/src/plugins/intel_cpu/src/nodes/executors/subgraph.hpp index 6af828099a3f9b..f731a507e61c2e 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/subgraph.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/subgraph.hpp @@ -46,8 +46,8 @@ class SubgraphBaseExecutor { SubgraphBaseExecutor(const std::shared_ptr& snippet_config, const std::shared_ptr& snippet_attrs, const std::shared_ptr& snippet, - const std::vector& start_offset_in, - const std::vector& start_offset_out, + std::vector start_offset_in, + std::vector start_offset_out, const BufferScratchpadAllocator& allocator, const ov::intel_cpu::MultiCacheWeakPtr& kernel_cache); virtual ~SubgraphBaseExecutor() = default; @@ -124,10 +124,10 @@ class SubgraphStaticBaseExecutor { // Specialized dynamic executor based on shape agnostic kernel for the specific input shapes class SubgraphDynamicSpecializedBaseExecutor { public: - 
SubgraphDynamicSpecializedBaseExecutor(const std::shared_ptr& snippet_config) { - m_buffer_offsets = snippet_config->buffer_cluster_offsets; - m_data_offsets = snippet_config->io_data_offsets; - m_loop_args = snippet_config->loop_args; + SubgraphDynamicSpecializedBaseExecutor(const std::shared_ptr& snippet_config) + : m_buffer_offsets(snippet_config->buffer_cluster_offsets), + m_data_offsets(snippet_config->io_data_offsets), + m_loop_args(snippet_config->loop_args) { m_reset_exec_table_state = snippet_config->kernel_executor_table->get_state_reset(); } virtual ~SubgraphDynamicSpecializedBaseExecutor() = default; diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp index b63e32e39ebf8d..ddf4cf20034d92 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp @@ -4,6 +4,7 @@ #include "transpose.hpp" +#include #include #include "openvino/core/parallel.hpp" @@ -11,7 +12,7 @@ namespace ov { namespace intel_cpu { -TransposeExecutor::TransposeExecutor(const ExecutorContext::CPtr context) : context(context) {} +TransposeExecutor::TransposeExecutor(ExecutorContext::CPtr context) : context(std::move(context)) {} jit_permute_config_params TransposeExecutor::prepareParams(const PermuteParams& params) { jit_permute_config_params jcp = {}; @@ -131,4 +132,4 @@ jit_permute_config_params TransposeExecutor::prepareParams(const PermuteParams& } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp index 99e0b0a2742a78..6e569e62b65a19 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp @@ -19,7 +19,7 @@ struct TransposeParams { class TransposeExecutor : public Executor { public: static 
jit_permute_config_params prepareParams(const PermuteParams& params); - explicit TransposeExecutor(const ExecutorContext::CPtr context); + explicit TransposeExecutor(ExecutorContext::CPtr context); virtual bool init(const TransposeParams& transposeParams, const std::vector& srcDescs, const std::vector& dstDescs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/transpose_list.hpp index c81769fd1d0539..305c8c4662122c 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose_list.hpp @@ -32,7 +32,7 @@ class TransposeExecutorFactory : public ExecutorFactoryLegacy { TransposeExecutorFactory(const TransposeParams& transposeParams, const std::vector& srcDescs, const std::vector& dstDescs, - const ExecutorContext::CPtr context) + const ExecutorContext::CPtr& context) : ExecutorFactoryLegacy(context) { for (auto& desc : getTransposeExecutorsList()) { if (desc.builder->isSupported(transposeParams, srcDescs, dstDescs)) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp index 3688bc6cc60873..8b777c782aeba8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/variable_executor.hpp @@ -4,6 +4,8 @@ #pragma once +#include + #include "executor.hpp" #include "executor_config.hpp" #include "executor_implementation.hpp" @@ -25,11 +27,11 @@ class VariableExecutor : public Executor { VariableExecutor(const MemoryArgs& memory, const Attrs& attrs, const PostOps& postOps, - const ExecutorContext::CPtr context, + ExecutorContext::CPtr context, std::vector suitableImplementations) : m_attrs(attrs), m_postOps(postOps), - m_context(context), + m_context(std::move(context)), m_suitableImplementations(std::move(suitableImplementations)), m_implementationRequiresFallback( 
cacheFallbackStatus(m_suitableImplementations, diff --git a/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp index 8b3466c9c22c5a..0bb6bfaf6d1e26 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp @@ -76,10 +76,9 @@ SubgraphExecutor::SubgraphExecutor(const std::shared_ptr& snip start_offset_in, start_offset_out, allocator, - kernel_cache) { - m_repacking_impl_type = snippet_config->repacking_impl_type; - m_repacked_inputs = snippet_config->repacked_inputs; - + kernel_cache), + m_repacked_inputs(snippet_config->repacked_inputs), + m_repacking_impl_type(snippet_config->repacking_impl_type) { auto external_buffer_size = std::accumulate(m_repacked_inputs.begin(), m_repacked_inputs.end(), diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.cpp index 072ba42284eff9..7e46da40ddeded 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.cpp @@ -5,6 +5,7 @@ #include "openvino/op/experimental_detectron_detection_output.hpp" #include +#include #include #include "experimental_detectron_detection_output.h" @@ -15,13 +16,12 @@ namespace intel_cpu { namespace node { struct Indexer { - const std::vector dims_; + std::vector dims_; int total_{1}; - explicit Indexer(const std::vector& dims) : dims_(dims) { - total_ = 1; - for (size_t i = 0; i < dims_.size(); ++i) { - total_ *= dims_[i]; + explicit Indexer(std::vector dims) : dims_(std::move(dims)), total_(1) { + for (const auto dim : dims_) { + total_ *= dim; } } @@ -246,7 +246,7 @@ bool ExperimentalDetectronDetectionOutput::isSupportedOperation(const std::share } ExperimentalDetectronDetectionOutput::ExperimentalDetectronDetectionOutput(const std::shared_ptr& 
op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -281,7 +281,7 @@ void ExperimentalDetectronDetectionOutput::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void ExperimentalDetectronDetectionOutput::execute(dnnl::stream strm) { +void ExperimentalDetectronDetectionOutput::execute(const dnnl::stream& strm) { const int rois_num = getParentEdgeAt(INPUT_ROIS)->getMemory().getStaticDims()[0]; assert(classes_num_ == static_cast(getParentEdgeAt(INPUT_SCORES)->getMemory().getStaticDims()[1])); assert(4 * classes_num_ == static_cast(getParentEdgeAt(INPUT_DELTAS)->getMemory().getStaticDims()[1])); diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.h index 1eebcc308ca420..3b6605f73dc94b 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_detection_output.h @@ -12,16 +12,16 @@ namespace node { class ExperimentalDetectronDetectionOutput : public Node { public: - ExperimentalDetectronDetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr context); + ExperimentalDetectronDetectionOutput(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git 
a/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.cpp index f0e8bb1b847fc3..7f613b39ac3e9f 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.cpp @@ -304,7 +304,7 @@ bool ExperimentalDetectronGenerateProposalsSingleImage::isSupportedOperation(con ExperimentalDetectronGenerateProposalsSingleImage::ExperimentalDetectronGenerateProposalsSingleImage( const std::shared_ptr& op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -336,7 +336,7 @@ void ExperimentalDetectronGenerateProposalsSingleImage::initSupportedPrimitiveDe impl_desc_type::ref_any); } -void ExperimentalDetectronGenerateProposalsSingleImage::execute(dnnl::stream strm) { +void ExperimentalDetectronGenerateProposalsSingleImage::execute(const dnnl::stream& strm) { try { if (inputShapes.size() != 4 || outputShapes.size() != 2) { OPENVINO_THROW("Incorrect number of input or output edges!"); diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.h index 4b6a809c156958..66c8ad489ee915 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_generate_proposals_single_image.h @@ -13,16 +13,16 @@ namespace node { class ExperimentalDetectronGenerateProposalsSingleImage : public Node { public: ExperimentalDetectronGenerateProposalsSingleImage(const std::shared_ptr& op, - const GraphContext::CPtr context); + const GraphContext::CPtr& context); void 
getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp index 873750b5a13d4a..298f369930238f 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.cpp @@ -28,7 +28,7 @@ bool ExperimentalDetectronPriorGridGenerator::isSupportedOperation(const std::sh } ExperimentalDetectronPriorGridGenerator::ExperimentalDetectronPriorGridGenerator(const std::shared_ptr& op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -57,7 +57,7 @@ void ExperimentalDetectronPriorGridGenerator::initSupportedPrimitiveDescriptors( impl_desc_type::ref_any); } -void ExperimentalDetectronPriorGridGenerator::execute(dnnl::stream strm) { +void ExperimentalDetectronPriorGridGenerator::execute(const dnnl::stream& strm) { const int num_priors_ = getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[0]; assert(getParentEdgeAt(INPUT_PRIORS)->getMemory().getStaticDims()[1] == 4); diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h index 37b0d81300c483..c8c1343ef05219 100644 --- 
a/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_priorgridgenerator.h @@ -12,15 +12,15 @@ namespace node { class ExperimentalDetectronPriorGridGenerator : public Node { public: - ExperimentalDetectronPriorGridGenerator(const std::shared_ptr& op, const GraphContext::CPtr context); + ExperimentalDetectronPriorGridGenerator(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.cpp index 7a5b8eae3bd688..a22ecad308c52f 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.cpp @@ -5,6 +5,7 @@ #include "experimental_detectron_roifeatureextractor.h" #include +#include #include #include #include @@ -165,9 +166,11 @@ void ROIAlignForward_cpu_kernel(const int nthreads, T bin_size_w = static_cast(roi_width) / static_cast(pooled_width); // We use roi_bin_grid to sample the grid and mimic integral - int roi_bin_grid_h = - (sampling_ratio > 0) ? sampling_ratio : static_cast(ceil(roi_height / pooled_height)); // e.g., = 2 - int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : static_cast(ceil(roi_width / pooled_width)); + int roi_bin_grid_h = (sampling_ratio > 0) + ? 
sampling_ratio + : static_cast(std::ceil(roi_height / pooled_height)); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : static_cast(std::ceil(roi_width / pooled_width)); // We do average (integral) pooling inside a bin const T count = static_cast(roi_bin_grid_h * roi_bin_grid_w); // e.g. = 4 @@ -283,7 +286,7 @@ bool ExperimentalDetectronROIFeatureExtractor::isSupportedOperation(const std::s } ExperimentalDetectronROIFeatureExtractor::ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr& op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -314,7 +317,7 @@ void ExperimentalDetectronROIFeatureExtractor::initSupportedPrimitiveDescriptors impl_desc_type::ref_any); } -void ExperimentalDetectronROIFeatureExtractor::execute(dnnl::stream strm) { +void ExperimentalDetectronROIFeatureExtractor::execute(const dnnl::stream& strm) { const int levels_num = inputShapes.size() - INPUT_FEATURES_START; const int num_rois = getParentEdgeAt(INPUT_ROIS)->getMemory().getStaticDims()[0]; const int channels_num = getParentEdgeAt(INPUT_FEATURES_START)->getMemory().getStaticDims()[1]; diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.h index 78a82182fef4e3..e8b7c84bd7d87b 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_roifeatureextractor.h @@ -12,17 +12,17 @@ namespace node { class ExperimentalDetectronROIFeatureExtractor : public Node { public: - ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr& op, const GraphContext::CPtr context); + ExperimentalDetectronROIFeatureExtractor(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() 
override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); }; diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp index 096e5bebf2f5fb..0fdad9f3c050fc 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.cpp @@ -31,7 +31,7 @@ bool ExperimentalDetectronTopKROIs::isSupportedOperation(const std::shared_ptr& op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -62,7 +62,7 @@ void ExperimentalDetectronTopKROIs::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void ExperimentalDetectronTopKROIs::execute(dnnl::stream strm) { +void ExperimentalDetectronTopKROIs::execute(const dnnl::stream& strm) { const int input_rois_num = getParentEdgeAt(INPUT_ROIS)->getMemory().getStaticDims()[0]; const int top_rois_num = (std::min)(max_rois_num_, input_rois_num); diff --git a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h index 5e328d224d458b..029fa579f78e87 100644 --- a/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h +++ b/src/plugins/intel_cpu/src/nodes/experimental_detectron_topkrois.h @@ -12,11 +12,11 @@ namespace node { class ExperimentalDetectronTopKROIs : public Node { public: - ExperimentalDetectronTopKROIs(const std::shared_ptr& op, const GraphContext::CPtr context); + 
ExperimentalDetectronTopKROIs(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override { @@ -25,7 +25,7 @@ class ExperimentalDetectronTopKROIs : public Node { bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); }; diff --git a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp index 534f5a518ad11e..7fbe1c3449bfd0 100644 --- a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp @@ -357,7 +357,7 @@ bool ExtractImagePatchesKey::operator==(const ExtractImagePatchesKey& rhs) const } } // namespace -ExtractImagePatches::ExtractImagePatches(const std::shared_ptr& op, const GraphContext::CPtr context) +ExtractImagePatches::ExtractImagePatches(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -447,7 +447,7 @@ void ExtractImagePatches::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc({{LayoutType::ncsp, precision}}, {{LayoutType::ncsp, precision}}, impl_desc_type::ref_any); } -void ExtractImagePatches::execute(dnnl::stream strm) { +void ExtractImagePatches::execute(const dnnl::stream& strm) { if (execPtr) { auto src = getSrcDataAtPort(0); auto dst = getDstDataAtPort(0); @@ -459,7 +459,7 @@ void ExtractImagePatches::execute(dnnl::stream strm) { } } -void ExtractImagePatches::executeDynamicImpl(dnnl::stream strm) { +void ExtractImagePatches::executeDynamicImpl(const dnnl::stream& strm) { 
execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/extract_image_patches.h b/src/plugins/intel_cpu/src/nodes/extract_image_patches.h index f4f3ecc50901aa..49c788086afc41 100644 --- a/src/plugins/intel_cpu/src/nodes/extract_image_patches.h +++ b/src/plugins/intel_cpu/src/nodes/extract_image_patches.h @@ -43,14 +43,14 @@ struct jit_uni_extract_image_patches_kernel { class ExtractImagePatches : public Node { public: - ExtractImagePatches(const std::shared_ptr& op, const GraphContext::CPtr context); + ExtractImagePatches(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index 57fbdd66d0308f..873d07673c8990 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -4,6 +4,7 @@ #include "eye.h" +#include #include #include "openvino/core/parallel.hpp" @@ -33,7 +34,7 @@ bool Eye::isSupportedOperation(const std::shared_ptr& op, std::s namespace { class EyeShapeInferFactory : public ShapeInferFactory { public: - EyeShapeInferFactory(std::shared_ptr op) : m_op(op) {} + EyeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override { return (m_op->get_input_size() == 4) ? 
make_shape_inference(m_op) : make_shape_inference(m_op, PortMask(Eye::ROWS_NUM, Eye::COLS_NUM)); @@ -44,7 +45,7 @@ class EyeShapeInferFactory : public ShapeInferFactory { }; } // namespace -Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr context) +Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, EyeShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -71,7 +72,7 @@ struct Eye::EyeExecute { } }; -void Eye::execute(dnnl::stream strm) { +void Eye::execute(const dnnl::stream& strm) { auto outputPrec = getChildEdgeAt(0)->getMemory().getDesc().getPrecision(); OV_SWITCH(intel_cpu, EyeExecute, diff --git a/src/plugins/intel_cpu/src/nodes/eye.h b/src/plugins/intel_cpu/src/nodes/eye.h index 52f78e7cc33711..7bc20113a3af14 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.h +++ b/src/plugins/intel_cpu/src/nodes/eye.h @@ -24,11 +24,11 @@ class Eye : public Node { static constexpr size_t BATCH_SHAPE = 3lu; public: - Eye(const std::shared_ptr& op, const GraphContext::CPtr context); + Eye(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; @@ -36,7 +36,7 @@ class Eye : public Node { bool needShapeInfer() const override { return true; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp index e7105f4f016bc7..f08657b7cc2f88 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp @@ -1056,7 +1056,7 @@ struct FakeQuantKey { }; } // namespace 
-FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphContext::CPtr context) +FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -1911,11 +1911,11 @@ void FakeQuantize::executeQuantization(const std::unique_ptrgetImplementationType() != impl_desc_type::ref) { execPtr->exec(*this); } else { diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.h b/src/plugins/intel_cpu/src/nodes/fake_quantize.h index 4f985df4cdae41..950c598dfdb51d 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.h +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.h @@ -66,13 +66,13 @@ struct jit_uni_quantize_kernel { class FakeQuantize : public Node { public: - FakeQuantize(const std::shared_ptr& op, const GraphContext::CPtr context); + FakeQuantize(const std::shared_ptr& op, const GraphContext::CPtr& context); void initSupportedPrimitiveDescriptors() override; void getSupportedDescriptors() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; size_t getAxis() const { return axis; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index decbea94be8dd6..bf215a9522a595 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -155,7 +155,7 @@ bool FullyConnected::isSupportedCompressedOperation(const std::shared_ptrgetCPUStreamExecutor()) { if (!context->getCPUStreamExecutor()->get_rank().empty()) { // init tp_cfg.w_rank and tp_cfg.w_size @@ -167,7 +167,7 @@ void FullyConnected::initTensorParallelConfig(const GraphContext::CPtr context) } } -FullyConnected::FullyConnected(const 
std::shared_ptr& op, const GraphContext::CPtr context) +FullyConnected::FullyConnected(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, FCShapeInferFactory(op)) { std::string errorMessage; initTensorParallelConfig(context); @@ -356,7 +356,7 @@ void FullyConnected::execTensorParallelSync() { } } -void FullyConnected::execute(dnnl::stream strm) { +void FullyConnected::execute(const dnnl::stream& strm) { initTensorParallelSync(); executor->execute(memory); @@ -364,7 +364,7 @@ void FullyConnected::execute(dnnl::stream strm) { execTensorParallelSync(); } -void FullyConnected::executeDynamicImpl(dnnl::stream strm) { +void FullyConnected::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -579,14 +579,13 @@ void FullyConnected::needSplitMemoryForTensorParallel() { // wgt // split N direction tp_cfg.cached_splited_weight = - attrs.weightsNonTransposed - ? split_vertical(context->getEngine(), std::move(wgt), 0, tp_cfg.w_rank, tp_cfg.w_size) - : split_horizontal(context->getEngine(), std::move(wgt), 0, tp_cfg.w_rank, tp_cfg.w_size); + attrs.weightsNonTransposed ? 
split_vertical(context->getEngine(), wgt, 0, tp_cfg.w_rank, tp_cfg.w_size) + : split_horizontal(context->getEngine(), wgt, 0, tp_cfg.w_rank, tp_cfg.w_size); memory[ARG_WEI] = tp_cfg.cached_splited_weight; // bias if (attrs.withBias) { auto bias = getSrcMemoryAtPort(BIAS); - auto select_bias = split_horizontal(context->getEngine(), std::move(bias), 0, tp_cfg.w_rank, tp_cfg.w_size); + auto select_bias = split_horizontal(context->getEngine(), bias, 0, tp_cfg.w_rank, tp_cfg.w_size); tp_cfg.cached_splited_bias = std::move(select_bias); } else { tp_cfg.cached_splited_bias = MemoryDescUtils::makeEmptyMemory(context); @@ -594,8 +593,7 @@ void FullyConnected::needSplitMemoryForTensorParallel() { memory[ARG_BIAS] = tp_cfg.cached_splited_bias; // dst memory[ARG_DST] = getDstMemoryAtPort(0); - tp_cfg.cached_dst = - split_horizontal(context->getEngine(), std::move(dst), -1, tp_cfg.w_rank, tp_cfg.w_size, false); + tp_cfg.cached_dst = split_horizontal(context->getEngine(), dst, -1, tp_cfg.w_rank, tp_cfg.w_size, false); memory[ARG_DST | ARG_ATTR_SCALES] = split_horizontal(context->getEngine(), memory[ARG_DST | ARG_ATTR_SCALES], 0, tp_cfg.w_rank, tp_cfg.w_size); diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.h b/src/plugins/intel_cpu/src/nodes/fullyconnected.h index 0338b9906a59c1..46d7c7dc1f041a 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.h +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.h @@ -39,10 +39,10 @@ struct FCTensorParallelConfig { class FullyConnected : public Node { public: - FullyConnected(const std::shared_ptr& op, const GraphContext::CPtr context); + FullyConnected(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { @@ -80,7 +80,7 @@ class FullyConnected : public Node { } void prepareParams() override; - void 
executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeExecutedInInt8() const override; void keepWeightsNonTransposed(bool weightsNonTransposed) { this->attrs.weightsNonTransposed = weightsNonTransposed; @@ -111,7 +111,7 @@ class FullyConnected : public Node { void fuseDecompressionConstant(const MemoryCPtr& memory, MemoryCPtr& decompressionValuesPtr); - void initTensorParallelConfig(const GraphContext::CPtr context); + void initTensorParallelConfig(const GraphContext::CPtr& context); void needUpdateTensorParalelConfig(); void needPrepareParamsForTensorParallel(); void initTensorParallelSync(); diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index 337d425465e564..e72901d7d43e62 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -58,7 +58,7 @@ bool Gather::isSupportedOperation(const std::shared_ptr& op, std return true; } -Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr context) +Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, GatherShapeInferFactory(op)), batchDims(0) { std::string errorMessage; @@ -409,7 +409,7 @@ void Gather::prepareParams() { #endif } -void Gather::execute(dnnl::stream strm) { +void Gather::execute(const dnnl::stream& strm) { if (isInPlace()) { return; } @@ -477,7 +477,7 @@ void Gather::execute(dnnl::stream strm) { execReference(); } -void Gather::executeDynamicImpl(dnnl::stream strm) { +void Gather::executeDynamicImpl(const dnnl::stream& strm) { if (isInPlace()) { return; } diff --git a/src/plugins/intel_cpu/src/nodes/gather.h b/src/plugins/intel_cpu/src/nodes/gather.h index 58255411c3d032..9f780b822cd497 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.h +++ b/src/plugins/intel_cpu/src/nodes/gather.h @@ -18,12 +18,12 @@ namespace node { class Gather : public Node { public: - Gather(const 
std::shared_ptr& op, const GraphContext::CPtr context); + Gather(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool isExecutable() const override; void resolveInPlaceEdges(Edge::LOOK look) override; @@ -56,7 +56,7 @@ class Gather : public Node { void execCompressed4Bit(); protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needPrepareParams() const override; void prepareParams() override; diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp index 61dbe2f635ed4b..7a494d184ce9c1 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp @@ -31,7 +31,7 @@ bool GatherElements::isSupportedOperation(const std::shared_ptr& return true; } -GatherElements::GatherElements(const std::shared_ptr& op, const GraphContext::CPtr context) +GatherElements::GatherElements(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -93,7 +93,7 @@ void GatherElements::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void GatherElements::executeDynamicImpl(dnnl::stream strm) { +void GatherElements::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -130,7 +130,7 @@ void GatherElements::directExecution() { parallel_nt(0, threadBody); } -void GatherElements::execute(dnnl::stream strm) { +void GatherElements::execute(const dnnl::stream& strm) { switch (dataTypeSize_) { case sizeof(element_type_traits::value_type): return directExecution::value_type>(); diff 
--git a/src/plugins/intel_cpu/src/nodes/gather_elements.h b/src/plugins/intel_cpu/src/nodes/gather_elements.h index c627dfb4b364ad..bd05f624cb17cd 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.h +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.h @@ -12,17 +12,17 @@ namespace node { class GatherElements : public Node { public: - GatherElements(const std::shared_ptr& op, const GraphContext::CPtr context); + GatherElements(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp index dc9891eb8388eb..1124bec41632b8 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp @@ -35,7 +35,7 @@ bool GatherND::isSupportedOperation(const std::shared_ptr& op, s return true; } -GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr context) +GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -109,26 +109,24 @@ void GatherND::prepareParams() { } GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs) - : sliceRank(attrs.sliceRank), - dataSize(attrs.dataSize) { - batchSize = std::accumulate(attrs.srcDims.begin(), + : batchSize(std::accumulate(attrs.srcDims.begin(), attrs.srcDims.begin() + attrs.batchDims, size_t(1), - 
std::multiplies()); - dataLength = std::accumulate(attrs.srcDims.begin() + sliceRank + attrs.batchDims, + std::multiplies())), + dataSize(attrs.dataSize), + sliceRank(attrs.sliceRank), + dataLength(std::accumulate(attrs.srcDims.begin() + sliceRank + attrs.batchDims, attrs.srcDims.end(), size_t(1), - std::multiplies()); - cycles = attrs.dstElementCount / (dataLength * batchSize); - workAmount = batchSize * cycles; - - srcBatchStride = std::accumulate(attrs.srcDims.begin() + attrs.batchDims, + std::multiplies())), + cycles(attrs.dstElementCount / (dataLength * batchSize)), + workAmount(batchSize * cycles), + srcBatchStride(std::accumulate(attrs.srcDims.begin() + attrs.batchDims, attrs.srcDims.end(), size_t(1), - std::multiplies()); - idxBatchStride = cycles * sliceRank; - dstBatchStride = cycles * dataLength; - + std::multiplies())), + idxBatchStride(cycles * sliceRank), + dstBatchStride(cycles * dataLength) { srcShifts.resize(attrs.sliceRank, 0); for (size_t i = 0; i < attrs.sliceRank; i++) srcShifts[i] = attrs.srcStrides[i + attrs.batchDims] * (dataLength > 1 ? 
dataSize : 1); @@ -141,7 +139,7 @@ GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs) } } -void GatherND::execute(dnnl::stream strm) { +void GatherND::execute(const dnnl::stream& strm) { if (!execPtr) THROW_ERROR("has not compiled executor."); @@ -246,7 +244,7 @@ void GatherND::GatherNDExecutor::gatherElementwise(const MemoryPtr& srcMemPtr, }); } -void GatherND::executeDynamicImpl(dnnl::stream strm) { +void GatherND::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.h b/src/plugins/intel_cpu/src/nodes/gather_nd.h index 73d7d704610edf..42d5be140325b0 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_nd.h +++ b/src/plugins/intel_cpu/src/nodes/gather_nd.h @@ -12,17 +12,17 @@ namespace node { class GatherND : public Node { public: - GatherND(const std::shared_ptr& op, const GraphContext::CPtr context); + GatherND(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: @@ -47,11 +47,11 @@ class GatherND : public Node { void gatherBlocks(const MemoryPtr& srcMemPtr, const MemoryPtr& idxMemPtr, const MemoryPtr& dstMemPtr); size_t batchSize = 1lu; - size_t cycles = 1lu; - size_t dataLength = 1lu; + size_t dataSize = 1lu; size_t sliceRank = 0lu; + size_t dataLength = 1lu; + size_t cycles = 1lu; size_t workAmount = 0lu; - size_t dataSize = 1lu; size_t srcBatchStride = 1lu; size_t idxBatchStride = 1lu; diff --git a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp 
b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp index da868ec9c78a34..12b0754d0fb869 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp @@ -29,7 +29,7 @@ bool GatherTree::isSupportedOperation(const std::shared_ptr& op, return true; } -GatherTree::GatherTree(const std::shared_ptr& op, const GraphContext::CPtr context) +GatherTree::GatherTree(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -74,7 +74,7 @@ void GatherTree::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void GatherTree::execute(dnnl::stream strm) { +void GatherTree::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("has not compiled executor."); @@ -117,7 +117,7 @@ void GatherTree::prepareParams() { execPtr = std::make_shared(stepIdxDims, parentIdxDims, maxSeqLenDims, dstDims); } -void GatherTree::executeDynamicImpl(dnnl::stream strm) { +void GatherTree::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/gather_tree.h b/src/plugins/intel_cpu/src/nodes/gather_tree.h index 3a9368083e3d10..c0d4547c5075d1 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_tree.h +++ b/src/plugins/intel_cpu/src/nodes/gather_tree.h @@ -12,15 +12,15 @@ namespace node { class GatherTree : public Node { public: - GatherTree(const std::shared_ptr& op, const GraphContext::CPtr context); + GatherTree(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static 
bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp b/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp index 5d73122f6a640c..64965b4e61a138 100644 --- a/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp +++ b/src/plugins/intel_cpu/src/nodes/generate_proposals.cpp @@ -304,7 +304,7 @@ bool GenerateProposals::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +GenerateProposals::GenerateProposals(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -338,11 +338,11 @@ void GenerateProposals::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void GenerateProposals::executeDynamicImpl(dnnl::stream strm) { +void GenerateProposals::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void GenerateProposals::execute(dnnl::stream strm) { +void GenerateProposals::execute(const dnnl::stream& strm) { try { if (inputShapes.size() != 4 || outputShapes.size() != 3) { OPENVINO_THROW("Incorrect number of input or output edges!"); diff --git a/src/plugins/intel_cpu/src/nodes/generate_proposals.h b/src/plugins/intel_cpu/src/nodes/generate_proposals.h index 3f300d8a45f33c..9556be81c6d308 100644 --- a/src/plugins/intel_cpu/src/nodes/generate_proposals.h +++ b/src/plugins/intel_cpu/src/nodes/generate_proposals.h @@ -12,16 +12,16 @@ namespace node { class GenerateProposals : public Node { public: - GenerateProposals(const std::shared_ptr& op, const GraphContext::CPtr context); + GenerateProposals(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool 
needShapeInfer() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp index 6f29c52ffdc4b6..0e25c64acfe534 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp @@ -39,7 +39,7 @@ bool GridSample::isSupportedOperation(const std::shared_ptr& op, #if defined(OPENVINO_ARCH_X86_64) -GridSample::GridSample(const std::shared_ptr& op, const GraphContext::CPtr context) +GridSample::GridSample(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -265,7 +265,7 @@ void GridSample::prepareParams() { }); } -void GridSample::execute(dnnl::stream strm) { +void GridSample::execute(const dnnl::stream& strm) { const void* srcData = getSrcDataAtPort(IN_DATA); const uint8_t* gridData = getSrcDataAtPortAs(IN_GRID); uint8_t* dstData = getDstDataAtPortAs(0); @@ -308,7 +308,7 @@ void GridSample::execute(dnnl::stream strm) { parallel_nt(m_threads_num, threadBody); } -void GridSample::executeDynamicImpl(dnnl::stream strm) { +void GridSample::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.hpp b/src/plugins/intel_cpu/src/nodes/grid_sample.hpp index e09507061241c1..ad8f7dea590413 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.hpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.hpp @@ -14,13 +14,13 @@ namespace node { class GridSample : public Node { public: - GridSample(const std::shared_ptr& op, const GraphContext::CPtr context); + GridSample(const std::shared_ptr& op, const 
GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; struct threadExecParams { @@ -50,7 +50,7 @@ class GridSample : public Node { }; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: diff --git a/src/plugins/intel_cpu/src/nodes/grn.cpp b/src/plugins/intel_cpu/src/nodes/grn.cpp index f3a22bd7f496e4..8a58ee5ef2cdcf 100644 --- a/src/plugins/intel_cpu/src/nodes/grn.cpp +++ b/src/plugins/intel_cpu/src/nodes/grn.cpp @@ -26,7 +26,7 @@ bool GRN::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -GRN::GRN(const std::shared_ptr& op, const GraphContext::CPtr context) +GRN::GRN(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -86,11 +86,11 @@ void GRN::prepareParams() { W = static_cast(dataDims[3]); } -void GRN::executeDynamicImpl(dnnl::stream strm) { - execute(std::move(strm)); +void GRN::executeDynamicImpl(const dnnl::stream& strm) { + execute(strm); } -void GRN::execute(dnnl::stream strm) { +void GRN::execute(const dnnl::stream& strm) { const float* src_data = getSrcDataAtPortAs(0); float* dst_data = getDstDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/grn.h b/src/plugins/intel_cpu/src/nodes/grn.h index 11ff05e5fb4910..02074dd8cf9188 100644 --- a/src/plugins/intel_cpu/src/nodes/grn.h +++ b/src/plugins/intel_cpu/src/nodes/grn.h @@ -12,15 +12,15 @@ namespace node { class GRN : public Node { public: - GRN(const std::shared_ptr& op, const GraphContext::CPtr context); + 
GRN(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/if.cpp b/src/plugins/intel_cpu/src/nodes/if.cpp index 8cbae1e62b1731..683eb3c35a9b85 100644 --- a/src/plugins/intel_cpu/src/nodes/if.cpp +++ b/src/plugins/intel_cpu/src/nodes/if.cpp @@ -5,6 +5,7 @@ #include "if.h" #include +#include #include #include "common/cpu_memcpy.h" @@ -17,10 +18,10 @@ namespace ov { namespace intel_cpu { namespace node { -If::PortMapHelper::PortMapHelper(const MemoryPtr& from, const std::deque& to, const dnnl::engine& eng) - : srcMemPtr(from), - dstMemPtrs(to) { - size = 0; +If::PortMapHelper::PortMapHelper(MemoryPtr from, std::deque to, const dnnl::engine& eng) + : srcMemPtr(std::move(from)), + dstMemPtrs(std::move(to)), + size(0) { if (srcMemPtr->getDesc().isDefined()) size = srcMemPtr->getShape().getElementsCount(); @@ -30,7 +31,7 @@ If::PortMapHelper::PortMapHelper(const MemoryPtr& from, const std::deque& op, std::st return true; } -If::If(const std::shared_ptr& op, const GraphContext::CPtr context) +If::If(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()), ovOp(op) { std::string errorMessage; @@ -238,12 +239,12 @@ void If::prepareAfterMappers(const bool isThen, const dnnl::engine& eng) { std::deque If::getToMemories(const Node* node, const size_t port) const { std::deque memories; - for (auto edge : node->getChildEdgesAtPort(port)) + for (const auto& edge : node->getChildEdgesAtPort(port)) 
memories.push_back(edge->getMemoryPtr()); return memories; } -void If::execute(dnnl::stream strm) { +void If::execute(const dnnl::stream& strm) { const bool condition = static_cast((getSrcDataAtPortAs(0))[0]); auto& beforeMappers = condition ? beforeThenMappers : beforeElseMappers; @@ -258,7 +259,7 @@ void If::execute(dnnl::stream strm) { mapper->execute(strm); } -void If::executeDynamicImpl(dnnl::stream strm) { +void If::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/if.h b/src/plugins/intel_cpu/src/nodes/if.h index be715d88146411..829734142722e1 100644 --- a/src/plugins/intel_cpu/src/nodes/if.h +++ b/src/plugins/intel_cpu/src/nodes/if.h @@ -17,20 +17,20 @@ namespace node { class If : public Node { public: - If(const std::shared_ptr& op, const GraphContext::CPtr context); + If(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void initSupportedPrimitiveDescriptors() override; void getSupportedDescriptors() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool isExecutable() const override { return true; } protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needPrepareParams() const override { return false; }; @@ -51,9 +51,9 @@ class If : public Node { class PortMapHelper { public: - PortMapHelper(const MemoryPtr& from, const std::deque& to, const dnnl::engine& eng); + PortMapHelper(MemoryPtr from, std::deque to, const dnnl::engine& eng); ~PortMapHelper() = default; - void execute(dnnl::stream& strm); + void execute(const dnnl::stream& strm); private: void redefineTo(); diff --git a/src/plugins/intel_cpu/src/nodes/input.cpp b/src/plugins/intel_cpu/src/nodes/input.cpp index 
595246405b4900..f812da7ca01159 100644 --- a/src/plugins/intel_cpu/src/nodes/input.cpp +++ b/src/plugins/intel_cpu/src/nodes/input.cpp @@ -220,7 +220,7 @@ jit_has_subnormals_base::fn_t jit_has_subnormals_function() { } // namespace #endif -Input::Input(const std::shared_ptr& op, const GraphContext::CPtr context) +Input::Input(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { if (!one_of(op->get_type_info(), op::v0::Parameter::get_type_info_static(), @@ -404,7 +404,7 @@ Input::Input(const Shape& shape, const ov::element::Type& prc, const std::string& name, const std::string& type, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(type, createInputShapes(shape, TypeFromName(type)), createOutputShapes(shape, TypeFromName(type)), @@ -419,22 +419,26 @@ Input::Input(const Shape& shape, } } -Input::Input(MemoryDescPtr memDesc, const std::string& name, const std::string& type, const GraphContext::CPtr context) +Input::Input(const MemoryDescPtr& memDesc, + const std::string& name, + const std::string& type, + const GraphContext::CPtr& context) : Input(memDesc->getShape(), memDesc->getPrecision(), name, type, context) { - extMemDesc = memDesc; + extMemDesc = memDesc; // NOLINT(cppcoreguidelines-prefer-member-initializer) fixed in clang-tidy-18 } -Input::Input(const std::shared_ptr& op, const GraphContext::CPtr context, InputConfig config) +Input::Input(const std::shared_ptr& op, const GraphContext::CPtr& context, const InputConfig& config) : Input(op, context) { - extMemDesc = config.desc; - m_isInPlace = config.inPlace; + extMemDesc = config.desc; // NOLINT(cppcoreguidelines-prefer-member-initializer) fixed in clang-tidy-18 + m_isInPlace = config.inPlace; // NOLINT(cppcoreguidelines-prefer-member-initializer) fixed in clang-tidy-18 } -Input::Input(const std::shared_ptr& op, const GraphContext::CPtr context, OutputConfig config) +Input::Input(const std::shared_ptr& op, const 
GraphContext::CPtr& context, const OutputConfig& config) : Input(op, context) { - extMemDesc = config.desc; - m_useParentMemoryDescForOutput = config.useParentMemoryDescForOutput; - m_isInPlace = config.inPlace; + extMemDesc = config.desc; // NOLINT(cppcoreguidelines-prefer-member-initializer) fixed in clang-tidy-18 + m_useParentMemoryDescForOutput = // NOLINT(cppcoreguidelines-prefer-member-initializer) + config.useParentMemoryDescForOutput; + m_isInPlace = config.inPlace; // NOLINT(cppcoreguidelines-prefer-member-initializer) fixed in clang-tidy-18 } MemoryCPtr Input::getMemoryPtr() const { diff --git a/src/plugins/intel_cpu/src/nodes/input.h b/src/plugins/intel_cpu/src/nodes/input.h index c3c424b28c2c5e..d0e1814b7a6878 100644 --- a/src/plugins/intel_cpu/src/nodes/input.h +++ b/src/plugins/intel_cpu/src/nodes/input.h @@ -33,19 +33,22 @@ class Input : public Node { bool inPlace = false; }; - Input(const std::shared_ptr& op, const GraphContext::CPtr context); + Input(const std::shared_ptr& op, const GraphContext::CPtr& context); Input(const Shape& shape, const ov::element::Type& prc, const std::string& name, const std::string& type, - const GraphContext::CPtr context); + const GraphContext::CPtr& context); - Input(MemoryDescPtr memDesc, const std::string& name, const std::string& type, const GraphContext::CPtr context); + Input(const MemoryDescPtr& memDesc, + const std::string& name, + const std::string& type, + const GraphContext::CPtr& context); - Input(const std::shared_ptr& op, const GraphContext::CPtr context, InputConfig config); + Input(const std::shared_ptr& op, const GraphContext::CPtr& context, const InputConfig& config); - Input(const std::shared_ptr& op, const GraphContext::CPtr context, OutputConfig config); + Input(const std::shared_ptr& op, const GraphContext::CPtr& context, const OutputConfig& config); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; @@ -57,8 +60,8 @@ class Input : public Node { void 
withMeanImage(); MemoryCPtr getMemoryPtr() const; - void execute(dnnl::stream strm) override {} - void executeDynamicImpl(dnnl::stream strm) override {} + void execute(const dnnl::stream& strm) override {} + void executeDynamicImpl(const dnnl::stream& strm) override {} bool isExecutable() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/interaction.cpp b/src/plugins/intel_cpu/src/nodes/interaction.cpp index c674ac13cb773d..13c846da6e2bea 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.cpp +++ b/src/plugins/intel_cpu/src/nodes/interaction.cpp @@ -181,7 +181,7 @@ struct jit_move_scale_kernel : public jit_uni_move_scale_kernel, public jit_gene #endif // OPENVINO_ARCH_X86_64 -Interaction::Interaction(const std::shared_ptr& op, const GraphContext::CPtr context) +Interaction::Interaction(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -240,7 +240,7 @@ static inline void flat_triangle(const uint8_t* in, uint8_t* out, size_t size, s } } -void Interaction::execRef(dnnl::stream strm) { +void Interaction::execRef(const dnnl::stream& strm) { using namespace dnnl; uint8_t* outFeaturesPtr = getDstDataAtPortAs(0); std::vector inputPtrs(inputSizes); @@ -278,7 +278,7 @@ void Interaction::execRef(dnnl::stream strm) { } } -void Interaction::execute(dnnl::stream strm) { +void Interaction::execute(const dnnl::stream& strm) { execRef(strm); } @@ -356,7 +356,7 @@ void Interaction::prepareParams() { #endif } -void Interaction::executeDynamicImpl(dnnl::stream strm) { +void Interaction::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/interaction.h b/src/plugins/intel_cpu/src/nodes/interaction.h index 8dd446bb595180..405a59940076ba 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.h +++ b/src/plugins/intel_cpu/src/nodes/interaction.h @@ -42,20 
+42,20 @@ struct jit_uni_move_scale_kernel { class Interaction : public Node { public: - Interaction(const std::shared_ptr& op, const GraphContext::CPtr context); + Interaction(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; bool isExecutable() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: - void execRef(dnnl::stream strm); + void execRef(const dnnl::stream& strm); dnnl::primitive prim; size_t batchSize = 0; size_t featureSize = 0; diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/interpolate.cpp index cfcf868cae8c31..b50e50ac05fabd 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/interpolate.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include "common/cpu_memcpy.h" @@ -1848,7 +1849,7 @@ namespace { */ class InterpolateShapeInferFactory : public ShapeInferFactory { public: - InterpolateShapeInferFactory(std::shared_ptr op) : m_op(op) {} + InterpolateShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override { if (auto interp4 = ov::as_type_ptr(m_op)) { const auto& attr = interp4->get_attrs(); @@ -1872,7 +1873,7 @@ class InterpolateShapeInferFactory : public ShapeInferFactory { }; } // namespace -Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext::CPtr context) +Interpolate::Interpolate(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InterpolateShapeInferFactory(op)) { std::string errorMessage; if 
(isSupportedOperation(op, errorMessage)) { @@ -2319,7 +2320,7 @@ bool Interpolate::needShapeInfer() const { return false; } -void Interpolate::executeDynamicImpl(dnnl::stream strm) { +void Interpolate::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); const size_t port = interpAttrs.shapeCalcMode == InterpolateShapeCalcMode::sizes ? TARGET_SHAPE_ID : get_scale_id(); @@ -2570,7 +2571,7 @@ std::vector Interpolate::getScales(const VectorDims& srcDimPad, const Vec return fullScales; } -void Interpolate::execute(dnnl::stream strm) { +void Interpolate::execute(const dnnl::stream& strm) { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(DATA_ID); @@ -4055,15 +4056,14 @@ Interpolate::InterpolateExecutorBase::InterpolateExecutorBase(const InterpolateA : mode(interpAttrs.mode), coordTransMode(interpAttrs.coordTransMode), configured_for_layout(interpAttrs.layout), + srcDimPad5d(to5Dim(getPaddedInputShape(srcDims, interpAttrs.padBegin, interpAttrs.padEnd))), + dstDim5d(to5Dim(dstDims)), inputPrec(interpAttrs.inPrc), - outputPrec(interpAttrs.outPrc) { - srcDimPad5d = to5Dim(getPaddedInputShape(srcDims, interpAttrs.padBegin, interpAttrs.padEnd)); - dstDim5d = to5Dim(dstDims); - srcDataSize = interpAttrs.inPrc.size(); - dstDataSize = interpAttrs.outPrc.size(); - dataRank = srcDims.size(); - spatialDimSize = getSpatialDimsNum(dataRank); - + outputPrec(interpAttrs.outPrc), + srcDataSize(interpAttrs.inPrc.size()), + dstDataSize(interpAttrs.outPrc.size()), + dataRank(srcDims.size()), + spatialDimSize(getSpatialDimsNum(dataRank)) { switch (mode) { case InterpolateMode::nearest: { buildTblNN(srcDimPad5d, dstDim5d, dataScales, interpAttrs.layout, interpAttrs.nearestMode); diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.h b/src/plugins/intel_cpu/src/nodes/interpolate.h index ab9b6688ac3c39..9142a2272aefd4 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.h +++ b/src/plugins/intel_cpu/src/nodes/interpolate.h @@ -74,14 +74,14 @@ class 
Interpolate : public Node { static constexpr float PILLOW_BICUBIC_WINDOW_SCALE = 2.0f; public: - Interpolate(const std::shared_ptr& op, const GraphContext::CPtr context); + Interpolate(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } @@ -170,8 +170,8 @@ class Interpolate : public Node { VectorDims srcDimPad5d, dstDim5d; ov::element::Type inputPrec, outputPrec; size_t srcDataSize, dstDataSize; - int spatialDimSize; size_t dataRank; + int spatialDimSize; std::vector auxTable; std::vector pillow_working_buf; size_t m_threads_num = 0lu; diff --git a/src/plugins/intel_cpu/src/nodes/inverse.cpp b/src/plugins/intel_cpu/src/nodes/inverse.cpp index 51f399a5876642..25f54cb5283847 100644 --- a/src/plugins/intel_cpu/src/nodes/inverse.cpp +++ b/src/plugins/intel_cpu/src/nodes/inverse.cpp @@ -84,11 +84,11 @@ bool Inverse::created() const { return getType() == Type::Inverse; } -void Inverse::execute(dnnl::stream strm) { +void Inverse::execute(const dnnl::stream& strm) { inverse(); } -void Inverse::executeDynamicImpl(dnnl::stream strm) { +void Inverse::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/inverse.hpp b/src/plugins/intel_cpu/src/nodes/inverse.hpp index 4add38136aecc5..0a30c46ed08916 100644 --- a/src/plugins/intel_cpu/src/nodes/inverse.hpp +++ b/src/plugins/intel_cpu/src/nodes/inverse.hpp @@ -25,8 +25,8 @@ class Inverse : public Node { void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const 
dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/brgemm_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/brgemm_kernel.cpp index 49d946b57d77ae..4185eafc8880cd 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/brgemm_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/brgemm_kernel.cpp @@ -19,6 +19,12 @@ using namespace dnnl::impl::cpu::aarch64::matmul; namespace ov { namespace intel_cpu { +static size_t getVlen() { + return mayiuse(sve_512) ? cpu_isa_traits::vlen + : mayiuse(sve_256) ? cpu_isa_traits::vlen + : cpu_isa_traits::vlen; +} + BrgemmKernel::BrgemmKernel(size_t M, size_t N, size_t K, @@ -29,30 +35,22 @@ BrgemmKernel::BrgemmKernel(size_t M, ov::element::Type inType, bool b_accumulate) : M(M), + M_blk(matmulOptimalM), + M_tail(M % M_blk), K(K), + K_blk(K), + K_tail(K % K_blk), N(N), + N_blk(std::max(N, getVlen() / inType.size())), + N_tail(N % N_blk), lda(lda), ldb(ldb), ldc(ldc), b_transposed(b_transposed), + kBlkStep(4 / inType.size()), + packedBSize(rnd_up(K, getVlen() / inType.size()) * rnd_up(N, N_blk) * inType.size()), inType(inType) { - // blocking M - M_blk = matmulOptimalM; - M_tail = M % M_blk; - kBlkStep = 4 / inType.size(); - size_t vlen; - vlen = mayiuse(sve_512) ? cpu_isa_traits::vlen - : mayiuse(sve_256) ? 
cpu_isa_traits::vlen - : cpu_isa_traits::vlen; - // blocking N - N_blk = std::max(N, vlen / inType.size()); - N_tail = N % N_blk; - - // blocking K - K_blk = K; - K_tail = K % K_blk; // copied K must be round up by vlen / inType.size(), otherwise copy B kernel may access wrong memory - packedBSize = rnd_up(K, vlen / inType.size()) * rnd_up(N, N_blk) * inType.size(); size_t brg0BaseIdx = std::numeric_limits::max(); for (size_t m = 0; m < 2; m++) { for (size_t k = 0; k < 2; k++) { @@ -330,4 +328,4 @@ void BrgemmKernel::callBrgemm(brgemmCtx& ctx, } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp index c64ba45cdd93f6..66db416ec7c732 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp @@ -4,6 +4,8 @@ #include "jit_uni_eltwise_generic.hpp" +#include + namespace ov { namespace intel_cpu { namespace aarch64 { @@ -19,15 +21,15 @@ void jit_uni_eltwise_kernel::operator()(const node::jit_eltwise_call_args_ptrs* } template -jit_uni_eltwise_generic::jit_uni_eltwise_generic(const jit_eltwise_params& jep, - const std::vector& eltwise_data, - const std::vector& ops_list, - const dnnl::post_ops& post_ops) - : jit_uni_eltwise_kernel(jep), +jit_uni_eltwise_generic::jit_uni_eltwise_generic(jit_eltwise_params jep, + std::vector eltwise_data, + std::vector ops_list, + dnnl::post_ops post_ops) + : jit_uni_eltwise_kernel(std::move(jep)), jit_generator(), - eltwise_data_(eltwise_data), - ops_list_(ops_list), - post_ops_(post_ops) {} + eltwise_data_(std::move(eltwise_data)), + ops_list_(std::move(ops_list)), + post_ops_(std::move(post_ops)) {} template void jit_uni_eltwise_generic::generate() { diff --git 
a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp index c4fb7608d521de..107495a693431b 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.hpp @@ -7,6 +7,7 @@ #include #include +#include #include #include "nodes/executors/eltwise.hpp" @@ -70,7 +71,7 @@ struct jit_uni_eltwise_kernel { void operator()(const node::jit_eltwise_call_args_ptrs* const_args, const jit_eltwise_call_args_indexes* indexes); jit_uni_eltwise_kernel() {} - jit_uni_eltwise_kernel(const jit_eltwise_params& jep) : ker_(nullptr), jep_(jep) {} + jit_uni_eltwise_kernel(jit_eltwise_params jep) : ker_(nullptr), jep_(std::move(jep)) {} virtual ~jit_uni_eltwise_kernel() {} virtual void create_ker() = 0; @@ -83,10 +84,10 @@ struct jit_uni_eltwise_generic : public jit_uni_eltwise_kernel, jit_generator { public: DECLARE_CPU_JIT_AUX_FUNCTIONS(jit_uni_eltwise_generic) - jit_uni_eltwise_generic(const jit_eltwise_params& jep, - const std::vector& eltwise_data, - const std::vector& ops_list, - const dnnl::post_ops& post_ops); + jit_uni_eltwise_generic(jit_eltwise_params jep, + std::vector eltwise_data, + std::vector ops_list, + dnnl::post_ops post_ops); jit_uni_eltwise_generic() {} diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp index 8197f5e648481c..67cf6137bb8b45 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/softmax_kernel.hpp @@ -4,6 +4,8 @@ #pragma once #include +#include +#include #include #include #include @@ -750,7 +752,7 @@ inline void exp_reduce_sum(float* a, const float max, const size_t size, float& # endif #endif for (; i < size; i++) { - a[i] = exp(a[i] - max); + a[i] = 
std::exp(a[i] - max); sum += a[i]; } } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp index 786884c3805989..5defe98c55aad8 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp @@ -28,6 +28,8 @@ BrgemmKernel::BrgemmKernel(size_t M, ov::element::Type inType, bool b_accumulate) : M(M), + M_blk(matmulOptimalM), + M_tail(M % M_blk), K(K), N(N), lda(lda), @@ -36,10 +38,6 @@ BrgemmKernel::BrgemmKernel(size_t M, b_transposed(b_transposed), inType(inType), b_accumulate(b_accumulate) { - // blocking M - M_blk = matmulOptimalM; - M_tail = M % M_blk; - if (!one_of(inType, ov::element::bf16, ov::element::f16, ov::element::f32)) THROW_ERROR("brgemm kernel only supports f16, bf16, f32"); bool is_f32 = inType == ov::element::f32; diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.cpp index c0de6520b7099c..5606d3902c6526 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.cpp @@ -55,11 +55,8 @@ const unsigned jitGatherKernelBase::incVec[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, template jitUniGatherKernel::jitUniGatherKernel(const jGatherConfParams& jcp) - : jitGatherKernelBase(jcp), + : jitGatherKernelBase(jcp, x64::cpu_isa_traits::vlen, indicesTypeSize), x64::jit_generator(jit_name()) { - vlen = x64::cpu_isa_traits::vlen; - dataElPerVec = vlen / jcp.dataTypeSize; - idxElPerVec = vlen / indicesTypeSize; if (jcp.dataTypeSize == 2) dataTypeShift = 1; else if (jcp.dataTypeSize == 4) diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.hpp index de8cda30d06499..d9a260aabeae93 100644 --- 
a/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/gather_uni_kernel.hpp @@ -75,7 +75,12 @@ struct jitGatherKernelBase { assert(ker_); ker_(args); } - explicit jitGatherKernelBase(const jGatherConfParams& jcp) : ker_(nullptr), jcp(jcp) {} + explicit jitGatherKernelBase(const jGatherConfParams& jcp, uint64_t vlen, uint64_t indicesTypeSize) + : ker_(nullptr), + jcp(jcp), + vlen(vlen), + dataElPerVec(vlen / jcp.dataTypeSize), + idxElPerVec(vlen / indicesTypeSize) {} virtual ~jitGatherKernelBase() {} virtual void create_ker() = 0; diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp index 0a2073ec3dd72e..dd135fb3b7aabc 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp @@ -14,12 +14,7 @@ namespace kernel { template GridSampleKernel::GridSampleKernel(const GridSampleKernelConfParams& jcp) - : GridSampleKernelBase(jit_name(), jcp, isa) { - vlen = x64::cpu_isa_traits::vlen; - dataTypeSize = jcp.inDataPrc.size(); - gridTypeSize = jcp.gridPrc.size(); - dataElPerVec = vlen / dataTypeSize; - gridElPerVec = vlen / gridTypeSize; + : GridSampleKernelBase(jit_name(), jcp, isa, x64::cpu_isa_traits::vlen) { if (dataTypeSize == 2) dataTypeShift = 1; else if (dataTypeSize == 4) diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp index 31c14aac6364bf..9513681341d587 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.hpp @@ -72,10 +72,16 @@ class GridSampleKernelBase : public JitKernelBase { } explicit GridSampleKernelBase(const char* name, const GridSampleKernelConfParams& jcp, - dnnl::impl::cpu::x64::cpu_isa_t isa) + dnnl::impl::cpu::x64::cpu_isa_t isa, + uint64_t vlen) : 
JitKernelBase(name, isa), ker_(nullptr), - jcp(jcp) {} + jcp(jcp), + vlen(vlen), + dataTypeSize(jcp.inDataPrc.size()), + gridTypeSize(jcp.gridPrc.size()), + dataElPerVec(vlen / dataTypeSize), + gridElPerVec(vlen / gridTypeSize) {} virtual void create_ker() = 0; uint64_t getVecLen() { diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.cpp index a967bb2c1de944..8c35b15500fee9 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.cpp @@ -259,7 +259,10 @@ stack_frame::stack_frame(ov::intel_cpu::jit_kernel& kernel, size_t size, uint32_ } } -stack_frame::stack_frame(stack_frame&& rhs) : _kernel(rhs._kernel), _size(rhs._size), _alignment(rhs._alignment) { +stack_frame::stack_frame(stack_frame&& rhs) noexcept + : _kernel(rhs._kernel), + _size(rhs._size), + _alignment(rhs._alignment) { rhs._size = 0; rhs._alignment = 0; } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp index fbfe98be8d6aae..c170086af70bb7 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel.hpp @@ -229,7 +229,7 @@ class variable_base { variable_base& operator=(const variable_base&) = delete; variable_base(const variable_base&); - variable_base(variable_base&&); + variable_base(variable_base&&) noexcept; reg_type& reg() const { return *_reg; @@ -263,7 +263,7 @@ class variable_base { variable_base& operator=(const variable_base&) = delete; variable_base(const variable_base&); - variable_base(variable_base&&); + variable_base(variable_base&&) noexcept; reg_type& reg() const { return *_addr; @@ -286,7 +286,7 @@ class variable using reg_type = const typename base::reg_type; using arithmetic_type = typename std::conditional::value, size_t, T>::type; - variable(variable&&) = default; + 
variable(variable&&) noexcept = default; variable(jit_kernel& krnl); variable(jit_kernel& krnl, const shared_reg& reg); @@ -491,7 +491,7 @@ class variable : public variable_base { using base = variable_base; using reg_type = const typename base::reg_type; - variable(variable&&) = default; + variable(variable&&) noexcept = default; variable(jit_kernel& krnl, const shared_reg& reg); const variable& operator=(const variable& rhs) const; @@ -505,7 +505,7 @@ class variable : public variable_base { using reg_type = const typename base::reg_type; constexpr static size_t length = N; - variable(variable&&) = default; + variable(variable&&) noexcept = default; variable(jit_kernel& krnl); variable(jit_kernel& krnl, const shared_reg& reg); @@ -546,7 +546,7 @@ class stack_frame { public: stack_frame(jit_kernel& kernel, size_t size, uint32_t alignment = 1); - stack_frame(stack_frame&& rhs); + stack_frame(stack_frame&& rhs) noexcept; ~stack_frame(); const Xbyak::Reg64& pointer() const; void clear() const; @@ -951,8 +951,9 @@ variable_base::variable_base(const variable_base& rhs) : _kerne _reg(rhs._reg) {} template -variable_base::variable_base(variable_base&& rhs) : _kernel(rhs._kernel), - _reg(std::move(rhs._reg)) {} +variable_base::variable_base(variable_base&& rhs) noexcept + : _kernel(rhs._kernel), + _reg(std::move(rhs._reg)) {} template variable_base::variable_base(jit_kernel& krnl, const shared_reg& addr) @@ -964,8 +965,9 @@ variable_base::variable_base(const variable_base& rhs) : _kernel( _addr(rhs._addr) {} template -variable_base::variable_base(variable_base&& rhs) : _kernel(rhs._kernel), - _addr(std::move(rhs._addr)) {} +variable_base::variable_base(variable_base&& rhs) noexcept + : _kernel(rhs._kernel), + _addr(std::move(rhs._addr)) {} template variable::variable(jit_kernel& krnl) diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp index 5e3db6e23493ff..c7a49ffd0feedf 100644 --- 
a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp @@ -10,9 +10,10 @@ namespace ov { namespace intel_cpu { namespace kernel { -JitKernelBase::JitKernelBase(const char* name, x64::cpu_isa_t isa) : x64::jit_generator(name, isa), m_isa(isa) { - vlen = x64::isa_max_vlen(isa); -} +JitKernelBase::JitKernelBase(const char* name, x64::cpu_isa_t isa) + : x64::jit_generator(name, isa), + m_isa(isa), + vlen(x64::isa_max_vlen(isa)) {} void JitKernelBase::uni_vfmsub132ps(const Xbyak::Xmm& v_dst, const Xbyak::Xmm& v_src, const Xbyak::Operand& op) { if (isValidIsa(x64::avx2)) { diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp index 768d2e2dc3d7f4..cc908dd82b6295 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp @@ -422,7 +422,7 @@ struct ScratchBuffAllocator { ScratchBuffAllocator() = default; // register size / allocate totally size / inform consumers - void register_allocation(size_t size, CallBack cb) { + void register_allocation(size_t size, const CallBack& cb) { m_allocs.push_back(cb); m_total_size += size; m_sizes.push_back(size); diff --git a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp index 81acbb7a7787e7..b4e7a00c74f8fc 100644 --- a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp +++ b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp @@ -5,6 +5,7 @@ #include "llm_mlp.h" #include +#include #include #include "common/bfloat16.hpp" @@ -339,13 +340,12 @@ struct LLMMLP::Executor : public LLMMLP::ExecutorBase { Executor(LLMMLP* pnode, const LLMMLPNode::Config& config, DnnlScratchPadPtr scrachPad) : m_pnode(pnode), m_config(config), - m_scrachPad(scrachPad) { + m_scrachPad(std::move(scrachPad)), + m_rt_prec_f16(std::is_same::value) { PlainTensor w_gate(pnode->getSrcMemoryAtPort(1)); 
PlainTensor w_up(pnode->getSrcMemoryAtPort(2)); PlainTensor w_down(pnode->getSrcMemoryAtPort(3)); - m_rt_prec_f16 = std::is_same::value; - // [N, K] [N, K] interleave (16-16-...) into [2*N, K] auto K = w_gate.size(1); auto N = w_gate.size(0); @@ -491,12 +491,12 @@ struct LLMMLP::Executor : public LLMMLP::ExecutorBase { #else template struct LLMMLP::Executor : public LLMMLP::ExecutorBase { - Executor(LLMMLP* pnode, const LLMMLPNode::Config& config, DnnlScratchPadPtr scrachPad) {} + Executor(LLMMLP*, const LLMMLPNode::Config&, const DnnlScratchPadPtr&) {} void execute() {} }; #endif -LLMMLP::LLMMLP(const std::shared_ptr& op, const GraphContext::CPtr context) +LLMMLP::LLMMLP(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; const auto& config = context->getConfig(); @@ -589,7 +589,7 @@ void LLMMLP::createPrimitive() { } } -void LLMMLP::execute(dnnl::stream strm) { +void LLMMLP::execute(const dnnl::stream& strm) { MAYBE_UNUSED(strm); m_executor->execute(); } diff --git a/src/plugins/intel_cpu/src/nodes/llm_mlp.h b/src/plugins/intel_cpu/src/nodes/llm_mlp.h index 81d87dc2f7e2d3..0328e99571a28a 100644 --- a/src/plugins/intel_cpu/src/nodes/llm_mlp.h +++ b/src/plugins/intel_cpu/src/nodes/llm_mlp.h @@ -14,7 +14,7 @@ namespace node { class LLMMLP : public Node { public: - LLMMLP(const std::shared_ptr& op, const GraphContext::CPtr context); + LLMMLP(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -24,11 +24,11 @@ class LLMMLP : public Node { return false; } void createPrimitive() override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; static bool 
isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage, uint64_t fcDynamicQuantizationGroupSize = 0) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp index 172f4fd3ebe87a..05981f170298d9 100644 --- a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp @@ -26,7 +26,7 @@ bool LogSoftmax::isSupportedOperation(const std::shared_ptr& op, return true; } -LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext::CPtr context) +LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -83,11 +83,11 @@ void LogSoftmax::prepareParams() { reducedAxisStride *= dims[i]; } -void LogSoftmax::executeDynamicImpl(dnnl::stream strm) { +void LogSoftmax::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void LogSoftmax::execute(dnnl::stream strm) { +void LogSoftmax::execute(const dnnl::stream& strm) { const float* srcData = getSrcDataAtPortAs(0); float* dstData = getDstDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/log_softmax.h b/src/plugins/intel_cpu/src/nodes/log_softmax.h index e2f64e52449681..395464f41069ac 100644 --- a/src/plugins/intel_cpu/src/nodes/log_softmax.h +++ b/src/plugins/intel_cpu/src/nodes/log_softmax.h @@ -12,15 +12,15 @@ namespace node { class LogSoftmax : public Node { public: - LogSoftmax(const std::shared_ptr& op, const GraphContext::CPtr context); + LogSoftmax(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void 
executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/lora.cpp b/src/plugins/intel_cpu/src/nodes/lora.cpp index c59a3a7fa37578..c252142d461b08 100644 --- a/src/plugins/intel_cpu/src/nodes/lora.cpp +++ b/src/plugins/intel_cpu/src/nodes/lora.cpp @@ -106,11 +106,11 @@ void LoRA::createPrimitive() { m_graph.Activate(inputMemory, outputMemory); } -void LoRA::execute(dnnl::stream) { +void LoRA::execute(const dnnl::stream&) { m_graph.Infer(); } -void LoRA::executeDynamicImpl(dnnl::stream strm) { +void LoRA::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/lora.h b/src/plugins/intel_cpu/src/nodes/lora.h index b2b5757db4acb8..3c993c20e3f91d 100644 --- a/src/plugins/intel_cpu/src/nodes/lora.h +++ b/src/plugins/intel_cpu/src/nodes/lora.h @@ -25,8 +25,8 @@ class LoRA : public Node { void selectOptimalPrimitiveDescriptor() override; void createPrimitive() override; void prepareParams() override; - void execute(dnnl::stream) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream&) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: std::shared_ptr m_body; diff --git a/src/plugins/intel_cpu/src/nodes/lrn.cpp b/src/plugins/intel_cpu/src/nodes/lrn.cpp index b913e831ddabe4..95c14667964135 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.cpp +++ b/src/plugins/intel_cpu/src/nodes/lrn.cpp @@ -109,7 +109,7 @@ bool Lrn::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -Lrn::Lrn(const std::shared_ptr& op, const GraphContext::CPtr context) +Lrn::Lrn(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -240,7 +240,7 @@ void Lrn::createDescriptor(const std::vector& 
inputDesc, const st descs.push_back(desc); } -void Lrn::execute(dnnl::stream strm) { +void Lrn::execute(const dnnl::stream& strm) { if (execPtr) { execPtr->exec(primArgs, strm); } else { @@ -248,7 +248,7 @@ void Lrn::execute(dnnl::stream strm) { } } -void Lrn::executeDynamicImpl(dnnl::stream strm) { +void Lrn::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/lrn.h b/src/plugins/intel_cpu/src/nodes/lrn.h index bb87e797538045..b74a63745b5b25 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.h +++ b/src/plugins/intel_cpu/src/nodes/lrn.h @@ -13,7 +13,7 @@ namespace node { class Lrn : public Node { public: - Lrn(const std::shared_ptr& op, const GraphContext::CPtr context); + Lrn(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void createDescriptor(const std::vector& inputDesc, @@ -28,8 +28,8 @@ class Lrn : public Node { } void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.cpp b/src/plugins/intel_cpu/src/nodes/mathematics.cpp index 06b4ef8d3a672a..ebb16097e6d15f 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.cpp +++ b/src/plugins/intel_cpu/src/nodes/mathematics.cpp @@ -39,7 +39,7 @@ bool Math::isSupportedOperation(const std::shared_ptr& op, std:: return true; } -Math::Math(const std::shared_ptr& op, const GraphContext::CPtr context) +Math::Math(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()), alpha(0.f), beta(0.f), @@ -64,11 +64,11 @@ void Math::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inDataConf, {{LayoutType::ncsp, 
ov::element::f32}}, impl_desc_type::ref_any); } -void Math::executeDynamicImpl(dnnl::stream strm) { +void Math::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Math::execute(dnnl::stream strm) { +void Math::execute(const dnnl::stream& strm) { size_t dataSize = getChildEdgeAt(0)->getMemory().getShape().getElementsCount(); const float* src_data = getSrcDataAtPortAs(0); float* dst_data = getDstDataAtPortAs(0); @@ -151,7 +151,7 @@ void Math::execute(dnnl::stream strm) { gamma = (gamma == 0.0f) ? 1.0507f : gamma; parallel_for(dataSize, [&](size_t i) { float x = src_data[i]; - dst_data[i] = (x > 0.0f) ? (gamma * x) : (gamma * alpha * (exp(x) - 1.0f)); + dst_data[i] = (x > 0.0f) ? (gamma * x) : (gamma * alpha * (std::exp(x) - 1.0f)); }); break; case Algorithm::MathSign: diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.h b/src/plugins/intel_cpu/src/nodes/mathematics.h index 4849bceab4eaa6..0ba80685a9acc1 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.h +++ b/src/plugins/intel_cpu/src/nodes/mathematics.h @@ -12,17 +12,17 @@ namespace node { class Math : public Node { public: - Math(const std::shared_ptr& op, const GraphContext::CPtr context); + Math(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp index a9020ea8798243..20c5dcf1040e71 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.cpp +++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp 
@@ -112,7 +112,7 @@ bool MatMul::isSupportedOperation(const std::shared_ptr& op, std return true; } -MatMul::MatMul(const std::shared_ptr& op, const GraphContext::CPtr context) +MatMul::MatMul(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, MMShapeInferFactory(op)), withBiases(false) { std::string errorMessage; @@ -255,7 +255,7 @@ static VectorDims getStridesAndModifyShape(Shape& shape, const bool transpose) { return strides; } -dnnl::memory::desc MatMul::getBiasDescFrom(const DnnlMemoryDescCPtr outMemDesc) { +dnnl::memory::desc MatMul::getBiasDescFrom(const DnnlMemoryDescCPtr& outMemDesc) { // oneDNN matmul requires shape for bias desc to be the same rank VectorDims biasDims(outMemDesc->getShape().getRank(), 1); const auto outDims = outMemDesc->getShape().getStaticDims(); @@ -684,7 +684,7 @@ void MatMul::prepareParams() { #endif } -void MatMul::execute(dnnl::stream strm) { +void MatMul::execute(const dnnl::stream& strm) { if (execPtr) { execPtr->exec(primArgs, strm); } else if (hasEmptyInputTensors()) { @@ -695,7 +695,7 @@ void MatMul::execute(dnnl::stream strm) { } } -void MatMul::executeDynamicImpl(dnnl::stream strm) { +void MatMul::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/matmul.h b/src/plugins/intel_cpu/src/nodes/matmul.h index aea709970e2839..d1cff0ca8bcd2a 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.h +++ b/src/plugins/intel_cpu/src/nodes/matmul.h @@ -16,7 +16,7 @@ namespace node { class MatMul : public Node { public: - MatMul(const std::shared_ptr& op, const GraphContext::CPtr context); + MatMul(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void createDescriptor(const std::vector& inputDesc, @@ -36,8 +36,8 @@ class MatMul : public Node { } void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const 
dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; const std::vector& getDefaultImplPriority() override; @@ -52,7 +52,7 @@ class MatMul : public Node { private: using executorPtr = std::shared_ptr; executorPtr execPtr = nullptr; - dnnl::memory::desc getBiasDescFrom(const DnnlMemoryDescCPtr outMemDesc); + dnnl::memory::desc getBiasDescFrom(const DnnlMemoryDescCPtr& outMemDesc); std::pair makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const; bool withBiases; diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp index 2f9758fbc1b242..da8ed5cf60c3a3 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp @@ -47,7 +47,7 @@ bool MatrixNms::isSupportedOperation(const std::shared_ptr& op, return true; } -MatrixNms::MatrixNms(const std::shared_ptr& op, const GraphContext::CPtr context) +MatrixNms::MatrixNms(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -305,7 +305,7 @@ bool MatrixNms::isExecutable() const { return isDynamicNode() || Node::isExecutable(); } -void MatrixNms::executeDynamicImpl(dnnl::stream strm) { +void MatrixNms::executeDynamicImpl(const dnnl::stream& strm) { if (hasEmptyInputTensors()) { redefineOutputMemory({{0, 6}, {0, 1}, {0}}); return; @@ -313,7 +313,7 @@ void MatrixNms::executeDynamicImpl(dnnl::stream strm) { execute(strm); } -void MatrixNms::execute(dnnl::stream strm) { +void MatrixNms::execute(const dnnl::stream& strm) { const float* boxes = getSrcDataAtPortAs(NMS_BOXES); const float* scores = getSrcDataAtPortAs(NMS_SCORES); @@ -444,9 +444,9 @@ void MatrixNms::execute(dnnl::stream strm) { } void MatrixNms::checkPrecision(const 
ov::element::Type prec, - const std::vector precList, - const std::string name, - const std::string type) { + const std::vector& precList, + const std::string& name, + const std::string& type) { if (std::find(precList.begin(), precList.end(), prec) == precList.end()) THROW_CPU_NODE_ERR("has unsupported '", name, "' ", type, " precision: ", prec); } diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.h b/src/plugins/intel_cpu/src/nodes/matrix_nms.h index ad872dc3eeba2d..4071cd81d18ae4 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.h +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.h @@ -20,17 +20,17 @@ enum MatrixNmsDecayFunction { GAUSSIAN, LINEAR }; class MatrixNms : public Node { public: - MatrixNms(const std::shared_ptr& op, const GraphContext::CPtr context); + MatrixNms(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; bool isExecutable() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needShapeInfer() const override { return false; @@ -105,9 +105,9 @@ class MatrixNms : public Node { size_t m_realNumBoxes = 0; float (*m_decay_fn)(float, float, float) = nullptr; void checkPrecision(const ov::element::Type prec, - const std::vector precList, - const std::string name, - const std::string type); + const std::vector& precList, + const std::string& name, + const std::string& type); size_t nmsMatrix(const float* boxesData, const float* scoresData, diff --git a/src/plugins/intel_cpu/src/nodes/memory.cpp b/src/plugins/intel_cpu/src/nodes/memory.cpp index d7ce7f6515c2f8..8b29ac8cbfbadb 100644 --- a/src/plugins/intel_cpu/src/nodes/memory.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/memory.cpp @@ -5,6 +5,7 @@ #include "memory.hpp" #include +#include #include "common/arbitrary_order_desc_creator.h" #include "dnnl_extension_utils.h" @@ -50,9 +51,9 @@ class MemoryStub : public IMemory { }; public: - MemoryStub(const dnnl::engine& eng, const MemoryDescPtr& pMemDesc) - : m_eng(eng), - m_pMemDesc(pMemDesc), + MemoryStub(dnnl::engine eng, MemoryDescPtr pMemDesc) + : m_eng(std::move(eng)), + m_pMemDesc(std::move(pMemDesc)), m_pMemoryBlock(std::make_shared()) {} const MemoryDesc& getDesc() const override { @@ -121,7 +122,7 @@ bool MemoryOutputBase::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +MemoryOutputBase::MemoryOutputBase(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), MemoryNode(op) { std::string errorMessage; @@ -133,12 +134,12 @@ MemoryOutputBase::MemoryOutputBase(const std::shared_ptr& op, const Gr } } -MemoryOutputBase::MemoryOutputBase(const std::string id, +MemoryOutputBase::MemoryOutputBase(const std::string& id, const std::string& name, const std::string& type, const Shape& input_shape, const ov::element::Type& input_prc, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(type, {input_shape}, {}, {input_prc}, {}, name, context), MemoryNode(id) { isDynamic = input_shape.isDynamic(); @@ -217,17 +218,17 @@ void MemoryOutputBase::initOptimalPrimitiveDescriptor() { selected_pd->setConfig(config); } -void MemoryOutputBase::execute(dnnl::stream strm) { +void MemoryOutputBase::execute(const dnnl::stream& strm) { runStatic(strm); state->commit(); } -void MemoryOutputBase::executeDynamicImpl(dnnl::stream strm) { +void MemoryOutputBase::executeDynamicImpl(const dnnl::stream& strm) { runDynamic(strm); state->commit(); } -void MemoryOutputBase::assignState(MemStatePtr newState) { +void MemoryOutputBase::assignState(const MemStatePtr& newState) { OPENVINO_ASSERT(newState, "MemoryOutput ", 
getName(), " got null state"); state = newState; assignExtMemory(state->output_mem(), state->internal_desc()); @@ -388,7 +389,7 @@ bool MemoryInputBase::isSupportedOperation(const std::shared_ptr return true; } -MemoryInputBase::MemoryInputBase(const std::shared_ptr& op, const GraphContext::CPtr ctx) +MemoryInputBase::MemoryInputBase(const std::shared_ptr& op, const GraphContext::CPtr& ctx) : Input(op, ctx), MemoryStateNode(op) { std::string errorMessage; @@ -401,12 +402,12 @@ MemoryInputBase::MemoryInputBase(const std::shared_ptr& op, const Grap executeHook = &MemoryInputBase::assignState; } -MemoryInputBase::MemoryInputBase(const std::string id, +MemoryInputBase::MemoryInputBase(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, MemoryInputBase::mode mode) @@ -415,7 +416,7 @@ MemoryInputBase::MemoryInputBase(const std::string id, outputShapes.emplace_back(output_shape); addOriginalOutputPrecision(output_prc); if (input_shape) { - for (auto inp_shape : *input_shape) { + for (const auto& inp_shape : *input_shape) { inputShapes.push_back(inp_shape); isDynamic = isDynamic || inp_shape.isDynamic(); } @@ -553,13 +554,13 @@ void MemoryInputBase::assignState(MemStatePtr newState) { assignStateHook(); } -void MemoryInputBase::execute(dnnl::stream strm) { +void MemoryInputBase::execute(const dnnl::stream& strm) { assert(executeHook && "executeHook is not initialized!"); (this->*executeHook)(); runStatic(strm); } -void MemoryInputBase::executeDynamicImpl(dnnl::stream strm) { +void MemoryInputBase::executeDynamicImpl(const dnnl::stream& strm) { assert(executeHook && "executeHook is not initialized!"); (this->*executeHook)(); runDynamic(strm); @@ -574,7 +575,7 @@ void MemoryInputBase::bypassAssignState() { return; } -MemoryInput::MemoryInput(const 
std::shared_ptr& op, const GraphContext::CPtr ctx) +MemoryInput::MemoryInput(const std::shared_ptr& op, const GraphContext::CPtr& ctx) : MemoryInputBase::MemoryInputBase(op, ctx) { auto rvWithSubgraph = ov::as_type_ptr(op); if (rvWithSubgraph) { @@ -586,27 +587,18 @@ MemoryInput::MemoryInput(const std::shared_ptr& op, const GraphContext } } -MemoryInput::MemoryInput(const std::string id, +MemoryInput::MemoryInput(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, std::shared_ptr func, mode mode) - : MemoryInputBase::MemoryInputBase(id, - name, - type, - output_shape, - output_prc, - context, - input_shape, - input_prc, - mode) { - body = func; - + : MemoryInputBase::MemoryInputBase(id, name, type, output_shape, output_prc, context, input_shape, input_prc, mode), + body(std::move(func)) { if (haveSubgraph()) { subGraph = make_unique(); if (isDynamic) { @@ -904,12 +896,12 @@ bool MemoryInput::isSupportedOperation(const std::shared_ptr& op return MemoryInputBase::isSupportedOperation(op, errorMessage); } -MemoryInputSDPA::MemoryInputSDPA(const std::string id, +MemoryInputSDPA::MemoryInputSDPA(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, const std::shared_ptr& sdpaNode) @@ -1010,12 +1002,12 @@ void MemoryInputSDPA::resolveInPlaceEdges(Edge::LOOK look) { } } -MemoryInputSingle::MemoryInputSingle(const std::string id, +MemoryInputSingle::MemoryInputSingle(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr 
context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, std::shared_ptr func) @@ -1027,7 +1019,7 @@ MemoryInputSingle::MemoryInputSingle(const std::string id, context, input_shape, input_prc, - func, + std::move(func), MemoryInputBase::mode::single_read_value) {} MemStatePtr MemoryInputSingle::makeState() const { diff --git a/src/plugins/intel_cpu/src/nodes/memory.hpp b/src/plugins/intel_cpu/src/nodes/memory.hpp index 678c391fcc7cad..604d0d2c80bad2 100644 --- a/src/plugins/intel_cpu/src/nodes/memory.hpp +++ b/src/plugins/intel_cpu/src/nodes/memory.hpp @@ -46,13 +46,13 @@ class MemoryStatesRegister { class MemoryOutputBase : public Node, public MemoryNode { public: - MemoryOutputBase(const std::shared_ptr& op, const GraphContext::CPtr context); - MemoryOutputBase(const std::string id, + MemoryOutputBase(const std::shared_ptr& op, const GraphContext::CPtr& context); + MemoryOutputBase(const std::string& id, const std::string& name, const std::string& type, const Shape& input_shape, const ov::element::Type& input_prc, - const GraphContext::CPtr context); + const GraphContext::CPtr& context); ~MemoryOutputBase() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; @@ -64,9 +64,9 @@ class MemoryOutputBase : public Node, public MemoryNode { return getType() == Type::MemoryOutput; } - void execute(dnnl::stream strm) override final; // NOLINT - void executeDynamicImpl(dnnl::stream strm) override final; // NOLINT - bool isExecutable() const override final; // NOLINT + void execute(const dnnl::stream& strm) override final; // NOLINT + void executeDynamicImpl(const dnnl::stream& strm) override final; // NOLINT + bool isExecutable() const override final; // NOLINT void registerInputNode(MemoryInputBase* node); void deregisterSibling(MemoryInputBase* node); @@ -78,7 +78,7 @@ class MemoryOutputBase : public Node, public MemoryNode { return false; } - void 
assignState(MemStatePtr newState); + void assignState(const MemStatePtr& newState); protected: virtual void runStatic(dnnl::stream strm) = 0; @@ -130,7 +130,7 @@ class MemoryInputBase : public Input, public MemoryStateNode { enum class mode { read_value_assign, single_read_value }; public: - MemoryInputBase(const std::shared_ptr& op, const GraphContext::CPtr context); + MemoryInputBase(const std::shared_ptr& op, const GraphContext::CPtr& context); ~MemoryInputBase() override; @@ -141,8 +141,8 @@ class MemoryInputBase : public Input, public MemoryStateNode { void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override final; // NOLINT - void executeDynamicImpl(dnnl::stream strm) override final; // NOLINT + void execute(const dnnl::stream& strm) override final; // NOLINT + void executeDynamicImpl(const dnnl::stream& strm) override final; // NOLINT bool needShapeInfer() const override { return false; } @@ -158,12 +158,12 @@ class MemoryInputBase : public Input, public MemoryStateNode { void assignState(MemStatePtr newState) override final; // NOLINT protected: - MemoryInputBase(const std::string id, + MemoryInputBase(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, mode mode = mode::read_value_assign); @@ -194,13 +194,13 @@ class MemoryInputBase : public Input, public MemoryStateNode { class MemoryInput : public MemoryInputBase { public: - MemoryInput(const std::shared_ptr& op, const GraphContext::CPtr ctx); - MemoryInput(const std::string id, + MemoryInput(const std::shared_ptr& op, const GraphContext::CPtr& ctx); + MemoryInput(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const 
GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, std::shared_ptr func = nullptr, @@ -241,12 +241,12 @@ class MemoryInput : public MemoryInputBase { class MemoryInputSingle : public MemoryInput { public: - MemoryInputSingle(const std::string id, + MemoryInputSingle(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, std::shared_ptr func); @@ -262,12 +262,12 @@ class MemoryInputSingle : public MemoryInput { class MemoryInputSDPA : public MemoryInputBase { public: - MemoryInputSDPA(const std::string id, + MemoryInputSDPA(const std::string& id, const std::string& name, const std::string& type, const Shape& output_shape, const ov::element::Type& output_prc, - const GraphContext::CPtr context, + const GraphContext::CPtr& context, const ov::optional>& input_shape, const ov::optional>& input_prc, const std::shared_ptr& sdpaNode); diff --git a/src/plugins/intel_cpu/src/nodes/memory_state_base.h b/src/plugins/intel_cpu/src/nodes/memory_state_base.h index 8861d853e518ea..3bd79a4114a535 100644 --- a/src/plugins/intel_cpu/src/nodes/memory_state_base.h +++ b/src/plugins/intel_cpu/src/nodes/memory_state_base.h @@ -4,6 +4,8 @@ #pragma once +#include + #include "memory_state.h" namespace ov { @@ -12,7 +14,7 @@ namespace node { class MemoryNode { public: - explicit MemoryNode(std::string id) : m_id(id) {} + explicit MemoryNode(std::string id) : m_id(std::move(id)) {} explicit MemoryNode(const std::shared_ptr& op); virtual ~MemoryNode() = default; const std::string& getId() const { diff --git a/src/plugins/intel_cpu/src/nodes/mha.cpp b/src/plugins/intel_cpu/src/nodes/mha.cpp index a78629f8c0f1cc..e1f4a774011dc9 100644 --- a/src/plugins/intel_cpu/src/nodes/mha.cpp +++ b/src/plugins/intel_cpu/src/nodes/mha.cpp @@ -39,11 
+39,9 @@ struct jit_mul_add_softmax_kernel : public jit_uni_mul_add_softmax_kernel, publi explicit jit_mul_add_softmax_kernel(const jit_mul_add_softmax_compile_params& jcp) : jit_uni_mul_add_softmax_kernel(jcp), - jit_generator(jit_name()) { - exp_emitter = std::make_shared(this, isa, dnnl_eltwise_exp, 0.f, 0.f); - - vec_size = dnnl::impl::cpu::x64::cpu_isa_traits::vlen / sizeof(float); - } + jit_generator(jit_name()), + vec_size(dnnl::impl::cpu::x64::cpu_isa_traits::vlen / sizeof(float)), + exp_emitter(std::make_shared(this, isa, dnnl_eltwise_exp, 0.f, 0.f)) {} virtual ~jit_mul_add_softmax_kernel() {} void create_ker() override { @@ -384,9 +382,8 @@ struct jit_convert_reorder_kernel : public jit_uni_convert_reorder_kernel, publi explicit jit_convert_reorder_kernel(const jit_convert_reorder_compile_params& jcp) : jit_uni_convert_reorder_kernel(jcp), - jit_generator(jit_name()) { - vec_size = dnnl::impl::cpu::x64::cpu_isa_traits::vlen / sizeof(float); - } + jit_generator(jit_name()), + vec_size(dnnl::impl::cpu::x64::cpu_isa_traits::vlen / sizeof(float)) {} virtual ~jit_convert_reorder_kernel() {} void create_ker() override { @@ -801,7 +798,7 @@ bool MHA::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -MHA::MHA(const std::shared_ptr& op, const GraphContext::CPtr context) +MHA::MHA(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -1542,7 +1539,7 @@ void MHA::mhaImpl() { }); } -void MHA::execute(dnnl::stream strm) { +void MHA::execute(const dnnl::stream& strm) { if (inputPrecisions[1] == ov::element::f32) { mhaImpl(); } else if (inputPrecisions[1] == ov::element::bf16) { @@ -1554,7 +1551,7 @@ void MHA::execute(dnnl::stream strm) { } } -void MHA::executeDynamicImpl(dnnl::stream strm) { +void MHA::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/mha.h 
b/src/plugins/intel_cpu/src/nodes/mha.h index 8770ba2455272c..9229f9dfdc4790 100644 --- a/src/plugins/intel_cpu/src/nodes/mha.h +++ b/src/plugins/intel_cpu/src/nodes/mha.h @@ -41,19 +41,18 @@ struct jit_mul_add_softmax_call_args { }; struct jit_uni_mul_add_softmax_kernel { - void (*ker_)(const jit_mul_add_softmax_call_args*); - void operator()(const jit_mul_add_softmax_call_args* call_args) { assert(ker_); ker_(call_args); } - explicit jit_uni_mul_add_softmax_kernel(const jit_mul_add_softmax_compile_params& jcp) : ker_(nullptr), jcp_(jcp) {} + explicit jit_uni_mul_add_softmax_kernel(const jit_mul_add_softmax_compile_params& jcp) : jcp_(jcp), ker_(nullptr) {} virtual ~jit_uni_mul_add_softmax_kernel() {} virtual void create_ker() = 0; jit_mul_add_softmax_compile_params jcp_; + void (*ker_)(const jit_mul_add_softmax_call_args*); }; struct jit_convert_reorder_compile_params { @@ -129,17 +128,17 @@ struct jit_uni_convert_transpose_kernel { class MHA : public Node { public: - MHA(const std::shared_ptr& op, const GraphContext::CPtr context); + MHA(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp index 67d840ebbc48ae..c735aea89b8660 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp @@ -42,7 +42,7 @@ bool MultiClassNms::isSupportedOperation(const std::shared_ptr& return true; } 
-MultiClassNms::MultiClassNms(const std::shared_ptr& op, const GraphContext::CPtr context) +MultiClassNms::MultiClassNms(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -213,7 +213,7 @@ bool MultiClassNms::isExecutable() const { return isDynamicNode() || Node::isExecutable(); } -void MultiClassNms::executeDynamicImpl(dnnl::stream strm) { +void MultiClassNms::executeDynamicImpl(const dnnl::stream& strm) { if (hasEmptyInputTensors()) { redefineOutputMemory({{0, 6}, {0, 1}, {0}}); return; @@ -221,7 +221,7 @@ void MultiClassNms::executeDynamicImpl(dnnl::stream strm) { execute(strm); } -void MultiClassNms::execute(dnnl::stream strm) { +void MultiClassNms::execute(const dnnl::stream& strm) { const float* boxes = getSrcDataAtPortAs(NMS_BOXES); const float* scores = getSrcDataAtPortAs(NMS_SCORES); @@ -640,9 +640,9 @@ void MultiClassNms::nmsWithoutEta(const float* boxes, } void MultiClassNms::checkPrecision(const ov::element::Type prec, - const std::vector precList, - const std::string name, - const std::string type) { + const std::vector& precList, + const std::string& name, + const std::string& type) { if (std::find(precList.begin(), precList.end(), prec) == precList.end()) THROW_CPU_NODE_ERR("has unsupported '", name, "' ", type, " precision: ", prec); } diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp index 030a14e56b61df..fdcdc9af26d611 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.hpp @@ -18,17 +18,17 @@ enum class MulticlassNmsSortResultType { class MultiClassNms : public Node { public: - MultiClassNms(const std::shared_ptr& op, const GraphContext::CPtr context); + MultiClassNms(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void 
initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; bool isExecutable() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool needShapeInfer() const override { return false; @@ -92,9 +92,9 @@ class MultiClassNms : public Node { std::vector m_filtBoxes; // rois after nms for each class in each image void checkPrecision(const ov::element::Type prec, - const std::vector precList, - const std::string name, - const std::string type); + const std::vector& precList, + const std::string& name, + const std::string& type); float intersectionOverUnion(const float* boxesI, const float* boxesJ, const bool normalized); diff --git a/src/plugins/intel_cpu/src/nodes/multinomial.cpp b/src/plugins/intel_cpu/src/nodes/multinomial.cpp index 0b522f4042b12c..e7ed2d1b8e199f 100644 --- a/src/plugins/intel_cpu/src/nodes/multinomial.cpp +++ b/src/plugins/intel_cpu/src/nodes/multinomial.cpp @@ -124,7 +124,7 @@ bool Multinomial::created() const { return getType() == Type::Multinomial; } -void Multinomial::execute(dnnl::stream strm) { +void Multinomial::execute(const dnnl::stream& strm) { switch (m_probs_precision) { case ov::element::f32: return execute_probs_type(); @@ -137,7 +137,7 @@ void Multinomial::execute(dnnl::stream strm) { } } -void Multinomial::executeDynamicImpl(dnnl::stream strm) { +void Multinomial::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/multinomial.hpp b/src/plugins/intel_cpu/src/nodes/multinomial.hpp index 7a36a8773c2523..b56fe08e870d93 100644 --- a/src/plugins/intel_cpu/src/nodes/multinomial.hpp +++ b/src/plugins/intel_cpu/src/nodes/multinomial.hpp @@ -31,8 +31,8 @@ class Multinomial : public Node { void 
createPrimitive() override; bool isExecutable() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/mvn.cpp b/src/plugins/intel_cpu/src/nodes/mvn.cpp index e3c8d7b3f40822..df017fe052c343 100644 --- a/src/plugins/intel_cpu/src/nodes/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/mvn.cpp @@ -361,7 +361,7 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k } }; - auto vector_worker = [&](std::function func) { + auto vector_worker = [&](const std::function& func) { Xbyak::Label label_end; func(0); cmp(reg_unroll_size, 1); @@ -749,7 +749,7 @@ struct jit_uni_mvn_mean_variance_kernel_f32 : public jit_uni_mvn_mean_variance_k } } - inline void worker_tails(Xbyak::Reg64& reg_tail_num, std::function func) { + inline void worker_tails(Xbyak::Reg64& reg_tail_num, const std::function& func) { int tile_start_idx = (isa == cpu::x64::avx512_core) ? 0 : ((isa == cpu::x64::avx2) ? 1 : 2); Label tile_exit[kTileNum]; for (int i = tile_start_idx; i < kTileNum; i++) { @@ -1314,7 +1314,7 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator add(reg_dst_aux, step * jcp_.dst_data_size); }; - auto vector_worker = [&](std::function func) { + auto vector_worker = [&](const std::function& func) { Xbyak::Label label_end; func(0, vector_step); cmp(addr_unroll_size, 1); @@ -1659,7 +1659,7 @@ struct jit_uni_mvn_kernel_f32 : public jit_uni_mvn_kernel, public jit_generator {store_pool_gpr_idxs}); } - inline void worker_mvn_tails(Xbyak::Reg64& reg_tail_num, std::function func) { + inline void worker_mvn_tails(Xbyak::Reg64& reg_tail_num, const std::function& func) { int tile_start_idx = (isa == cpu::x64::avx512_core) ? 0 : ((isa == cpu::x64::avx2) ? 
1 : 2); Label tile_exit[kTileNum]; for (int i = tile_start_idx; i < kTileNum; i++) { @@ -1869,7 +1869,7 @@ bool MVN::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -MVN::MVN(const std::shared_ptr& op, const GraphContext::CPtr context) +MVN::MVN(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -2257,11 +2257,11 @@ void MVN::setPostOps(dnnl::primitive_attr& attr, bool initWeights) { attr.set_post_ops(ops); } -void MVN::executeDynamicImpl(dnnl::stream strm) { +void MVN::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void MVN::execute(dnnl::stream strm) { +void MVN::execute(const dnnl::stream& strm) { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/mvn.h b/src/plugins/intel_cpu/src/nodes/mvn.h index c9cb0894315457..b7bba5309a05d0 100644 --- a/src/plugins/intel_cpu/src/nodes/mvn.h +++ b/src/plugins/intel_cpu/src/nodes/mvn.h @@ -80,14 +80,14 @@ struct jit_uni_mvn_kernel { class MVN : public Node { public: - MVN(const std::shared_ptr& op, const GraphContext::CPtr context); + MVN(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/ngram.cpp b/src/plugins/intel_cpu/src/nodes/ngram.cpp index 90229676a23a3c..4a4c41e0a45d94 100644 --- a/src/plugins/intel_cpu/src/nodes/ngram.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/ngram.cpp @@ -98,7 +98,7 @@ std::vector Ngram::computeBatchLenghts() { return batchLenghts; } -void Ngram::execute(dnnl::stream strm) { +void Ngram::execute(const dnnl::stream& strm) { auto* srcData = getSrcDataAtPortAs(0); auto* dstData = getDstDataAtPortAs(0); @@ -140,7 +140,7 @@ void Ngram::execute(dnnl::stream strm) { }); } -void Ngram::executeDynamicImpl(dnnl::stream strm) { +void Ngram::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/ngram.h b/src/plugins/intel_cpu/src/nodes/ngram.h index d9b4fef245789e..50a1eedb76dc57 100644 --- a/src/plugins/intel_cpu/src/nodes/ngram.h +++ b/src/plugins/intel_cpu/src/nodes/ngram.h @@ -20,13 +20,13 @@ class Ngram : public Node { void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: diff --git a/src/plugins/intel_cpu/src/nodes/node_config.h b/src/plugins/intel_cpu/src/nodes/node_config.h index 5dfe123bff3dd6..d09e2132c1b470 100644 --- a/src/plugins/intel_cpu/src/nodes/node_config.h +++ b/src/plugins/intel_cpu/src/nodes/node_config.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include "memory_desc/blocked_memory_desc.h" #include "memory_desc/cpu_memory_desc.h" @@ -46,7 +47,7 @@ class PortDescBase_ : public PortDescBase { class PortDescGeneric : public PortDescBase_ { public: - explicit PortDescGeneric(MemoryDescPtr memDesc) : _memDesc(memDesc) { + explicit PortDescGeneric(MemoryDescPtr memDesc) : _memDesc(std::move(memDesc)) { if (nullptr == _memDesc) { OPENVINO_THROW("ParameterMismatch: PortDescGeneric 
constructor got nullptr"); } @@ -67,7 +68,7 @@ class PortDescBlocked : public PortDescBase_ { using CmpMask = BlockedMemoryDesc::CmpMask; public: - PortDescBlocked(BlockedMemoryDescPtr memDesc, CmpMask cmpMask) : _memDesc(memDesc), _cmpMask(cmpMask) { + PortDescBlocked(BlockedMemoryDescPtr memDesc, CmpMask cmpMask) : _memDesc(std::move(memDesc)), _cmpMask(cmpMask) { if (nullptr == _memDesc) { OPENVINO_THROW("ParameterMismatch: PortDescBlocked constructor got nullptr"); } @@ -88,7 +89,7 @@ class PortConfig { public: PortConfig() = default; - PortConfig(MemoryDescPtr desc, + PortConfig(const MemoryDescPtr& desc, BlockedMemoryDesc::CmpMask cmpMask = BlockedMemoryDesc::FULL_MASK, int inPlacePort = -1, bool isConstant = false) @@ -130,23 +131,23 @@ class PortConfig { return _desc; } - void setMemDesc(MemoryDescPtr desc) { + void setMemDesc(const MemoryDescPtr& desc) { _desc = createPortDesc(desc, BlockedMemoryDesc::FULL_MASK); } - void setMemDesc(BlockedMemoryDescPtr desc, BlockedMemoryDesc::CmpMask cmpMask) { + void setMemDesc(const BlockedMemoryDescPtr& desc, BlockedMemoryDesc::CmpMask cmpMask) { _desc = createPortDesc(desc, cmpMask); } private: - PortDescBasePtr createPortDesc(MemoryDescPtr desc, BlockedMemoryDesc::CmpMask cmpMask) { + PortDescBasePtr createPortDesc(const MemoryDescPtr& desc, BlockedMemoryDesc::CmpMask cmpMask) { if (desc->getType() & Blocked) return createPortDesc(std::dynamic_pointer_cast(desc), cmpMask); return std::make_shared(desc); } - PortDescBasePtr createPortDesc(BlockedMemoryDescPtr desc, BlockedMemoryDesc::CmpMask cmpMask) { + PortDescBasePtr createPortDesc(const BlockedMemoryDescPtr& desc, BlockedMemoryDesc::CmpMask cmpMask) { return std::make_shared(desc, cmpMask); } diff --git a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp index 05b3b12f284285..8ad10c25ebf1a3 100644 --- a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp @@ -221,7 +221,7 @@ void NonMaxSuppression::createJitKernel() { #endif // OPENVINO_ARCH_X86_64 } -void NonMaxSuppression::executeDynamicImpl(dnnl::stream strm) { +void NonMaxSuppression::executeDynamicImpl(const dnnl::stream& strm) { if (hasEmptyInputTensors() || (inputShapes.size() > NMS_MAX_OUTPUT_BOXES_PER_CLASS && getSrcDataAtPortAs(NMS_MAX_OUTPUT_BOXES_PER_CLASS)[0] == 0)) { redefineOutputMemory({{0, 3}, {0, 3}, {1}}); @@ -231,7 +231,7 @@ void NonMaxSuppression::executeDynamicImpl(dnnl::stream strm) { execute(strm); } -void NonMaxSuppression::execute(dnnl::stream strm) { +void NonMaxSuppression::execute(const dnnl::stream& strm) { const auto inputs_num = inputShapes.size(); size_t max_number_of_boxes = m_output_boxes_per_class * m_batches_num * m_classes_num; diff --git a/src/plugins/intel_cpu/src/nodes/non_max_suppression.h b/src/plugins/intel_cpu/src/nodes/non_max_suppression.h index 91d81c1c2dcceb..8d7aa93b969ad7 100644 --- a/src/plugins/intel_cpu/src/nodes/non_max_suppression.h +++ b/src/plugins/intel_cpu/src/nodes/non_max_suppression.h @@ -21,9 +21,9 @@ class NonMaxSuppression : public Node { void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool created() const override; diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.cpp b/src/plugins/intel_cpu/src/nodes/non_zero.cpp index 57eee8520d0ccf..5e9a09d13a9dcf 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_zero.cpp @@ -31,7 +31,7 @@ bool NonZero::isSupportedOperation(const std::shared_ptr& op, st return true; } -NonZero::NonZero(const std::shared_ptr& op, const GraphContext::CPtr context) +NonZero::NonZero(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, 
InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -119,11 +119,11 @@ struct NonZero::NonZeroExecute { } }; -void NonZero::executeDynamicImpl(dnnl::stream strm) { +void NonZero::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void NonZero::execute(dnnl::stream strm) { +void NonZero::execute(const dnnl::stream& strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); NonZeroContext ctx = {*this}; OV_SWITCH(intel_cpu, diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.h b/src/plugins/intel_cpu/src/nodes/non_zero.h index 7ba3552a45c846..e9e2bef9fe294a 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.h +++ b/src/plugins/intel_cpu/src/nodes/non_zero.h @@ -19,11 +19,11 @@ namespace node { class NonZero : public Node { public: - NonZero(const std::shared_ptr& op, const GraphContext::CPtr context); + NonZero(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override { return false; @@ -31,7 +31,7 @@ class NonZero : public Node { bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; bool isExecutable() const override { diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index 5b83b043d53e30..e416781cdf69a2 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -5,6 +5,7 @@ #include "normalize.h" #include +#include #include "common/cpu_memcpy.h" #include 
"common/primitive_hashing_utils.hpp" @@ -773,7 +774,7 @@ bool NormalizeL2::isSupportedOperation(const std::shared_ptr& op return true; } -NormalizeL2::NormalizeL2(const std::shared_ptr& op, const GraphContext::CPtr context) +NormalizeL2::NormalizeL2(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -965,11 +966,11 @@ void NormalizeL2::prepareParams() { execPtr = result.first; } -void NormalizeL2::executeDynamicImpl(dnnl::stream strm) { +void NormalizeL2::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void NormalizeL2::execute(dnnl::stream strm) { +void NormalizeL2::execute(const dnnl::stream& strm) { if (!execPtr) THROW_ERROR("doesn't have a compiled executor."); @@ -983,9 +984,8 @@ void NormalizeL2::execute(dnnl::stream strm) { template class NormalizeL2::NormalizeL2CornerCaseExecutor : public NormalizeL2::NormalizeL2Executor { public: - NormalizeL2CornerCaseExecutor(const VectorDims& dims) { - workAmount = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); - } + NormalizeL2CornerCaseExecutor(const VectorDims& dims) + : workAmount(std::accumulate(dims.begin(), dims.end(), 1, std::multiplies())) {} void exec(const uint8_t* src_ptr, uint8_t* dst_ptr, const void** post_ops_data) override { normalize(reinterpret_cast(src_ptr), reinterpret_cast(dst_ptr)); @@ -1347,8 +1347,8 @@ class NormalizeL2::NormalizeL2ReferenceExecutor : public NormalizeL2::NormalizeL public: NormalizeL2ReferenceExecutor(const NormalizeL2Attrs& attrs, const dnnl::primitive_attr& kernel_attrs, - const VectorDims& dims) - : dims(dims), + VectorDims dims) + : dims(std::move(dims)), kernel_attrs(kernel_attrs), attrs(attrs) { if (attrs.layout != LayoutType::ncsp) { diff --git a/src/plugins/intel_cpu/src/nodes/normalize.h b/src/plugins/intel_cpu/src/nodes/normalize.h index 08422fc52f4b68..fe6c6c83589fc1 100644 --- 
a/src/plugins/intel_cpu/src/nodes/normalize.h +++ b/src/plugins/intel_cpu/src/nodes/normalize.h @@ -82,21 +82,21 @@ struct jit_uni_normalize_kernel { #endif class NormalizeL2 : public Node { public: - NormalizeL2(const std::shared_ptr& op, const GraphContext::CPtr context); + NormalizeL2(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } bool canFuse(const NodePtr& node) const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool isExecutable() const override; diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.cpp b/src/plugins/intel_cpu/src/nodes/one_hot.cpp index 2a8f6aea669107..efdcbc9f0d31db 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.cpp +++ b/src/plugins/intel_cpu/src/nodes/one_hot.cpp @@ -41,7 +41,7 @@ bool OneHot::isSupportedOperation(const std::shared_ptr& op, std return true; } -OneHot::OneHot(const std::shared_ptr& op, const GraphContext::CPtr context) +OneHot::OneHot(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, OneHotShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -132,11 +132,11 @@ void OneHot::one_hot(size_t prefix_size, size_t suffix_size) { }); } -void OneHot::executeDynamicImpl(dnnl::stream strm) { +void OneHot::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void OneHot::execute(dnnl::stream strm) { +void OneHot::execute(const dnnl::stream& strm) { std::size_t prefix_size = 1; auto input_dims = 
getParentEdgeAt(0)->getMemory().getStaticDims(); diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.h b/src/plugins/intel_cpu/src/nodes/one_hot.h index f33efaef39fc26..bb89a7c91df6c5 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.h +++ b/src/plugins/intel_cpu/src/nodes/one_hot.h @@ -12,19 +12,19 @@ namespace node { class OneHot : public Node { public: - OneHot(const std::shared_ptr& op, const GraphContext::CPtr context); + OneHot(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override{}; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/pad.cpp b/src/plugins/intel_cpu/src/nodes/pad.cpp index efbf51b3e05b7f..4d651e2d4f87a1 100644 --- a/src/plugins/intel_cpu/src/nodes/pad.cpp +++ b/src/plugins/intel_cpu/src/nodes/pad.cpp @@ -38,7 +38,7 @@ bool Pad::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr context) +Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -387,14 +387,14 @@ void Pad::PadExecutor::exec(const MemoryPtr& srcMemPtr, const MemoryPtr& dstMemP } } -void Pad::execute(dnnl::stream strm) { +void Pad::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("has not compiled executor."); execPtr->exec(getSrcMemoryAtPort(0), getDstMemoryAtPort(0)); } -void 
Pad::executeDynamicImpl(dnnl::stream strm) { +void Pad::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/pad.h b/src/plugins/intel_cpu/src/nodes/pad.h index 38fb57e4c9971a..02915eff6a7d3b 100644 --- a/src/plugins/intel_cpu/src/nodes/pad.h +++ b/src/plugins/intel_cpu/src/nodes/pad.h @@ -12,13 +12,13 @@ namespace node { class Pad : public Node { public: - Pad(const std::shared_ptr& op, const GraphContext::CPtr context); + Pad(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; @@ -27,7 +27,7 @@ class Pad : public Node { bool needPrepareParams() const override; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: using VectorIdxs = std::vector; diff --git a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp index b558a129eb9b75..484e2af6d96b19 100644 --- a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp @@ -52,7 +52,7 @@ bool PagedAttentionKey::operator==(const PagedAttentionKey& rhs) const { return retVal; } -PagedAttention::PagedAttention(const std::shared_ptr& op, const GraphContext::CPtr context) +PagedAttention::PagedAttention(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -180,7 +180,7 @@ void PagedAttention::createPrimitive() { m_executor = result.first; } -void PagedAttention::execute(dnnl::stream 
strm) { +void PagedAttention::execute(const dnnl::stream& strm) { auto orginInputNumber = getOriginalInputsNumber(); std::vector inputs(orginInputNumber); std::vector outputs(m_hasScore ? 2 : 1); diff --git a/src/plugins/intel_cpu/src/nodes/paged_attn.h b/src/plugins/intel_cpu/src/nodes/paged_attn.h index df83f550de0c34..8526b3b8dda999 100644 --- a/src/plugins/intel_cpu/src/nodes/paged_attn.h +++ b/src/plugins/intel_cpu/src/nodes/paged_attn.h @@ -16,7 +16,7 @@ namespace node { class PagedAttention : public Node { public: - PagedAttention(const std::shared_ptr& op, const GraphContext::CPtr context); + PagedAttention(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -29,11 +29,11 @@ class PagedAttention : public Node { bool needPrepareParams() const override { return false; } - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; void createPrimitive() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/pooling.cpp b/src/plugins/intel_cpu/src/nodes/pooling.cpp index 28d0f20654469f..7b1bb1653404bc 100644 --- a/src/plugins/intel_cpu/src/nodes/pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/pooling.cpp @@ -164,14 +164,15 @@ bool Pooling::isSupportedOperation(const std::shared_ptr& op, st return true; } -Pooling::Pooling(const std::shared_ptr& op, const GraphContext::CPtr context) +Pooling::Pooling(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - auto get_attributes = 
[](std::vector& internal_attribute, const std::vector external_attribute) { + auto get_attributes = [](std::vector& internal_attribute, + const std::vector& external_attribute) { for (size_t i = 0; i < external_attribute.size(); i++) { internal_attribute.push_back(static_cast(external_attribute[i])); } @@ -503,7 +504,7 @@ void Pooling::prepareParams() { } } -void Pooling::execute(dnnl::stream strm) { +void Pooling::execute(const dnnl::stream& strm) { if (dnnlExecPtr) { dnnlExecPtr->exec(primArgs, strm); } else if (execPtr) { @@ -522,7 +523,7 @@ void Pooling::execute(dnnl::stream strm) { } } -void Pooling::executeDynamicImpl(dnnl::stream strm) { +void Pooling::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/pooling.h b/src/plugins/intel_cpu/src/nodes/pooling.h index 74e6c5456f1d01..269a155ff9b90b 100644 --- a/src/plugins/intel_cpu/src/nodes/pooling.h +++ b/src/plugins/intel_cpu/src/nodes/pooling.h @@ -15,7 +15,7 @@ namespace node { class Pooling : public Node { public: - Pooling(const std::shared_ptr& op, const GraphContext::CPtr context); + Pooling(const std::shared_ptr& op, const GraphContext::CPtr& context); void createDescriptor(const std::vector& inputDesc, const std::vector& outputDesc) override; @@ -29,8 +29,8 @@ class Pooling : public Node { } void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp index 9f892f733c7375..d1a2acd05d1a7a 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp @@ -43,7 +43,7 @@ bool PriorBox::isSupportedOperation(const std::shared_ptr& 
op, s return true; } -PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr context) +PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PriorBoxShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -73,7 +73,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr } for (float _aspect_ratio : aspect_ratio) { - if (fabs(aspect_ratio_item - _aspect_ratio) < 1e-6) { + if (std::fabs(aspect_ratio_item - _aspect_ratio) < 1e-6) { exist = true; break; } @@ -142,7 +142,7 @@ void PriorBox::createPrimitive() { } } -void PriorBox::execute(dnnl::stream strm) { +void PriorBox::execute(const dnnl::stream& strm) { const int* in_data = getSrcDataAtPortAs(0); const int H = in_data[0]; const int W = in_data[1]; diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.h b/src/plugins/intel_cpu/src/nodes/priorbox.h index 9d2a9e7ecebe9c..b9b7679e120af3 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.h +++ b/src/plugins/intel_cpu/src/nodes/priorbox.h @@ -12,18 +12,18 @@ namespace node { class PriorBox : public Node { public: - PriorBox(const std::shared_ptr& op, const GraphContext::CPtr context); + PriorBox(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp index fb21f1e8c6b5ab..26cd97f2334a7f 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp 
+++ b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp @@ -31,7 +31,7 @@ bool PriorBoxClustered::isSupportedOperation(const std::shared_ptr& op, const GraphContext::CPtr context) +PriorBoxClustered::PriorBoxClustered(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PriorBoxClusteredShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -93,7 +93,7 @@ void PriorBoxClustered::createPrimitive() { } } -void PriorBoxClustered::execute(dnnl::stream strm) { +void PriorBoxClustered::execute(const dnnl::stream& strm) { const int* in_data = getSrcDataAtPortAs(0); const int layer_height = in_data[0]; const int layer_width = in_data[1]; diff --git a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.h b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.h index e077f4b123ccc3..a6ee787545f8d5 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.h +++ b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.h @@ -12,18 +12,18 @@ namespace node { class PriorBoxClustered : public Node { public: - PriorBoxClustered(const std::shared_ptr& op, const GraphContext::CPtr context); + PriorBoxClustered(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/proposal.cpp b/src/plugins/intel_cpu/src/nodes/proposal.cpp index 722a997f9a429c..6fefd4ca3e24b9 100644 --- a/src/plugins/intel_cpu/src/nodes/proposal.cpp +++ b/src/plugins/intel_cpu/src/nodes/proposal.cpp @@ -92,7 +92,7 @@ bool 
Proposal::isSupportedOperation(const std::shared_ptr& op, s return true; } -Proposal::Proposal(const std::shared_ptr& op, const GraphContext::CPtr context) +Proposal::Proposal(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -156,11 +156,11 @@ void Proposal::initSupportedPrimitiveDescriptors() { } } -void Proposal::executeDynamicImpl(dnnl::stream strm) { +void Proposal::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Proposal::execute(dnnl::stream strm) { +void Proposal::execute(const dnnl::stream& strm) { try { const float* probabilitiesData = getSrcDataAtPortAs(PROBABILITIES_IN_IDX); const float* anchorsData = getSrcDataAtPortAs(ANCHORS_IN_IDX); diff --git a/src/plugins/intel_cpu/src/nodes/proposal.h b/src/plugins/intel_cpu/src/nodes/proposal.h index 3ee1100a5551f7..064eb11aebd974 100644 --- a/src/plugins/intel_cpu/src/nodes/proposal.h +++ b/src/plugins/intel_cpu/src/nodes/proposal.h @@ -15,17 +15,17 @@ namespace node { class Proposal : public Node { public: - Proposal(const std::shared_ptr& op, const GraphContext::CPtr context); + Proposal(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp index 3049e82dcd93ee..e171b0113ac4c5 100644 --- a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp @@ -59,7 +59,7 @@ bool PSROIPooling::isSupportedOperation(const std::shared_ptr& o return true; } -PSROIPooling::PSROIPooling(const std::shared_ptr& op, const GraphContext::CPtr context) +PSROIPooling::PSROIPooling(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -203,8 +203,8 @@ void PSROIPooling::unpackParams(const BlockedMemoryDesc& srcDesc, const bool outIsBlk = dstDesc.hasLayoutType(LayoutType::nCsp16c) || dstDesc.hasLayoutType(LayoutType::nCsp8c); size_t expectedInBlockDimsSize = (inpIsBlk ? 5 : 4); size_t expectedOutBlockDimsSize = (outIsBlk ? 5 : 4); - auto inBlkDims = srcDesc.getBlockDims(); - auto outBlkDims = dstDesc.getBlockDims(); + const auto& inBlkDims = srcDesc.getBlockDims(); + const auto& outBlkDims = dstDesc.getBlockDims(); if (inBlkDims.size() != expectedInBlockDimsSize) THROW_CPU_NODE_ERR("has unexpected size of blocking dims in input (given ", inBlkDims.size(), @@ -601,7 +601,7 @@ struct PSROIPooling::PSROIPoolingExecute { } }; -void PSROIPooling::execute(dnnl::stream strm) { +void PSROIPooling::execute(const dnnl::stream& strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); auto outputPrec = getChildEdgeAt(0)->getMemory().getDesc().getPrecision(); diff --git a/src/plugins/intel_cpu/src/nodes/psroi_pooling.h b/src/plugins/intel_cpu/src/nodes/psroi_pooling.h index 2a0f59de26430e..08912df92087a0 100644 --- a/src/plugins/intel_cpu/src/nodes/psroi_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/psroi_pooling.h @@ -12,12 +12,12 @@ namespace node { class PSROIPooling : public Node { public: - PSROIPooling(const std::shared_ptr& op, const GraphContext::CPtr context); + PSROIPooling(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() 
override; void createPrimitive() override{}; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp b/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp index 73cc613e86a802..d6f50e2df78244 100644 --- a/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp +++ b/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp @@ -5,6 +5,7 @@ #include "qkv_proj.h" #include +#include #include #include "common/bfloat16.hpp" @@ -66,7 +67,7 @@ struct QKVProjection::Executor : public QKVProjection::ExecutorBase { WeightBuffer wbuffer; - Executor(QKVProjection* pnode, DnnlScratchPadPtr scrachPad) : m_node(pnode), m_scrachPad(scrachPad) { + Executor(QKVProjection* pnode, DnnlScratchPadPtr scrachPad) : m_node(pnode), m_scrachPad(std::move(scrachPad)) { PlainTensor w0(pnode->getSrcMemoryAtPort(1)); PlainTensor w1(pnode->getSrcMemoryAtPort(2)); PlainTensor w2(pnode->getSrcMemoryAtPort(3)); @@ -329,12 +330,12 @@ void QKVProjection::createPrimitive() { } } -void QKVProjection::execute(dnnl::stream strm) { +void QKVProjection::execute(const dnnl::stream& strm) { MAYBE_UNUSED(strm); m_executor->execute(); } -QKVProjection::QKVProjection(const std::shared_ptr& op, const GraphContext::CPtr context) +QKVProjection::QKVProjection(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; diff --git a/src/plugins/intel_cpu/src/nodes/qkv_proj.h b/src/plugins/intel_cpu/src/nodes/qkv_proj.h index 2e2c444612c6f2..d65dbc79daabd8 100644 --- a/src/plugins/intel_cpu/src/nodes/qkv_proj.h +++ b/src/plugins/intel_cpu/src/nodes/qkv_proj.h @@ -17,7 +17,7 @@ namespace node { class QKVProjection : public Node { public: - QKVProjection(const std::shared_ptr& op, const GraphContext::CPtr context); + QKVProjection(const std::shared_ptr& 
op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -27,11 +27,11 @@ class QKVProjection : public Node { return false; } void createPrimitive() override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage, int concurrency = 0, diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp index ad52060dc9dd2b..eeb36442a71bc7 100644 --- a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp @@ -152,7 +152,7 @@ void RandomUniform::prepareParams() { } } -void RandomUniform::execute(dnnl::stream strm) { +void RandomUniform::execute(const dnnl::stream& strm) { if (!m_const_inputs[MIN_VAL]) { initEdgeValues(m_min_val, getSrcDataAtPort(MIN_VAL), m_output_prc); if (m_const_inputs[MAX_VAL]) { @@ -177,7 +177,7 @@ void RandomUniform::execute(dnnl::stream strm) { } } -void RandomUniform::executeDynamicImpl(dnnl::stream strm) { +void RandomUniform::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -191,7 +191,7 @@ std::string RandomUniform::getPrimitiveDescriptorType() const { std::string str_type; - auto add_type = [&](std::string t) { + auto add_type = [&](const std::string& t) { if (!str_type.empty() && t.c_str()[0] != '_') str_type += "_"; str_type += t; diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.hpp b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp index 404e7edaa3f041..7c0321b8183bfc 100644 --- a/src/plugins/intel_cpu/src/nodes/random_uniform.hpp +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.hpp @@ -37,9 +37,9 @@ class RandomUniform : public Node 
{ void prepareParams() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool isExecutable() const override; diff --git a/src/plugins/intel_cpu/src/nodes/range.cpp b/src/plugins/intel_cpu/src/nodes/range.cpp index 02eb470d2f6d5b..1f0a02e5594d55 100644 --- a/src/plugins/intel_cpu/src/nodes/range.cpp +++ b/src/plugins/intel_cpu/src/nodes/range.cpp @@ -29,7 +29,7 @@ bool Range::isSupportedOperation(const std::shared_ptr& op, std: return true; } -Range::Range(const std::shared_ptr& op, const GraphContext::CPtr context) +Range::Range(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -87,11 +87,11 @@ void Range::initSupportedPrimitiveDescriptors() { } } -void Range::executeDynamicImpl(dnnl::stream strm) { +void Range::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Range::execute(dnnl::stream strm) { +void Range::execute(const dnnl::stream& strm) { StatusCode retcode = OK; switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision()) { case ov::element::f32: diff --git a/src/plugins/intel_cpu/src/nodes/range.h b/src/plugins/intel_cpu/src/nodes/range.h index bd21f12495f76b..e6f0d637adb5fb 100644 --- a/src/plugins/intel_cpu/src/nodes/range.h +++ b/src/plugins/intel_cpu/src/nodes/range.h @@ -12,11 +12,11 @@ namespace node { class Range : public Node { public: - Range(const std::shared_ptr& op, const GraphContext::CPtr context); + Range(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() 
const override { return false; @@ -24,7 +24,7 @@ class Range : public Node { bool needShapeInfer() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; enum StatusCode : int { diff --git a/src/plugins/intel_cpu/src/nodes/rdft.cpp b/src/plugins/intel_cpu/src/nodes/rdft.cpp index d39aa9e23343fe..4639bbd8a8c814 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.cpp +++ b/src/plugins/intel_cpu/src/nodes/rdft.cpp @@ -74,7 +74,7 @@ static std::vector getDefaultSignalSizes(const VectorDims& inputShape, return signalSizes; } -RDFT::RDFT(const std::shared_ptr& op, const GraphContext::CPtr context) +RDFT::RDFT(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -151,7 +151,7 @@ void RDFT::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(configurators, {{LayoutType::ncsp, ov::element::f32}}, impl_desc_type::ref_any); } -void RDFT::execute(dnnl::stream strm) { +void RDFT::execute(const dnnl::stream& strm) { const auto& inputMem = getParentEdgeAt(DATA_INDEX)->getMemory(); const auto& outputMem = getChildEdgeAt(0)->getMemory(); const auto& inputShape = inputMem.getStaticDims(); @@ -177,7 +177,7 @@ void RDFT::execute(dnnl::stream strm) { outputStrides); } -void RDFT::executeDynamicImpl(dnnl::stream strm) { +void RDFT::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/rdft.h b/src/plugins/intel_cpu/src/nodes/rdft.h index 0238ecb36867ad..e8990e383d8ff3 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.h +++ b/src/plugins/intel_cpu/src/nodes/rdft.h @@ -99,13 +99,13 @@ struct RDFTExecutor { class RDFT : public Node { public: - RDFT(const std::shared_ptr& op, const GraphContext::CPtr 
context); + RDFT(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool created() const override; void createPrimitive() override; diff --git a/src/plugins/intel_cpu/src/nodes/reduce.cpp b/src/plugins/intel_cpu/src/nodes/reduce.cpp index f320ed270c6d18..04dfbc2c35e30a 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.cpp +++ b/src/plugins/intel_cpu/src/nodes/reduce.cpp @@ -1959,7 +1959,7 @@ bool Reduce::isSupportedOperation(const std::shared_ptr& op, std return true; } -Reduce::Reduce(const std::shared_ptr& op, const GraphContext::CPtr context) +Reduce::Reduce(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -2362,11 +2362,11 @@ void Reduce::create_reduce_kernel(std::shared_ptr& kernel jit_mode = jit_mode && kernel; } -void Reduce::executeDynamicImpl(dnnl::stream strm) { +void Reduce::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Reduce::execute(dnnl::stream strm) { +void Reduce::execute(const dnnl::stream& strm) { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(REDUCE_DATA); diff --git a/src/plugins/intel_cpu/src/nodes/reduce.h b/src/plugins/intel_cpu/src/nodes/reduce.h index e0d68241365ef4..16cf99bd9c75d4 100644 --- a/src/plugins/intel_cpu/src/nodes/reduce.h +++ b/src/plugins/intel_cpu/src/nodes/reduce.h @@ -88,15 +88,15 @@ struct jit_uni_reduce_post_kernel { class Reduce : public Node { public: - Reduce(const std::shared_ptr& op, const GraphContext::CPtr context); + Reduce(const std::shared_ptr& op, const GraphContext::CPtr& 
context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void prepareParams() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; int getFusingAxis() const override; bool canFuse(const NodePtr& node) const override; bool canBeInPlace() const override { diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index 78d62bebf50c83..c7f1bbe30ff574 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -4,6 +4,8 @@ #include "reference.h" +#include + #include "common/cpu_memcpy.h" #include "shape_inference/shape_inference.hpp" @@ -24,12 +26,10 @@ class ReferenceShapeInferFactory : public ShapeInferFactory { namespace node { -Reference::Reference(const std::shared_ptr& op, - const GraphContext::CPtr& context, - const std::string& errorMessage) +Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, std::string errorMessage) : Node(op, context, ReferenceShapeInferFactory(op)), ovCoreNode(op), - additionalErrorMessage(errorMessage) { + additionalErrorMessage(std::move(errorMessage)) { if (!op->has_evaluate()) { OPENVINO_THROW_NOT_IMPLEMENTED( "Cannot fallback on ngraph reference implementation. 
Ngraph::Node::evaluate() is not implemented for op: ", @@ -63,7 +63,7 @@ void Reference::initSupportedPrimitiveDescriptors() { void Reference::createPrimitive() {} -void Reference::execute(dnnl::stream strm) { +void Reference::execute(const dnnl::stream& strm) { auto inputs = prepareInputs(); auto outputs = prepareOutputs(); if (!ovCoreNode->evaluate(outputs, inputs)) { @@ -71,7 +71,7 @@ void Reference::execute(dnnl::stream strm) { } } -void Reference::executeDynamicImpl(dnnl::stream strm) { +void Reference::executeDynamicImpl(const dnnl::stream& strm) { auto inputs = prepareInputs(); ov::TensorVector outputs; auto result = Node::shapeInfer(); diff --git a/src/plugins/intel_cpu/src/nodes/reference.h b/src/plugins/intel_cpu/src/nodes/reference.h index 705f2308721130..782c55716506a8 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.h +++ b/src/plugins/intel_cpu/src/nodes/reference.h @@ -12,12 +12,12 @@ namespace node { class Reference : public Node { public: - Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, const std::string& errorMessage); + Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, std::string errorMessage); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needShapeInfer() const override; @@ -27,7 +27,7 @@ class Reference : public Node { bool isExecutable() const override { return true; } - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: ov::TensorVector prepareInputs() const; diff --git a/src/plugins/intel_cpu/src/nodes/region_yolo.cpp b/src/plugins/intel_cpu/src/nodes/region_yolo.cpp index fc198bc0cc2d72..10fd3ef2bb77f5 100644 --- a/src/plugins/intel_cpu/src/nodes/region_yolo.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/region_yolo.cpp @@ -258,7 +258,7 @@ bool RegionYolo::needPrepareParams() const { return false; } -RegionYolo::RegionYolo(const std::shared_ptr& op, const GraphContext::CPtr context) +RegionYolo::RegionYolo(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -387,7 +387,7 @@ inline void RegionYolo::calculate_logistic(size_t start_index, int count, uint8_ } } -void RegionYolo::execute(dnnl::stream strm) { +void RegionYolo::execute(const dnnl::stream& strm) { const auto& inShape = getParentEdgeAt(0)->getMemory().getShape(); const auto& inDims = inShape.getStaticDims(); size_t B = (inShape.getRank() > 0) ? inDims[0] : 1; diff --git a/src/plugins/intel_cpu/src/nodes/region_yolo.h b/src/plugins/intel_cpu/src/nodes/region_yolo.h index 1f34e8cb0431a6..77054a249b664e 100644 --- a/src/plugins/intel_cpu/src/nodes/region_yolo.h +++ b/src/plugins/intel_cpu/src/nodes/region_yolo.h @@ -40,19 +40,19 @@ struct jit_uni_logistic_kernel { class RegionYolo : public Node { public: - RegionYolo(const std::shared_ptr& op, const GraphContext::CPtr context); + RegionYolo(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; protected: bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp index 901df6c6bf8742..bde3481da6fc47 100644 --- 
a/src/plugins/intel_cpu/src/nodes/reorder.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp @@ -32,7 +32,7 @@ bool Reorder::isExecutable() const { return Node::isExecutable() && !isOptimized; } -Reorder::Reorder(const std::shared_ptr& op, const GraphContext::CPtr context) +Reorder::Reorder(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { THROW_CPU_NODE_ERR("could not create CPU node from Core node."); } @@ -40,7 +40,7 @@ Reorder::Reorder(const std::shared_ptr& op, const GraphContext::CPtr c Reorder::Reorder(const MemoryDesc& input, const MemoryDesc& output, const std::string& name, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node("Reorder", {input.getShape()}, {output.getShape()}, @@ -128,11 +128,11 @@ void Reorder::createPrimitive() { } } -void Reorder::executeDynamicImpl(dnnl::stream strm) { +void Reorder::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Reorder::prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr childDesc) { +void Reorder::prepareReorderAsTranspose(const MemoryDescPtr& parentDesc, const MemoryDescPtr& childDesc) { auto getOrderAndBlockedDims = [](const MemoryDesc& lhs, const MemoryDesc& rhs) -> std::pair, std::vector> { const auto& in = lhs.as()->getBlockDims(); @@ -399,7 +399,7 @@ void Reorder::optimizedNspc2Ncsp() { }); } -void Reorder::execute(dnnl::stream strm) { +void Reorder::execute(const dnnl::stream& strm) { #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) if (transposeExecutor) { auto dstMemPtr = getDstMemoryAtPort(0); @@ -449,7 +449,7 @@ std::string Reorder::getReorderArgs(const MemoryDesc& parentDesc, const MemoryDe return inArgs + "_" + outArgs; } -void Reorder::reorderData(const IMemory& input, const IMemory& output, MultiCachePtr cache) { +void Reorder::reorderData(const IMemory& input, const IMemory& output, const MultiCachePtr& cache) { if (!input.getDesc().isDefined() || 
!output.getDesc().isDefined()) OPENVINO_THROW("Can't reorder data with dynamic shapes"); @@ -510,7 +510,7 @@ void Reorder::reorderData(const IMemory& input, const IMemory& output, MultiCach input.getSize() / input.getDesc().getPrecision().size()); auto tmpDesc = input.getDesc().cloneWithNewPrecision(outPrc); - Memory tmpMem(engine, std::move(tmpDesc), tmpBuff.data()); + Memory tmpMem(engine, tmpDesc, tmpBuff.data()); srcMemory = tmpMem.getPrimitive(); reorder = getReorderPrim(cache, dstMemory.get_engine(), srcMemory.get_desc(), dstMemory.get_desc()); diff --git a/src/plugins/intel_cpu/src/nodes/reorder.h b/src/plugins/intel_cpu/src/nodes/reorder.h index 33aba78c323bac..d04416157a0991 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.h +++ b/src/plugins/intel_cpu/src/nodes/reorder.h @@ -14,15 +14,15 @@ namespace node { class Reorder : public Node { public: - Reorder(const std::shared_ptr& op, const GraphContext::CPtr context); + Reorder(const std::shared_ptr& op, const GraphContext::CPtr& context); Reorder(const MemoryDesc& input, const MemoryDesc& output, const std::string& name, - const GraphContext::CPtr context); + const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; const std::vector& getDefaultImplPriority() override; @@ -32,7 +32,7 @@ class Reorder : public Node { void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void setSrcPermutation(const std::vector& src_perm) { this->src_permutation = src_perm; @@ -59,7 +59,7 @@ class Reorder : public Node { static std::string getReorderArgs(const MemoryDesc& parentDesc, const MemoryDesc& childDesc); - static void reorderData(const IMemory& input, const IMemory& output, MultiCachePtr cache = nullptr); + static void 
reorderData(const IMemory& input, const IMemory& output, const MultiCachePtr& cache = nullptr); private: dnnl::reorder::primitive prim; @@ -79,7 +79,7 @@ class Reorder : public Node { void optimizedNcsp2Nspc(); void createReorderPrimitive(const DnnlMemoryDescPtr& srcDesc, const DnnlMemoryDescPtr& dstDesc); - void prepareReorderAsTranspose(MemoryDescPtr parentDesc, MemoryDescPtr childDesc); + void prepareReorderAsTranspose(const MemoryDescPtr& parentDesc, const MemoryDescPtr& childDesc); TransposeExecutorPtr transposeExecutor; }; diff --git a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp index 015bc38adea65b..2c098d83d97215 100644 --- a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp @@ -26,7 +26,7 @@ bool ReorgYolo::isSupportedOperation(const std::shared_ptr& op, return true; } -ReorgYolo::ReorgYolo(const std::shared_ptr& op, const GraphContext::CPtr context) +ReorgYolo::ReorgYolo(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -52,11 +52,11 @@ void ReorgYolo::initSupportedPrimitiveDescriptors() { impl_desc_type::ref_any); } -void ReorgYolo::executeDynamicImpl(dnnl::stream strm) { +void ReorgYolo::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void ReorgYolo::execute(dnnl::stream strm) { +void ReorgYolo::execute(const dnnl::stream& strm) { const auto* src_data = getSrcDataAtPortAs(0); auto* dst_data = getDstDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/reorg_yolo.h b/src/plugins/intel_cpu/src/nodes/reorg_yolo.h index a2f341fee4aed2..0c8f4ec445eda5 100644 --- a/src/plugins/intel_cpu/src/nodes/reorg_yolo.h +++ b/src/plugins/intel_cpu/src/nodes/reorg_yolo.h @@ -12,16 +12,16 @@ namespace node { class ReorgYolo : public Node { public: - ReorgYolo(const std::shared_ptr& op, const GraphContext::CPtr 
context); + ReorgYolo(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; } - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/reshape.cpp b/src/plugins/intel_cpu/src/nodes/reshape.cpp index f4ee25aab83aa8..c79f430aac4bd6 100644 --- a/src/plugins/intel_cpu/src/nodes/reshape.cpp +++ b/src/plugins/intel_cpu/src/nodes/reshape.cpp @@ -30,7 +30,7 @@ bool Reshape::isSupportedOperation(const std::shared_ptr& op, st return true; } -Reshape::Reshape(const std::shared_ptr& op, const GraphContext::CPtr context) +Reshape::Reshape(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, ReshapeShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -38,7 +38,7 @@ Reshape::Reshape(const std::shared_ptr& op, const GraphContext::CPtr c } if (isDynamicNode()) { - auto checkSecondInput = [](const std::shared_ptr& op, const std::string opType) { + auto checkSecondInput = [](const std::shared_ptr& op, const std::string& opType) { if (op->get_input_partial_shape(1).is_dynamic()) { OPENVINO_THROW("CPU plug-in doesn't support ", opType, " node with non static second input"); } @@ -120,11 +120,11 @@ void Reshape::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::unknown); } -void Reshape::executeDynamicImpl(dnnl::stream strm) { +void Reshape::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Reshape::execute(dnnl::stream strm) { +void Reshape::execute(const 
dnnl::stream& strm) { auto srcMemPtr = getSrcMemoryAtPort(0); auto dstMemPtr = getDstMemoryAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/reshape.h b/src/plugins/intel_cpu/src/nodes/reshape.h index f64c6e2807b7e4..7758dfa6e06746 100644 --- a/src/plugins/intel_cpu/src/nodes/reshape.h +++ b/src/plugins/intel_cpu/src/nodes/reshape.h @@ -13,7 +13,7 @@ namespace node { class Reshape : public Node { public: - Reshape(const std::shared_ptr& op, const GraphContext::CPtr context); + Reshape(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; @@ -24,8 +24,8 @@ class Reshape : public Node { bool needPrepareParams() const override { return false; } - void executeDynamicImpl(dnnl::stream strm) override; - void execute(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; + void execute(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp index 9cdc9fa596b436..de8e0319b8d525 100644 --- a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp +++ b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp @@ -28,7 +28,7 @@ bool ReverseSequence::isSupportedOperation(const std::shared_ptr return true; } -ReverseSequence::ReverseSequence(const std::shared_ptr& op, const GraphContext::CPtr context) +ReverseSequence::ReverseSequence(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -98,8 +98,8 @@ void ReverseSequence::prepareParams() { execPtr = std::make_shared(dataDims, seqLengthsDims, dstDims, batch_axis, seq_axis); } -void ReverseSequence::executeDynamicImpl(dnnl::stream strm) { - execute(std::move(strm)); 
+void ReverseSequence::executeDynamicImpl(const dnnl::stream& strm) { + execute(strm); } ReverseSequence::ReverseSequenceExecutor::ReverseSequenceExecutor(const VectorDims& dataDims, @@ -169,7 +169,7 @@ void ReverseSequence::ReverseSequenceExecutor::exec(const MemoryPtr& dataMemPtr, }); } -void ReverseSequence::execute(dnnl::stream strm) { +void ReverseSequence::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("has no compiled executor"); diff --git a/src/plugins/intel_cpu/src/nodes/reverse_sequence.h b/src/plugins/intel_cpu/src/nodes/reverse_sequence.h index 3e9fa7e352a234..6593a383d84b62 100644 --- a/src/plugins/intel_cpu/src/nodes/reverse_sequence.h +++ b/src/plugins/intel_cpu/src/nodes/reverse_sequence.h @@ -12,15 +12,15 @@ namespace node { class ReverseSequence : public Node { public: - ReverseSequence(const std::shared_ptr& op, const GraphContext::CPtr context); + ReverseSequence(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/rms_norm.cpp b/src/plugins/intel_cpu/src/nodes/rms_norm.cpp index b2eda6cf8dad6f..85d09ae093ce10 100644 --- a/src/plugins/intel_cpu/src/nodes/rms_norm.cpp +++ b/src/plugins/intel_cpu/src/nodes/rms_norm.cpp @@ -117,7 +117,7 @@ struct RMSNorm::RMSNormExecutor : public RMSNorm::Executor { }; #endif // OPENVINO_ARCH_X86_64 -RMSNorm::RMSNorm(const std::shared_ptr& op, const GraphContext::CPtr context) +RMSNorm::RMSNorm(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, 
RMSNormShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -172,7 +172,7 @@ void RMSNorm::createPrimitive() { m_executor = result.first; } -void RMSNorm::execute(dnnl::stream strm) { +void RMSNorm::execute(const dnnl::stream& strm) { auto orginInputNumber = getOriginalInputsNumber(); std::vector inputs(orginInputNumber); diff --git a/src/plugins/intel_cpu/src/nodes/rms_norm.h b/src/plugins/intel_cpu/src/nodes/rms_norm.h index 00ace4c13de753..039e13eb2d7c2b 100644 --- a/src/plugins/intel_cpu/src/nodes/rms_norm.h +++ b/src/plugins/intel_cpu/src/nodes/rms_norm.h @@ -13,7 +13,7 @@ namespace node { class RMSNorm : public Node { public: - RMSNorm(const std::shared_ptr& op, const GraphContext::CPtr context); + RMSNorm(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -22,11 +22,11 @@ class RMSNorm : public Node { bool needPrepareParams() const override { return false; } - void executeDynamicImpl(dnnl::stream strm) override { - execute(std::move(strm)); + void executeDynamicImpl(const dnnl::stream& strm) override { + execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; void createPrimitive() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index 7f831ab02c66e3..8140624edd3c37 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -4,6 +4,8 @@ #include "rnn.h" +#include + #include "common/primitive_hashing_utils.hpp" #include "memory_desc/cpu_memory_desc_utils.h" #include "nodes/common/cpu_convert.h" @@ -409,7 +411,7 @@ class RnnShapeInfer : public IShapeInfer { class RnnShapeInferFactory final : public ShapeInferFactory { public: - 
RnnShapeInferFactory(std::shared_ptr op) : m_op(op) {} + RnnShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override { return std::make_shared(m_op); } @@ -420,7 +422,7 @@ class RnnShapeInferFactory final : public ShapeInferFactory { } // namespace -RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr context) +RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, RnnShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -1108,7 +1110,7 @@ void RNN::copyWeightsData() { } namespace { -dnnl::primitive_desc createPrimitiveDescriptor(const dnnl::engine engine, +dnnl::primitive_desc createPrimitiveDescriptor(const dnnl::engine& engine, const dnnl::algorithm cellType, const dnnl::algorithm cellAct, const dnnl::rnn_direction direction, @@ -1385,7 +1387,7 @@ std::shared_ptr RNN::getDstMemDesc(const dnnl::primitive_desc& prim_ return supportedPrimitiveDescriptors[0].getConfig().outConfs[idx].getMemDesc(); } -void RNN::execute(dnnl::stream strm) { +void RNN::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("does not have initialized primitive to execute."); @@ -1423,7 +1425,7 @@ void RNN::execute(dnnl::stream strm) { execPtr->exec(args, strm); } -void RNN::executeDynamicImpl(dnnl::stream strm) { +void RNN::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -1434,11 +1436,11 @@ void RNN::cleanup() { m_initial_weights[2].reset(); } - for (auto it : fusedWith) { + for (const auto& it : fusedWith) { it->cleanup(); } - for (auto it : mergedWith) { + for (const auto& it : mergedWith) { it->cleanup(); } } diff --git a/src/plugins/intel_cpu/src/nodes/rnn.h b/src/plugins/intel_cpu/src/nodes/rnn.h index 7ad53dca6d0e25..f714ba474a55e7 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.h +++ b/src/plugins/intel_cpu/src/nodes/rnn.h @@ -14,7 +14,7 @@ namespace node { class RNN : public Node { public: - RNN(const 
std::shared_ptr& op, const GraphContext::CPtr context); + RNN(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; static bool isCell(const std::shared_ptr& op); @@ -27,7 +27,7 @@ class RNN : public Node { const std::vector& outputDesc) override; std::shared_ptr initPrimitiveAttr() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; inline bool hasNativeOrder() const { return nativeOrder; @@ -39,7 +39,7 @@ class RNN : public Node { protected: void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: void configurePortDataTypes(); @@ -98,10 +98,7 @@ class RNN : public Node { struct Interval { Interval() = default; - Interval(Dim min, Dim max) { - minVal = min; - maxVal = max; - } + Interval(Dim min, Dim max) : minVal(min), maxVal(max) {} bool isStatic() { return minVal == maxVal; diff --git a/src/plugins/intel_cpu/src/nodes/roi_align.cpp b/src/plugins/intel_cpu/src/nodes/roi_align.cpp index 38bf4594c4d882..8b6d388551f034 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_align.cpp @@ -694,7 +694,7 @@ bool ROIAlign::isSupportedOperation(const std::shared_ptr& op, s return true; } -ROIAlign::ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr context) +ROIAlign::ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -864,7 +864,7 @@ struct ROIAlign::ROIAlignExecute { ctx.node.executeSpecified(); } }; -void ROIAlign::execute(dnnl::stream strm) { +void ROIAlign::execute(const dnnl::stream& strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().getDataType(); auto outputPrec = 
getChildEdgeAt(0)->getMemory().getDataType(); if (!((inputPrec == dnnl_bf16 && outputPrec == dnnl_bf16) || (inputPrec == dnnl_f32 && outputPrec == dnnl_f32))) @@ -1185,7 +1185,7 @@ bool ROIAlign::needPrepareParams() const { return false; } -void ROIAlign::executeDynamicImpl(dnnl::stream strm) { +void ROIAlign::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/roi_align.h b/src/plugins/intel_cpu/src/nodes/roi_align.h index dc78a23b4c79d5..c359dd9dddf73b 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align.h +++ b/src/plugins/intel_cpu/src/nodes/roi_align.h @@ -55,16 +55,16 @@ struct jit_uni_roi_align_kernel { class ROIAlign : public Node { public: - ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr context); + ROIAlign(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/roi_align_rotated.cpp b/src/plugins/intel_cpu/src/nodes/roi_align_rotated.cpp index 77de786a773009..d9566c679ce438 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align_rotated.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_align_rotated.cpp @@ -13,7 +13,7 @@ namespace ov { namespace intel_cpu { namespace node { -ROIAlignRotated::ROIAlignRotated(const std::shared_ptr& op, const GraphContext::CPtr context) +ROIAlignRotated::ROIAlignRotated(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { const auto roiAlign = 
ov::as_type_ptr(op); pooledH = roiAlign->get_pooled_h(); @@ -48,7 +48,7 @@ bool ROIAlignRotated::needPrepareParams() const { return false; } -void ROIAlignRotated::executeDynamicImpl(dnnl::stream strm) { +void ROIAlignRotated::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -83,7 +83,7 @@ void ROIAlignRotated::executeImpl() { clockwiseMode); } -void ROIAlignRotated::execute(dnnl::stream) { +void ROIAlignRotated::execute(const dnnl::stream&) { const ov::element::Type type = getOriginalInputPrecisionAtPort(0); executeImpl(); diff --git a/src/plugins/intel_cpu/src/nodes/roi_align_rotated.h b/src/plugins/intel_cpu/src/nodes/roi_align_rotated.h index a73789869ff51f..1f932ea27b854e 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_align_rotated.h +++ b/src/plugins/intel_cpu/src/nodes/roi_align_rotated.h @@ -12,14 +12,14 @@ namespace node { class ROIAlignRotated : public Node { public: - ROIAlignRotated(const std::shared_ptr& op, const GraphContext::CPtr context); + ROIAlignRotated(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: template diff --git a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp index 20013eef3c6b88..db00dfe607c1c4 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/roi_pooling.cpp @@ -401,7 +401,7 @@ bool ROIPooling::isSupportedOperation(const std::shared_ptr& op, return true; } -ROIPooling::ROIPooling(const std::shared_ptr& op, const GraphContext::CPtr context) +ROIPooling::ROIPooling(const std::shared_ptr& op, const GraphContext::CPtr& context) 
: Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -497,7 +497,7 @@ void ROIPooling::createPrimitive() { } } -void ROIPooling::execute(dnnl::stream strm) { +void ROIPooling::execute(const dnnl::stream& strm) { if (execPtr) { const auto& srcMemory0 = getParentEdgeAt(0)->getMemory(); const auto& srcMemory1 = getParentEdgeAt(1)->getMemory(); @@ -508,7 +508,7 @@ void ROIPooling::execute(dnnl::stream strm) { } } -void ROIPooling::executeDynamicImpl(dnnl::stream strm) { +void ROIPooling::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/roi_pooling.h b/src/plugins/intel_cpu/src/nodes/roi_pooling.h index e02be525f0c34b..8852bf6c42e465 100644 --- a/src/plugins/intel_cpu/src/nodes/roi_pooling.h +++ b/src/plugins/intel_cpu/src/nodes/roi_pooling.h @@ -63,15 +63,15 @@ struct jit_uni_roi_pooling_kernel { class ROIPooling : public Node { public: - ROIPooling(const std::shared_ptr& op, const GraphContext::CPtr context); + ROIPooling(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; private: diff --git a/src/plugins/intel_cpu/src/nodes/roll.cpp b/src/plugins/intel_cpu/src/nodes/roll.cpp index 3d6a422b634abd..858f4750463852 100644 --- a/src/plugins/intel_cpu/src/nodes/roll.cpp +++ b/src/plugins/intel_cpu/src/nodes/roll.cpp @@ -34,7 +34,7 @@ bool Roll::isSupportedOperation(const std::shared_ptr& op, std:: return true; } -Roll::Roll(const std::shared_ptr& op, const GraphContext::CPtr context) +Roll::Roll(const std::shared_ptr& op, const 
GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -121,11 +121,11 @@ void Roll::prepareParams() { execPtr = std::make_shared(dataDims, shiftDims, axesDims, dstDims); } -void Roll::executeDynamicImpl(dnnl::stream strm) { - execute(std::move(strm)); +void Roll::executeDynamicImpl(const dnnl::stream& strm) { + execute(strm); } -void Roll::execute(dnnl::stream strm) { +void Roll::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("has no compiled executor"); diff --git a/src/plugins/intel_cpu/src/nodes/roll.h b/src/plugins/intel_cpu/src/nodes/roll.h index 35fe87994aa3e5..7d3ad9efce3d53 100644 --- a/src/plugins/intel_cpu/src/nodes/roll.h +++ b/src/plugins/intel_cpu/src/nodes/roll.h @@ -12,15 +12,15 @@ namespace node { class Roll : public Node { public: - Roll(const std::shared_ptr& op, const GraphContext::CPtr context); + Roll(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/rope.cpp b/src/plugins/intel_cpu/src/nodes/rope.cpp index 90a9ab9d6f945f..984b35237d93ba 100644 --- a/src/plugins/intel_cpu/src/nodes/rope.cpp +++ b/src/plugins/intel_cpu/src/nodes/rope.cpp @@ -21,7 +21,7 @@ namespace ov { namespace intel_cpu { namespace node { -RoPE::RoPE(const std::shared_ptr& op, const GraphContext::CPtr context) +RoPE::RoPE(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string 
errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -106,7 +106,7 @@ struct RoPE::RoPEExecutorRotateHalf : public RoPE::Executor { m_rotaryKernel = createJitKernel(jcp); } - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, const std::vector& inputs, const std::vector& outputs) override { ov::intel_cpu::PlainTensor t_src(inputs[0]); @@ -188,7 +188,7 @@ struct RoPE::RoPEExecutorInterleaved : public RoPE::Executor { m_rotaryKernel = createJitKernel(jcp, true); } - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, const std::vector& inputs, const std::vector& outputs) override { ov::intel_cpu::PlainTensor t_src(inputs[0]); @@ -238,7 +238,7 @@ struct RoPE::RoPEExecutorChatGLM : public RoPE::Executor { m_rotaryKernel = createJitKernel(jcp, true); } - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, const std::vector& inputs, const std::vector& outputs) override { ov::intel_cpu::PlainTensor t_src(inputs[0]); @@ -327,7 +327,7 @@ struct RoPE::RoPEExecutorQwen : public RoPE::Executor { m_rotaryKernel = createJitKernel(jcp); } - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, const std::vector& inputs, const std::vector& outputs) override { ov::intel_cpu::PlainTensor t_src(inputs[0]); // [batch, length, head_cnt*head_size * 3] @@ -444,7 +444,7 @@ void RoPE::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inPortConfigs, outPortConfigs, impl_desc_type::ref_any); } -void RoPE::execute(dnnl::stream strm) { +void RoPE::execute(const dnnl::stream& strm) { std::vector inputs(getParentEdges().size()), outputs(getChildEdges().size()); for (size_t i = 0; i < inputs.size(); i++) { inputs[i] = getSrcMemoryAtPort(i); diff --git a/src/plugins/intel_cpu/src/nodes/rope.h b/src/plugins/intel_cpu/src/nodes/rope.h index 9bb910f29e4236..6082301f6bd27e 100644 --- a/src/plugins/intel_cpu/src/nodes/rope.h +++ b/src/plugins/intel_cpu/src/nodes/rope.h @@ -13,7 +13,7 @@ namespace node 
{ class RoPE : public Node { public: - RoPE(const std::shared_ptr& op, const GraphContext::CPtr context); + RoPE(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -22,16 +22,16 @@ class RoPE : public Node { bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; private: struct Executor { - virtual void execute(dnnl::stream strm, + virtual void execute(const dnnl::stream& strm, const std::vector& inputs, const std::vector& outputs) = 0; virtual ~Executor() = default; diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp index 6b4308c6b807d4..e6455505e55532 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp @@ -29,6 +29,7 @@ #include #include +#include #include #include "kernels/scaled_attn/attn_memcpy.hpp" @@ -73,7 +74,7 @@ template struct MHAKernel { const GraphContext::CPtr context; MHAKernel() = delete; - explicit MHAKernel(GraphContext::CPtr ctx) : context(ctx) {} + explicit MHAKernel(GraphContext::CPtr ctx) : context(std::move(ctx)) {} template float dot_product(const D* a, const D* b, int len, int stride_b = 1) { @@ -110,7 +111,7 @@ struct MHAKernel { PlainTensor causal_mask; bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] 
equal to this - void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) { + void set_causal_mask(const PlainTensor& mask, bool _select_nfltmax_at_0) { causal_mask = mask; select_nfltmax_at_0 = _select_nfltmax_at_0; } @@ -121,7 +122,7 @@ struct MHAKernel { // present_value [B, H, kv_len, S] // attention_mask [B, 1, q_len, kv_len] // output_emb [B, q_len, H*S] - void operator()(dnnl::stream strm, + void operator()(const dnnl::stream& strm, PlainTensor& query, PlainTensor& present_key, PlainTensor& present_value, @@ -252,7 +253,7 @@ struct MHAKernel { std::shared_ptr wv_gemm_ptr = nullptr; MHAKernel() = delete; - explicit MHAKernel(GraphContext::CPtr ctx) : context(ctx) {} + explicit MHAKernel(GraphContext::CPtr ctx) : context(std::move(ctx)) {} dnnl::memory::dims make_dnnl_dims(const std::vector& dims) { dnnl::memory::dims dnnl_dims(dims.size()); @@ -261,7 +262,7 @@ struct MHAKernel { return dnnl_dims; } - void prepare_brgemm_prim(dnnl::stream strm, + void prepare_brgemm_prim(const dnnl::stream& strm, PlainTensor& query, PlainTensor& present_key, PlainTensor& present_value, @@ -436,14 +437,13 @@ struct MHAKernel { } T* v_ptr = is_xf16 ? &wv_scratch_b.at({b, h / h_each_group_len, 0}) : &present_value.at({b, h / h_each_group_len, 0, 0}); - wv_gemm_ptr->executeGemm(m_cntget_scratch_a_size()> 0 - ? &wv_scratch_a.at({tid, 0}) - : nullptr); + const bool is_m_tail = m_cnt < m_block_size; + wv_gemm_ptr->executeGemm(is_m_tail, + w_ptr, + v_ptr, + fp32_out_ptr, + wsp.data() + tid * wsp_size_per_thread, + wv_gemm_ptr->get_scratch_a_size() > 0 ? &wv_scratch_a.at({tid, 0}) : nullptr); if (is_xf16) { if (has_out_transpose) { attn_memcpy2d_kernel(&fp32_out.at({b, m_start, h, 0}), @@ -474,7 +474,7 @@ struct MHAKernel { PlainTensor causal_mask; bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] 
equal to this - void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) { + void set_causal_mask(const PlainTensor& mask, bool _select_nfltmax_at_0) { causal_mask = mask; select_nfltmax_at_0 = _select_nfltmax_at_0; } @@ -521,15 +521,15 @@ struct MHAKernel { ov::element::Type precision; MHAKernel() = delete; - explicit MHAKernel(GraphContext::CPtr ctx) : context(ctx) { - m_block_size = 512; - select_nfltmax_at_0 = false; - precision = ov::element::from(); - } + explicit MHAKernel(GraphContext::CPtr ctx) + : context(std::move(ctx)), + m_block_size(512), + precision(ov::element::from()), + select_nfltmax_at_0(false) {} PlainTensor causal_mask; bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] equal to this - void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) { + void set_causal_mask(const PlainTensor& mask, bool _select_nfltmax_at_0) { causal_mask = mask; select_nfltmax_at_0 = _select_nfltmax_at_0; } @@ -541,7 +541,7 @@ struct MHAKernel { // attention_mask [B, 1, q_len, kv_len] // alibi // output_emb [B, L1, H*S] - void operator()(dnnl::stream strm, + void operator()(const dnnl::stream& strm, PlainTensor& query, PlainTensor& present_key, PlainTensor& present_value, @@ -668,16 +668,17 @@ struct MHAKernel { size_t m_threads_num = 0lu; MHAKernel() = delete; - explicit MHAKernel(GraphContext::CPtr ctx) : context(ctx) { - m_block_size = 4; - select_nfltmax_at_0 = false; - m_threads_num = parallel_get_max_threads(); + explicit MHAKernel(GraphContext::CPtr ctx) + : context(std::move(ctx)), + m_block_size(4), + m_threads_num(parallel_get_max_threads()), + select_nfltmax_at_0(false) { qk_buffers.resize(m_threads_num); } PlainTensor causal_mask; bool select_nfltmax_at_0 = false; // set attn_score to -FLT_MAX when causal_mask[...] 
equal to this - void set_causal_mask(PlainTensor mask, bool _select_nfltmax_at_0) { + void set_causal_mask(const PlainTensor& mask, bool _select_nfltmax_at_0) { causal_mask = mask; select_nfltmax_at_0 = _select_nfltmax_at_0; } @@ -689,7 +690,7 @@ struct MHAKernel { // attention_mask [B, 1, q_len, kv_len] // alibi // output_emb [B, L1, H*S] - void operator()(dnnl::stream strm, + void operator()(const dnnl::stream& strm, PlainTensor& query, PlainTensor& present_key, PlainTensor& present_value, @@ -887,16 +888,16 @@ struct ScaledDotProductAttention::AttentionExecutor : public ScaledDotProductAtt MHAKernel kernel; MHASingleToken kernel_single_token; - AttentionExecutor(GraphContext::CPtr ctx) : context(ctx), kernel(context) {} + AttentionExecutor(GraphContext::CPtr ctx) : context(std::move(ctx)), kernel(context) {} - void prepare_attn_mask(MemoryPtr attn_input) { + void prepare_attn_mask(const MemoryPtr& attn_input) { attn_buf.resize(attn_input->getStaticDims()); auto p = attn_input->getDataAs(); for (size_t i = 0; i < attn_input->getSize(); i++) attn_buf.ptr()[i] = p[i] ? 0.0f : -FLT_MAX; } - void execute(dnnl::stream strm, + void execute(const dnnl::stream& strm, const Config& config, const std::vector& inputs, const MemoryPtr output, @@ -1055,7 +1056,7 @@ struct ScaledDotProductAttention::AttentionExecutor : public ScaledDotProductAtt }; ScaledDotProductAttention::ScaledDotProductAttention(const std::shared_ptr& op, - const GraphContext::CPtr context) + const GraphContext::CPtr& context) : Node(op, context, SDPAShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -1216,7 +1217,7 @@ void ScaledDotProductAttention::createPrimitive() { m_executor = result.first; } -void ScaledDotProductAttention::execute(dnnl::stream strm) { +void ScaledDotProductAttention::execute(const dnnl::stream& strm) { auto orginSDPInputNumber = getOriginalInputsNumber() - (m_config.config.fuse_concat ? 
3 : 0); std::vector inputs(orginSDPInputNumber); auto output = getDstMemoryAtPort(0); diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.h b/src/plugins/intel_cpu/src/nodes/scaled_attn.h index c3636ab6626519..aeabee681599b8 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.h +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.h @@ -15,7 +15,7 @@ namespace node { class ScaledDotProductAttention : public Node { public: - ScaledDotProductAttention(const std::shared_ptr& op, const GraphContext::CPtr context); + ScaledDotProductAttention(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override {} bool created() const override { @@ -28,11 +28,11 @@ class ScaledDotProductAttention : public Node { bool needPrepareParams() const override { return false; } - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; void createPrimitive() override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; @@ -65,7 +65,7 @@ class ScaledDotProductAttention : public Node { }; struct Executor { - virtual void execute(dnnl::stream strm, + virtual void execute(const dnnl::stream& strm, const Config& config, const std::vector& inputs, const MemoryPtr output, diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index 3225d9d8f8da96..c8bd8fc783a96b 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include "common/cpu_memcpy.h" @@ -73,7 +74,7 @@ bool ScatterUpdate::isExecutable() const { return !isInputTensorAtPortEmpty(DATA_ID); } -ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, 
const GraphContext::CPtr context) +ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)), dataSize(0lu), indicesSize(0lu), @@ -312,7 +313,7 @@ bool ScatterUpdate::needPrepareParams() const { return false; } -void ScatterUpdate::executeDynamicImpl(dnnl::stream strm) { +void ScatterUpdate::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -371,8 +372,8 @@ static inline void getCoordinate(VectorDims& coordinate, size_t offset, const Ve } struct TensorIterator { - TensorIterator(const VectorDims& squashed_shape, const int64_t squashed_axis) - : m_squashed_shape(squashed_shape), + TensorIterator(VectorDims squashed_shape, const int64_t squashed_axis) + : m_squashed_shape(std::move(squashed_shape)), m_squashed_axis(squashed_axis) { OPENVINO_ASSERT(m_squashed_shape[m_squashed_axis] == 1); } @@ -824,7 +825,7 @@ void ScatterUpdate::scatterElementsUpdate(const MemoryPtr& dstMemPtr, OV_CASE(ov::element::u8, uint8_t)); } -void ScatterUpdate::execute(dnnl::stream strm) { +void ScatterUpdate::execute(const dnnl::stream& strm) { auto srcMemPtr = getSrcMemoryAtPort(DATA_ID); auto dstMemPtr = getDstMemoryAtPort(0); auto indicesMemPtr = getSrcMemoryAtPort(INDICES_ID); diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.h b/src/plugins/intel_cpu/src/nodes/scatter_update.h index 4dc9ed1be59a63..df3827c2fa4f65 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.h +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.h @@ -76,18 +76,18 @@ class ReduceNone { class ScatterUpdate : public Node { public: - ScatterUpdate(const std::shared_ptr& op, const GraphContext::CPtr context); + ScatterUpdate(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; bool created() const override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; 
bool canBeInPlace() const override { return false; } bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool isExecutable() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/search_sorted.cpp b/src/plugins/intel_cpu/src/nodes/search_sorted.cpp index 860964fdddf340..2254dc46391073 100644 --- a/src/plugins/intel_cpu/src/nodes/search_sorted.cpp +++ b/src/plugins/intel_cpu/src/nodes/search_sorted.cpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { namespace node { -SearchSorted::SearchSorted(const std::shared_ptr& op, const GraphContext::CPtr context) +SearchSorted::SearchSorted(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -70,7 +70,7 @@ bool SearchSorted::needPrepareParams() const { return false; } -void SearchSorted::executeDynamicImpl(dnnl::stream strm) { +void SearchSorted::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -99,7 +99,7 @@ struct SearchSorted::SearchSortedExecute { ctx.node.executeImpl(); } }; -void SearchSorted::execute(dnnl::stream strm) { +void SearchSorted::execute(const dnnl::stream& strm) { auto inputPrecision = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); auto outputPrecision = getChildEdgeAt(0)->getMemory().getDesc().getPrecision(); diff --git a/src/plugins/intel_cpu/src/nodes/search_sorted.h b/src/plugins/intel_cpu/src/nodes/search_sorted.h index 6f90c20355d911..60c81c2b1f566d 100644 --- a/src/plugins/intel_cpu/src/nodes/search_sorted.h +++ b/src/plugins/intel_cpu/src/nodes/search_sorted.h @@ -12,15 +12,15 @@ namespace node { class SearchSorted : public Node { public: - SearchSorted(const std::shared_ptr& op, const GraphContext::CPtr context); + 
SearchSorted(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: template diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.cpp b/src/plugins/intel_cpu/src/nodes/shapeof.cpp index 43f30b680c880c..abd55142b098c2 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.cpp +++ b/src/plugins/intel_cpu/src/nodes/shapeof.cpp @@ -25,7 +25,7 @@ bool ShapeOf::isSupportedOperation(const std::shared_ptr& op, st return true; } -ShapeOf::ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr context) +ShapeOf::ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, ShapeOfShapeInferFactory()) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -82,7 +82,7 @@ bool ShapeOf::isExecutable() const { return true; } -void ShapeOf::execute(dnnl::stream strm) { +void ShapeOf::execute(const dnnl::stream& strm) { auto inPtr = getSrcMemoryAtPort(0); auto outPtr = getDstMemoryAtPort(0); auto&& inDims = inPtr->getStaticDims(); diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.h b/src/plugins/intel_cpu/src/nodes/shapeof.h index 7b2ebb733e99a9..e625af7bfb6a0c 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.h +++ b/src/plugins/intel_cpu/src/nodes/shapeof.h @@ -18,17 +18,17 @@ namespace node { class ShapeOf : public Node { public: - ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr context); + ShapeOf(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void 
initSupportedPrimitiveDescriptors() override; void initOptimalPrimitiveDescriptor() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { return false; }; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp b/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp index ee66dce2744ff1..f73b920345c46a 100644 --- a/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp +++ b/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp @@ -64,7 +64,7 @@ bool ShuffleChannels::isSupportedOperation(const std::shared_ptr return true; } -ShuffleChannels::ShuffleChannels(const std::shared_ptr& op, const GraphContext::CPtr context) +ShuffleChannels::ShuffleChannels(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -277,11 +277,11 @@ void ShuffleChannels::ShuffleChannelsExecutor::exec(const uint8_t* srcData, uint permuteKernel->execute(srcData, dstData); } -void ShuffleChannels::executeDynamicImpl(dnnl::stream strm) { +void ShuffleChannels::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void ShuffleChannels::execute(dnnl::stream strm) { +void ShuffleChannels::execute(const dnnl::stream& strm) { if (!execPtr) THROW_SHCH_ERROR("doesn't have a compiled executor."); diff --git a/src/plugins/intel_cpu/src/nodes/shuffle_channels.h b/src/plugins/intel_cpu/src/nodes/shuffle_channels.h index a7b2f768c89ed3..da6be0ad18c8a0 100644 --- a/src/plugins/intel_cpu/src/nodes/shuffle_channels.h +++ b/src/plugins/intel_cpu/src/nodes/shuffle_channels.h @@ -13,14 +13,14 @@ namespace node { class ShuffleChannels : public Node { public: - ShuffleChannels(const std::shared_ptr& op, const 
GraphContext::CPtr context); + ShuffleChannels(const std::shared_ptr& op, const GraphContext::CPtr& context); ~ShuffleChannels() override = default; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; @@ -38,7 +38,7 @@ class ShuffleChannels : public Node { }; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: ShuffleChannelsAttributes attrs; diff --git a/src/plugins/intel_cpu/src/nodes/softmax.cpp b/src/plugins/intel_cpu/src/nodes/softmax.cpp index 2d3b0a98fee9a9..7dee3ae7aad8b6 100644 --- a/src/plugins/intel_cpu/src/nodes/softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/softmax.cpp @@ -68,7 +68,7 @@ bool SoftMax::isSupportedOperation(const std::shared_ptr& op, st return true; } -SoftMax::SoftMax(const std::shared_ptr& op, const GraphContext::CPtr context) +SoftMax::SoftMax(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -223,7 +223,7 @@ void SoftMax::prepareParams() { #endif } -void SoftMax::execute(dnnl::stream strm) { +void SoftMax::execute(const dnnl::stream& strm) { if (execPtr) { execPtr->exec(primArgs, strm); } else { @@ -231,7 +231,7 @@ void SoftMax::execute(dnnl::stream strm) { } } -void SoftMax::executeDynamicImpl(dnnl::stream strm) { +void SoftMax::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/softmax.h b/src/plugins/intel_cpu/src/nodes/softmax.h index daa285843d77e1..0ce000d38dda00 100644 --- a/src/plugins/intel_cpu/src/nodes/softmax.h +++ 
b/src/plugins/intel_cpu/src/nodes/softmax.h @@ -13,7 +13,7 @@ namespace node { class SoftMax : public Node { public: - SoftMax(const std::shared_ptr& op, const GraphContext::CPtr context); + SoftMax(const std::shared_ptr& op, const GraphContext::CPtr& context); void initOptimalPrimitiveDescriptor() override; void createDescriptor(const std::vector& inputDesc, @@ -22,8 +22,8 @@ class SoftMax : public Node { bool created() const override; AttrPtr initPrimitiveAttr() override; void prepareParams() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp b/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp index c01cd65a9407bf..58d5879cca9e1a 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_batch.cpp @@ -25,7 +25,7 @@ bool SpaceToBatch::isSupportedOperation(const std::shared_ptr& o return true; } -SpaceToBatch::SpaceToBatch(const std::shared_ptr& op, const GraphContext::CPtr context) +SpaceToBatch::SpaceToBatch(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -239,11 +239,11 @@ void SpaceToBatch::SpaceToBatchKernel() { }); } -void SpaceToBatch::executeDynamicImpl(dnnl::stream strm) { +void SpaceToBatch::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void SpaceToBatch::execute(dnnl::stream strm) { +void SpaceToBatch::execute(const dnnl::stream& strm) { switch (getParentEdgeAt(0)->getMemory().getDesc().getPrecision().size()) { case 1: SpaceToBatchKernel::value_type>(); diff --git a/src/plugins/intel_cpu/src/nodes/space_to_batch.h 
b/src/plugins/intel_cpu/src/nodes/space_to_batch.h index fec4423a91a1db..e8787c06b75fc4 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_batch.h +++ b/src/plugins/intel_cpu/src/nodes/space_to_batch.h @@ -16,11 +16,11 @@ namespace node { class SpaceToBatch : public Node { public: - SpaceToBatch(const std::shared_ptr& op, const GraphContext::CPtr context); + SpaceToBatch(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override { @@ -29,7 +29,7 @@ class SpaceToBatch : public Node { bool needShapeInfer() const override { return true; }; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp index 7e4b4c4b1e307f..859944161d48b9 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp @@ -69,7 +69,7 @@ bool SpaceToDepth::isSupportedOperation(const std::shared_ptr& o return true; } -SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphContext::CPtr context) +SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -299,7 +299,7 @@ void SpaceToDepth::SpaceToDepthExecutor::exec(const uint8_t* srcData, uint8_t* d permuteKernel->execute(srcData, dstData, MB); } -void SpaceToDepth::execute(dnnl::stream strm) { +void SpaceToDepth::execute(const dnnl::stream& strm) { if (!execPtr) { THROW_ERROR("doesn't have a 
compiled executor."); } @@ -309,7 +309,7 @@ void SpaceToDepth::execute(dnnl::stream strm) { execPtr->exec(srcData, dstData, MB); } -void SpaceToDepth::executeDynamicImpl(dnnl::stream strm) { +void SpaceToDepth::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.h b/src/plugins/intel_cpu/src/nodes/space_to_depth.h index 51091a07721151..5f628a2a3b4bd3 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_depth.h +++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.h @@ -13,13 +13,13 @@ namespace node { class SpaceToDepth : public Node { public: - SpaceToDepth(const std::shared_ptr& op, const GraphContext::CPtr context); + SpaceToDepth(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void prepareParams() override; @@ -40,7 +40,7 @@ class SpaceToDepth : public Node { }; protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: SpaceToDepthAttrs attrs; diff --git a/src/plugins/intel_cpu/src/nodes/split.cpp b/src/plugins/intel_cpu/src/nodes/split.cpp index 7722da89f2d428..59ab2776ba884b 100644 --- a/src/plugins/intel_cpu/src/nodes/split.cpp +++ b/src/plugins/intel_cpu/src/nodes/split.cpp @@ -50,7 +50,7 @@ bool Split::isSupportedOperation(const std::shared_ptr& op, std: return true; } -Split::Split(const std::shared_ptr& op, const GraphContext::CPtr context) +Split::Split(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -295,7 
+295,7 @@ bool Split::isExecutable() const { return !isInPlace() && !isInputTensorAtPortEmpty(0); } -void Split::execute(dnnl::stream strm) { +void Split::execute(const dnnl::stream& strm) { if (isInPlace()) { return; } @@ -493,7 +493,7 @@ std::vector Split::getRawDstMemPtrs() const { return result; } -Split::SplitOptimizedExecutor::SplitOptimizedExecutor(BlockedMemoryDescCPtr inDesc, +Split::SplitOptimizedExecutor::SplitOptimizedExecutor(const BlockedMemoryDescCPtr& inDesc, const std::vector& outDescs, const size_t axis) { // find axis order position diff --git a/src/plugins/intel_cpu/src/nodes/split.h b/src/plugins/intel_cpu/src/nodes/split.h index 858739c6a4df65..a93e439d7fd5f6 100644 --- a/src/plugins/intel_cpu/src/nodes/split.h +++ b/src/plugins/intel_cpu/src/nodes/split.h @@ -12,13 +12,13 @@ namespace node { class Split : public Node { public: - Split(const std::shared_ptr& op, const GraphContext::CPtr context); + Split(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void selectOptimalPrimitiveDescriptor() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; void initOptimalPrimitiveDescriptor() override; @@ -29,7 +29,7 @@ class Split : public Node { bool needShapeInfer() const override; void prepareParams() override; void createPrimitive() override; - void executeDynamicImpl(dnnl::stream strm) override { + void executeDynamicImpl(const dnnl::stream& strm) override { execute(strm); } void resolveInPlaceEdges(Edge::LOOK look) override; @@ -43,7 +43,7 @@ class Split : public Node { struct SplitOptimizedExecutor : public SplitExecutor { public: - SplitOptimizedExecutor(BlockedMemoryDescCPtr inDesc, + SplitOptimizedExecutor(const BlockedMemoryDescCPtr& inDesc, const std::vector& 
outDescs, const size_t axis); void exec(const uint8_t* srcData, const std::vector& dstRawMemPtrs) override; diff --git a/src/plugins/intel_cpu/src/nodes/stft.cpp b/src/plugins/intel_cpu/src/nodes/stft.cpp index 699895fcece850..21a34585c45dda 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.cpp +++ b/src/plugins/intel_cpu/src/nodes/stft.cpp @@ -100,7 +100,7 @@ static void transpose_out4d(const uint8_t* in, } } // namespace -void STFT::execute(dnnl::stream strm) { +void STFT::execute(const dnnl::stream& strm) { const float* signal = getSrcDataAtPortAs(DATA_IDX); const float* window = getSrcDataAtPortAs(WINDOW_IDX); float* rdft_result = getDstDataAtPortAs(0); @@ -168,7 +168,7 @@ void STFT::execute(dnnl::stream strm) { } } -void STFT::executeDynamicImpl(dnnl::stream strm) { +void STFT::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/stft.h b/src/plugins/intel_cpu/src/nodes/stft.h index cd87c997c59a0f..4b822241c2c24b 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.h +++ b/src/plugins/intel_cpu/src/nodes/stft.h @@ -24,8 +24,8 @@ class STFT : public Node { bool needPrepareParams() const override; void createPrimitive() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 542f8897c2d625..f2b1a90e7b4c60 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -4,6 +4,7 @@ #include "strided_slice.h" +#include #include #include "common/cpu_memcpy.h" @@ -33,7 +34,7 @@ bool StridedSlice::isSupportedOperation(const std::shared_ptr& o return true; } -StridedSlice::StridedSlice(const std::shared_ptr& op, const 
GraphContext::CPtr context) +StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, StridedSliceShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -192,6 +193,7 @@ static void addHiddenDims(StridedSlice::StridedSliceAttributes& attrs, auto addHiddenDims = [&](std::vector& data, const int bit = 0) { std::vector temp; + temp.reserve(attrs.ellipsisPos1); for (int i = 0; i < attrs.ellipsisPos1; i++) temp.push_back(data[i]); for (size_t i = attrs.ellipsisPos1; i < ellipsisPos2 + 1; i++) @@ -340,14 +342,14 @@ bool StridedSlice::needShapeInfer() const { return Node::inputShapesModified() || shapeHasDataDependency; } -void StridedSlice::execute(dnnl::stream strm) { +void StridedSlice::execute(const dnnl::stream& strm) { if (!execPtr) THROW_CPU_NODE_ERR("doesn't have compiled executor!"); execPtr->exec(srcMemory, dstMemory); } -void StridedSlice::executeDynamicImpl(dnnl::stream strm) { +void StridedSlice::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } @@ -363,7 +365,6 @@ StridedSlice::StridedSliceCommonExecutor::StridedSliceCommonExecutor(const Strid dimsNormalization(); dimsGluing(); indicesCalculation(); - m_threads_num = parallel_get_max_threads(); } void StridedSlice::StridedSliceCommonExecutor::orderParametersByLayouts( @@ -382,7 +383,7 @@ void StridedSlice::StridedSliceCommonExecutor::orderParametersByLayouts( if (isBlockedLayout) { params.attrs.begin[1] = params.attrs.begin[1] / blk; - params.attrs.end[1] = ceil(params.attrs.end[1] / static_cast(blk)); + params.attrs.end[1] = std::ceil(params.attrs.end[1] / static_cast(blk)); params.attrs.begin.push_back(0); params.attrs.end.push_back(0); params.attrs.stride.push_back(1); @@ -573,7 +574,7 @@ void StridedSlice::StridedSliceCommonExecutor::dimsNormalization() { strideTemp.push_back(params.attrs.stride[axis]); newSrcDims.push_back(params.srcBlockedDims[srcIdx]); newDstDims.push_back( - 
ceil(static_cast(abs(e - b) + 1) / static_cast(abs(strideTemp.back())))); + std::ceil(static_cast(abs(e - b) + 1) / static_cast(abs(strideTemp.back())))); srcIdx++; } diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.h b/src/plugins/intel_cpu/src/nodes/strided_slice.h index b21e99c7efeb2e..ca755a62a7bdf5 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.h +++ b/src/plugins/intel_cpu/src/nodes/strided_slice.h @@ -15,13 +15,13 @@ namespace node { class StridedSlice : public Node { public: - StridedSlice(const std::shared_ptr& op, const GraphContext::CPtr context); + StridedSlice(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { return false; @@ -65,7 +65,7 @@ class StridedSlice : public Node { protected: bool needPrepareParams() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: class StridedSliceExecutor { diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp index c3634b67399e8b..2f7db689e7a17f 100644 --- a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp +++ b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.cpp @@ -10,7 +10,7 @@ namespace ov { namespace intel_cpu { namespace node { -StringTensorPack::StringTensorPack(const std::shared_ptr& op, const GraphContext::CPtr context) +StringTensorPack::StringTensorPack(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if 
(!isSupportedOperation(op, errorMessage)) { @@ -54,8 +54,8 @@ bool StringTensorPack::needPrepareParams() const { return false; } -void StringTensorPack::executeDynamicImpl(dnnl::stream strm) { - execute(std::move(strm)); +void StringTensorPack::executeDynamicImpl(const dnnl::stream& strm) { + execute(strm); } template @@ -85,7 +85,7 @@ bool StringTensorPack::isExecutable() const { return !(isInputTensorAtPortEmpty(0) || isInputTensorAtPortEmpty(1)); } -void StringTensorPack::execute(dnnl::stream strm) { +void StringTensorPack::execute(const dnnl::stream& strm) { auto indicesPrecision = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); StringTensorPackContext ctx = {*this}; OV_SWITCH(intel_cpu, diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.h b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.h index 952093eab53e21..52aacc1c4ecce0 100644 --- a/src/plugins/intel_cpu/src/nodes/string_tensor_pack.h +++ b/src/plugins/intel_cpu/src/nodes/string_tensor_pack.h @@ -12,16 +12,16 @@ namespace node { class StringTensorPack : public Node { public: - StringTensorPack(const std::shared_ptr& op, const GraphContext::CPtr context); + StringTensorPack(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; bool isExecutable() const override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: template diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp index bcdbd54cb7c68a..9bfb7544470686 100644 --- 
a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp +++ b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.cpp @@ -11,7 +11,7 @@ namespace ov { namespace intel_cpu { namespace node { -StringTensorUnpack::StringTensorUnpack(const std::shared_ptr& op, const GraphContext::CPtr context) +StringTensorUnpack::StringTensorUnpack(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -54,7 +54,7 @@ bool StringTensorUnpack::needPrepareParams() const { return false; } -void StringTensorUnpack::executeDynamicImpl(dnnl::stream strm) { +void StringTensorUnpack::executeDynamicImpl(const dnnl::stream& strm) { const auto& srcMemory = getSrcMemoryAtPort(0); const auto& srcDataDims = srcMemory->getStaticDims(); const auto& srcData = srcMemory->getDataAs(); @@ -64,10 +64,10 @@ void StringTensorUnpack::executeDynamicImpl(dnnl::stream strm) { totalCharLength += srcData[i].length(); } redefineOutputMemory({srcDataDims, srcDataDims, {totalCharLength}}); - execute(std::move(strm)); + execute(strm); } -void StringTensorUnpack::execute(dnnl::stream strm) { +void StringTensorUnpack::execute(const dnnl::stream& strm) { const auto stringCount = ov::shape_size(getSrcMemoryAtPort(0)->getStaticDims()); ov::reference::string_tensor_unpack(getSrcDataAtPortAs(0), getDstDataAtPortAs(0), diff --git a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.h b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.h index 34bf342eb71a25..3edadccb67c82c 100644 --- a/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.h +++ b/src/plugins/intel_cpu/src/nodes/string_tensor_unpack.h @@ -12,15 +12,15 @@ namespace node { class StringTensorUnpack : public Node { public: - StringTensorUnpack(const std::shared_ptr& op, const GraphContext::CPtr context); + StringTensorUnpack(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool 
isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool needPrepareParams() const override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; }; } // namespace node diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index f69041fdf45b7c..43a005b27cb450 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -44,6 +44,7 @@ #include #include +#include #include #include "utils/cpu_utils.hpp" @@ -67,9 +68,9 @@ namespace { #if defined(OPENVINO_ARCH_X86_64) || defined(OPENVINO_ARCH_ARM64) struct SubgraphKey { SubgraphKey() = default; - SubgraphKey(const std::shared_ptr& attrs_, const std::vector& in_shapes_) - : attrs(attrs_), - in_shapes(in_shapes_) {} + SubgraphKey(std::shared_ptr attrs_, std::vector in_shapes_) + : attrs(std::move(attrs_)), + in_shapes(std::move(in_shapes_)) {} virtual ~SubgraphKey() = default; size_t hash() const { @@ -91,8 +92,8 @@ struct SubgraphKey { }; struct SubgraphCodeGeneratorKey { - SubgraphCodeGeneratorKey(const std::shared_ptr& attrs_, uint8_t mask_) - : attrs(attrs_), + SubgraphCodeGeneratorKey(std::shared_ptr attrs_, uint8_t mask_) + : attrs(std::move(attrs_)), broadcasting_mask(mask_) {} size_t hash() const { @@ -142,15 +143,19 @@ struct SubgraphShapeInferResult { } // namespace -Subgraph::Subgraph(const std::shared_ptr& op, const GraphContext::CPtr& context) - : Node(op, context, SnippetShapeInferFactory(op)), - subgraph_attrs(std::make_shared()) { +static _ov_dnnl_cpu_isa getHostIsa() { #if defined(OPENVINO_ARCH_ARM64) - host_isa = dnnl::impl::cpu::aarch64::asimd; + return dnnl::impl::cpu::aarch64::asimd; #else - 
host_isa = dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) ? dnnl::impl::cpu::x64::avx512_core - : dnnl::impl::cpu::x64::avx2; + return dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) ? dnnl::impl::cpu::x64::avx512_core + : dnnl::impl::cpu::x64::avx2; #endif +} + +Subgraph::Subgraph(const std::shared_ptr& op, const GraphContext::CPtr& context) + : Node(op, context, SnippetShapeInferFactory(op)), + host_isa(getHostIsa()), + subgraph_attrs(std::make_shared()) { const auto& tmp_snippet = ov::as_type_ptr(op); OPENVINO_ASSERT(tmp_snippet, "Attempt to create Subgraph node from an invalid op type"); subgraph_attrs->snippet = tmp_snippet->clone(); @@ -709,12 +714,12 @@ bool Subgraph::created() const { return getType() == Type::Subgraph; } -void Subgraph::execute(dnnl::stream strm) { +void Subgraph::execute(const dnnl::stream& strm) { OPENVINO_ASSERT(execPtr, "Can't execute Subgraph node. Primitive didn't created"); execPtr->execute(strm, srcMemPtrs, dstMemPtrs); } -void Subgraph::executeDynamicImpl(dnnl::stream strm) { +void Subgraph::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.h b/src/plugins/intel_cpu/src/nodes/subgraph.h index 9e6cb3cd49a9d7..89cf6adf128393 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.h +++ b/src/plugins/intel_cpu/src/nodes/subgraph.h @@ -34,8 +34,8 @@ class Subgraph : public Node { bool created() const override; // if generator is set, it would execute generated code otherwise it would fallback to nGraph reference - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; protected: IShapeInfer::Result shapeInfer() const override; @@ -61,11 +61,13 @@ class Subgraph : public Node { // Holds ISA version used is codeGeneration target #if defined(OPENVINO_ARCH_ARM64) - 
dnnl::impl::cpu::aarch64::cpu_isa_t host_isa; +# define _ov_dnnl_cpu_isa dnnl::impl::cpu::aarch64::cpu_isa_t #else - dnnl::impl::cpu::x64::cpu_isa_t host_isa; +# define _ov_dnnl_cpu_isa dnnl::impl::cpu::x64::cpu_isa_t #endif + _ov_dnnl_cpu_isa host_isa; + std::shared_ptr subgraph_attrs; // Index of Paramater -> Index of broadcastable dimension from end diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp index 2bdda694b701ec..fbd6361eca53fc 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp @@ -5,6 +5,7 @@ #include "tensoriterator.h" #include +#include #include #include "common/blocked_desc_creator.h" @@ -54,7 +55,7 @@ static NodeConfig make_plain_config(const std::shared_ptr& op) { return config; } -static void redefineToMemories(const std::vector& to_mems, MemoryDescPtr new_desc) { +static void redefineToMemories(const std::vector& to_mems, const MemoryDescPtr& new_desc) { // TODO : check the entire dstMemPtrs usage considering the proper memory sharing for (size_t j = 0; j < to_mems.size(); j++) { to_mems[j]->redefineDesc(new_desc); @@ -77,7 +78,7 @@ static void nullifyUndefinedDims(VectorDims& dims) { class PortIteratorHelper : public PortMapHelper { public: - PortIteratorHelper(MultiCachePtr cache, + PortIteratorHelper(const MultiCachePtr& cache, const MemoryPtr& from, const MemoryPtr& to, bool sliced_src, @@ -127,7 +128,7 @@ class PortIteratorHelper : public PortMapHelper { getReorderPrim(cache, mem_holder_dst.get_engine(), mem_holder_src.get_desc(), mem_holder_dst.get_desc()); } - void execute(dnnl::stream strm, int iter) override { + void execute(const dnnl::stream& strm, int iter) override { OPENVINO_ASSERT(iter >= 0 && iter < iter_count); auto& chunk_mem = sliced_src ? 
mem_holder_src : mem_holder_dst; @@ -149,14 +150,14 @@ class PortIteratorHelper : public PortMapHelper { class BackEdgePortHelper : public PortMapHelper { public: - BackEdgePortHelper(MultiCachePtr cache, const MemoryPtr& from, const MemoryPtr& to) { + BackEdgePortHelper(const MultiCachePtr& cache, const MemoryPtr& from, const MemoryPtr& to) { mem_holder_src = from->getPrimitive(); mem_holder_dst = to->getPrimitive(); reorder = getReorderPrim(cache, mem_holder_dst.get_engine(), mem_holder_src.get_desc(), mem_holder_dst.get_desc()); } - void execute(dnnl::stream strm, int iter = -1) override { + void execute(const dnnl::stream& strm, int iter = -1) override { if (iter != 0) { reorder.execute(strm, {{DNNL_ARG_FROM, mem_holder_src}, {DNNL_ARG_TO, mem_holder_dst}}); } @@ -172,7 +173,7 @@ class IterCountPortHelper : public PortMapHelper { mem_holder_dst = to->getPrimitive(); } - void execute(dnnl::stream strm, int n_iter) override { + void execute(const dnnl::stream& strm, int n_iter) override { auto mem = mem_holder_dst; auto data_ptr = static_cast(mem.get_data_handle()); if (data_ptr == nullptr) { @@ -228,12 +229,11 @@ class staticValueCheck : public PortChecker { int value; }; -DynamicBuffer::DynamicBuffer(const MemoryPtr& from_, const std::vector& to_, const PortMap& map_rule_) - : from(from_), - to(to_), - map_rule(map_rule_) { - elem_size = DnnlExtensionUtils::sizeOfDataType(from->getDataType()); -} +DynamicBuffer::DynamicBuffer(MemoryPtr from_, std::vector to_, const PortMap& map_rule_) + : from(std::move(from_)), + to(std::move(to_)), + map_rule(map_rule_), + elem_size(DnnlExtensionUtils::sizeOfDataType(from->getDataType())) {} void DynamicBuffer::execute(const dnnl::engine& eng, const int iter) { if (from->getStaticDims()[map_rule.axis] != static_cast(std::abs(map_rule.stride))) @@ -423,7 +423,7 @@ bool TensorIterator::isSupportedOperation(const std::shared_ptr& return true; } -TensorIterator::TensorIterator(const std::shared_ptr& op, const GraphContext::CPtr 
context) +TensorIterator::TensorIterator(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()), ngraphOp(op) { std::string errorMessage; @@ -621,7 +621,7 @@ void TensorIterator::prepareParamsImpl(const bool compileStage) { } } -void TensorIterator::execute(dnnl::stream strm) { +void TensorIterator::execute(const dnnl::stream& strm) { // Special case, the subgraph is dynamic while the node has all static shapes if (runAsDynamic()) { restoreSubgraphInputByBackEdges(); @@ -657,7 +657,7 @@ void TensorIterator::execute(dnnl::stream strm) { mapper->execute(strm); } -void TensorIterator::executeDynamicImpl(dnnl::stream strm) { +void TensorIterator::executeDynamicImpl(const dnnl::stream& strm) { const auto& eng = getEngine(); sub_graph.ResetInferCount(); @@ -823,7 +823,7 @@ void TensorIterator::reshapeSubgraphInput() { } } -void TensorIterator::reshapeAndFillOutput(dnnl::stream strm) { +void TensorIterator::reshapeAndFillOutput(const dnnl::stream& strm) { for (auto map_rule : outputPortMap) { if (map_rule.axis == -1) { auto to_mems = getToMemories(this, map_rule.from); @@ -845,7 +845,7 @@ void TensorIterator::reshapeAndFillOutput(dnnl::stream strm) { } } - for (auto buffer : buffers) { + for (const auto& buffer : buffers) { buffer->transfer(this); } } diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.h b/src/plugins/intel_cpu/src/nodes/tensoriterator.h index b5d6f178cb2b51..97399d28e788b3 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.h +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.h @@ -37,7 +37,7 @@ struct PortMap { class PortMapHelper { public: virtual ~PortMapHelper() = default; - virtual void execute(dnnl::stream strm, int n_iter = -1) = 0; + virtual void execute(const dnnl::stream& strm, int n_iter = -1) = 0; protected: dnnl::primitive reorder; @@ -65,7 +65,7 @@ class PortChecker { */ class DynamicBuffer { public: - DynamicBuffer(const MemoryPtr& from_, const std::vector& to_, 
const PortMap& map_rule_); + DynamicBuffer(MemoryPtr from_, std::vector to_, const PortMap& map_rule_); void execute(const dnnl::engine& eng, const int iter); void transfer(const Node* node); @@ -109,14 +109,14 @@ class DynamicBuffer { class TensorIterator : public Node { public: - TensorIterator(const std::shared_ptr& op, const GraphContext::CPtr context); + TensorIterator(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void initSupportedPrimitiveDescriptors() override; void getSupportedDescriptors() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool isExecutable() const override { return true; } @@ -130,7 +130,7 @@ class TensorIterator : public Node { bool needPrepareParams() const override; void prepareParams() override; - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; private: void prepareInputPorts(); @@ -145,7 +145,7 @@ class TensorIterator : public Node { /* Dynamic support */ void reshapeSubgraphInput(); - void reshapeAndFillOutput(dnnl::stream strm); + void reshapeAndFillOutput(const dnnl::stream& strm); bool checkForInputAndBodyShapesInequality() const; int getNumIteration(const std::vector& inputPortMap, const std::vector& outputPortMap) const; void prepareParamsImpl(const bool compileStage); diff --git a/src/plugins/intel_cpu/src/nodes/tile.cpp b/src/plugins/intel_cpu/src/nodes/tile.cpp index 473d404a0b57d8..57bff9631cebde 100644 --- a/src/plugins/intel_cpu/src/nodes/tile.cpp +++ b/src/plugins/intel_cpu/src/nodes/tile.cpp @@ -33,7 +33,7 @@ bool Tile::isSupportedOperation(const std::shared_ptr& op, std:: return true; } -Tile::Tile(const std::shared_ptr& op, const GraphContext::CPtr context) +Tile::Tile(const std::shared_ptr& op, const GraphContext::CPtr& 
context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -148,11 +148,11 @@ bool Tile::needShapeInfer() const { return false; } -void Tile::executeDynamicImpl(dnnl::stream strm) { +void Tile::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void Tile::execute(dnnl::stream strm) { +void Tile::execute(const dnnl::stream& strm) { if (optimizedCase) { optimizedExecute(getSrcMemoryAtPort(TILE_INPUT), getDstMemoryAtPort(0)); } else { @@ -160,7 +160,7 @@ void Tile::execute(dnnl::stream strm) { } } -void Tile::plainExecute(dnnl::stream strm) { +void Tile::plainExecute(const dnnl::stream& strm) { if (noTiling) { return; } diff --git a/src/plugins/intel_cpu/src/nodes/tile.h b/src/plugins/intel_cpu/src/nodes/tile.h index 0bb43ac91e8fb5..cac55b7be8d15c 100644 --- a/src/plugins/intel_cpu/src/nodes/tile.h +++ b/src/plugins/intel_cpu/src/nodes/tile.h @@ -14,12 +14,12 @@ namespace node { class Tile : public Node, public TileBroadcastCommon { public: - Tile(const std::shared_ptr& op, const GraphContext::CPtr context); + Tile(const std::shared_ptr& op, const GraphContext::CPtr& context); void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool created() const override; static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; @@ -30,7 +30,7 @@ class Tile : public Node, public TileBroadcastCommon { bool needShapeInfer() const override; private: - void plainExecute(dnnl::stream strm); + void plainExecute(const dnnl::stream& strm); static constexpr size_t TILE_INPUT = 0lu; static constexpr size_t TILE_REPEATS = 1lu; diff --git a/src/plugins/intel_cpu/src/nodes/topk.cpp 
b/src/plugins/intel_cpu/src/nodes/topk.cpp index f20bfeb8f599cf..ba1507c9b4b2e6 100644 --- a/src/plugins/intel_cpu/src/nodes/topk.cpp +++ b/src/plugins/intel_cpu/src/nodes/topk.cpp @@ -1887,7 +1887,7 @@ bool TopK::isSupportedOperation(const std::shared_ptr& op, std:: return true; } -TopK::TopK(const std::shared_ptr& op, const GraphContext::CPtr context) +TopK::TopK(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (isSupportedOperation(op, errorMessage)) { @@ -2195,11 +2195,11 @@ void TopK::createPrimitive() { } } -void TopK::executeDynamicImpl(dnnl::stream strm) { +void TopK::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } -void TopK::execute(dnnl::stream strm) { +void TopK::execute(const dnnl::stream& strm) { auto srcMemPtr = getSrcMemoryAtPort(TOPK_DATA); auto dstMemPtr = getDstMemoryAtPort(TOPK_DATA); auto dstIndexesMemPtr = getDstMemoryAtPort(TOPK_INDEX); diff --git a/src/plugins/intel_cpu/src/nodes/topk.h b/src/plugins/intel_cpu/src/nodes/topk.h index e0fbb4545ccf46..950e3fadefd662 100644 --- a/src/plugins/intel_cpu/src/nodes/topk.h +++ b/src/plugins/intel_cpu/src/nodes/topk.h @@ -74,7 +74,7 @@ struct jit_uni_topk_kernel { class TopK : public Node { public: - TopK(const std::shared_ptr& op, const GraphContext::CPtr context); + TopK(const std::shared_ptr& op, const GraphContext::CPtr& context); ~TopK() override = default; void getSupportedDescriptors() override; @@ -84,8 +84,8 @@ class TopK : public Node { void prepareParams() override; void createPrimitive() override; bool created() const override; - void execute(dnnl::stream strm) override; - void executeDynamicImpl(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; bool canBeInPlace() const override { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/transpose.cpp 
b/src/plugins/intel_cpu/src/nodes/transpose.cpp index f316da30f97c0c..0b253d4b83892a 100644 --- a/src/plugins/intel_cpu/src/nodes/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/transpose.cpp @@ -36,7 +36,7 @@ bool Transpose::isSupportedOperation(const std::shared_ptr& op, return true; } -Transpose::Transpose(const std::shared_ptr& op, const GraphContext::CPtr context) +Transpose::Transpose(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, TransposeShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -78,7 +78,7 @@ void Transpose::initSupportedPrimitiveDescriptors() { config.outConfs[0].constant(false); transpose_context = std::make_shared(context, getImplPriority()); - auto supportedPrimitiveDescriptorsBuilder = [this](NodeConfig config, TransposeParams transposeParams) { + auto supportedPrimitiveDescriptorsBuilder = [this](NodeConfig config, const TransposeParams& transposeParams) { std::vector srcMemoryDescs; for (size_t i = 0; i < config.inConfs.size(); i++) { srcMemoryDescs.push_back(config.inConfs[i].getMemDesc()); @@ -238,7 +238,7 @@ void Transpose::createPrimitive() { } } -void Transpose::execute(dnnl::stream strm) { +void Transpose::execute(const dnnl::stream& strm) { if (isOptimized) return; @@ -254,7 +254,7 @@ void Transpose::execute(dnnl::stream strm) { } } -void Transpose::executeDynamicImpl(dnnl::stream strm) { +void Transpose::executeDynamicImpl(const dnnl::stream& strm) { execute(strm); } diff --git a/src/plugins/intel_cpu/src/nodes/transpose.h b/src/plugins/intel_cpu/src/nodes/transpose.h index 0ed1ad67940d0a..c865e4918c28cd 100644 --- a/src/plugins/intel_cpu/src/nodes/transpose.h +++ b/src/plugins/intel_cpu/src/nodes/transpose.h @@ -18,13 +18,13 @@ namespace node { class Transpose : public Node { public: - Transpose(const std::shared_ptr& op, const GraphContext::CPtr context); + Transpose(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool 
isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override; bool canBeInPlace() const override { return false; @@ -43,7 +43,7 @@ class Transpose : public Node { } protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; std::shared_ptr transpose_context; private: diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp index 83db293885af0d..391e1967a8c682 100644 --- a/src/plugins/intel_cpu/src/nodes/unique.cpp +++ b/src/plugins/intel_cpu/src/nodes/unique.cpp @@ -33,7 +33,7 @@ bool Unique::isSupportedOperation(const std::shared_ptr& op, std return true; } -Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr context) +Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, InternalDynShapeInferFactory()) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { @@ -78,6 +78,7 @@ void Unique::initSupportedPrimitiveDescriptors() { inPortConfigs.push_back({LayoutType::ncsp, axisPrecision}); } std::vector outPortConfigs; + outPortConfigs.reserve(4); for (int i = 0; i < 4; i++) { outPortConfigs.push_back({LayoutType::ncsp, i == 0 ? 
dataPrecision : axisPrecision}); } @@ -132,7 +133,7 @@ struct Unique::slicedExec { } }; -void Unique::execute(dnnl::stream strm) { +void Unique::execute(const dnnl::stream& strm) { if (flattened) { OV_SWITCH(intel_cpu, flattenExec, @@ -154,7 +155,7 @@ void Unique::execute(dnnl::stream strm) { } } -void Unique::executeDynamicImpl(dnnl::stream strm) { +void Unique::executeDynamicImpl(const dnnl::stream& strm) { const auto& srcDataDims = getSrcMemoryAtPort(IN_DATA)->getStaticDims(); VectorDims dstDataDims; Dim uniqLen = 1; diff --git a/src/plugins/intel_cpu/src/nodes/unique.hpp b/src/plugins/intel_cpu/src/nodes/unique.hpp index ddc7bdeaf62f9e..cc1a47431dc358 100644 --- a/src/plugins/intel_cpu/src/nodes/unique.hpp +++ b/src/plugins/intel_cpu/src/nodes/unique.hpp @@ -12,19 +12,19 @@ namespace node { class Unique : public Node { public: - Unique(const std::shared_ptr& op, const GraphContext::CPtr context); + Unique(const std::shared_ptr& op, const GraphContext::CPtr& context); static bool isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept; void getSupportedDescriptors() override{}; void initSupportedPrimitiveDescriptors() override; void createPrimitive() override; - void execute(dnnl::stream strm) override; + void execute(const dnnl::stream& strm) override; bool created() const override { return getType() == Type::Unique; } protected: - void executeDynamicImpl(dnnl::stream strm) override; + void executeDynamicImpl(const dnnl::stream& strm) override; void prepareParams() override; bool needShapeInfer() const override { return false; diff --git a/src/plugins/intel_cpu/src/partitioned_mem_blk.h b/src/plugins/intel_cpu/src/partitioned_mem_blk.h index 4eb8e96424cbf7..1d1558a5edb3ab 100644 --- a/src/plugins/intel_cpu/src/partitioned_mem_blk.h +++ b/src/plugins/intel_cpu/src/partitioned_mem_blk.h @@ -4,6 +4,8 @@ #pragma once +#include + #include "cpu_memory.h" namespace ov { diff --git a/src/plugins/intel_cpu/src/plugin.cpp 
b/src/plugins/intel_cpu/src/plugin.cpp index f2494e061c8301..6194438c928068 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -241,7 +241,7 @@ std::shared_ptr Plugin::compile_model(const std::shared_ptr< } } - auto config = orig_config; + const auto& config = orig_config; const std::shared_ptr cloned_model = model->clone(); Config::ModelType modelType = getModelType(model); DEBUG_LOG(PrintableModel(*cloned_model, "org_")); diff --git a/src/plugins/intel_cpu/src/post_ops.cpp b/src/plugins/intel_cpu/src/post_ops.cpp index fcd250cc892e31..87904c8aa0dd7e 100644 --- a/src/plugins/intel_cpu/src/post_ops.cpp +++ b/src/plugins/intel_cpu/src/post_ops.cpp @@ -151,17 +151,17 @@ Algorithm convertToEltwiseAlgorithm(const ActivationPostOp::Type type) { OPENVINO_THROW("Unsupported algorithm"); } -PostOps getPostOps(std::vector fused) { +PostOps getPostOps(const std::vector& fused) { PostOps ops; - auto makeActivationPostOp = [](const std::shared_ptr eltwise) { + auto makeActivationPostOp = [](const std::shared_ptr& eltwise) { return std::make_shared(convertToActivationPostOpt(eltwise->getAlgorithm()), eltwise->getAlpha(), eltwise->getBeta(), eltwise->getGamma()); }; - auto makeScaleShiftPostOp = [](const std::shared_ptr eltwise) { + auto makeScaleShiftPostOp = [](const std::shared_ptr& eltwise) { return std::make_shared(convertToScaleShiftOpt(eltwise->getAlgorithm()), eltwise->getScales(), eltwise->getShifts()); diff --git a/src/plugins/intel_cpu/src/post_ops.hpp b/src/plugins/intel_cpu/src/post_ops.hpp index 706d248d37e324..e54b07544342ec 100644 --- a/src/plugins/intel_cpu/src/post_ops.hpp +++ b/src/plugins/intel_cpu/src/post_ops.hpp @@ -56,7 +56,7 @@ struct ActivationPostOp : PostOp { const float alpha, const float beta, const float gamma, - eltwiseExecutorCreatingStrategy strategy = nullptr) + const eltwiseExecutorCreatingStrategy& strategy = nullptr) : m_type(type), m_alpha(alpha), m_beta(beta), @@ -189,6 +189,6 @@ 
ActivationPostOp::Type convertToActivationPostOpt(const Algorithm alg); Algorithm convertToEltwiseAlgorithm(const ActivationPostOp::Type m_type); -PostOps getPostOps(std::vector fused); +PostOps getPostOps(const std::vector& fused); } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/proxy_mem_blk.cpp b/src/plugins/intel_cpu/src/proxy_mem_blk.cpp index 11256acbf86fa3..830fca67f7a007 100644 --- a/src/plugins/intel_cpu/src/proxy_mem_blk.cpp +++ b/src/plugins/intel_cpu/src/proxy_mem_blk.cpp @@ -82,4 +82,4 @@ void ProxyMemoryBlock::notifyUpdate() { item->update(); } } -} \ No newline at end of file +} diff --git a/src/plugins/intel_cpu/src/proxy_mem_blk.h b/src/plugins/intel_cpu/src/proxy_mem_blk.h index ffaa605ed491e0..b44ca44712a592 100644 --- a/src/plugins/intel_cpu/src/proxy_mem_blk.h +++ b/src/plugins/intel_cpu/src/proxy_mem_blk.h @@ -15,9 +15,9 @@ namespace intel_cpu { class ProxyMemoryBlock : public IMemoryBlockObserver { public: ProxyMemoryBlock() : m_pOrigBlock(std::make_shared()), m_pMemBlock(m_pOrigBlock) {} - explicit ProxyMemoryBlock(std::shared_ptr pBlock) { + explicit ProxyMemoryBlock(const std::shared_ptr& pBlock) { OPENVINO_ASSERT(pBlock, "Memory block is uninitialized"); - m_pMemBlock = std::move(pBlock); + m_pMemBlock = pBlock; } void* getRawPtr() const noexcept override; @@ -50,4 +50,4 @@ using ProxyMemoryBlockPtr = std::shared_ptr; using ProxyMemoryBlockCPtr = std::shared_ptr; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp index 8735d3164edf31..b52f62cb7e4691 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/adaptive_pooling.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -33,7 
+35,7 @@ class AdaptivePoolingShapeInfer : public ShapeInferEmptyPads { class AdaptivePoolingShapeInferFactory : public ShapeInferFactory { public: - AdaptivePoolingShapeInferFactory(std::shared_ptr op) : m_op(op) {} + AdaptivePoolingShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.hpp index 277639be54221e..5813c4084173cb 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/color_convert.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -32,7 +34,7 @@ class ColorConvertShapeInfer : public ShapeInferEmptyPads { class ColorConvertShapeInferFactory : public ShapeInferFactory { public: - ColorConvertShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ColorConvertShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp index 25fa5c8b0b07c5..22967cf6ae3663 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/fullyconnected.hpp @@ -28,7 +28,7 @@ class FCShapeInfer : public ShapeInferEmptyPads { class FCShapeInferFactory : public ShapeInferFactory { public: - FCShapeInferFactory(std::shared_ptr op) : m_op(op) {} + FCShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} ShapeInferPtr makeShapeInfer() const override { return std::make_shared(m_op->get_output_partial_shape(0).rank().get_length()); } diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp 
b/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp index ee6176302ad5be..9cb77fa2c9e25d 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/gather.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -35,7 +37,7 @@ class GatherShapeInfer : public ShapeInferEmptyPads { class GatherShapeInferFactory : public ShapeInferFactory { public: - GatherShapeInferFactory(std::shared_ptr op) : m_op(op) {} + GatherShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp index 1fea18914ae426..5e7c90a3df9d4e 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/matmul.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -35,7 +37,7 @@ class MMShapeInfer : public ShapeInferEmptyPads { class MMShapeInferFactory : public ShapeInferFactory { public: - MMShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} + MMShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp index 0014891baada6d..1a9d87ff20a789 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/ngram.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -27,7 +29,7 @@ class NgramShapeInfer : public ShapeInferEmptyPads { class NgramShapeInferFactory : public ShapeInferFactory { public: - NgramShapeInferFactory(const std::shared_ptr& op) : 
m_op(op) {} + NgramShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp index 5b8b648999bb75..bbccf5d435f9f8 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -32,7 +34,7 @@ class OneHotShapeInfer : public ShapeInferEmptyPads { class OneHotShapeInferFactory : public ShapeInferFactory { public: - OneHotShapeInferFactory(std::shared_ptr op) : m_op(op) {} + OneHotShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp index dc543a4d16f316..39189f3219b8b5 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -34,7 +36,7 @@ class PriorBoxShapeInfer : public ShapeInferEmptyPads { class PriorBoxShapeInferFactory : public ShapeInferFactory { public: - explicit PriorBoxShapeInferFactory(std::shared_ptr op) : m_op(op) {} + explicit PriorBoxShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp index 27faf3a63c6d4b..0100a8546d1a32 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp +++ 
b/src/plugins/intel_cpu/src/shape_inference/custom/priorbox_clustered.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -34,7 +36,7 @@ class PriorBoxClusteredShapeInfer : public ShapeInferEmptyPads { class PriorBoxClusteredShapeInferFactory : public ShapeInferFactory { public: - explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr op) : m_op(op) {} + explicit PriorBoxClusteredShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp index fba7fb3ac3be0c..c60644a9fdba6f 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.cpp @@ -18,7 +18,7 @@ Result ReshapeShapeInfer::infer(const std::vectorgetData(); const auto& dims = memPtr->getStaticDims(); const auto outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); @@ -76,7 +76,7 @@ Result SqueezeShapeInfer::infer(const std::vectorgetData(); const auto& dims = memPtr->getStaticDims(); if (dims.size() != 0) { @@ -123,7 +123,7 @@ Result UnsqueezeShapeInfer::infer(const std::vectorgetData(); const auto& dims = memPtr->getStaticDims(); size_t outputPatternSize = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp index 2c85f34a723713..f2ef40db7a87c1 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/reshape.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -46,7 +48,7 @@ class UnsqueezeShapeInfer : public ShapeInferEmptyPads { class ReshapeShapeInferFactory : public 
ShapeInferFactory { public: - ReshapeShapeInferFactory(std::shared_ptr op) : m_op(op) {} + ReshapeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp index 1a2b8b55aad5fe..68fc4bf0462499 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/rms_norm.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp index 69153901c40ce2..5b893b3458e4fb 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp @@ -4,6 +4,8 @@ #include "scaled_attn.hpp" +#include + #include "shape_inference/shape_inference.hpp" #include "transformations/cpu_opset/common/op/sdpa.hpp" #include "utils.hpp" @@ -14,7 +16,7 @@ namespace node { class SDPAShapeInfer : public ShapeInferEmptyPads { public: - SDPAShapeInfer(const ScaledDotProductAttentionWithKVCache::Config& config) : m_config(config) {} + SDPAShapeInfer(ScaledDotProductAttentionWithKVCache::Config config) : m_config(std::move(config)) {} IShapeInfer::Result infer(const std::vector>& input_shapes, const std::unordered_map& data_dependency) override { diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp index 870b1e9da98574..09d54181177c91 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -13,7 
+15,7 @@ namespace node { class SDPAShapeInferFactory : public ShapeInferFactory { public: - SDPAShapeInferFactory(std::shared_ptr op) : m_op(op) {} + SDPAShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp index bd62d0450ec986..9261d11e88c903 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -39,7 +41,7 @@ class StridedSliceShapeInfer : public ShapeInferEmptyPads { class StridedSliceShapeInferFactory : public ShapeInferFactory { public: - StridedSliceShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} + StridedSliceShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp index 0b03f442575f2d..7d52eedf0d7156 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/subgraph.hpp @@ -4,7 +4,10 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" +#include "snippets/op/subgraph.hpp" #pragma once namespace ov { @@ -14,7 +17,7 @@ using Result = IShapeInfer::Result; class SnippetShapeInfer : public ShapeInferEmptyPads { public: - explicit SnippetShapeInfer(const std::shared_ptr& s) : m_subgraph(s) { + explicit SnippetShapeInfer(std::shared_ptr s) : m_subgraph(std::move(s)) { m_status_map[snippets::ShapeInferStatus::success] = ov::intel_cpu::ShapeInferStatus::success; m_status_map[snippets::ShapeInferStatus::skip] = ov::intel_cpu::ShapeInferStatus::skip; } diff 
--git a/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp index 259e379fc2cc14..3403e8510cf355 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/transpose.hpp @@ -4,6 +4,8 @@ #include +#include + #include "shape_inference/shape_inference_cpu.hpp" #pragma once @@ -46,7 +48,7 @@ class TransposeShapeInfer : public ShapeInferEmptyPads { class TransposeShapeInferFactory : public ShapeInferFactory { public: - TransposeShapeInferFactory(const std::shared_ptr& op) : m_op(op) {} + TransposeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override; private: diff --git a/src/plugins/intel_cpu/src/shape_inference/static_shape.hpp b/src/plugins/intel_cpu/src/shape_inference/static_shape.hpp index 26a13e10d3cefe..01feba13ee8ed3 100644 --- a/src/plugins/intel_cpu/src/shape_inference/static_shape.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/static_shape.hpp @@ -78,10 +78,6 @@ class StaticShapeAdapter { return m_dims; } - const TDims&& operator*() const&& noexcept { - return std::move(m_dims); - } - TDims&& operator*() && noexcept { return std::move(m_dims); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp index c65eb2340dcd34..a0ea60b9a20a63 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp @@ -4,12 +4,13 @@ #include "causal_mask_preprocess.hpp" #include +#include #include "transformations/itt.hpp" -ov::intel_cpu::CausalMaskPreprocessNode::CausalMaskPreprocessNode(const OutputVector& args, const Config& cfg) +ov::intel_cpu::CausalMaskPreprocessNode::CausalMaskPreprocessNode(const 
OutputVector& args, Config cfg) : Op(args), - m_config(cfg) { + m_config(std::move(cfg)) { constructor_validate_and_infer_types(); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp index 602795bf8cedfb..7628aea386e4e7 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp @@ -19,7 +19,7 @@ class CausalMaskPreprocessNode : public ov::op::Op { std::string type; }; - CausalMaskPreprocessNode(const OutputVector& args, const Config& cfg); + CausalMaskPreprocessNode(const OutputVector& args, Config cfg); bool visit_attributes(ov::AttributeVisitor& visitor) override; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.cpp index 4e5a28461a3e45..94c0fb376196fb 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.cpp @@ -7,13 +7,13 @@ #include "transformations/itt.hpp" ov::intel_cpu::ReadValueWithSubgraph::ReadValueWithSubgraph(const std::shared_ptr& variable, - std::shared_ptr body) { + const std::shared_ptr& body) { m_variable = variable; set_function(body); } ov::intel_cpu::ReadValueWithSubgraph::ReadValueWithSubgraph(const std::shared_ptr& variable, - std::shared_ptr body, + const std::shared_ptr& body, const OutputVector& args) : ReadValueWithSubgraph(variable, body) { set_arguments(args); @@ -111,4 +111,4 @@ void ov::intel_cpu::ReadValueWithSubgraph::validate_and_infer_types() { set_output_type(output_index, node_result.get_element_type(), node_result.get_partial_shape()); } -} \ No newline at end of file +} 
diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.hpp index 35faf88422cbc5..3e06979b414cb8 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/read_value_with_subgraph.hpp @@ -17,9 +17,10 @@ class ReadValueWithSubgraph : public ov::op::util::SubGraphOp, public ov::op::ut OPENVINO_OP("ReadValueWithSubgraph", "cpu_plugin_opset", ov::op::util::SubGraphOp); ReadValueWithSubgraph() = default; - ReadValueWithSubgraph(const std::shared_ptr& variable, std::shared_ptr body); ReadValueWithSubgraph(const std::shared_ptr& variable, - std::shared_ptr body, + const std::shared_ptr& body); + ReadValueWithSubgraph(const std::shared_ptr& variable, + const std::shared_ptr& body, const OutputVector& args); std::string get_variable_id() const override; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp index 06738401791926..f4fe54e7d41f43 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.cpp @@ -5,13 +5,14 @@ #include "sdpa.hpp" #include +#include #include "transformations/itt.hpp" ov::intel_cpu::ScaledDotProductAttentionWithKVCache::ScaledDotProductAttentionWithKVCache(const OutputVector& args, - const Config& cfg) + Config cfg) : Op(args), - m_config(cfg) { + m_config(std::move(cfg)) { constructor_validate_and_infer_types(); } @@ -102,9 +103,9 @@ bool ov::intel_cpu::ScaledDotProductAttentionWithKVCache::visit_attributes(ov::A return true; } -ov::intel_cpu::SDPAWithTransposeReshape::SDPAWithTransposeReshape(const OutputVector& args, const Config& cfg) 
+ov::intel_cpu::SDPAWithTransposeReshape::SDPAWithTransposeReshape(const OutputVector& args, Config cfg) : Op(args), - m_config(cfg) {} + m_config(std::move(cfg)) {} std::shared_ptr ov::intel_cpu::SDPAWithTransposeReshape::clone_with_new_inputs( const ov::OutputVector& new_args) const { @@ -117,7 +118,7 @@ void ov::intel_cpu::SDPAWithTransposeReshape::validate_and_infer_types() { INTERNAL_OP_SCOPE(SDPAWithTransposeReshape_validate_and_infer_types); // [B,L,H*S] auto q_ps = get_input_partial_shape(0); - auto output_ps = q_ps; + const auto& output_ps = q_ps; NODE_VALIDATION_CHECK(this, m_config.output_BLHxS == true); NODE_VALIDATION_CHECK(this, m_config.input_BLHxS == true); NODE_VALIDATION_CHECK(this, q_ps.size() == 3u); @@ -142,4 +143,4 @@ bool ov::intel_cpu::SDPAWithTransposeReshape::visit_attributes(ov::AttributeVisi visitor.on_attribute("order_HS", m_config.order_HS); visitor.finish_structure(); return true; -} \ No newline at end of file +} diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.hpp index 3deb4cf932bd23..2864ea41d1b4ed 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/sdpa.hpp @@ -32,7 +32,7 @@ class ScaledDotProductAttentionWithKVCache : public ov::op::Op { std::vector order_HS; // Reshape[B,L,H*S]->B,L,H,S], H,S are fixed value, when input_BLHxS is true. 
}; - ScaledDotProductAttentionWithKVCache(const OutputVector& args, const Config& cfg); + ScaledDotProductAttentionWithKVCache(const OutputVector& args, Config cfg); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; bool visit_attributes(AttributeVisitor& visitor) override; @@ -57,7 +57,7 @@ class SDPAWithTransposeReshape : public ov::op::Op { SDPAWithTransposeReshape() = default; - SDPAWithTransposeReshape(const OutputVector& args, const Config& cfg); + SDPAWithTransposeReshape(const OutputVector& args, Config cfg); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; bool visit_attributes(AttributeVisitor& visitor) override; @@ -76,4 +76,4 @@ class SDPAWithTransposeReshape : public ov::op::Op { }; } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp index 551ddf95ba18d1..e2bcac397af164 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp @@ -89,7 +89,7 @@ CausalMaskPreprocess::CausalMaskPreprocess() { auto max_seq_len = Symbol("max_seq_len"); - auto ShapeOf_41610 = batch_size; // shapeOf(beamidx) + const auto& ShapeOf_41610 = batch_size; // shapeOf(beamidx) auto ListConstruct_Concat = makePattern({ShapeOf_41610, {1}, {1}, {1}}, {{"axis", 0}}); // tensor_array auto repeat_Tile = diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp index 0f2252fd5d256f..e28485b4c9cb1d 100644 --- 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp @@ -56,7 +56,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { // Check that if second inputs is Constant path and it's shape without ones dimensions has length <= 2 // we replace MatMul with FullyConnected operation. - if (std::count_if(shape_b.begin(), shape_b.end(), [](ov::Dimension x) { + if (std::count_if(shape_b.begin(), shape_b.end(), [](const ov::Dimension& x) { return x != 1; }) > 2) { return false; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.cpp index c815e7373c870c..bc883c82484c2b 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_fc_reshape_to_weights.cpp @@ -24,7 +24,7 @@ ov::intel_cpu::MoveFCReshapeToWeights::MoveFCReshapeToWeights() { auto convert_m = wrap_type({weights_m}, consumers_count(1)); auto one_consumer_rank_equals = [](const ov::Dimension& expected_rank) { - return [=](ov::Output output) -> bool { + return [=](const ov::Output& output) -> bool { return consumers_count(1)(output) && rank_equals(expected_rank)(output); }; }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.cpp index 3f652bad3e4580..6f8d1cdaf8950c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/move_readvalue_inputs_to_subgraph.cpp @@ -43,7 +43,7 @@ 
ov::intel_cpu::MoveReadValueInputsToSubgraph::MoveReadValueInputsToSubgraph() { OutputVector outputs = {}; // DFS, Check if current node's final successor is only ReadValue. - std::function, bool&)> dfs = [&](std::shared_ptr node, + std::function, bool&)> dfs = [&](const std::shared_ptr& node, bool& found_output) { if (found_output) { return; @@ -84,7 +84,7 @@ ov::intel_cpu::MoveReadValueInputsToSubgraph::MoveReadValueInputsToSubgraph() { } }; - std::function)> reverse_dfs = [&](std::shared_ptr node) { + std::function)> reverse_dfs = [&](const std::shared_ptr& node) { if (visited_path_to_output.find(node) != visited_path_to_output.end()) { inputs.emplace_back(node); return; @@ -128,7 +128,7 @@ ov::intel_cpu::MoveReadValueInputsToSubgraph::MoveReadValueInputsToSubgraph() { // Subgraph's input auto params = ParameterVector{}; - for (auto inp : inputs) { + for (const auto& inp : inputs) { auto param = std::make_shared(inp->get_element_type(), inp->get_output_partial_shape(0)); params.push_back(param); @@ -161,4 +161,4 @@ ov::intel_cpu::MoveReadValueInputsToSubgraph::MoveReadValueInputsToSubgraph() { auto m = std::make_shared(readvalue_pattern, matcher_name); this->register_matcher(m, callback); -} \ No newline at end of file +} diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.cpp index f1a8a679c0b52f..f9b1e4f2b2c053 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/ngram_fusion.cpp @@ -11,14 +11,14 @@ #include #include +#include "openvino/cc/pass/itt.hpp" #include "openvino/opsets/opset1.hpp" #include "transformations/cpu_opset/common/op/ngram.hpp" -#include "transformations/itt.hpp" using namespace ov::pass::pattern; ov::intel_cpu::NgramFusion::NgramFusion() { MATCHER_SCOPE(NgramFusion); - auto concat_matches = [](ov::Output output) 
-> bool { + auto concat_matches = [](const ov::Output& output) -> bool { if (auto concat = ov::as_type_ptr(output.get_node_shared_ptr())) { return ov::pass::pattern::rank_equals(2)(output) && concat->get_axis() == 1; } @@ -41,7 +41,7 @@ ov::intel_cpu::NgramFusion::NgramFusion() { } auto check_bias = [](const PatternValueMap& pattern_map, - const std::shared_ptr matched_constant_to_check, + const std::shared_ptr& matched_constant_to_check, const size_t expected_bias) { auto out_it = pattern_map.find(matched_constant_to_check); if (expected_bias == 0) { @@ -53,15 +53,15 @@ ov::intel_cpu::NgramFusion::NgramFusion() { return constant != nullptr && ov::op::util::constantIsEqualTo(constant, expected_bias); }; - auto tokens_match = [](ov::Output output) -> bool { + auto tokens_match = [](const ov::Output& output) -> bool { return ov::pass::pattern::rank_equals(2)(output) && ov::pass::pattern::type_matches(ov::element::f32)(output); }; - auto idces_match = [](ov::Output output) -> bool { + auto idces_match = [](const ov::Output& output) -> bool { return ov::pass::pattern::rank_equals(2)(output) && ov::pass::pattern::type_matches(ov::element::i32)(output); }; - auto as_is_cropped_shape_match = [](ov::Output output) -> bool { + auto as_is_cropped_shape_match = [](const ov::Output& output) -> bool { const auto& symbols = output.get_tensor().get_value_symbol(); return ov::pass::pattern::rank_equals(1)(output) && !symbols.empty() && symbols[0] != nullptr; }; @@ -107,13 +107,13 @@ ov::intel_cpu::NgramFusion::NgramFusion() { return false; } - auto cropped_shape_symbol_match = [cropped_shape_symbol](ov::Output output) -> bool { + auto cropped_shape_symbol_match = [cropped_shape_symbol](const ov::Output& output) -> bool { const auto& symbols = output.get_tensor().get_value_symbol(); return ov::pass::pattern::rank_equals(1)(output) && !symbols.empty() && ov::symbol::are_equal(symbols[0], cropped_shape_symbol); }; - auto tokens_symbol_match = [tokens_match, 
cropped_shape_symbol](ov::Output output) -> bool { + auto tokens_symbol_match = [tokens_match, cropped_shape_symbol](const ov::Output& output) -> bool { return tokens_match(output) && symbol::are_equal(output.get_partial_shape()[0].get_symbol(), cropped_shape_symbol); }; diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp index 291ef98f1a88c6..8f44582ba89b01 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp @@ -67,7 +67,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() { auto reshape_kv = makePattern({kv, any_input()}); auto unsqueeze_kv = makePattern({kv, any_input()}); - auto check_one = [](Output output) -> bool { + auto check_one = [](const Output& output) -> bool { auto node = ov::as_type_ptr(output.get_node_shared_ptr()); const auto& bcst_arg = node->cast_vector(); return std::all_of(bcst_arg.begin(), bcst_arg.end(), [](float i) { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp index 51fb8c33833eef..107425e38ff848 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/convert_to_cpu_specific_opset.hpp @@ -13,8 +13,8 @@ #include "common/pass/move_readvalue_inputs_to_subgraph.hpp" #include "common/pass/rnn_sequences_optimization.hpp" #include "config.h" -#include "itt.hpp" #include "nodes/fullyconnected.h" +#include "openvino/cc/pass/itt.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/pass/constant_folding.hpp" #include "openvino/pass/manager.hpp" diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp index 18a396feaf09a4..1acf3c465338f8 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.cpp @@ -5,6 +5,8 @@ #include "mha.hpp" #include +#include +#include #include "openvino/opsets/opset3.hpp" #include "transformations/itt.hpp" @@ -13,16 +15,16 @@ ov::intel_cpu::MHANode::MHANode(const ov::Output& in0, const ov::Output& in1, const ov::Output& in2, const ov::Output& in3, - const std::vector& mul_scales, + std::vector mul_scales, bool is_mul_first, const ov::element::Type output_type) : Op({in0, in1, in2, in3}), - m_output_type(output_type) { - this->mul_scales = mul_scales; - this->is_mul_first = is_mul_first; - this->fq0_output_type = ov::element::undefined; - this->fq1_output_type = ov::element::undefined; - this->fq2_output_type = ov::element::undefined; + m_output_type(output_type), + mul_scales(std::move(mul_scales)), + is_mul_first(is_mul_first), + fq0_output_type(ov::element::undefined), + fq1_output_type(ov::element::undefined), + fq2_output_type(ov::element::undefined) { validate_and_infer_types(); } @@ -30,27 +32,27 @@ ov::intel_cpu::MHANode::MHANode(const ov::Output& in0, const ov::Output& in1, const ov::Output& in2, const ov::Output& in3, - const std::vector& mul_scales, + std::vector mul_scales, bool is_mul_first, - const std::vector& fq_scales0, - const std::vector& fq_scales1, - const std::vector& fq_scales2, - const std::vector& fq_scales3, + std::vector fq_scales0, + std::vector fq_scales1, + std::vector fq_scales2, + std::vector fq_scales3, const ov::element::Type fq0_output_type, const ov::element::Type fq1_output_type, const ov::element::Type fq2_output_type, const ov::element::Type output_type) : Op({in0, in1, in2, in3}), - m_output_type(output_type) { - this->mul_scales = mul_scales; - this->is_mul_first = 
is_mul_first; - this->fq_scales0 = fq_scales0; - this->fq_scales1 = fq_scales1; - this->fq_scales2 = fq_scales2; - this->fq_scales3 = fq_scales3; - this->fq0_output_type = fq0_output_type; - this->fq1_output_type = fq1_output_type; - this->fq2_output_type = fq2_output_type; + m_output_type(output_type), + mul_scales(std::move(mul_scales)), + is_mul_first(is_mul_first), + fq_scales0(std::move(fq_scales0)), + fq_scales1(std::move(fq_scales1)), + fq_scales2(std::move(fq_scales2)), + fq_scales3(std::move(fq_scales3)), + fq0_output_type(fq0_output_type), + fq1_output_type(fq1_output_type), + fq2_output_type(fq2_output_type) { validate_and_infer_types(); } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.hpp index dff09b3cc46a05..f7b0af91516987 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/op/mha.hpp @@ -19,7 +19,7 @@ class MHANode : public ov::op::Op { const ov::Output& in1, const ov::Output& in2, const ov::Output& in3, - const std::vector& mul_scales, + std::vector mul_scales, bool is_mul_first, const ov::element::Type output_type); @@ -27,12 +27,12 @@ class MHANode : public ov::op::Op { const ov::Output& in1, const ov::Output& in2, const ov::Output& in3, - const std::vector& mul_scales, + std::vector mul_scales, bool is_mul_first, - const std::vector& fq_scales0, - const std::vector& fq_scales1, - const std::vector& fq_scales2, - const std::vector& fq_scales3, + std::vector fq_scales0, + std::vector fq_scales1, + std::vector fq_scales2, + std::vector fq_scales3, const ov::element::Type fq0_output_type, const ov::element::Type fq1_output_type, const ov::element::Type fq2_output_type, diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.cpp index 
ff06a1b70cfd93..d4988ce9f43337 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/x64/pass/mlp_fusion.cpp @@ -232,7 +232,7 @@ ov::intel_cpu::MLPFusion::MLPFusion() { new_args.push_back(pattern_map.at(down_proj_weight_scales_per_OC)); } - auto old_node = root; + const auto& old_node = root; auto new_node = std::make_shared(new_args, config); new_node->set_friendly_name(old_node->get_friendly_name()); ov::copy_runtime_info( diff --git a/src/plugins/intel_cpu/src/transformations/itt.hpp b/src/plugins/intel_cpu/src/transformations/itt.hpp index 030a80745a2a54..2dd4035062a5db 100644 --- a/src/plugins/intel_cpu/src/transformations/itt.hpp +++ b/src/plugins/intel_cpu/src/transformations/itt.hpp @@ -9,7 +9,9 @@ #pragma once -#include +#include "openvino/cc/pass/itt.hpp" +#include "openvino/cc/selective_build.h" +#include "openvino/itt.hpp" namespace ov { namespace intel_cpu { diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp index ce57cd1529b893..df05ce5d539f46 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp @@ -17,7 +17,7 @@ intel_cpu::BrgemmCopyB::BrgemmCopyB(const Output& x, const size_t offset_in, const size_t offset_out0, const size_t offset_out1, - std::vector layout_input) + const std::vector& layout_input) : snippets::modifier::MemoryAccess(1, with_compensations(type) ? 
2 : 1), op::Op({x}), m_type(type), @@ -28,7 +28,7 @@ intel_cpu::BrgemmCopyB::BrgemmCopyB(const Output& x, if (with_compensations(m_type)) { set_output_port_descriptor({0, offset_out1}, 1); } - custom_constructor_validate_and_infer_types(std::move(layout_input)); + custom_constructor_validate_and_infer_types(layout_input); } intel_cpu::BrgemmCopyB::BrgemmCopyB(const Output& x, @@ -37,7 +37,7 @@ intel_cpu::BrgemmCopyB::BrgemmCopyB(const Output& x, const PortDescriptor& desc_in0, const PortDescriptor& desc_out0, const PortDescriptor& desc_out1, - std::vector layout_input) + const std::vector& layout_input) : snippets::modifier::MemoryAccess(1, with_compensations(type) ? 2 : 1), op::Op({x}), m_type(type), @@ -48,7 +48,7 @@ intel_cpu::BrgemmCopyB::BrgemmCopyB(const Output& x, if (with_compensations(m_type)) { set_output_port_descriptor(desc_out1, 1); } - custom_constructor_validate_and_infer_types(std::move(layout_input)); + custom_constructor_validate_and_infer_types(layout_input); } bool BrgemmCopyB::visit_attributes(AttributeVisitor& visitor) { @@ -59,7 +59,7 @@ bool BrgemmCopyB::visit_attributes(AttributeVisitor& visitor) { return true; } -void BrgemmCopyB::custom_constructor_validate_and_infer_types(std::vector layout_input) { +void BrgemmCopyB::custom_constructor_validate_and_infer_types(const std::vector& layout_input) { INTERNAL_OP_SCOPE(BrgemmRepack_ctor_validate_and_infer_types); OPENVINO_ASSERT(m_type == BRGEMM_TYPE::WITH_COMPENSATIONS || m_type == BRGEMM_TYPE::REPACKING_ONLY, "Unsupported BRGEMM_TYPE value"); @@ -136,4 +136,4 @@ ov::snippets::IShapeInferSnippets::Result BrgemmCopyB::ShapeInfer::infer( return {new_shapes, ov::snippets::ShapeInferStatus::success}; } } // namespace intel_cpu -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index b4e7b030fc605b..bf327784503352 
100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -31,14 +31,14 @@ class BrgemmCopyB : public snippets::modifier::MemoryAccess, public ov::op::Op { const size_t offset_in = 0lu, const size_t offset_out0 = 0lu, const size_t offset_out1 = 0lu, - std::vector layout_input = {}); + const std::vector& layout_input = {}); BrgemmCopyB(const Output& x, const element::Type src_type, BRGEMM_TYPE type, const PortDescriptor& desc_in0, const PortDescriptor& desc_out0, const PortDescriptor& desc_out1, - std::vector layout_input = {}); + const std::vector& layout_input = {}); BrgemmCopyB() = default; size_t get_offset_in() const { @@ -75,7 +75,7 @@ class BrgemmCopyB : public snippets::modifier::MemoryAccess, public ov::op::Op { static bool is_transposed(const std::vector& layout); private: - void custom_constructor_validate_and_infer_types(std::vector layout_input = {}); + void custom_constructor_validate_and_infer_types(const std::vector& layout_input = {}); void validate_element_type(const ov::element::Type& element_type); BRGEMM_TYPE m_type = BRGEMM_TYPE::REPACKING_ONLY; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp index 871cd3d2ac3686..b994ef4fa2d5df 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.cpp @@ -20,9 +20,9 @@ BrgemmCPU::BrgemmCPU(const Output& A, const size_t offset_a, const size_t offset_b, const size_t offset_c, - std::vector layout_a, - std::vector layout_b, - std::vector layout_c) + const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c) : Brgemm(), m_type(type) { // We call default ctor of Brgemm class to avoid incorrect shape infer in constructor_validate_and_type_infer() call @@ 
-32,7 +32,7 @@ BrgemmCPU::BrgemmCPU(const Output& A, set_input_port_descriptor({0, offset_a}, 0); set_input_port_descriptor({0, offset_b}, 1); set_output_port_descriptor({0, offset_c}, 0); - custom_constructor_validate_and_infer_types(std::move(layout_a), std::move(layout_b), std::move(layout_c)); + custom_constructor_validate_and_infer_types(layout_a, layout_b, layout_c); } BrgemmCPU::BrgemmCPU(const Output& A, @@ -43,9 +43,9 @@ BrgemmCPU::BrgemmCPU(const Output& A, const size_t offset_b, const size_t offset_scratch, const size_t offset_c, - std::vector layout_a, - std::vector layout_b, - std::vector layout_c) + const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c) : Brgemm(), m_type(type) { set_arguments({A, B, scratch}); @@ -55,7 +55,7 @@ BrgemmCPU::BrgemmCPU(const Output& A, set_input_port_descriptor({0, offset_b}, 1); set_output_port_descriptor({0, offset_c}, 0); set_input_port_descriptor({0, offset_scratch}, 2); - custom_constructor_validate_and_infer_types(std::move(layout_a), std::move(layout_b), std::move(layout_c)); + custom_constructor_validate_and_infer_types(layout_a, layout_b, layout_c); } BrgemmCPU::BrgemmCPU(const Output& A, @@ -64,16 +64,16 @@ BrgemmCPU::BrgemmCPU(const Output& A, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, - std::vector layout_a, - std::vector layout_b, - std::vector layout_c) + const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c) : Brgemm(), m_type(type) { set_arguments({A, B}); set_output_size(1); m_input_ports = {{0, desc_a}, {1, desc_b}}; m_output_ports = {{0, desc_c}}; - custom_constructor_validate_and_infer_types(std::move(layout_a), std::move(layout_b), std::move(layout_c)); + custom_constructor_validate_and_infer_types(layout_a, layout_b, layout_c); } BrgemmCPU::BrgemmCPU(const Output& A, @@ -84,21 +84,21 @@ BrgemmCPU::BrgemmCPU(const Output& A, const PortDescriptor& desc_b, const PortDescriptor& 
desc_scratch, const PortDescriptor& desc_c, - std::vector layout_a, - std::vector layout_b, - std::vector layout_c) + const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c) : Brgemm(), m_type(type) { set_arguments({A, B, scratch}); set_output_size(1); m_input_ports = {{0, desc_a}, {1, desc_b}, {2, desc_scratch}}; m_output_ports = {{0, desc_c}}; - custom_constructor_validate_and_infer_types(std::move(layout_a), std::move(layout_b), std::move(layout_c)); + custom_constructor_validate_and_infer_types(layout_a, layout_b, layout_c); } -void BrgemmCPU::custom_constructor_validate_and_infer_types(std::vector layout_a, - std::vector layout_b, - std::vector layout_c) { +void BrgemmCPU::custom_constructor_validate_and_infer_types(const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c) { INTERNAL_OP_SCOPE(BrgemmCPU_constructor_validate_and_infer_types); validate_inputs(); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp index c2329fdf8af62d..ddc21e8ddb59d3 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp @@ -29,9 +29,9 @@ class BrgemmCPU : public snippets::op::Brgemm { const size_t offset_a = 0, const size_t offset_b = 0, const size_t offset_c = 0, - std::vector layout_a = {}, - std::vector layout_b = {}, - std::vector layout_c = {}); + const std::vector& layout_a = {}, + const std::vector& layout_b = {}, + const std::vector& layout_c = {}); BrgemmCPU(const Output& A, const Output& B, const Output& scratch, @@ -40,18 +40,18 @@ class BrgemmCPU : public snippets::op::Brgemm { const size_t offset_b = 0, const size_t offset_scratch = 0, const size_t offset_c = 0, - std::vector layout_a = {}, - std::vector layout_b = {}, - std::vector layout_c = {}); + const std::vector& layout_a = {}, + 
const std::vector& layout_b = {}, + const std::vector& layout_c = {}); BrgemmCPU(const Output& A, const Output& B, BRGEMM_TYPE type, const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, - std::vector layout_a = {}, - std::vector layout_b = {}, - std::vector layout_c = {}); + const std::vector& layout_a = {}, + const std::vector& layout_b = {}, + const std::vector& layout_c = {}); BrgemmCPU(const Output& A, const Output& B, const Output& scratch, @@ -60,9 +60,9 @@ class BrgemmCPU : public snippets::op::Brgemm { const PortDescriptor& desc_b, const PortDescriptor& desc_scratch, const PortDescriptor& desc_c, - std::vector layout_a = {}, - std::vector layout_b = {}, - std::vector layout_c = {}); + const std::vector& layout_a = {}, + const std::vector& layout_b = {}, + const std::vector& layout_c = {}); BrgemmCPU() = default; void validate_and_infer_types() override; @@ -79,9 +79,9 @@ class BrgemmCPU : public snippets::op::Brgemm { constexpr static size_t SCRATCH_BYTE_SIZE = 32 * 1024; private: - void custom_constructor_validate_and_infer_types(std::vector layout_a, - std::vector layout_b, - std::vector layout_c); + void custom_constructor_validate_and_infer_types(const std::vector& layout_a, + const std::vector& layout_b, + const std::vector& layout_c); void validate_with_scratchpad() const; void validate_inputs() const; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp index 386941fd94bb98..f360437d59da6b 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp @@ -120,13 +120,13 @@ size_t compute_inner_k_block(const ov::element::Type& precision) { ov::snippets::lowered::ExpressionPtr get_copy_b_expr(const ov::snippets::lowered::ExpressionPtr& brgemm_expr) { 
OPENVINO_ASSERT(ov::is_type(brgemm_expr->get_node()), "get_copy_b_expr must be called only for BrgemmCPU node"); - const auto b_input_expr = brgemm_expr->get_input_port_connector(1)->get_source().get_expr(); + auto b_input_expr = brgemm_expr->get_input_port_connector(1)->get_source().get_expr(); if (ov::is_type(b_input_expr->get_node())) { return b_input_expr; } else if (ov::is_type(b_input_expr)) { OPENVINO_ASSERT(b_input_expr->get_input_count() >= 1, "BufferExpression on brgemm's B input must have at least one input"); - const auto input_buffer_expr = b_input_expr->get_input_port_connector(0)->get_source().get_expr(); + auto input_buffer_expr = b_input_expr->get_input_port_connector(0)->get_source().get_expr(); if (ov::is_type(input_buffer_expr->get_node())) { return input_buffer_expr; } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp index 6b7d5d31a5b12f..46c48425157ef9 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp @@ -18,7 +18,7 @@ using namespace ov::intel_cpu::pass; EnforcePrecision::EnforcePrecision( const ov::element::Type source, const ov::element::Type target, - std::function>(const std::shared_ptr& op)> + const std::function>(const std::shared_ptr& op)>& get_supported_precisions) : source(source), target(target), diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp index 24e848cf157e0e..d7738dec3f5935 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.hpp @@ -19,7 +19,7 @@ class EnforcePrecision : public ov::pass::ModelPass { 
EnforcePrecision(const element::Type source, const element::Type target, - std::function>(const std::shared_ptr& op)> + const std::function>(const std::shared_ptr& op)>& get_supported_precisions = nullptr); bool run_on_model(const std::shared_ptr& m) override; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/insert_brgemm_copy_buffers.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/insert_brgemm_copy_buffers.cpp index 91334e61db91c7..b38562fa088bb1 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/insert_brgemm_copy_buffers.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/insert_brgemm_copy_buffers.cpp @@ -81,7 +81,7 @@ bool InsertBrgemmCopyBuffers::run(LinearIR& linear_ir, LinearIR::constExprIt beg bool modified = false; for (auto expr_it = begin; expr_it != end; ++expr_it) { - const auto brgemm_expr = *expr_it; + const auto& brgemm_expr = *expr_it; if (const auto brgemm_cpu = ov::as_type_ptr(brgemm_expr->get_node())) { if (brgemm_utils::with_repacking(brgemm_cpu->get_type())) { // BrgemmCopyB might be extracted from the body diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp index d410995f09e8c7..4ae8be0bb5612c 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp @@ -345,7 +345,7 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr& node, bool isSuitableParentForFusingSumActivation(const std::shared_ptr& node) { if (!ov::is_type(node)) return false; - auto isFusedBiasNode = [](std::shared_ptr n) { + auto isFusedBiasNode = [](const std::shared_ptr& n) { if (!(ov::is_type(n) && GetNodeFusingType(n) == NodeFusingType::FusedWithConvolution)) return false; const auto conv = 
n->get_input_source_output(0); @@ -372,7 +372,7 @@ bool isSuitableParentForFusingSumActivation(const std::shared_ptr& n conv_shape[channelAxis].get_length() == static_cast(bias_norm_dims[channelAxis]) && bias_norm_dims[channelAxis] == static_cast(shape_size(bias_norm_dims)); }; - auto isFusedFQNode = [&isFusedBiasNode](std::shared_ptr n) { + auto isFusedFQNode = [&isFusedBiasNode](const std::shared_ptr& n) { if (!(ov::is_type(n) && GetNodeFusingType(n) == NodeFusingType::FusedWithConvolution)) return false; const auto& parent = n->get_input_node_shared_ptr(0); diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h index 1c8991b0e19ed5..d1b92e59452579 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.h @@ -5,6 +5,7 @@ #pragma once #include +#include #include #include "config.h" @@ -19,8 +20,8 @@ namespace intel_cpu { class Transformations { public: - Transformations(const std::shared_ptr& initialModel, const Config& config) - : model(initialModel), + Transformations(std::shared_ptr initialModel, const Config& config) + : model(std::move(initialModel)), config(config) { CPU_DEBUG_CAPS_MAYBE_UNUSED(this->config); } diff --git a/src/plugins/intel_cpu/src/utils/blob_dump.h b/src/plugins/intel_cpu/src/utils/blob_dump.h index ca6afe50dd01fb..23ef61ac909234 100644 --- a/src/plugins/intel_cpu/src/utils/blob_dump.h +++ b/src/plugins/intel_cpu/src/utils/blob_dump.h @@ -7,6 +7,7 @@ #include #include +#include #include "memory_desc/dnnl_blocked_memory_desc.h" @@ -35,7 +36,7 @@ class BlobDumper { BlobDumper(const BlobDumper&) = default; BlobDumper& operator=(BlobDumper&&) = default; - explicit BlobDumper(const MemoryPtr& _memory) : memory(_memory) {} + explicit BlobDumper(MemoryPtr _memory) : memory(std::move(_memory)) {} static BlobDumper read(const std::string& file_path); static 
BlobDumper read(std::istream& stream); diff --git a/src/plugins/intel_cpu/src/utils/codec_xor.hpp b/src/plugins/intel_cpu/src/utils/codec_xor.hpp index 45f5a5e2e9cf30..087fc86ea689d3 100644 --- a/src/plugins/intel_cpu/src/utils/codec_xor.hpp +++ b/src/plugins/intel_cpu/src/utils/codec_xor.hpp @@ -5,6 +5,7 @@ #include #include +#include namespace ov { namespace intel_cpu { @@ -22,9 +23,9 @@ union CacheDecrypt { CacheDecrypt() {} - CacheDecrypt(CacheDecryptStr fn) : m_decrypt_str(fn) {} + CacheDecrypt(CacheDecryptStr fn) : m_decrypt_str(std::move(fn)) {} - CacheDecrypt(CacheDecryptChar fn) : m_decrypt_char(fn) {} + CacheDecrypt(CacheDecryptChar fn) : m_decrypt_char(std::move(fn)) {} ~CacheDecrypt() {} diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index 2a85edb7701ee5..722230484b6c13 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -33,7 +33,7 @@ namespace ov { namespace intel_cpu { namespace { -size_t replace_all(std::string& inout, std::string what, std::string with) { +size_t replace_all(std::string& inout, const std::string& what, const std::string& with) { std::size_t count{}; for (std::string::size_type pos{}; inout.npos != (pos = inout.find(what.data(), pos, what.length())); pos += with.length(), ++count) { diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.h b/src/plugins/intel_cpu/src/utils/debug_capabilities.h index fa06c64992133f..4165f71885b2b9 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.h +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.h @@ -14,6 +14,7 @@ # include # include # include +# include # include "edge.h" # include "nodes/node_config.h" @@ -60,8 +61,8 @@ class PrintableModel { public: PrintableModel(const ov::Model& model, std::string tag = "", std::string prefix = "") : model(model), - tag(tag), - prefix(prefix) {} + tag(std::move(tag)), + 
prefix(std::move(prefix)) {} const ov::Model& model; const std::string tag; const std::string prefix; @@ -86,9 +87,7 @@ struct PrintableDelta { class PrintableTimer { public: - PrintableTimer() : t0(std::chrono::high_resolution_clock::now()) { - t1 = t0; - } + PrintableTimer() : t0(std::chrono::high_resolution_clock::now()), t1(t0) {} std::chrono::high_resolution_clock::time_point t0; std::chrono::high_resolution_clock::time_point t1; @@ -208,9 +207,9 @@ struct EnforceInferPrcDebug { int count_limit = atoi(safe_getenv("OV_CPU_INFER_PRC_CNT", "9999999").c_str()); int count = 0; - EnforceInferPrcDebug() { - str_pos_pattern = std::getenv("OV_CPU_INFER_PRC_POS_PATTERN"); - str_neg_pattern = std::getenv("OV_CPU_INFER_PRC_NEG_PATTERN"); + EnforceInferPrcDebug() + : str_pos_pattern(std::getenv("OV_CPU_INFER_PRC_POS_PATTERN")), + str_neg_pattern(std::getenv("OV_CPU_INFER_PRC_NEG_PATTERN")) { if (str_pos_pattern || str_neg_pattern) { pattern_verbose = true; } else { @@ -245,7 +244,7 @@ struct EnforceInferPrcDebug { } } - bool enabled(std::string type, std::string name, std::string org_names) { + bool enabled(const std::string& type, const std::string& name, const std::string& org_names) { std::string tag = type + "@" + org_names; std::smatch match; bool matched = true; diff --git a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp index 7507d4234a6b6e..f35c7bb8b60711 100644 --- a/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp +++ b/src/plugins/intel_cpu/src/utils/ngraph_utils.hpp @@ -12,7 +12,7 @@ namespace ov { namespace intel_cpu { -inline std::string getRTInfoValue(const std::map& rtInfo, std::string paramName) { +inline std::string getRTInfoValue(const std::map& rtInfo, const std::string& paramName) { auto it = rtInfo.find(paramName); if (it != rtInfo.end()) { return it->second.as(); diff --git a/src/plugins/intel_cpu/src/utils/node_dumper.cpp b/src/plugins/intel_cpu/src/utils/node_dumper.cpp index 
74eb472cd1c7e0..e5c2326ff95c76 100644 --- a/src/plugins/intel_cpu/src/utils/node_dumper.cpp +++ b/src/plugins/intel_cpu/src/utils/node_dumper.cpp @@ -113,11 +113,20 @@ static void dumpInternalBlobs(const NodePtr& node, const DebugCapsConfig& config } } +static std::string createDumpFilePath(const std::string& blobDumpDir, const std::string& fileName, int execIndex) { + auto execIndexStr = std::to_string(execIndex); + std::string dump_file; + dump_file.reserve(blobDumpDir.size() + execIndexStr.size() + fileName.size() + 4); + + dump_file.append(blobDumpDir).append("/#").append(execIndexStr).append("_").append(fileName); + + return dump_file; +} + void dumpInputBlobs(const NodePtr& node, const DebugCapsConfig& config, int count) { if (!shouldBeDumped(node, config, "IN")) return; - auto exec_order = std::to_string(node->getExecIndex()); std::string nodeName = node->getName(); formatNodeName(nodeName); @@ -133,7 +142,8 @@ void dumpInputBlobs(const NodePtr& node, const DebugCapsConfig& config, int coun if (file_name.size() > 240) file_name = file_name.substr(file_name.size() - 240); - auto dump_file = config.blobDumpDir + "/#" + exec_order + "_" + file_name; + std::string dump_file = createDumpFilePath(config.blobDumpDir, file_name, node->getExecIndex()); + std::cout << "Dump inputs: " << dump_file << std::endl; auto& desc = prEdge->getMemory().getDesc(); @@ -151,7 +161,6 @@ void dumpOutputBlobs(const NodePtr& node, const DebugCapsConfig& config, int cou if (!shouldBeDumped(node, config, "OUT")) return; - auto exec_order = std::to_string(node->getExecIndex()); std::string nodeName = node->getName(); formatNodeName(nodeName); @@ -166,7 +175,8 @@ void dumpOutputBlobs(const NodePtr& node, const DebugCapsConfig& config, int cou if (file_name.size() > 240) file_name = file_name.substr(file_name.size() - 240); - auto dump_file = config.blobDumpDir + "/#" + exec_order + "_" + file_name; + std::string dump_file = createDumpFilePath(config.blobDumpDir, file_name, 
node->getExecIndex()); + std::cout << "Dump outputs: " << dump_file << std::endl; auto& desc = childEdge->getMemory().getDesc(); diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index 81fac7a1b0ca0f..a27f29c0ab0e1b 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #ifdef _WIN32 @@ -131,7 +132,7 @@ struct PlainTensor { return strides; } - PlainTensor(MemoryPtr mem) { + PlainTensor(const MemoryPtr& mem) { reset(mem); } @@ -149,7 +150,7 @@ struct PlainTensor { return *this; } - void reset(MemoryPtr mem) { + void reset(const MemoryPtr& mem) { auto mem_desc = mem->getDescWithType(); // not support block layout OPENVINO_ASSERT(mem_desc && mem_desc->getOrder().size() == mem->getStaticDims().size()); @@ -177,11 +178,7 @@ struct PlainTensor { int step; int count; // select all - tensor_index() { - start = 0; - end = INT_MAX; - step = 1; - } + tensor_index() : start(0), end(INT_MAX), step(1) {} bool slice_with_squeeze() { return end == INT_MIN; } diff --git a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp index 080ebeb11d0c40..6ed7605f53323a 100644 --- a/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp +++ b/src/plugins/intel_cpu/src/utils/rt_info/memory_formats_attribute.hpp @@ -6,6 +6,7 @@ #include #include +#include #include "openvino/core/node.hpp" #include "openvino/op/util/op_types.hpp" @@ -23,7 +24,7 @@ class MemoryFormats : public ov::RuntimeAttribute { public: MemoryFormats() = default; - explicit MemoryFormats(const std::string& _memory_format) : memory_format(_memory_format) {} + explicit MemoryFormats(std::string _memory_format) : memory_format(std::move(_memory_format)) {} std::string to_string() const override { return memory_format; }; diff --git 
a/src/plugins/intel_cpu/src/utils/serialize.cpp b/src/plugins/intel_cpu/src/utils/serialize.cpp index e577573f49ead9..177cc817b8b3ab 100644 --- a/src/plugins/intel_cpu/src/utils/serialize.cpp +++ b/src/plugins/intel_cpu/src/utils/serialize.cpp @@ -4,6 +4,8 @@ #include "serialize.hpp" +#include + #include "openvino/core/descriptor_tensor.hpp" #include "openvino/core/parallel.hpp" #include "openvino/runtime/shared_buffer.hpp" @@ -39,7 +41,7 @@ ModelDeserializer::ModelDeserializer(std::istream& model_stream, : m_istream(model_stream), m_model_builder(std::move(fn)), m_decript_from_string(decript_from_string), - m_model_buffer(model_buffer) { + m_model_buffer(std::move(model_buffer)) { if (m_decript_from_string) { m_cache_decrypt.m_decrypt_str = decrypt_fn.m_decrypt_str; } else { diff --git a/src/plugins/intel_cpu/src/weights_cache.cpp b/src/plugins/intel_cpu/src/weights_cache.cpp index 63832c125c4d49..5c4caaeec257c0 100644 --- a/src/plugins/intel_cpu/src/weights_cache.cpp +++ b/src/plugins/intel_cpu/src/weights_cache.cpp @@ -5,6 +5,7 @@ #include "weights_cache.hpp" #include +#include #include "openvino/runtime/system_conf.hpp" @@ -12,11 +13,11 @@ namespace ov { namespace intel_cpu { WeightsSharing::SharedMemory::SharedMemory(std::unique_lock&& lock, - const MemoryInfo::Ptr& memory, + MemoryInfo::Ptr memory, MemoryPtr newPtr) : lock(std::move(lock)), - memory(memory), - newPtr(newPtr) {} + memory(std::move(memory)), + newPtr(std::move(newPtr)) {} WeightsSharing::SharedMemory::operator MemoryPtr() const { return memory->sharedMemory.lock(); @@ -31,7 +32,7 @@ void WeightsSharing::SharedMemory::valid(bool b) { } WeightsSharing::SharedMemory::Ptr WeightsSharing::findOrCreate(const std::string& key, - std::function create, + const std::function& create, bool valid) { MemoryInfo::Ptr ptr; MemoryPtr newPtr; diff --git a/src/plugins/intel_cpu/src/weights_cache.hpp b/src/plugins/intel_cpu/src/weights_cache.hpp index 4630bdc2132940..bb1d4e95ba80ec 100644 --- 
a/src/plugins/intel_cpu/src/weights_cache.hpp +++ b/src/plugins/intel_cpu/src/weights_cache.hpp @@ -32,7 +32,7 @@ class WeightsSharing { struct MemoryInfo { typedef std::shared_ptr Ptr; - MemoryInfo(MemoryPtr memoryPtr, bool valid) : sharedMemory(memoryPtr), valid(valid) {} + MemoryInfo(const MemoryPtr& memoryPtr, bool valid) : sharedMemory(memoryPtr), valid(valid) {} std::mutex guard; std::weak_ptr sharedMemory; @@ -46,7 +46,7 @@ class WeightsSharing { public: typedef std::shared_ptr Ptr; - SharedMemory(std::unique_lock&& lock, const MemoryInfo::Ptr& memory, MemoryPtr newPtr = nullptr); + SharedMemory(std::unique_lock&& lock, MemoryInfo::Ptr memory, MemoryPtr newPtr = nullptr); operator MemoryPtr() const; bool isValid() const; @@ -58,7 +58,9 @@ class WeightsSharing { MemoryPtr newPtr; }; - SharedMemory::Ptr findOrCreate(const std::string& key, std::function create, bool valid = true); + SharedMemory::Ptr findOrCreate(const std::string& key, + const std::function& create, + bool valid = true); SharedMemory::Ptr get(const std::string& key) const; diff --git a/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp b/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp index f459e866076a9c..2fca5933aa116c 100644 --- a/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp +++ b/src/plugins/intel_cpu/tests/unit/graph/dummy_node.hpp @@ -68,7 +68,7 @@ class DummyNode : public Node { }; bool isExecutable() const override {return m_is_executable;} - void execute(dnnl::stream strm) override {}; + void execute(const dnnl::stream& strm) override {}; bool created() const override {return true;} bool needPrepareParams() const override { From 7da364db460afda3147e1968fd072c08485dba14 Mon Sep 17 00:00:00 2001 From: "jag.Xu" Date: Fri, 17 Jan 2025 14:01:38 +0800 Subject: [PATCH 24/97] [GPU] fix memory conflict for multi iteration in loop. (#28487) Cause is shown in graph below. 
The black, green, and dotted lines denote the primitive dependency, memory buffer reuse, and memory conflict (at the second iteration) respectively. The body of the loop is compiled as a separate model and is not aware of the data reuse in the backedge across multiple iterations. Tickets: [CVS-158017](https://jira.devtools.intel.com/browse/CVS-158017) --- src/plugins/intel_gpu/src/graph/primitive_inst.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index 6e1af3f5429283..abfeabe2b6a149 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -2391,6 +2391,9 @@ memory::ptr primitive_inst::allocate_output(engine& _engine, if (_node.is_in_shape_of_subgraph()) reusable_across_network = false; + if (reusable_across_network && _node.get_program().is_body_program() && is_output_buffer && runtime_alloc) + reusable_across_network = false; + // For outputs, cpu prim we want to have lockable alloc type // Also if the successor of a node is an cpu, then memory needs to be lockable. bool is_cpu = _node.get_selected_impl() ? _node.get_selected_impl()->is_cpu() : From 7a157e98e0a4bc541d44358edf29297685c6f688 Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Fri, 17 Jan 2025 07:49:23 +0100 Subject: [PATCH 25/97] [TRANSFORMATIONS] SDPAToPagedAttention transformation: support decompression case in the Qwen-7b-Chat pattern (#28493) Qwen-7b-Chat has a decompression if the model is executed in lower precision resulting in the model having additional Converts (i.e. FP16 to FP32). Handle this case of optional Convert in PositionIDsReplacerQwen Added a unit test for it.
### Tickets: - *CVS-157308* Signed-off-by: Andrii Staikov Signed-off-by: Ivan Tikhonov --------- Co-authored-by: Ivan Tikhonov --- .../position_ids_replacer.cpp | 9 ++- .../sdpa_to_paged_attention_test.cpp | 68 ++++++++++++++----- 2 files changed, 59 insertions(+), 18 deletions(-) diff --git a/src/common/transformations/src/transformations/sdpa_to_paged_attention/position_ids_replacer.cpp b/src/common/transformations/src/transformations/sdpa_to_paged_attention/position_ids_replacer.cpp index 452d2757d29a85..31d5e87dfae2a6 100644 --- a/src/common/transformations/src/transformations/sdpa_to_paged_attention/position_ids_replacer.cpp +++ b/src/common/transformations/src/transformations/sdpa_to_paged_attention/position_ids_replacer.cpp @@ -72,8 +72,15 @@ ov::pass::PositionIDsReplacerQwen::PositionIDsReplacerQwen(const Output& p auto p_neg_const = wrap_type(); auto p_neg_mul = wrap_type({p_current_len, p_neg_const}); + + // For now, it has always been a constant, but this may change in the future. + // In case of model being in FP16, there will be a decompressing subgraph: + // i.e. Constant -> Convert -> Slice + // + // Also, it hasn't been observed yet, but, theoretically, there can also be a + // dequantizing subgraph, so it's going to be any_input() here. 
+ auto p_rotary_emb_sincos = pattern::any_input(); // the rotary_emb_cos/rotary_emb_sin are sliced by the total length [1,..4096,1,128] - auto p_rotary_emb_sincos = wrap_type(); auto p_slice_1 = wrap_type({p_rotary_emb_sincos, _const(), p_opt_reshape, _const(), _const()}); auto p_slice_2 = wrap_type({p_slice_1, p_neg_mul, _const(), _const(), _const()}); diff --git a/src/common/transformations/tests/op_conversions/sdpa_to_paged_attention_test.cpp b/src/common/transformations/tests/op_conversions/sdpa_to_paged_attention_test.cpp index c703b84429805a..3268be21e6dad2 100644 --- a/src/common/transformations/tests/op_conversions/sdpa_to_paged_attention_test.cpp +++ b/src/common/transformations/tests/op_conversions/sdpa_to_paged_attention_test.cpp @@ -29,7 +29,6 @@ #include "openvino/op/subtract.hpp" #include "openvino/op/transpose.hpp" #include "openvino/op/unsqueeze.hpp" -#include "openvino/pass/visualize_tree.hpp" #include "transformations/sdpa_to_paged_attention/prev_sequence_length_pattern.hpp" #include "transformations/sdpa_to_paged_attention/state_management_pattern.hpp" #include "transformations/sdpa_to_paged_attention/total_sequence_length_pattern.hpp" @@ -186,8 +185,12 @@ class Qwen7bChatSDPA { static std::shared_ptr gen_rope_emb_sin(const std::shared_ptr& total_seq_len, const std::shared_ptr& neg_mul, - std::shared_ptr& head_size) { - auto sin = makeConst(element::f32, {1, 4096, 1, 128}, MOCK_VALUE); + std::shared_ptr& head_size, + element::Type model_precision) { + auto sin = makeConst(model_precision, {1, 4096, 1, 128}, MOCK_VALUE); + if (model_precision != element::f32) { + sin = makeOP({sin}, {dest_type_f32}); + } auto sliced_sin_by_total = makeOP({sin, {0}, total_seq_len, {1}, {1}}); auto rotary_emb_sin_shape = makeOP({sliced_sin_by_total}, {{"output_type", "i64"}}); head_size = makeOP({rotary_emb_sin_shape, {3}, 0}, {{"batch_dims", 0}}); @@ -195,8 +198,12 @@ class Qwen7bChatSDPA { } static std::shared_ptr gen_rope_emb_cos(const std::shared_ptr& 
total_seq_len, - const std::shared_ptr& neg_mul) { - auto cos = makeConst(element::f32, {1, 4096, 1, 128}, MOCK_VALUE); + const std::shared_ptr& neg_mul, + element::Type model_precision) { + auto cos = makeConst(model_precision, {1, 4096, 1, 128}, MOCK_VALUE); + if (model_precision != element::f32) { + cos = makeOP({cos}, {dest_type_f32}); + } auto sliced_cos_by_total = makeOP({cos, {0}, total_seq_len, {1}, {1}}); return makeOP({sliced_cos_by_total, neg_mul, {LLONG_MAX}, {1}, {1}}); } @@ -343,8 +350,12 @@ class Qwen7bChatPA { static std::shared_ptr gen_rope_emb_sin(const std::shared_ptr& max_context_len, const std::shared_ptr& position_ids, - std::shared_ptr& head_size) { - auto sin = makeConst(element::f32, {1, 4096, 1, 128}, MOCK_VALUE); + std::shared_ptr& head_size, + element::Type model_precision) { + auto sin = makeConst(model_precision, {1, 4096, 1, 128}, MOCK_VALUE); + if (model_precision != element::f32) { + sin = makeOP({sin}, {dest_type_f32}); + } auto slice_sin = makeOP({sin, position_ids, 1}, {{"batch_dims", 0}}); auto slice = makeOP({sin, {0}, max_context_len, {1}, {1}}); @@ -355,8 +366,12 @@ class Qwen7bChatPA { } static std::shared_ptr gen_rope_emb_cos(const std::shared_ptr& max_context_len, - const std::shared_ptr& position_ids) { - auto cos = makeConst(element::f32, {1, 4096, 1, 128}, MOCK_VALUE); + const std::shared_ptr& position_ids, + element::Type model_precision) { + auto cos = makeConst(model_precision, {1, 4096, 1, 128}, MOCK_VALUE); + if (model_precision != element::f32) { + cos = makeOP({cos}, {dest_type_f32}); + } auto slice = makeOP({cos, position_ids, 1}, {{"batch_dims", 0}}); return makeOP({slice, {-1, 1, 1, 128}}, {{"special_zero", false}}); } @@ -425,7 +440,10 @@ class Qwen7bChatPA { } // namespace -TEST_F(TransformationTestsF, SDPAToPA_Qwen) { +class SDPAToPATest : public TransformationTestsF, public ::testing::WithParamInterface {}; + +TEST_P(SDPAToPATest, SDPAToPA_Qwen7bChat_General) { + const auto model_precision = GetParam(); { 
// Inputs to SDPA transformer: auto beam_idx = makeOP({}, {{"shape", PartialShape{DYN}}, el_type_i64}); @@ -455,8 +473,9 @@ TEST_F(TransformationTestsF, SDPAToPA_Qwen) { // RoPE emb sin/cos init: auto neg_cur_seq_len = Qwen7bChatSDPA::neg_mul(current_seq_len); auto head_size = shared_ptr(); - auto rope_emb_sin = Qwen7bChatSDPA::gen_rope_emb_sin(total_seq_len, neg_cur_seq_len, head_size); - auto rope_emb_cos = Qwen7bChatSDPA::gen_rope_emb_cos(total_seq_len, neg_cur_seq_len); + auto rope_emb_sin = + Qwen7bChatSDPA::gen_rope_emb_sin(total_seq_len, neg_cur_seq_len, head_size, model_precision); + auto rope_emb_cos = Qwen7bChatSDPA::gen_rope_emb_cos(total_seq_len, neg_cur_seq_len, model_precision); // RoPE for Q,K inputs: auto rope_q = Qwen7bChatSDPA::gen_rope(QKV::Q, qkv_proj, head_size, rope_emb_sin, rope_emb_cos); @@ -515,8 +534,10 @@ TEST_F(TransformationTestsF, SDPAToPA_Qwen) { // RoPE emb sin/cos init: auto head_size = shared_ptr(); - auto rope_emb_sin = Qwen7bChatPA::gen_rope_emb_sin(max_context_len_aligned, position_ids_aligned, head_size); - auto rope_emb_cos = Qwen7bChatPA::gen_rope_emb_cos(max_context_len_aligned, position_ids_aligned); + auto rope_emb_sin = + Qwen7bChatPA::gen_rope_emb_sin(max_context_len_aligned, position_ids_aligned, head_size, model_precision); + auto rope_emb_cos = + Qwen7bChatPA::gen_rope_emb_cos(max_context_len_aligned, position_ids_aligned, model_precision); // rope Q, K: auto rope_Q = Qwen7bChatPA::gen_rope(QKV::Q, qkv_proj, head_size, rope_emb_sin, rope_emb_cos); @@ -564,7 +585,7 @@ TEST_F(TransformationTestsF, SDPAToPA_Qwen) { disable_rt_info_check(); } -TEST_F(TransformationTestsF, SDPAToPA_TotalSequenceLengthPatternQwen) { +TEST_P(SDPAToPATest, SDPAToPA_Qwen7bChat_TotalSequenceLengthPattern) { { // Inputs to SDPA transformer: auto beam_idx = makeOP({}, {{"shape", PartialShape{DYN}}, el_type_i64}); @@ -632,7 +653,7 @@ static std::shared_ptr make_param(const PartialShape& pshape, // TODO: write a test for StateManagementPattern only 
(because changes for Alibi are inside it) // TODO: align precisions, check the copying of "fuse_names" attr in SDPAToPagedAttention // checking the graph structure and names, other checks are temporarily disabled: -TEST_F(TransformationTestsF, SDPAToPA_Baichuan2_13b_general_test) { +TEST_P(SDPAToPATest, SDPAToPA_Baichuan2_13b_General) { { auto beam_idx = make_param(PartialShape{DYN}, element::i32, "beam_idx"); auto position_ids = make_param(PartialShape{DYN, DYN}, element::i64, "position_ids"); @@ -881,4 +902,17 @@ TEST_F(TransformationTestsF, SDPAToPA_Baichuan2_13b_general_test) { disable_result_friendly_names_check(); disable_rt_info_check(); } -} \ No newline at end of file +} + +/* +As there's often a need to cover specific model's architecutres in these +tests, please, make sure you name the tests in the following manner: +SDPAToPA_MODELNAME_PATTERNYOUCOVER: +i.e. SDPAToPA_Qwen7bChat_TotalSequenceLengthPattern or +SDPAToPA_Baichuan2_13b_General if this is a test for the +entire SDPAToPA transformation +*/ + +const std::vector element_types = {element::f16, element::f32}; + +INSTANTIATE_TEST_SUITE_P(SDPAToPATest_Conversion, SDPAToPATest, testing::ValuesIn(element_types)); From 81e45017aa1e11f1f38e1ee99f747564fd572a65 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Fri, 17 Jan 2025 08:55:16 +0000 Subject: [PATCH 26/97] Protopipe: Fix warnings in main.cpp (#28409) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- src/plugins/intel_npu/tools/protopipe/main.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_npu/tools/protopipe/main.cpp b/src/plugins/intel_npu/tools/protopipe/main.cpp index 8596ba864335ca..23840a9852a41a 100644 --- a/src/plugins/intel_npu/tools/protopipe/main.cpp +++ b/src/plugins/intel_npu/tools/protopipe/main.cpp @@ -90,7 +90,7 @@ static ICompiled::Ptr compileSimulation(Simulation::Ptr simulation, const bool p return simulation->compilePipelined(drop_frames); } return 
simulation->compileSync(drop_frames); -}; +} class ThreadRunner { public: @@ -112,8 +112,8 @@ void ThreadRunner::run() { } for (auto& future : futures) { future.get(); - }; -}; + } +} class Task { public: From 9dd5fdf075aba1905babadfd6dbcbedea861a324 Mon Sep 17 00:00:00 2001 From: Sungeun Kim Date: Fri, 17 Jan 2025 18:03:41 +0900 Subject: [PATCH 27/97] [GPU] integrate onednn rls-v3.7(706a3ce3b3) to master (#28499) --- src/plugins/intel_gpu/thirdparty/onednn_gpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/thirdparty/onednn_gpu b/src/plugins/intel_gpu/thirdparty/onednn_gpu index d989ded8c51582..706a3ce3b391cf 160000 --- a/src/plugins/intel_gpu/thirdparty/onednn_gpu +++ b/src/plugins/intel_gpu/thirdparty/onednn_gpu @@ -1 +1 @@ -Subproject commit d989ded8c5158200dd2ccb602f53aeba92a64413 +Subproject commit 706a3ce3b391cf1d8a904a8efa981c70078719eb From 9949164cd479a1f18e7856d5e65a56ded9352138 Mon Sep 17 00:00:00 2001 From: Emmanuel Ferdman Date: Fri, 17 Jan 2025 11:32:29 +0200 Subject: [PATCH 28/97] Update Intel GPU resources references (#27994) # PR Summary Small PR - Commits 1e878b6a0174eb38d827567485668e80f5460ec0 and c6ec6d457f72f4c8986272b1b8822c6a16c0aa83 moved bunch of Intel GPU resources. This PR adjusts sources to changes. 
Signed-off-by: Emmanuel Ferdman --- src/plugins/intel_gpu/docs/gpu_debug_utils.md | 2 +- src/plugins/intel_gpu/docs/gpu_plugin_unit_test.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/plugins/intel_gpu/docs/gpu_debug_utils.md b/src/plugins/intel_gpu/docs/gpu_debug_utils.md index fb45c3da84c394..0708d9dd3557b7 100644 --- a/src/plugins/intel_gpu/docs/gpu_debug_utils.md +++ b/src/plugins/intel_gpu/docs/gpu_debug_utils.md @@ -268,7 +268,7 @@ Each file contains a single buffer in a common planar format (`bfyx`, `bfzyx`, o shape: [b:1, f:1280, x:1, y:1, z:1, w:1, g:1] (count: 1280, original format: b_fs_yx_fsv16) ``` -For troubleshooting the accuracy, you may want to compare the results of GPU plugin and CPU plugin. For CPU dump, see [Blob dumping](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/src/docs/blob_dumping.md) +For troubleshooting the accuracy, you may want to compare the results of GPU plugin and CPU plugin. For CPU dump, see [Blob dumping](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu/docs/debug_capabilities/blob_dumping.md) ## Run int8 model on Gen9 HW diff --git a/src/plugins/intel_gpu/docs/gpu_plugin_unit_test.md b/src/plugins/intel_gpu/docs/gpu_plugin_unit_test.md index dcc27f3577ba7f..cf7a80cf712f11 100644 --- a/src/plugins/intel_gpu/docs/gpu_plugin_unit_test.md +++ b/src/plugins/intel_gpu/docs/gpu_plugin_unit_test.md @@ -23,8 +23,8 @@ openvino/src/plugins/intel_gpu/tests - root of Intel GPU unit test - Fusion is an algorithm that fuses several operations into one optimized operation. For example, two nodes of `conv -> relu` may be fused into a single node of `conv`. - Fusion unit tests checks whether the fusion is done as expected. 
- fusion_test_common.cpp - - The base class for a fusing test, that is, [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/fusions/fusion_test_common.hpp#L19), is implemented here. It tests whether the fusing is successful or not by comparing the execution results of the two networks, one is the fused network, the other is non-fused network for the same topology. - - [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/fusions/fusion_test_common.hpp#L19) has an important method called `compare()`. + - The base class for a fusing test, that is, [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp#L20), is implemented here. It tests whether the fusing is successful or not by comparing the execution results of the two networks, one is the fused network, the other is non-fused network for the same topology. + - [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp#L20) has an important method called `compare()`. - `compare()` method has the following three tasks: - Execute two networks (fused network and not fused network) - Compare the actual number of executed primitives with the expected number of executed primitives in test params @@ -138,9 +138,9 @@ GPU unit tests are using two types of test macros (**TEST** and **TEST_P**) in - **TEST** checks the test result by comparing the execution results with expected values after running network created from the target topology to check. 
- It is important to generate test input and expected output result in **TEST** - You can create input data and expected output data using these three approaches: - - Generate simple input data and calculate the expected output data from input data manually, like [basic_deformable_convolution_def_group1_2](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp#L254) - - Generate random input and get the expected output, using reference function, which is made in the test codes like [mvn_test_across_channels_outside_sqrt_bfyx](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/test_cases/mvn_gpu_test.cpp#L108) - - Generate random input and get the expected output from another reference kernel which exists in clDNN kernels like [mvn_random_test_bsv32](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/test_cases/mvn_gpu_test.cpp#L793) + - Generate simple input data and calculate the expected output data from input data manually, like [basic_deformable_convolution_def_group1_2](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp#L224) + - Generate random input and get the expected output, using reference function, which is made in the test codes like [mvn_test_across_channels_outside_sqrt_bfyx](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp#L138) + - Generate random input and get the expected output from another reference kernel which exists in clDNN kernels like [mvn_random_test_bsv32](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/test_cases/mvn_gpu_test.cpp#L762) - When you allocate input data, keep in mind that the layout order in `engine.allocation_memory` is not `bfyx` but `bfxy`. 
For example, if input is `{1,1,4,5}`, the layout should be as below: @@ -151,7 +151,7 @@ GPU unit tests are using two types of test macros (**TEST** and **TEST_P**) in ## fusions - It is implemented based on **TEST_P** because there are many cases where multiple layouts are tested in the same topology. -- If the fusing test class already exists, you can use it. Otherwise, you should make a new fusing test class, which is inherited [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/fusions/fusion_test_common.hpp#L19). +- If the fusing test class already exists, you can use it. Otherwise, you should make a new fusing test class, which is inherited [BaseFusingTest](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu/tests/unit/fusions/fusion_test_common.hpp#L20). - The new fusing test class should create the `execute()` method, which creates fused / non-fused networks and calls `compare` method after setting input. - Create a test case, using **TEST_P**: - You can make the desired networks using create_topologies. From 87887f5af6109b5b58501f6295ec55c2b88185ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 10:50:10 +0000 Subject: [PATCH 29/97] Bump pyyaml from 6.0.1 to 6.0.2 in /tests (#28509) Bumps [pyyaml](https://github.com/yaml/pyyaml) from 6.0.1 to 6.0.2.
Release notes

Sourced from pyyaml's releases.

6.0.2

What's Changed

  • Support for Cython 3.x and Python 3.13.

Full Changelog: https://github.com/yaml/pyyaml/compare/6.0.1...6.0.2

6.0.2rc1

  • Support for extension build with Cython 3.x
  • Support for Python 3.13
  • Added PyPI wheels for musllinux on aarch64
Changelog

Sourced from pyyaml's changelog.

6.0.2 (2024-08-06)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyyaml&package-manager=pip&previous-version=6.0.1&new-version=6.0.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/e2e_tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e_tests/requirements.txt b/tests/e2e_tests/requirements.txt index a2056071e5417e..934a5bcbc90888 100644 --- a/tests/e2e_tests/requirements.txt +++ b/tests/e2e_tests/requirements.txt @@ -32,7 +32,7 @@ pytest-timeout==2.3.1 # for common utils, e2e_tests openvino-dev distro==1.9.0 -pyyaml==6.0.1 +pyyaml==6.0.2 jsonschema==4.22.0 # filelock==3.9.0 omegaconf>=2.1,<2.4 From 93b2567455e5a271a96862346f0b2f5fc03bbd79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 10:54:53 +0000 Subject: [PATCH 30/97] Bump kornia from 0.7.0 to 0.8.0 in /tests (#28403) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [kornia](https://github.com/kornia/kornia) from 0.7.0 to 0.8.0.
Release notes

Sourced from kornia's releases.

v0.8.0

What's Changed

New Contributors

Full Changelog: https://github.com/kornia/kornia/compare/v0.7.4...v0.8.0

v0.7.4

What's Changed

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=kornia&package-manager=pip&previous-version=0.7.0&new-version=0.8.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ilya Lavrenov --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index e77b48a9e38662..eea8f81e6571c7 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -20,7 +20,7 @@ pytest>=5.0,<8.4 pytest-dependency==0.5.1 pytest-html==4.1.1 pytest-timeout==2.3.1 -kornia==0.7.0 +kornia==0.8.0 --extra-index-url https://download.pytorch.org/whl/cpu torch~=2.5.1; platform_system != "Darwin" or platform_machine != "x86_64" From c64aa945d4219d46090f01cadef506f77b97299b Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Fri, 17 Jan 2025 12:58:38 +0100 Subject: [PATCH 31/97] Update transformers version to 4.47.1 (#28348) Update transformers version to 4.47.1 Update the version of the transformers module to support latest models in precommit testing like katuni4ka/tiny-random-nanollava Adjust reference values for other models that were affected by the changes in transformers. 
Signed-off-by: Andrii Staikov Ticket: * CVS-157416 --- .../models/hf-tiny-random-vl-models-precommit | 2 +- .../transformation_tests/sdpa2pa_ref_diff.py | 40 +++++++++---------- tests/requirements_pytorch | 10 ++--- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/tests/model_hub_tests/transformation_tests/models/hf-tiny-random-vl-models-precommit b/tests/model_hub_tests/transformation_tests/models/hf-tiny-random-vl-models-precommit index 327ba3f7e8e966..7cdd3fdb3527be 100644 --- a/tests/model_hub_tests/transformation_tests/models/hf-tiny-random-vl-models-precommit +++ b/tests/model_hub_tests/transformation_tests/models/hf-tiny-random-vl-models-precommit @@ -1,4 +1,4 @@ katuni4ka/tiny-random-llava-next,https://huggingface.co/katuni4ka/tiny-random-llava-next katuni4ka/tiny-random-minicpmv-2_6,https://huggingface.co/katuni4ka/tiny-random-minicpmv-2_6 katuni4ka/tiny-random-llava,https://huggingface.co/katuni4ka/tiny-random-llava -katuni4ka/tiny-random-nanollava,https://huggingface.co/katuni4ka/tiny-random-nanollava,xfail,CVS-157416 \ No newline at end of file +katuni4ka/tiny-random-nanollava,https://huggingface.co/katuni4ka/tiny-random-nanollava \ No newline at end of file diff --git a/tests/model_hub_tests/transformation_tests/sdpa2pa_ref_diff.py b/tests/model_hub_tests/transformation_tests/sdpa2pa_ref_diff.py index b7131aeb024293..118ba5bcd392e7 100644 --- a/tests/model_hub_tests/transformation_tests/sdpa2pa_ref_diff.py +++ b/tests/model_hub_tests/transformation_tests/sdpa2pa_ref_diff.py @@ -133,7 +133,7 @@ "hf-tiny-model-private/tiny-random-OPTForCausalLM" : { "Assign" : -10, "PagedAttentionExtension" : 5, - "Parameter" : 14, + "Parameter" : 13, "ReadValue" : -10, "ScaledDotProductAttention" : -5, }, @@ -273,14 +273,14 @@ "facebook/opt-125m" : { "Assign" : -24, "PagedAttentionExtension" : 12, - "Parameter" : 28, + "Parameter" : 27, "ReadValue" : -24, "ScaledDotProductAttention" : -12, }, "facebook/opt-350m" : { "Assign" : -48, 
"PagedAttentionExtension" : 24, - "Parameter" : 52, + "Parameter" : 51, "ReadValue" : -48, "ScaledDotProductAttention" : -24, }, @@ -319,13 +319,13 @@ "ReadValue" : -4, "ScaledDotProductAttention" : -2, }, - # "katuni4ka/tiny-random-nanollava" : { - # "Assign" : -4, - # "PagedAttentionExtension" : 2, - # "Parameter" : 7, - # "ReadValue" : -4, - # "ScaledDotProductAttention" : -2, - # }, + "katuni4ka/tiny-random-nanollava" : { + "Assign" : -4, + "Parameter" : 7, + "ReadValue" : -4, + "ScaledDotProductAttention" : -2, + "PagedAttentionExtension" : 2, + }, "hf-internal-testing/tiny-random-GPTNeoForCausalLM" : { "ScaledDotProductAttention" : -4, "ReadValue" : -8, @@ -465,7 +465,7 @@ "hf-tiny-model-private/tiny-random-OPTForCausalLM" : { "Assign" : -10, "PagedAttentionExtension" : 5, - "Parameter" : 29, + "Parameter" : 28, "ReadValue" : -10, "ScaledDotProductAttention" : -5, }, @@ -605,14 +605,14 @@ "facebook/opt-125m" : { "Assign" : -24, "PagedAttentionExtension" : 12, - "Parameter" : 64, + "Parameter" : 63, "ReadValue" : -24, "ScaledDotProductAttention" : -12, }, "facebook/opt-350m" : { "Assign" : -48, "PagedAttentionExtension" : 24, - "Parameter" : 124, + "Parameter" : 123, "ReadValue" : -48, "ScaledDotProductAttention" : -24, }, @@ -651,13 +651,13 @@ "ReadValue" : -4, "ScaledDotProductAttention" : -2, }, - # "katuni4ka/tiny-random-nanollava" : { - # "Assign" : -4, - # "PagedAttentionExtension" : 2, - # "Parameter" : 13, - # "ReadValue" : -4, - # "ScaledDotProductAttention" : -2, - # }, + "katuni4ka/tiny-random-nanollava" : { + "Assign" : -4, + "PagedAttentionExtension" : 2, + "Parameter" : 13, + "ReadValue" : -4, + "ScaledDotProductAttention" : -2, + }, "hf-internal-testing/tiny-random-GPTNeoForCausalLM" : { "ScaledDotProductAttention" : -4, diff --git a/tests/requirements_pytorch b/tests/requirements_pytorch index 33907145f7de4b..261e8c79c587c7 100644 --- a/tests/requirements_pytorch +++ b/tests/requirements_pytorch @@ -11,9 +11,8 @@ torchvision==0.20.1; 
platform_system != "Darwin" or platform_machine != "x86_64" torchvision==0.17.2; platform_system == "Darwin" and platform_machine == "x86_64" torchaudio==2.5.1; platform_system != "Darwin" or platform_machine != "x86_64" torchaudio==2.2.2; platform_system == "Darwin" and platform_machine == "x86_64" -# transformers 4.45.1 is available -# but optimum still requires <4.45.0 -transformers==4.44.2 +# before updating transformers version, make sure no tests (esp. sdpa2pa) are failing +transformers==4.47.1 pytest==7.0.1; python_version < '3.10' pytest==7.2.0; python_version >= '3.10' pytest-html==4.1.1 @@ -44,8 +43,9 @@ super-image==0.1.7 # huggingface-hub required for super-image huggingface-hub==0.25.2 -# use latest released version once it's available -git+https://github.com/huggingface/optimum-intel.git@5c735487d4bd3dd8d7dccb242d8d5988e7dd4069; python_version < "3.12" +# For now, we decided to pin a specific working version of optimum-intel. +# It will be discussed in the future how to manage versioning of the components properly. 
+git+https://github.com/huggingface/optimum-intel.git@190ae8737db68a826a86e48a709b41ae51d2e3ee; python_version < "3.12" # set 'export HF_HUB_ENABLE_HF_TRANSFER=1' to benefits from hf_transfer hf_transfer==0.1.8 From 33eda4b9f0792f62745d70b286a9c583a7993915 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Fri, 17 Jan 2025 13:29:34 +0100 Subject: [PATCH 32/97] [DOCS] Remove "experimental" note for torch.export (#28492) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* Signed-off-by: Maxim Vafin --- .../model-preparation/convert-model-pytorch.rst | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst b/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst index fc2637aba9139e..fa1b6b733bb548 100644 --- a/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst +++ b/docs/articles_en/openvino-workflow/model-preparation/convert-model-pytorch.rst @@ -179,7 +179,7 @@ It is recommended to address model outputs by the index rather then the name. Support for torch.export ######################## -`torch.export `__ is the current way to get a graph +`torch.export `__ is the current way to get a graph representation of a model (since PyTorch 2.1). It produces ``ExportedProgram`` which includes the graph representation in the FX format. To see why it has an advantage over the TorchScript representation, refer to `PyTorch documentation `__. @@ -198,11 +198,6 @@ Here is an example of how to convert a model obtained with ``torch.export``: exported_model = export(model, (torch.randn(1, 3, 224, 224),)) ov_model = convert_model(exported_model) -.. note:: - - This is an experimental feature. Use it only if you know that you need to. PyTorch version 2.2 - is recommended. Dynamic shapes are not supported yet. 
- Converting a PyTorch Model from Disk #################################### From 8d162097a68de924d19e5b4f532bbc4b32766b56 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 17:01:59 +0400 Subject: [PATCH 33/97] Update protobuf requirement from <4.0.0,>=3.18.1 to >=3.18.1,<6.0.0 in /tests (#28405) Updates the requirements on [protobuf](https://github.com/protocolbuffers/protobuf) to permit the latest version.
Commits
  • b407e84 Updating version.json and repo version numbers to: 29.3
  • 9a5d2c3 Add .bazeliskrc for protobuf repo to tell bazelisk to use 7.1.2 by default. (...
  • 1dc5842 Fix cmake installation location of java and go features (#19773)
  • 8e7e6b0 Update artifact actions to v4 (#19703)
  • cbdc8ab Merge pull request #19719 from protocolbuffers/29.x-202412181411
  • 5621748 Updating version.json and repo version numbers to: 29.3-dev
  • 2330983 Updating version.json and repo version numbers to: 29.2
  • 1772657 Automated rollback of commit 23aada230b2478c7a07fe7612489eb8e79b9c379. (#19692)
  • 8b9d76c Export environment variables so bazelisk picks them up (#19690)
  • a1c9b6a Pin staleness check to Bazel 7 (#19689)
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ilya Lavrenov --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index eea8f81e6571c7..30ba701095ecf4 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -14,7 +14,7 @@ tensorflow>=2.5,<2.19.0 requests>=2.25.1 opencv-python>=4.5 paddlepaddle==2.6.1 -protobuf>=3.18.1,<4.0.0 +protobuf>=3.18.1,<6.0.0 py>=1.9.0 pytest>=5.0,<8.4 pytest-dependency==0.5.1 From 58766e7c7606b38767710429daf8fb11e147e55b Mon Sep 17 00:00:00 2001 From: Alexandra Sidorova Date: Fri, 17 Jan 2025 18:04:03 +0400 Subject: [PATCH 34/97] [Snippets] Implemented SetDynamicWAToOuterMostLoop pass (#28505) ### Details: - *Dynamic MHA Subgraphs may have only dynamic batch. Then the pass `MHAParallelWAOptimizer` cannot be applied to this subgraph to increase parallel work amount since outermost Loop By M in MHA has static work amount. Then Subgraph may be inefficiently executed. 
This PR implements the pass `SetDynamicWAToOuterMostLoop`, which sets a dynamic work amount on the outermost Loop in dynamic MHA to make `MHAParallelWAOptimizer` applicable at runtime.* ### Tickets: - *160647* --- .../pass/mha_parallel_wa_optimizer.hpp | 9 ++- .../pass/set_dynamic_wa_to_outermost_loop.hpp | 30 ++++++++ .../pass/mha_parallel_wa_optimizer.cpp | 8 +- .../pass/set_dynamic_wa_to_outermost_loop.cpp | 73 +++++++++++++++++++ src/common/snippets/src/op/subgraph.cpp | 2 + .../snippets/mha_wo_transpose.cpp | 5 ++ 6 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp create mode 100644 src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp diff --git a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp index 2f42a523ec4eac..7a49f5942f1db2 100644 --- a/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp +++ b/src/common/snippets/include/snippets/lowered/pass/mha_parallel_wa_optimizer.hpp @@ -12,6 +12,8 @@ namespace ov { namespace snippets { namespace lowered { namespace pass { + +class SetDynamicWAToOuterMostLoop; /** * @class MHAParallelWAOptimizer * @brief Optimizes the dynamic MHA execution increasing parallel work amount dy dividing Brgemm's "M" dimension to "parallel_m" @@ -22,6 +24,7 @@ namespace pass { * - Determines loops that should be adjusted. 
*/ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer { + friend class SetDynamicWAToOuterMostLoop; public: OPENVINO_RTTI("MHAParallelWAOptimizer", "", RuntimeOptimizer) MHAParallelWAOptimizer() = default; @@ -31,10 +34,14 @@ class MHAParallelWAOptimizer : public lowered::pass::RuntimeOptimizer { bool applicable() const override { return !m_loops_to_split.empty(); } private: - static std::unordered_set find_applicable_brgemms(const lowered::LinearIRCPtr& linear_ir); + static std::unordered_set find_applicable_brgemms( + const lowered::LinearIRCPtr& linear_ir, + bool check_dynamic_wa = true); + static std::unordered_set find_unsqueezed_params( const lowered::LinearIRCPtr& linear_ir, const std::unordered_set& brgemms); + static std::vector find_loops_to_split( const lowered::LinearIRCPtr& linear_ir, const std::unordered_set& unsqueezed_params); diff --git a/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp b/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp new file mode 100644 index 00000000000000..6daeb97ec8c566 --- /dev/null +++ b/src/common/snippets/include/snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp @@ -0,0 +1,30 @@ +// Copyright (C) 2023-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "pass.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +/** + * @interface SetDynamicWAToOuterMostLoop + * @brief The pass set dynamic work amount to outermost Loop by M in dynamic MHA Subgraphs + * to allow MHAParallelWAOptimizer optimizes parallel work amount in runtime. 
+ * @ingroup snippets + */ +class SetDynamicWAToOuterMostLoop : public Pass { +public: + OPENVINO_RTTI("SetDynamicWAToOuterMostLoop", "", Pass); + SetDynamicWAToOuterMostLoop() = default; + bool run(LinearIR& linear_ir) override; +}; + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov diff --git a/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp b/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp index c75d1e86abbfa5..bb01346f4eff7d 100644 --- a/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp +++ b/src/common/snippets/src/lowered/pass/mha_parallel_wa_optimizer.cpp @@ -85,7 +85,9 @@ bool MHAParallelWAOptimizer::run(const lowered::LinearIR& linear_ir) { return true; } -std::unordered_set MHAParallelWAOptimizer::find_applicable_brgemms(const lowered::LinearIRCPtr& linear_ir) { +std::unordered_set MHAParallelWAOptimizer::find_applicable_brgemms( + const lowered::LinearIRCPtr& linear_ir, + bool check_dynamic_wa) { auto is_brgemm = [](const lowered::ExpressionPtr& expr) { return ov::is_type(expr->get_node()); }; @@ -96,12 +98,12 @@ std::unordered_set MHAParallelWAOptimizer::find_applicab brgemm_it = std::find_if(std::next(brgemm_it), linear_ir->end(), is_brgemm); } const auto& loop_manager = linear_ir->get_loop_manager(); - auto applicable_brgemm = [&loop_manager](const lowered::ExpressionPtr& expr) { + auto applicable_brgemm = [&loop_manager, check_dynamic_wa](const lowered::ExpressionPtr& expr) { const auto& loop_idces = expr->get_loop_ids(); if (loop_idces.empty()) return false; const auto& outermost_loop = loop_manager->get_loop_info(loop_idces[0]); - if (!snippets::utils::is_dynamic_value(outermost_loop->get_work_amount())) + if (check_dynamic_wa && !snippets::utils::is_dynamic_value(outermost_loop->get_work_amount())) return false; bool loop_by_m = true; outermost_loop->iterate_through_ports([&loop_by_m](const lowered::LoopPort& port) { diff --git 
a/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp b/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp new file mode 100644 index 00000000000000..8a5db80f577aee --- /dev/null +++ b/src/common/snippets/src/lowered/pass/set_dynamic_wa_to_outermost_loop.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2023-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp" + +#include "snippets/lowered/pass/mha_parallel_wa_optimizer.hpp" +#include "snippets/itt.hpp" +#include "snippets/lowered/linear_ir.hpp" +#include "snippets/lowered/loop_manager.hpp" +#include "snippets/op/brgemm.hpp" +#include "snippets/utils/loop_utils.hpp" + +namespace ov { +namespace snippets { +namespace lowered { +namespace pass { + +bool SetDynamicWAToOuterMostLoop::run(LinearIR& linear_ir) { + OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "Snippets::SetDynamicWAToOuterMostLoop") + if (linear_ir.empty() || !linear_ir.is_dynamic() || linear_ir.get_config().m_enable_domain_optimization) + return false; + + const auto linear_ir_ptr = std::make_shared(linear_ir); + const auto brgemms = MHAParallelWAOptimizer::find_applicable_brgemms(linear_ir_ptr, false); + if (brgemms.empty()) + return false; + + const auto unsqueezed_params = MHAParallelWAOptimizer::find_unsqueezed_params(linear_ir_ptr, brgemms); + OPENVINO_ASSERT(!unsqueezed_params.empty(), "unsqueezed_params mustn't be empty after initialization"); + + + const auto& loop_manager = linear_ir_ptr->get_loop_manager(); + std::unordered_set affected_loops; + size_t prev_loop_id = std::numeric_limits::max(); + static const size_t dim_M_idx = 1; + + auto add_affected_loop = [&](const lowered::ExpressionPtr& expr) { + const auto& loop_idces = expr->get_loop_ids(); + if (loop_idces.empty() || loop_idces.front() == prev_loop_id) + return; + + prev_loop_id = loop_idces.front(); + const auto loop_info = 
loop_manager->get_loop_info(prev_loop_id); + if (loop_info->get_dim_idx() == dim_M_idx) { + affected_loops.insert(loop_info); + } + }; + + size_t i = 0; + std::unordered_set visited; + for (const auto& param : linear_ir_ptr->get_parameters()) { + if (unsqueezed_params.count(i++)) + continue; + utils::visit_path(param, visited, add_affected_loop, false); + } + + bool modified = false; + for (const auto& loop : affected_loops) { + if (!utils::is_dynamic_value(loop->get_work_amount())) { + loop->set_work_amount(utils::get_dynamic_value()); + ov::snippets::utils::update_data_pointer_shifts(loop); + modified = true; + } + } + + return modified; +} + +} // namespace pass +} // namespace lowered +} // namespace snippets +} // namespace ov \ No newline at end of file diff --git a/src/common/snippets/src/op/subgraph.cpp b/src/common/snippets/src/op/subgraph.cpp index ecfa72bcb20919..42820889e2f63f 100644 --- a/src/common/snippets/src/op/subgraph.cpp +++ b/src/common/snippets/src/op/subgraph.cpp @@ -54,6 +54,7 @@ #include "snippets/lowered/pass/validate_expanded_loops.hpp" #include "snippets/lowered/pass/set_load_store_scalar.hpp" #include "snippets/lowered/pass/extract_loop_invariants.hpp" +#include "snippets/lowered/pass/set_dynamic_wa_to_outermost_loop.hpp" #include "snippets/lowered/pass/init_registers.hpp" @@ -467,6 +468,7 @@ void Subgraph::control_flow_transformations(size_t min_parallel_work_amount, siz pipeline.register_pass(); pipeline.register_pass(); pipeline.register_pass(); + pipeline.register_pass(); pipeline.register_pass(); pipeline.register_pass(m_linear_ir->get_config().m_are_buffers_optimized); pipeline.register_pass(); diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp index 0967ef27087674..c6b11f48efa24c 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp 
+++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/snippets/mha_wo_transpose.cpp @@ -44,6 +44,11 @@ std::vector> originalShape_3D { {PartialShape{2, -1, 64}, {{2, 9, 64}, {2, 4, 64}, {2, 9, 64}}}, {PartialShape{2, 64, -1}, {{2, 64, 9}, {2, 64, 4}, {2, 64, 9}}}, {PartialShape{2, -1, 64}, {{2, 9, 64}, {2, 4, 64}, {2, 9, 64}}}, + }, + { + {PartialShape{-1, 128, 64}, {{1, 128, 64}, {2, 128, 64}, {1, 128, 64}}}, + {PartialShape{-1, 64, 128}, {{1, 64, 128}, {2, 64, 128}, {1, 64, 128}}}, + {PartialShape{-1, 128, 64}, {{1, 128, 64}, {2, 128, 64}, {1, 128, 64}}}, } }; From 049c8ba0b22e147172ec80b63122e3fd29ef02a5 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Fri, 17 Jan 2025 15:08:03 +0100 Subject: [PATCH 35/97] [DOCS] realign menu pass 1 (#28486) --- README.md | 2 +- .../about-openvino/key-features.rst | 2 +- .../documentation/openvino-ecosystem.rst | 4 +- .../configurations/genai-dependencies.rst | 2 +- .../get-started/install-openvino.rst | 4 +- .../install-openvino-genai.rst | 4 +- docs/articles_en/learn-openvino.rst | 4 - ...e.rst => openvino-workflow-generative.rst} | 16 +- .../genai-model-preparation.rst | 0 .../inference-with-genai-on-npu.rst} | 0 .../inference-with-genai.rst} | 4 +- .../inference-with-optimum-intel.rst} | 0 .../ov-tokenizers.rst | 2 +- .../weight-compression.rst | 6 +- .../running-inference/stateful-models.rst | 2 +- .../obtaining-stateful-openvino-model.rst | 2 +- ...lm-agent-functioncall-qwen-with-output.rst | 2 +- .../llm-agent-react-langchain-with-output.rst | 162 +++++++++--------- .../notebooks/llm-agent-react-with-output.rst | 84 ++++----- .../llm-chatbot-generate-api-with-output.rst | 72 ++++---- docs/notebooks/llm-chatbot-with-output.rst | 6 +- ...multilora-image-generation-with-output.rst | 58 +++---- .../speculative-sampling-with-output.rst | 2 +- .../text-to-image-genai-with-output.rst | 58 +++---- .../selector-tool/assets/selector-DiE3WrtX.js | 2 +- docs/sphinx_setup/index.rst | 5 +- 
src/frontends/tensorflow/src/frontend.cpp | 2 +- 27 files changed, 252 insertions(+), 255 deletions(-) rename docs/articles_en/{learn-openvino/llm_inference_guide.rst => openvino-workflow-generative.rst} (86%) rename docs/articles_en/{learn-openvino/llm_inference_guide => openvino-workflow-generative}/genai-model-preparation.rst (100%) rename docs/articles_en/{learn-openvino/llm_inference_guide/genai-guide-npu.rst => openvino-workflow-generative/inference-with-genai-on-npu.rst} (100%) rename docs/articles_en/{learn-openvino/llm_inference_guide/genai-guide.rst => openvino-workflow-generative/inference-with-genai.rst} (99%) rename docs/articles_en/{learn-openvino/llm_inference_guide/llm-inference-hf.rst => openvino-workflow-generative/inference-with-optimum-intel.rst} (100%) rename docs/articles_en/{learn-openvino/llm_inference_guide => openvino-workflow-generative}/ov-tokenizers.rst (99%) diff --git a/README.md b/README.md index 8019bb892023f2..9ed2d4690e39e9 100644 --- a/README.md +++ b/README.md @@ -100,7 +100,7 @@ OpenVINO supports the CPU, GPU, and NPU [devices](https://docs.openvino.ai/2024/ ## Generative AI with OpenVINO -Get started with the OpenVINO GenAI [installation](https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html) and refer to the [detailed guide](https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/genai-guide.html) to explore the capabilities of Generative AI using OpenVINO. +Get started with the OpenVINO GenAI [installation](https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html) and refer to the [detailed guide](https://docs.openvino.ai/2024/openvino-workflow-generative/generative-inference.html) to explore the capabilities of Generative AI using OpenVINO. Learn how to run LLMs and GenAI with [Samples](https://github.com/openvinotoolkit/openvino.genai/tree/master/samples) in the [OpenVINO™ GenAI repo](https://github.com/openvinotoolkit/openvino.genai). 
See GenAI in action with Jupyter notebooks: [LLM-powered Chatbot](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/notebooks/llm-chatbot/README.md) and [LLM Instruction-following pipeline](https://github.com/openvinotoolkit/openvino_notebooks/blob/latest/notebooks/llm-question-answering/README.md). diff --git a/docs/articles_en/about-openvino/key-features.rst b/docs/articles_en/about-openvino/key-features.rst index 6514bdc67a3302..c751a5bc65d3cf 100644 --- a/docs/articles_en/about-openvino/key-features.rst +++ b/docs/articles_en/about-openvino/key-features.rst @@ -13,7 +13,7 @@ Easy Integration :doc:`torch.compile <../openvino-workflow/torch-compile>` to improve model inference. Apply OpenVINO optimizations to your PyTorch models directly with a single line of code. -| :doc:`GenAI Out Of The Box <../learn-openvino/llm_inference_guide/genai-guide>` +| :doc:`GenAI Out Of The Box <../openvino-workflow-generative/inference-with-genai>` | With the genAI flavor of OpenVINO, you can run generative AI with just a couple lines of code. Check out the GenAI guide for instructions on how to do it. diff --git a/docs/articles_en/documentation/openvino-ecosystem.rst b/docs/articles_en/documentation/openvino-ecosystem.rst index 1975fe0a48a181..cb62672c032412 100644 --- a/docs/articles_en/documentation/openvino-ecosystem.rst +++ b/docs/articles_en/documentation/openvino-ecosystem.rst @@ -24,7 +24,7 @@ you an overview of a whole ecosystem of tools and solutions under the OpenVINO u | **GenAI** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO™ GenAI Library aims to simplify running inference of generative AI models. Check the LLM-powered Chatbot Jupyter notebook to see how GenAI works. 
@@ -113,7 +113,7 @@ generative AI and vision models directly on your computer or edge device using O | **Tokenizers** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO Tokenizers add text processing operations to OpenVINO. diff --git a/docs/articles_en/get-started/configurations/genai-dependencies.rst b/docs/articles_en/get-started/configurations/genai-dependencies.rst index 4486890c3a40b8..6eec18a74f0f05 100644 --- a/docs/articles_en/get-started/configurations/genai-dependencies.rst +++ b/docs/articles_en/get-started/configurations/genai-dependencies.rst @@ -27,5 +27,5 @@ Additional Resources * :doc:`OpenVINO GenAI Installation Guide <../install-openvino/install-openvino-genai>` * `OpenVINO GenAI repository `__ * :doc:`OpenVINO Installation Guide <../install-openvino>` -* :doc:`OpenVINO Tokenizers <../../learn-openvino/llm_inference_guide/ov-tokenizers>` +* :doc:`OpenVINO Tokenizers <../../openvino-workflow-generative/ov-tokenizers>` diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 7603adf37b7e89..401aa79213e6d7 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -35,8 +35,8 @@ All currently supported versions are: A new OpenVINO GenAI Flavor streamlines application development by providing LLM-specific interfaces for easy integration of language models, handling tokenization and text generation. For installation and usage instructions, proceed to - :doc:`Install OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>` and - :doc:`Run LLMs with OpenVINO GenAI Flavor <../learn-openvino/llm_inference_guide/genai-guide>`. + :doc:`Install OpenVINO GenAI Flavor <../openvino-workflow-generative>` and + :doc:`Run LLMs with OpenVINO GenAI Flavor <../openvino-workflow-generative/inference-with-genai>`. .. 
dropdown:: Building OpenVINO from Source diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst index bbfaa7817017ef..b548353b36977e 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst @@ -5,7 +5,7 @@ OpenVINO GenAI is a new flavor of OpenVINO, aiming to simplify running inference It hides the complexity of the generation process and minimizes the amount of code required. You can now provide a model and input context directly to OpenVINO, which performs tokenization of the input text, executes the generation loop on the selected device, and returns the generated text. -For a quickstart guide, refer to the :doc:`GenAI API Guide <../../learn-openvino/llm_inference_guide/genai-guide>`. +For a quickstart guide, refer to the :doc:`GenAI API Guide <../../openvino-workflow-generative/inference-with-genai>`. To see GenAI in action, check the Jupyter notebooks: `LLM-powered Chatbot `__ and @@ -28,7 +28,7 @@ but use the *openvino-genai* package instead of *openvino*: Archive Installation ############################### -The OpenVINO GenAI archive package includes the OpenVINO™ Runtime and :doc:`Tokenizers <../../learn-openvino/llm_inference_guide/ov-tokenizers>`. +The OpenVINO GenAI archive package includes the OpenVINO™ Runtime and :doc:`Tokenizers <../../openvino-workflow-generative/ov-tokenizers>`. 
To install the GenAI flavor of OpenVINO from an archive file, follow the standard installation steps for your system but instead of using the vanilla package file, download the one with OpenVINO GenAI: diff --git a/docs/articles_en/learn-openvino.rst b/docs/articles_en/learn-openvino.rst index 98797c9c67c126..762e51985159d3 100644 --- a/docs/articles_en/learn-openvino.rst +++ b/docs/articles_en/learn-openvino.rst @@ -14,7 +14,6 @@ Learn OpenVINO Interactive Tutorials (Python) Sample Applications (Python & C++) - Generative AI workflow @@ -28,6 +27,3 @@ as well as an experienced user. | :doc:`OpenVINO Samples ` | The OpenVINO samples (Python and C++) are simple console applications that show how to use specific OpenVINO API features. They can assist you in executing tasks such as loading a model, running inference, querying particular device capabilities, etc. - -| :doc:`Generative AI workflow ` -| Detailed information on how OpenVINO accelerates Generative AI use cases and what models it supports. This tutorial provides instructions for running Generative AI models using Hugging Face Optimum Intel and Native OpenVINO APIs. diff --git a/docs/articles_en/learn-openvino/llm_inference_guide.rst b/docs/articles_en/openvino-workflow-generative.rst similarity index 86% rename from docs/articles_en/learn-openvino/llm_inference_guide.rst rename to docs/articles_en/openvino-workflow-generative.rst index 8401923b8c7ac6..a4fa53335988ae 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -9,10 +9,10 @@ Generative AI workflow :maxdepth: 1 :hidden: - Generative Model Preparation - Inference with OpenVINO GenAI - Inference with Optimum Intel - OpenVINO Tokenizers + Generative Model Preparation + Inference with OpenVINO GenAI + Inference with Optimum Intel + OpenVINO Tokenizers @@ -58,7 +58,7 @@ options: Note that the base version of OpenVINO may also be used to run generative AI. 
Although it may offer a simpler environment, with fewer dependencies, it has significant limitations and a more demanding implementation process. For reference, see -`the article on generative AI usage of OpenVINO 2024.6 `__. +`the article on generative AI usage of OpenVINO 2024.6 `__. The advantages of using OpenVINO for generative model deployment: @@ -90,8 +90,8 @@ The advantages of using OpenVINO for generative model deployment: Proceed to guides on: -* :doc:`OpenVINO GenAI Flavor <./llm_inference_guide/genai-guide>` -* :doc:`Hugging Face and Optimum Intel <./llm_inference_guide/llm-inference-hf>` -* `Generative AI with Base OpenVINO `__ +* :doc:`OpenVINO GenAI Flavor <./openvino-workflow-generative/inference-with-genai>` +* :doc:`Hugging Face and Optimum Intel <./openvino-workflow-generative/inference-with-optimum-intel>` +* `Generative AI with Base OpenVINO `__ diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-model-preparation.rst b/docs/articles_en/openvino-workflow-generative/genai-model-preparation.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-model-preparation.rst rename to docs/articles_en/openvino-workflow-generative/genai-model-preparation.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide-npu.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-guide-npu.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst similarity index 99% rename from docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-genai.rst index 43f9435bf79b1b..1f19c3eed7da8f 100644 --- 
a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst +++ b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst @@ -8,7 +8,7 @@ Inference with OpenVINO GenAI :maxdepth: 1 :hidden: - NPU inference of LLMs + NPU inference of LLMs OpenVINO™ GenAI is a library of pipelines and methods, extending the OpenVINO runtime to work @@ -16,7 +16,7 @@ with generative AI models more efficiently. This article provides reference code on its usage. Note that the base OpenVINO version will not work with these instructions, make sure to :doc:`install OpenVINO with GenAI <../../get-started/install-openvino/install-openvino-genai>`. -.. image:: ../../assets/images/genai_main_diagram.svg +.. image:: ../assets/images/genai_main_diagram.svg :align: center :alt: OpenVINO GenAI workflow diagram diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-hf.rst b/docs/articles_en/openvino-workflow-generative/inference-with-optimum-intel.rst similarity index 100% rename from docs/articles_en/learn-openvino/llm_inference_guide/llm-inference-hf.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-optimum-intel.rst diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst b/docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst similarity index 99% rename from docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst rename to docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst index 1dbd85e3ee59a5..c836eb52e99495 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst +++ b/docs/articles_en/openvino-workflow-generative/ov-tokenizers.rst @@ -6,7 +6,7 @@ generation with LLMs. Tokenizers convert the input text into a sequence of token corresponding IDs, so that the model can understand and process it during inference. The transformation of a sequence of numbers into a string is called detokenization. -.. 
image:: ../../assets/images/tokenization.svg +.. image:: ../assets/images/tokenization.svg :align: center There are two important points in the tokenizer-model relation: diff --git a/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst b/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst index 4b752b74187768..232e0f2c2a66b9 100644 --- a/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst +++ b/docs/articles_en/openvino-workflow/model-optimization-guide/weight-compression.rst @@ -105,7 +105,7 @@ By default, weights are compressed asymmetrically to "INT8_ASYM" mode. print(results) For more details, refer to the article on how to - :doc:`infer LLMs using Optimum Intel <../../learn-openvino/llm_inference_guide/llm-inference-hf>`. + :doc:`infer LLMs using Optimum Intel <../../openvino-workflow-generative/inference-with-optimum-intel>`. .. tab-item:: Compression with NNCF :sync: nncf @@ -221,7 +221,7 @@ depending on the model. For more details, refer to the article on how to - :doc:`infer LLMs using Optimum Intel <../../../learn-openvino/llm_inference_guide/llm-inference-hf>`. + :doc:`infer LLMs using Optimum Intel <../../../openvino-workflow-generative/inference-with-optimum-intel>`. The code snippet below shows how to do 4-bit quantization of the model weights represented in OpenVINO IR using NNCF: @@ -344,7 +344,7 @@ load the compressed model later for faster time to first inference. .. tip:: Models optimized with with NNCF or Optimum Intel can be used with - :doc:`OpenVINO GenAI <../../learn-openvino/llm_inference_guide/genai-guide>`. + :doc:`OpenVINO GenAI <../../openvino-workflow-generative/inference-with-genai>`. 
Auto-tuning of Weight Compression Parameters diff --git a/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst b/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst index d00fd19c4d636d..55626d485c412d 100644 --- a/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst +++ b/docs/articles_en/openvino-workflow/running-inference/stateful-models.rst @@ -66,7 +66,7 @@ from the application code to OpenVINO and all related internal work is hidden fr There are three methods of turning an OpenVINO model into a stateful one: -* :doc:`Optimum-Intel <../../learn-openvino/llm_inference_guide/llm-inference-hf>` - the most user-friendly option. All necessary optimizations +* :doc:`Optimum-Intel <../../openvino-workflow-generative/inference-with-optimum-intel>` - the most user-friendly option. All necessary optimizations are recognized and applied automatically. The drawback is, the tool does not work with all models. diff --git a/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst b/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst index 12a21a5dd1fad0..0ad6530cb61188 100644 --- a/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst +++ b/docs/articles_en/openvino-workflow/running-inference/stateful-models/obtaining-stateful-openvino-model.rst @@ -10,7 +10,7 @@ and you have three ways to do it: * `Optimum-Intel `__ - an automated solution applicable to a selection of models (not covered by this article, for a usage guide - refer to the :doc:`LLM Inference with Hugging Face and Optimum Intel <../../../learn-openvino/llm_inference_guide>` article). + refer to the :doc:`LLM Inference with Hugging Face and Optimum Intel <../../../openvino-workflow-generative>` article). 
* :ref:`MakeStateful transformation ` - to choose which pairs of Parameter and Result to replace. * :ref:`LowLatency2 transformation ` - to detect and replace Parameter diff --git a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst index 07f84987dca33e..051e83eff184bb 100644 --- a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst +++ b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst @@ -258,7 +258,7 @@ pipeline. You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. code:: ipython3 diff --git a/docs/notebooks/llm-agent-react-langchain-with-output.rst b/docs/notebooks/llm-agent-react-langchain-with-output.rst index 9adb0311542426..7313d4c454c42a 100644 --- a/docs/notebooks/llm-agent-react-langchain-with-output.rst +++ b/docs/notebooks/llm-agent-react-langchain-with-output.rst @@ -70,12 +70,12 @@ Prerequisites import requests from pathlib import Path - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + if not Path("cmd_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py") open("cmd_helper.py", "w", encoding="utf-8").write(r.text) @@ -92,9 +92,9 @@ Prerequisites .. code:: ipython3 import os - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + %pip install -Uq pip %pip uninstall -q -y optimum optimum-intel %pip install --pre -Uq "openvino>=2024.5.0" openvino-tokenizers[transformers] --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly @@ -131,20 +131,20 @@ creating custom tools. .. 
code:: ipython3 from langchain_core.tools import tool - - + + @tool def multiply(first_int: int, second_int: int) -> int: """Multiply two integers together.""" return first_int * second_int - - + + @tool def add(first_int: int, second_int: int) -> int: "Add two integers." return first_int + second_int - - + + @tool def exponentiate(base: int, exponent: int) -> int: "Exponentiate the base to the exponent power." @@ -213,22 +213,22 @@ previous agent tool invocations and the corresponding tool outputs. .. code:: ipython3 PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" - + FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). - + Valid "action" values: "Final Answer" or {tool_names} - + Provide only ONE action per $JSON_BLOB, as shown: - + ``` {{{{ "action": $TOOL_NAME, "action_input": $INPUT }}}} ``` - + Follow this format: - + Question: input question to answer Thought: consider previous and subsequent steps Action: @@ -245,10 +245,10 @@ previous agent tool invocations and the corresponding tool outputs. "action_input": "Final response to human" }}}} ```""" - + SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. Thought:""" - + HUMAN_MESSAGE_TEMPLATE = "{input}\n\n{agent_scratchpad}" Create LLM @@ -294,7 +294,7 @@ select following models as LLM in agent pipeline. .. code:: python - ## login to huggingfacehub to get access to pretrained model + ## login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -318,16 +318,16 @@ folder. .. 
code:: ipython3 import ipywidgets as widgets - + llm_model_ids = ["Qwen/Qwen2.5-7B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/qwen2.5-14b-instruct", "meta-llama/Meta-Llama-3.1-8B-Instruct"] - + llm_model_id = widgets.Dropdown( options=llm_model_ids, value=llm_model_ids[0], description="Model:", disabled=False, ) - + llm_model_id @@ -342,10 +342,10 @@ folder. .. code:: ipython3 from cmd_helper import optimum_cli - + llm_model_path = llm_model_id.value.split("/")[-1] repo_name = llm_model_id.value.split("/")[0] - + if not Path(llm_model_path).exists(): optimum_cli( llm_model_id.value, llm_model_path, additional_args={"task": "text-generation-with-past", "weight-format": "int4", "group-size": "128", "ratio": "1.0"} @@ -359,9 +359,9 @@ Select inference device for LLM .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget("CPU", exclude=["NPU"]) - + device @@ -383,37 +383,37 @@ information `__. from langchain_huggingface import HuggingFacePipeline from transformers.generation.stopping_criteria import StoppingCriteriaList, StoppingCriteria - + import openvino.properties as props import openvino.properties.hint as hints import openvino.properties.streams as streams - - + + class StopSequenceCriteria(StoppingCriteria): """ This class can be used to stop generation whenever a sequence of tokens is encountered. - + Args: stop_sequences (`str` or `List[str]`): The sequence (or list of sequences) on which to stop execution. tokenizer: The tokenizer used to decode the model outputs. 
""" - + def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer - + def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences) - - + + ov_config = {hints.performance_mode(): hints.PerformanceMode.LATENCY, streams.num(): "1", props.cache_dir(): ""} stop_tokens = ["Observation:"] - + ov_llm = HuggingFacePipeline.from_model_id( model_id=llm_model_path, task="text-generation", @@ -425,20 +425,20 @@ information `__. }, pipeline_kwargs={"max_new_tokens": 2048}, ) - + tokenizer = ov_llm.pipeline.tokenizer ov_llm.pipeline._forward_params["stopping_criteria"] = StoppingCriteriaList([StopSequenceCriteria(stop_tokens, tokenizer)]) .. code:: ipython3 from langchain_huggingface import ChatHuggingFace - + ov_chat = ChatHuggingFace(llm=ov_llm, verbose=True) ov_chat = ov_chat.bind(skip_prompt=True, stop=["Observation:"]) You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. code:: ipython3 @@ -466,7 +466,7 @@ outputs back to the agent, and repeats. .. code:: ipython3 from langchain.agents import AgentExecutor, StructuredChatAgent - + agent = StructuredChatAgent.from_llm_and_tools( ov_chat, tools, @@ -494,11 +494,11 @@ prompt template. .. parsed-literal:: - - + + > Entering new AgentExecutor chain... Thought: First, we need to take 3 to the fifth power. Then we will find the sum of twelve and three. After that, we multiply the first result by the second result. Finally, we'll square the whole result. - + Action: ``` { @@ -512,7 +512,7 @@ prompt template. Observation: Observation: 243 Thought:Next, let's find the sum of twelve and three. 
- + Action: ``` { @@ -526,7 +526,7 @@ prompt template. Observation: Observation: 15 Thought:Now, we will multiply the result of \(3^5\) (which is 243) by the sum of 12 and 3 (which is 15). - + Action: ``` { @@ -539,8 +539,8 @@ prompt template. ``` Observation: Observation: 3645 - Thought:Thought: Now, we need to square the result of the multiplication (3645). - + Thought:Thought: Now, we need to square the result of the multiplication (3645). + Action: ``` { @@ -553,7 +553,7 @@ prompt template. ``` Observation: 13286025 Thought:Thought: I know what to respond - + Action: ``` { @@ -561,7 +561,7 @@ prompt template. "action_input": "The final result is 13286025." } ``` - + > Finished chain. @@ -598,10 +598,10 @@ words generated by agent. from langchain_community.utilities import WikipediaAPIWrapper from langchain_core.callbacks import CallbackManagerForToolRun from typing import Optional - + from pydantic import BaseModel, Field - - + + class WikipediaQueryRunWrapper(WikipediaQueryRun): def _run( self, @@ -610,17 +610,17 @@ words generated by agent. ) -> str: """Use the Wikipedia tool.""" return self.api_wrapper.run(text) - - + + api_wrapper = WikipediaAPIWrapper(top_k_results=2, doc_content_chars_max=1000) - - + + class WikiInputs(BaseModel): """inputs to the wikipedia tool.""" - + text: str = Field(description="query to look up on wikipedia.") - - + + wikipedia = WikipediaQueryRunWrapper( description="A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. 
Input should be a search query.", args_schema=WikiInputs, @@ -652,8 +652,8 @@ In this examples, we will create 2 customized tools for import urllib.parse import json5 - - + + @tool def painting(prompt: str) -> str: """ @@ -661,8 +661,8 @@ In this examples, we will create 2 customized tools for """ prompt = urllib.parse.quote(prompt) return json5.dumps({"image_url": f"https://image.pollinations.ai/prompt/{prompt}"}, ensure_ascii=False) - - + + painting.invoke({"prompt": "a cat"}) @@ -683,10 +683,10 @@ In this examples, we will create 2 customized tools for """ Get the current weather for `city_name` """ - + if not isinstance(city_name, str): raise TypeError("City name must be a string") - + key_selection = { "current_condition": [ "temp_C", @@ -697,15 +697,15 @@ In this examples, we will create 2 customized tools for ], } import requests - + resp = requests.get(f"https://wttr.in/{city_name}?format=j1") resp.raise_for_status() resp = resp.json() ret = {k: {_v: resp[k][0][_v] for _v in v} for k, v in key_selection.items()} - + return str(ret) - - + + weather.invoke({"city_name": "London"}) @@ -725,7 +725,7 @@ Create AI agent demo with Gradio UI .. 
code:: ipython3 tools = [wikipedia, painting, weather] - + agent = StructuredChatAgent.from_llm_and_tools( ov_chat, tools, @@ -741,28 +741,28 @@ Create AI agent demo with Gradio UI def partial_text_processor(partial_text, new_text): """ helper for updating partially generated answer, used by default - + Params: partial_text: text buffer for storing previosly generated text new_text: text update for the current step Returns: updated text string - + """ partial_text += new_text return partial_text - - + + def run_chatbot(history): """ callback function for running chatbot on submit button click - + Params: history: conversation history - + """ partial_text = "" - + for new_text in agent_executor.stream( {"input": history[-1][0]}, ): @@ -770,8 +770,8 @@ Create AI agent demo with Gradio UI partial_text = partial_text_processor(partial_text, new_text["output"]) history[-1][1] = partial_text yield history - - + + def request_cancel(): ov_chat.llm.pipeline.model.request.cancel() @@ -780,11 +780,11 @@ Create AI agent demo with Gradio UI if not Path("gradio_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-agent-react/gradio_helper.py") open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo - + demo = make_demo(run_fn=run_chatbot, stop_fn=request_cancel) - + try: demo.launch() except Exception: diff --git a/docs/notebooks/llm-agent-react-with-output.rst b/docs/notebooks/llm-agent-react-with-output.rst index 791355276fd2fd..8741f5f5254013 100644 --- a/docs/notebooks/llm-agent-react-with-output.rst +++ b/docs/notebooks/llm-agent-react-with-output.rst @@ -62,22 +62,22 @@ Prerequisites import os import requests - - + + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + r = requests.get( 
url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/pip_helper.py", ) open("pip_helper.py", "w").write(r.text) - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + from pip_helper import pip_install - + pip_install( "-q", "--extra-index-url", @@ -122,16 +122,16 @@ Vietnamese, Thai, Arabic, and more. For more details, please refer to .. code:: ipython3 import ipywidgets as widgets - + llm_model_ids = ["Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-7B-Instruct", "Qwen/qwen2.5-14b-instruct"] - + llm_model_id = widgets.Dropdown( options=llm_model_ids, value=llm_model_ids[0], description="Model:", disabled=False, ) - + llm_model_id @@ -146,9 +146,9 @@ Vietnamese, Thai, Arabic, and more. For more details, please refer to .. code:: ipython3 from pathlib import Path - + llm_model_path = llm_model_id.value.split("/")[-1] - + if not Path(llm_model_path).exists(): !optimum-cli export openvino --model {llm_model_id.value} --task text-generation-with-past --trust-remote-code --weight-format int4 --group-size 128 --ratio 1.0 --sym {llm_model_path} @@ -160,9 +160,9 @@ Select inference device for LLM .. code:: ipython3 from notebook_utils import device_widget - + llm_device = device_widget("CPU", exclude=["NPU"]) - + llm_device @@ -213,7 +213,7 @@ Tokenizer class and pipelines API are compatible with Optimum models. You can find more details about OpenVINO LLM inference using HuggingFace Optimum API in `LLM inference -guide `__. +guide `__. .. 
code:: ipython3 @@ -226,15 +226,15 @@ guide `__ import openvino.properties as props import openvino.properties.hint as hints import openvino.properties.streams as streams - + import json import json5 import torch - + tokenizer = AutoTokenizer.from_pretrained(llm_model_path, trust_remote_code=True) - + ov_config = {hints.performance_mode(): hints.PerformanceMode.LATENCY, streams.num(): "1", props.cache_dir(): ""} - + llm = OVModelForCausalLM.from_pretrained( llm_model_path, device=llm_device.value, @@ -242,7 +242,7 @@ guide `__ config=AutoConfig.from_pretrained(llm_model_path, trust_remote_code=True), trust_remote_code=True, ) - + llm.generation_config.top_k = 1 llm.generation_config.max_length = 2000 @@ -260,31 +260,31 @@ received from tool calling.. class StopSequenceCriteria(StoppingCriteria): """ This class can be used to stop generation whenever a sequence of tokens is encountered. - + Args: stop_sequences (`str` or `List[str]`): The sequence (or list of sequences) on which to stop execution. tokenizer: The tokenizer used to decode the model outputs. """ - + def __init__(self, stop_sequences, tokenizer): if isinstance(stop_sequences, str): stop_sequences = [stop_sequences] self.stop_sequences = stop_sequences self.tokenizer = tokenizer - + def __call__(self, input_ids, scores, **kwargs) -> bool: decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences) - - + + def text_completion(prompt: str, stop_words) -> str: im_end = "<|im_end|>" if im_end not in stop_words: stop_words = stop_words + [im_end] streamer = TextStreamer(tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True) - + stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop_words, tokenizer)]) input_ids = torch.tensor([tokenizer.encode(prompt)]) generate_kwargs = dict( @@ -297,7 +297,7 @@ received from tool calling.. 
output = tokenizer.decode(output, errors="ignore") assert output.startswith(prompt) output = output[len(prompt) :].replace("<|endoftext|>", "").replace(im_end, "") - + for stop_str in stop_words: idx = output.find(stop_str) if idx != -1: @@ -339,13 +339,13 @@ parameter should be a sequence of messages that contains the .. code:: ipython3 TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}""" - + PROMPT_REACT = """Answer the following questions as best you can. You have access to the following APIs: - + {tools_text} - + Use the following format: - + Question: the input question you must answer Thought: you should always think about what to do Action: the action to take, should be one of [{tools_name_text}] @@ -354,9 +354,9 @@ parameter should be a sequence of messages that contains the ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Final Answer: the final answer to the original input question - + Begin! - + Question: {query}""" Meanwhile we have to create function for consolidate the tools @@ -381,9 +381,9 @@ information and conversation history into the prompt template. raise NotImplementedError tools_text.append(tool) tools_text = "\n\n".join(tools_text) - + tools_name_text = ", ".join([tool_info["name_for_model"] for tool_info in list_of_tool_info]) - + messages = [{"role": "system", "content": "You are a helpful assistant."}] for i, (query, response) in enumerate(chat_history): if list_of_tool_info: @@ -397,9 +397,9 @@ information and conversation history into the prompt template. 
messages.append({"role": "user", "content": query}) if response: messages.append({"role": "assistant", "content": response}) - + prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False, return_tensors="pt") - + return prompt Create parser @@ -493,7 +493,7 @@ execute them according to the output of LLM. return str(ret) elif tool_name == "image_gen": import urllib.parse - + tool_args = tool_args.replace("(", "").replace(")", "") prompt = json5.loads(tool_args)["prompt"] prompt = urllib.parse.quote(prompt) @@ -503,11 +503,11 @@ execute them according to the output of LLM. ) else: raise NotImplementedError - - + + def llm_with_tool(prompt: str, history, list_of_tool_info=()): chat_history = [(x["user"], x["bot"]) for x in history] + [(prompt, "")] - + planning_prompt = build_input_text(chat_history, list_of_tool_info) text = "" while True: @@ -522,7 +522,7 @@ execute them according to the output of LLM. else: text += output break - + new_history = [] new_history.extend(history) new_history.append({"user": prompt, "bot": text}) @@ -537,7 +537,7 @@ Run agent history = [] query = "get the weather in London, and create a picture of Big Ben based on the weather information" - + response, history = llm_with_tool(prompt=query, history=history, list_of_tool_info=tools) diff --git a/docs/notebooks/llm-chatbot-generate-api-with-output.rst b/docs/notebooks/llm-chatbot-generate-api-with-output.rst index c09b463ae985d0..2c23e3ef3b4f64 100644 --- a/docs/notebooks/llm-chatbot-generate-api-with-output.rst +++ b/docs/notebooks/llm-chatbot-generate-api-with-output.rst @@ -81,9 +81,9 @@ Install required dependencies .. 
code:: ipython3 import os - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + %pip install -Uq pip %pip uninstall -q -y optimum optimum-intel %pip install -q -U "openvino>=2024.3.0" openvino-tokenizers[transformers] openvino-genai @@ -103,12 +103,12 @@ Install required dependencies from pathlib import Path import requests import shutil - + # fetch model configuration - + config_shared_path = Path("../../utils/llm_config.py") config_dst_path = Path("llm_config.py") - + if not config_dst_path.exists(): if config_shared_path.exists(): try: @@ -127,7 +127,7 @@ Install required dependencies r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/llm_config.py") with open("llm_config.py", "w", encoding="utf-8") as f: f.write(r.text) - + if not Path("notebook_utils.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py") open("notebook_utils.py", "w").write(r.text) @@ -238,7 +238,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -270,7 +270,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -304,7 +304,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -338,7 +338,7 @@ Click here to see available models options .. 
code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -399,7 +399,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -432,7 +432,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -466,7 +466,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -500,7 +500,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -531,7 +531,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -644,9 +644,9 @@ Click here to see available models options .. code:: ipython3 from llm_config import get_llm_selection_widget - + form, lang, model_id_widget, compression_variant, use_preconverted = get_llm_selection_widget() - + form @@ -668,7 +668,7 @@ Click here to see available models options .. 
parsed-literal:: Selected model qwen2-0.5b-instruct with INT4 compression - + Convert model using Optimum-CLI tool ------------------------------------ @@ -676,7 +676,7 @@ Convert model using Optimum-CLI tool `Optimum Intel `__ -is the interface between the +is the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -776,28 +776,28 @@ be additionally applied during model export with INT4 precision using .. code:: ipython3 from llm_config import convert_and_compress_model - + model_dir = convert_and_compress_model(model_id, model_configuration, compression_variant.value, use_preconverted.value) .. parsed-literal:: ✅ INT4 qwen2-0.5b-instruct model already converted and can be found in qwen2/INT4_compressed_weights - + Let’s compare model size for different compression types .. code:: ipython3 from llm_config import compare_model_size - + compare_model_size(model_dir) .. parsed-literal:: Size of model with INT4 compressed weights is 358.86 MB - + Select device for inference --------------------------- @@ -807,9 +807,9 @@ Select device for inference .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -852,14 +852,14 @@ of the available generation parameters more deeply later. .. code:: ipython3 import openvino_genai as ov_genai - + print(f"Loading model from {model_dir}\n") - - + + pipe = ov_genai.LLMPipeline(str(model_dir), device.value) - + generation_config = pipe.get_generation_config() - + input_prompt = "The Sun is yellow bacause" print(f"Input text: {input_prompt}") print(pipe.generate(input_prompt, max_new_tokens=10)) @@ -868,10 +868,10 @@ of the available generation parameters more deeply later. .. parsed-literal:: Loading model from qwen2/INT4_compressed_weights - + Input text: The Sun is yellow bacause it is made of hydrogen and oxygen atoms. 
The - + Run Chatbot ----------- @@ -932,7 +932,7 @@ history, we should move LLMPipeline to chat mode using ``start_chat()`` method. More info about OpenVINO LLM inference can be found in `LLM Inference -Guide `__ +Guide `__ .. raw:: html @@ -1022,11 +1022,11 @@ Click here to see detailed description of advanced options if not Path("gradio_helper_genai.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-chatbot/gradio_helper_genai.py") open("gradio_helper_genai.py", "w").write(r.text) - + from gradio_helper_genai import make_demo - + demo = make_demo(pipe, model_configuration, model_id, lang.value) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/notebooks/llm-chatbot-with-output.rst b/docs/notebooks/llm-chatbot-with-output.rst index 0d214f5cccc0fc..24ebf376309f08 100644 --- a/docs/notebooks/llm-chatbot-with-output.rst +++ b/docs/notebooks/llm-chatbot-with-output.rst @@ -1012,7 +1012,7 @@ Tokenizer class and pipelines API are compatible with Optimum models. You can find more details about OpenVINO LLM inference using HuggingFace Optimum API in `LLM inference -guide `__. +guide `__. .. code:: ipython3 @@ -1109,7 +1109,7 @@ decoding methods in this generation updates conversation history for next conversation step. it makes stronger connection of next question with previously provided and allows user to make clarifications regarding previously provided -answers.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html +answers.https://docs.openvino.ai/2024/openvino-workflow-generative.html | There are several parameters that can control text generation quality: \* ``Temperature`` is a parameter used to control the level of @@ -1160,7 +1160,7 @@ answers.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html heavily than a token that has appeared only one time. 
A value of 1 means that there is no penalty and values larger than 1 discourage repeated - tokens.https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide.html + tokens.https://docs.openvino.ai/2024/openvino-workflow-generative.html .. code:: ipython3 diff --git a/docs/notebooks/multilora-image-generation-with-output.rst b/docs/notebooks/multilora-image-generation-with-output.rst index 2efe1aaab50908..f6445e5a2ec1f2 100644 --- a/docs/notebooks/multilora-image-generation-with-output.rst +++ b/docs/notebooks/multilora-image-generation-with-output.rst @@ -75,11 +75,11 @@ Guide 0.25.0" pillow "gradio>=4.19" "peft>=0.7.0" %pip install -q "git+https://github.com/huggingface/optimum-intel.git" %pip install -q -U "openvino>=2024.5.0" "openvino-tokenizers>=2024.5.0" "openvino-genai>=2024.5.0" - + if platform.system() == "Darwin": %pip install -q "numpy<2.0.0" @@ -87,16 +87,16 @@ Guide `__ is -the interface between the +the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -144,7 +144,7 @@ saved on disk before export. For avoiding this, we will use ``export_from_model`` function that accepts initialized model. Additionally, for using model with OpenVINO GenAI, we need to export tokenizers to OpenVINO format using `OpenVINO -Tokenizers `__ +Tokenizers `__ library. In this tutorial we will use `Stable Diffusion @@ -160,9 +160,9 @@ Diffusion family. from optimum.intel.openvino import OVConfig from optimum.exporters.openvino.convert import export_tokenizer import gc - + model_dir = Path("sdxl-lcm") - + if not model_dir.exists(): model_id = "stabilityai/stable-diffusion-xl-base-1.0" adapter_id = "latent-consistency/lcm-lora-sdxl" @@ -262,17 +262,17 @@ Prepare LoRA Adapters .. code:: ipython3 from lora_config import LORA - + # uncomment this line to see predefined LoRA adapters configuration used in this notebook # LORA .. 
code:: ipython3 from huggingface_hub import hf_hub_download - + lora_dir = Path("lora") adapter_paths = [] - + for lora in LORA: lora_model_dir = lora_dir / lora["name"].lower().replace(" ", "_") file_name = lora["file_name"] @@ -283,8 +283,8 @@ Prepare LoRA Adapters .. code:: ipython3 import openvino_genai as ov_genai - - + + def prepare_adapter_config(scales=None): if scales is None: scales = [1 / len(adapter_paths)] * len(adapter_paths) @@ -293,10 +293,10 @@ Prepare LoRA Adapters adapter_config = ov_genai.AdapterConfig() for adapter, scale in zip(adapter_paths, scales): adapter_config.add(ov_genai.Adapter(adapter), scale) - + return adapter_config - - + + adapters_config = prepare_adapter_config(0.0) adapters = adapters_config.get_adapters() @@ -312,7 +312,7 @@ denoising. For reproducibility of generation results, we will use .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) device @@ -329,21 +329,21 @@ denoising. For reproducibility of generation results, we will use import openvino as ov import torch - - + + class Generator(ov_genai.Generator): def __init__(self, seed): ov_genai.Generator.__init__(self) self.generator = torch.Generator(device="cpu").manual_seed(seed) - + def next(self): return torch.randn(1, generator=self.generator, dtype=torch.float32).item() - + def randn_tensor(self, shape: ov.Shape): torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32) return ov.Tensor(torch_tensor.numpy()) - - + + pipe = ov_genai.Text2ImagePipeline(model_dir, "CPU", adapters=adapters_config) Selection specific adapter during generation @@ -370,7 +370,7 @@ let’s select LoRA for generation images in X-Ray style. .. code:: ipython3 from PIL import Image - + image = Image.fromarray(image_tensor.data[0]) image @@ -396,7 +396,7 @@ modern illustration pointillistic style. 
prompt_template2 = LORA[2].get("prompt", "") adapter1_weight = LORA[1].get("weight", 1.0) adapter2_weight = LORA[2].get("weight", 1.0) - + prompt = prompt_template2.replace("", prompt_template1.replace("", subject)) adapter_config = ov_genai.AdapterConfig() adapter_config.add(adapters[1], adapter1_weight) @@ -446,7 +446,7 @@ Interactive demo .. code:: ipython3 gradio_helper_path = Path("gradio_helper.py") - + if not gradio_helper_path.exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/multilora-image-generation/gradio_helper.py", @@ -456,9 +456,9 @@ Interactive demo .. code:: ipython3 from gradio_helper import make_demo - + demo = make_demo(pipe, Generator, adapters, LORA) - + try: demo.launch(debug=False) except Exception: diff --git a/docs/notebooks/speculative-sampling-with-output.rst b/docs/notebooks/speculative-sampling-with-output.rst index 868fbe9beccf9e..8ca9ca5bc7002c 100644 --- a/docs/notebooks/speculative-sampling-with-output.rst +++ b/docs/notebooks/speculative-sampling-with-output.rst @@ -136,7 +136,7 @@ In case, if you want run own models, you should convert them using Optimum `__ library accelerated by OpenVINO integration. More details about model preparation can be found in `OpenVINO LLM inference -guide `__ +guide `__ .. code:: ipython3 diff --git a/docs/notebooks/text-to-image-genai-with-output.rst b/docs/notebooks/text-to-image-genai-with-output.rst index 126c654405b36a..a0f0af9ef41538 100644 --- a/docs/notebooks/text-to-image-genai-with-output.rst +++ b/docs/notebooks/text-to-image-genai-with-output.rst @@ -23,7 +23,7 @@ the Hugging Face Transformers library to the OpenVINO™ IR format. For more details, refer to the `Hugging Face Optimum Intel documentation `__. 2. Run inference using the `Text-to-Image Generation -pipeline `__ +pipeline `__ from OpenVINO GenAI. 
@@ -59,19 +59,19 @@ Prerequisites import platform import requests - - + + %pip install -q "git+https://github.com/huggingface/optimum-intel.git" %pip install -q -U "openvino>=2024.5" "openvino-tokenizers>=2024.5" "openvino-genai>=2024.5" %pip install -q Pillow "diffusers>=0.30.3" "gradio>=4.19" "typing_extensions>=4.9" if platform.system() == "Darwin": %pip install -q "numpy<2.0.0" - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py", ) @@ -83,7 +83,7 @@ Convert model using Optimum-CLI tool `Optimum Intel `__ -is the interface between the +is the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -118,12 +118,12 @@ wrapper over cli-command. .. code:: ipython3 from pathlib import Path - + from cmd_helper import optimum_cli - - + + model_dir = Path("dreamlike_anime_1_0_ov") - + if not model_dir.exists(): optimum_cli("dreamlike-art/dreamlike-anime-1.0", model_dir) @@ -137,8 +137,8 @@ select device from dropdown list for running inference using OpenVINO .. 
code:: ipython3 from notebook_utils import device_widget - - + + device = device_widget("CPU", exclude=["NPU"]) device @@ -163,27 +163,27 @@ That’s it:) import openvino as ov from PIL import Image import torch - - + + class Generator(ov_genai.Generator): def __init__(self, seed): ov_genai.Generator.__init__(self) self.generator = torch.Generator(device="cpu").manual_seed(seed) - + def next(self): return torch.randn(1, generator=self.generator, dtype=torch.float32).item() - + def randn_tensor(self, shape: ov.Shape): torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32) return ov.Tensor(torch_tensor.numpy()) - - + + random_generator = Generator(42) # openvino_genai.CppStdGenerator can be used to have same images as C++ sample pipe = ov_genai.Text2ImagePipeline(model_dir, device.value) prompt = "anime, masterpiece, high quality, a green snowman with a happy smiling face in the snows" - + image_tensor = pipe.generate(prompt, width=512, height=512, num_inference_steps=20, num_images_per_prompt=1, generator=random_generator) - + image = Image.fromarray(image_tensor.data[0]) .. code:: ipython3 @@ -230,20 +230,20 @@ from command line: def prepare_adapter_config(adapters): adapter_config = ov_genai.AdapterConfig() - + # Multiple LoRA adapters applied simultaneously are supported, parse them all and corresponding alphas from cmd parameters: for i in range(int(len(adapters) / 2)): adapter = ov_genai.Adapter(adapters[2 * i]) alpha = float(adapters[2 * i + 1]) adapter_config.add(adapter, alpha) - + return adapter_config - - + + adapter_config = prepare_adapter_config(["soulcard.safetensors", 0.5]) - + pipe = ov_genai.Text2ImagePipeline(model_dir, device.value, adapters=adapter_config) - + image_tensor = pipe.generate(prompt, generator=Generator(42), width=512, height=512, num_inference_steps=20) image = Image.fromarray(image_tensor.data[0]) @@ -270,10 +270,10 @@ Interactive demo .. 
code:: ipython3 from gradio_helper import make_demo - - + + demo = make_demo(pipe, Generator, adapter_config) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js index 264f23f1dd17e3..568dd5dad034f2 100644 --- a/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js +++ b/docs/sphinx_setup/_static/selector-tool/assets/selector-DiE3WrtX.js @@ -56,4 +56,4 @@ enabled=1 gpgcheck=1 repo_gpgcheck=1 gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB -EOF`,getMoveRepoFileCommand:e=>`sudo mv /tmp/openvino-${e.metadata.yumYear}.repo ${Zu}`,verifyRepoCommand:"yum repolist | grep -i openvino",getInstallCommand:e=>`sudo yum install openvino-${e.metadata.yumVersion}`};class Sv extends Ae{constructor(t){super({level:T.DISTRIBUTION,key:A.ZYPPER,metadata:{title:"ZYPPER",subtitle:de("distributions.CAPIOnly")}}),this._data=t}get data(){return{...this._data,commands:xv}}}const xv={addRepo:"sudo zypper addrepo https://download.opensuse.org/repositories/science/openSUSE_Tumbleweed/science.repo",refresh:"sudo zypper refresh",getInstallCommand:({metadata:e})=>`sudo zypper install openvino-devel-${e.zypperVersion} openvino-sample-${e.zypperVersion}`};class aa extends Xr{constructor(t,n,r){super({level:T.OP_SYSTEM,key:t,metadata:n},r),this._setDefaultOS()}_setDefaultOS(){const t=this._detectOS()||Qe.WINDOWS;this.key===t&&this.default()}_detectOS(){const{userAgent:t}=navigator,n={windows:/(Windows|Win)/g,macOS:/(Macintosh|Mac)/g,linux:/(Linux|X11)/g};return n.windows.test(t)?Qe.WINDOWS:n.macOS.test(t)?Qe.MACOS:n.linux.test(t)?Qe.LINUX:null}}class Zr extends aa{constructor(t){super(Qe.WINDOWS,Mm,t)}}class qr extends aa{constructor(t){super(Qe.MACOS,Bm,t)}}class ei extends aa{constructor(t){super(Qe.LINUX,Km,t)}}const Ov=new ei([new 
Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Pv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Nv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),_v=new Cd([Nv,Pv,Ov]),Ev=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_apt.html"},os:[re.UBUNTU_18,re.UBUNTU_20,re.UBUNTU_22]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_yum.html"}}),new _e,new 
Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Cv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),jv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new 
nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Iv=new Xm([jv,Cv,Ev]),Lv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-apt.html"},os:[re.UBUNTU_20,re.UBUNTU_22,re.UBUNTU_24]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-yum.html"}}),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}}),new Sv({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-zypper.html"}}),new 
yv({linksSet:{installation:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"},downloadLink:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"})]),Rv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Tv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new 
rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Av=new jd([Tv,Rv,Lv]),Dv=new Gm([Av.default(),_v,Iv]),Uv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Fv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),zv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Vv=new Cd([zv,Fv,Uv]),bv=new ei([new 
Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),$v=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos"}),new _e,new Ne]),Mv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),Bv=new jd([Mv,$v,bv]),Kv=new Wm([Bv.default(),Vv]),Hv=new Xr({level:T.ROOT,key:_d.ROOT,metadata:{title:"ROOT"}},[Dv.default(),Kv]).default();function Gv(e,t){var i,o;if(t.key===A.DOCKER||!t.footnoteLevel)return e;const n=(i=e[t.footnoteLevel])==null?void 
0:i.selected,r=(o=e[t.footnoteLevel])==null?void 0:o.nodes;return!n||!r||((Array.isArray(n)?[...n,...r]:[n]).forEach(s=>s.hasFootnote=!0),r.forEach(s=>s.checked&&(s.hasFootnote=!0))),e}class Wv{constructor(){ze(this,"_root",Hv)}getState(){try{return this._getState()}catch(t){return console.error(t),this._selectDefaults(this._root),this._getState()}}_getState(){const t=this._root.children,n=this._get_selected(t),r=n.children,i=this._get_selected(r),{systems:o,system:s}=this._processVersion(i),l=s.children,a=this._get_selected(l),c={[T.PACKAGE]:{nodes:t.map(p=>p.toOption()),selected:n.toOption()},[T.VERSION]:{nodes:r.map(p=>p.toOption()),selected:i.toOption()},[T.OP_SYSTEM]:{nodes:o.map(p=>p.toOption()),selected:s.toOption()},[T.DISTRIBUTION]:{nodes:l.map(p=>p.toOption()),selected:a.toOption()}};return Gv(c,a)}_get_selected(t){t.some(({checked:r})=>r)||this._selectDefaultsForLevel(t[0].level);const n=t.find(({checked:r})=>r);if(!n)throw new Error("Not valid tree");return n}_processVersion(t){const n=t.children,r=this._get_selected(n);return{systems:n,system:r}}setState(t){this._setState(t)}_setState(t,n=this._root){if(!n.children.length)return;const r=n.children[0].level,i=Yv(t[r]);n.children.forEach(o=>o.checked=i.includes(o.key)),n.children.forEach(o=>this._setState(t,o))}select(t,n){return this._select(t,n),this.getState()}_select(t,n,r=this._root){var i;if(((i=r.children[0])==null?void 0:i.level)!==t){r.children.forEach(o=>this._select(t,n,o));return}if(r.childrenSelector){r.childrenSelector(r.children,n);return}r.children.forEach(o=>o.checked=o.key===n)}_selectDefaultsForLevel(t,n=this._root){if(n.children.length){if(n.children[0].level!==t){n.children.forEach(r=>this._selectDefaultsForLevel(t,r));return}this._selectDefaults(n)}}_selectDefaults(t){t.children.forEach(n=>{n.checked=n.isDefault,this._selectDefaults(n)})}}const _n=new Wv;function Yv(e){const t=[];return Array.isArray(e)?t.push(...e):e&&t.push(e),t}function 
Ad(e,{serializeVersion:t}={serializeVersion:!0}){const n=[[T.PACKAGE,e.PACKAGE.selected.key],[T.VERSION,t?e.VERSION.selected.key:null],[T.OP_SYSTEM,e.OP_SYSTEM.selected.key],[T.DISTRIBUTION,e.DISTRIBUTION.selected.key]],r=new URLSearchParams;for(const[i,o]of n)o&&r.set(i,o);return r}function Dd(e){function t(n,r){const i=e.get(n);if(!i)throw new Error(`Cannot extract value for: ${n}`);if(!r[i])throw new Error(`Bad node key for: ${n}`);return r[i]}try{return{[T.PACKAGE]:t(T.PACKAGE,Se),[T.VERSION]:e.has(T.VERSION)?t(T.VERSION,wn):null,[T.OP_SYSTEM]:t(T.OP_SYSTEM,Qe),[T.DISTRIBUTION]:t(T.DISTRIBUTION,A)}}catch(n){return console.log(`Cannot restore state from url due to error "${n}"`),null}}function Qv(){const e=window.parent;if(!e.location.search)return null;const t=new URLSearchParams(e.location.search);return Dd(t)}function Jv(e,t,n,{serializeVersion:r}={serializeVersion:!0}){F.useEffect(()=>{const i=window.parent,o=Ad(t,{serializeVersion:r}).toString(),s=new URL(i.location.toString());if(!s.search){s.search=o,i.history.replaceState(null,"",s);return}s.search.slice(1)!==o&&(s.search=o,i.history.pushState(null,"",s))}),parent.onpopstate=()=>{const i=window.parent,o=new URLSearchParams(i.location.search),s=Dd(o);s&&(e.setState(s),n(e.getState()))}}const os=function(e){let t,n=!1;return function(...r){return n||(t=e(r),n=!0),t}};function Xv(e){var t,n;return typeof((n=(t=e.wap_tms)==null?void 0:t.custom)==null?void 0:n.trackComponentClick)!="function"?null:e.wap_tms.custom.trackComponentClick.bind(e.wap_tms.custom)}class Zv{constructor(){ze(this,"_window");ze(this,"_consoleNotification",{notInitialized:os(()=>console.log("Adobe analytics is not initialized")),notFound:os(()=>console.log("Adobe analytics not found on a page")),devMode:os(()=>console.log("Analytics in dev mode"))});ze(this,"_send",t=>{if(!this._window){this._consoleNotification.notInitialized();return}const 
n=Ad(_n.getState()).toString(),r=Xv(this._window);if(!r){this._consoleNotification.notFound();return}try{r(t,n)}catch(i){console.error(i)}})}initialize(t){this._window=t}install(){this._send("install")}combinationView(){this._send("combination-view")}}const He=new Zv;function qv(){const e=Qv();e&&_n.setState(e);const t=F.createContext((r,i)=>{_n.select(r,i)});function n(){const[r,i]=F.useState(_n.getState());return Jv(_n,r,i),[r,(o,s)=>i(_n.select(o,s))]}return{SelectorContext:t,useSelector:n}}async function ey(e){e&&(navigator.clipboard?await navigator.clipboard.writeText(e):ty(e))}function ty(e){const t=ny(e);document.body.append(t),t.select(),document.execCommand("copy"),t.remove()}function ny(e){const t=document.createElement("textarea");t.style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style.left="-9999px";const n=window.pageYOffset||document.documentElement.scrollTop;return t.style.top=`${n}px`,t.setAttribute("readonly",""),t.value=e,t}function ry(){return m.jsxs("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 205 205",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:[m.jsx("path",{fill:"none",stroke:"currentColor",strokeWidth:"10",d:"M 50 145 a 15 15 0 0 1 -15 -15 v -90 a 15 15 0 0 1 15 -15 h 70 a 15 15 0 0 1 15 15 v 5"}),m.jsx("rect",{x:"65",y:"60",width:"100",height:"120",rx:"15",fill:"none",stroke:"currentColor",strokeWidth:"10"})]})}function iy(){return m.jsx("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 200 200",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:m.jsx("path",{strokeLinejoin:"round",strokeLinecap:"round",fill:"none",stroke:"currentColor",strokeWidth:"15",d:"M 40 100 L 90 150 L 170 40"})})}const b=({comment:e,command:t,onCopy:n})=>{const[r,i]=F.useState(!1),o=async()=>{r||(await ey(t),i(!0),setTimeout(()=>i(!1),1500),n==null||n())};return 
m.jsxs("div",{className:"st-code-snippet","data-cy":"instructions-step",children:[e&&m.jsx(Ud,{children:e}),m.jsxs("div",{"data-cy":"command",children:[t&&m.jsx("code",{className:"st-code-snippet-content",children:t}),t&&m.jsx("button",{className:"copy-button",type:"button","aria-label":"Copy","data-cy":"copy",onClick:o,children:r?m.jsx(iy,{}):m.jsx(ry,{})})]})]})},Ud=({children:e})=>m.jsxs("pre",{className:"st-code-snippet-comment",children:["# ",e]}),oy=({comment:e,snippets:t})=>m.jsxs("div",{className:"st-code-snippet-multi-line","data-cy":"command",children:[e&&m.jsx(Ud,{children:e}),t.map(n=>m.jsx(b,{...n},n.command))]});function sy(e){return e.host==="docs.openvino.ai"}const ss="production.docs.en",Fd=(ss==null?void 0:ss.includes("idz"))||!1,ls={link:"spark-hyperlink spark-hyperlink-primary spark-hyperlink-standard spark-focus-visible spark-focus-visible-self spark-focus-visible-snap spark-focus-visible-background",button:"spark-button spark-button-action spark-button-size-m spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",buttonContent:"spark-button-content"},we=({href:e,children:t,type:n="link",testId:r="link",onClick:i})=>{const o=!Fd&&sy(new URL(e))?"_parent":"_blank";return n==="link"?m.jsx("a",{href:e,className:ls.link,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t}):m.jsx("span",{className:ls.button,children:m.jsx("span",{className:ls.buttonContent,children:m.jsx("a",{href:e,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t})})})},ly={heading:"spark-heading spark-font-200"},De=({title:e,accent:t=!1,dashed:n=!1,children:r,testId:i})=>m.jsxs("div",{className:`st-section ${t?"st-section-accent":""} ${n?"st-section-dashed":""}`,"data-cy":i,children:[m.jsx("span",{className:`st-section-title 
${ly.heading}`,children:e}),m.jsx("div",{className:"st-section-content",children:F.Children.map(r,o=>m.jsx(ay,{children:o}))})]}),ay=({children:e})=>m.jsx("div",{className:"st-section-content-row",children:e}),uy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.apt.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={comment:m.jsxs(z,{ns:"translation",i18nKey:"distributions.apt.steps.addRepository",children:[m.jsx("b",{children:"Step 3:"})," Add the repository via the following command"]}),snippets:i.getAddRepositoryCommands(e,t.data.os).map(({ubuntuVersionNumber:l,command:a})=>({comment:`Ubuntu ${l}`,command:a}))},s={downloadKey:{comment:m.jsxs(z,{t:n,i18nKey:"download",values:{filename:i.keyFilename},children:[m.jsx("b",{children:"Step 1:"})," Download the ",m.jsx(we,{href:i.keyHref,children:i.keyFilename}),". You can also use the following command"]}),command:i.downloadKeyCommand},addKey:{comment:m.jsxs(z,{t:n,i18nKey:"addKey",children:[m.jsx("b",{children:"Step 2:"})," Add this key to the system keyring"]}),command:i.addKeyCommand},addRepository:o,updatePackages:{comment:m.jsxs(z,{t:n,i18nKey:"updateList",children:[m.jsx("b",{children:"Step 4:"})," Update the list of packages via the update command"]}),command:i.updatePackagesCommand},verifyAptCache:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 5:"})," Verify that the APT repository is properly set up. 
Run the apt-cache command to see a list of all available OpenVINO packages and components"]}),command:i.verifyAptCacheCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 6:"})," Install OpenVINO Runtime"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.downloadKey}),m.jsx(b,{...s.addKey}),m.jsx(oy,{...s.addRepository}),m.jsx(b,{...s.updatePackages}),m.jsx(b,{...s.verifyAptCache}),m.jsx(b,{...s.install})]})},cy=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.brew.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},fy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conan.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,{txtFilename:o,cmakeFilename:s}=i,l={createConanFile:{comment:m.jsxs(z,{t:n,i18nKey:"createConanFile",values:{txtFilename:o},children:[m.jsx("b",{children:"Step 1:"})," Create a ",m.jsx("b",{children:o})," file for your OpenVINO project and add “openvino” dependency in there"]}),command:i.conanTXTContent(e)},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",values:{cmakeFilename:s},children:[m.jsx("b",{children:"Step 2:"})," Run the command below to create ",m.jsx("b",{children:s})," file, which will be used to compile your project with OpenVINO"]}),command:i.install,onCopy:()=>He.install()},compile:{comment:m.jsxs(z,{t:n,i18nKey:"compile",children:[m.jsx("b",{children:"Step 3:"})," Configure and compile your project with OpenVINO"]}),command:i.compile}};return 
m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...l.createConanFile}),m.jsx(b,{...l.install}),m.jsx(b,{...l.compile})]})},dy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conda.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={createEnv:{comment:m.jsxs(z,{t:n,i18nKey:"createEnv",children:[m.jsx("b",{children:"Step 1:"})," Create the Anaconda environment (Python 3.10 used as an example)"]}),command:i.createEnv},activateEnv:{comment:m.jsxs(z,{t:n,i18nKey:"activateEnv",children:[m.jsx("b",{children:"Step 2:"})," Activate the Anaconda environment"]}),command:i.activateEnv},upgradePip:{comment:m.jsxs(z,{t:n,i18nKey:"update",children:[m.jsx("b",{children:"Step 3:"})," Update the Anaconda to latest version"]}),command:i.update},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:i.getInstall(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.createEnv}),m.jsx(b,{...o.activateEnv}),m.jsx(b,{...o.upgradePip}),m.jsx(b,{...o.install})]})},as=({ovPackage:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.download"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),i={[A.ARCHIVE]:e.key===Se.OPENVINO_BASE?n("downloadArchives"):n("downloadArchivesGenAI"),[A.DOCKER]:n("gotoDocker"),[A.SNAP]:n("gotoInstallInstruction")}[t.key],o=m.jsxs(m.Fragment,{children:[n("useFollowingLink"),m.jsx("br",{}),m.jsx("b",{children:m.jsx(we,{href:t.data.downloadLink,testId:"download-button",onClick:()=>He.install(),children:i})})]});return 
m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{comment:o})})},py=({ovPackage:e,version:t,distribution:n})=>{const{t:r}=M("translation",{keyPrefix:"distributions.githubGitee"}),{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),o={clone:{comment:m.jsxs(z,{t:r,i18nKey:"steps.useGitClone",children:[m.jsx("b",{children:"Step 1:"})," Use Git to clone the OpenVINO toolkit repository"]}),command:n.data.commands.getCloneCommand(e,t),onCopy:()=>He.install()},build:{comment:m.jsxs(z,{t:r,i18nKey:"steps.buildInstructions",children:[m.jsx("b",{children:"Step 2:"})," Follow the ",m.jsx(we,{href:n.data.links.getBuildInstructionsLink(e,t),testId:"build-instructions-link",children:"instructions to build from source"})]})}};return m.jsxs(De,{title:i("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.clone}),m.jsx(b,{...o.build})]})},hy=({distribution:e,version:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.npm.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=e.data,o={install:{comment:m.jsx(z,{t:n,i18nKey:"install",children:"Download and install the package"}),command:i.getInstall(t),onCopy:()=>He.install()}};return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...o.install})})},gy=({ovPackage:e,os:t,version:n,distribution:r})=>{const{t:i}=M("translation",{keyPrefix:"distributions.pip.steps"}),{t:o}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:s}=r.data,l=s.getCreateVenvCommand(t,n),a=s.getActivateVenvCommand(t,n),c=s.getInstallCommand({ovPackage:e,os:t,version:n}),p={createEnv:{comment:m.jsxs(z,{t:i,i18nKey:"createVenv",children:[m.jsx("b",{children:"Step 1:"})," Create virtual environment"]}),command:l},activateEnv:{comment:m.jsxs(z,{t:i,i18nKey:"activateVenv",children:[m.jsx("b",{children:"Step 2:"})," Activate virtual environment"]}),command:a},upgradePip:{comment:m.jsxs(z,{t:i,i18nKey:"upgradePip",children:[m.jsx("b",{children:"Step 
3:"})," Upgrade pip to latest version"]}),command:s.upgradeCommand},install:{comment:m.jsxs(z,{t:i,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:c,onCopy:()=>He.install()}};return m.jsxs(De,{title:o("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...p.createEnv}),m.jsx(b,{...p.activateEnv}),m.jsx(b,{...p.upgradePip}),m.jsx(b,{...p.install})]})},my=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.vcpkg.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},vy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.yum.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{yumYear:i}=e.metadata,{commands:o}=t.data,s={createRepo:{comment:m.jsxs(z,{t:n,i18nKey:"createRepoFile",children:[m.jsx("b",{children:"Step 1:"})," Create the YUM repo file in the /tmp directory as a normal user"]}),command:o.getCreateRepoCommand(e)},moveRepoFile:{comment:m.jsxs(z,{t:n,i18nKey:"moveFile",values:{year:i,directory:o.directory},children:[m.jsx("b",{children:"Step 2:"})," Move the new openvino-",{year:i},".repo file to the YUM configuration directory ",m.jsx("b",{children:o.directory})]}),command:o.getMoveRepoFileCommand(e)},verifyRepo:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 3:"})," Verify that the new repo is properly setup by running the following command"]}),command:o.verifyRepoCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Install OpenVINO Runtime"]}),command:o.getInstallCommand(e),onCopy:()=>He.install()}};return 
m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.createRepo}),m.jsx(b,{...s.moveRepoFile}),m.jsx(b,{...s.verifyRepo}),m.jsx(b,{...s.install})]})},yy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.zypper.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={addRepo:{comment:m.jsxs(z,{t:n,i18nKey:"addRepo",children:[m.jsx("b",{children:"Step 1:"})," Create a ZYPPER repository file with the command below"]}),command:i.addRepo},refresh:{comment:m.jsxs(z,{t:n,i18nKey:"refresh",children:[m.jsx("b",{children:"Step 2:"})," Refresh repositories"]}),command:i.refresh},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 3:"})," Install OpenVINO"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.addRepo}),m.jsx(b,{...o.refresh}),m.jsx(b,{...o.install})]})},wy=({state:e})=>{const t={ovPackage:e.PACKAGE.selected,os:e.OP_SYSTEM.selected,version:e.VERSION.selected,distribution:e.DISTRIBUTION.selected};if(t.distribution.key===A.PIP)return m.jsx(gy,{...t,distribution:t.distribution});if(t.distribution.key===A.ARCHIVE)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.DOCKER)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.GITHUB||t.distribution.key===A.GITEE)return m.jsx(py,{...t,distribution:t.distribution});if(t.distribution.key===A.APT)return m.jsx(uy,{...t,distribution:t.distribution});if(t.distribution.key===A.YUM)return m.jsx(vy,{...t,distribution:t.distribution});if(t.distribution.key===A.CONDA)return m.jsx(dy,{...t,distribution:t.distribution});if(t.distribution.key===A.BREW)return m.jsx(cy,{...t,distribution:t.distribution});if(t.distribution.key===A.VCPKG)return m.jsx(my,{...t,distribution:t.distribution});if(t.distribution.key===A.CONAN)return 
m.jsx(fy,{...t,distribution:t.distribution});if(t.distribution.key===A.NPM)return m.jsx(hy,{...t,distribution:t.distribution});if(t.distribution.key===A.ZYPPER)return m.jsx(yy,{...t,distribution:t.distribution});if(t.distribution.key===A.SNAP)return m.jsx(as,{...t,distribution:t.distribution});const n=t.distribution;throw new Error(`${n}`)};function ky(){const{t:e}=M("common",{keyPrefix:"relatedTools"}),{t}=M("translation");return m.jsx(De,{title:t("selectorForm.titles.relatedTools"),testId:"relatedTools",accent:!0,dashed:!0,children:m.jsxs("div",{className:"st-related-tools-links",children:[m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino_notebooks",testId:"notebooks-link",children:e("OpenVINONotebooks")}),m.jsx(we,{href:"https://huggingface.co/docs/optimum/main/intel/openvino/inference",testId:"hf_optimum-link",children:"Hugging Face + Optimum Intel"}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"tokenizers",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html",testId:"openvino_tokenizers-link",children:"OpenVINO Tokenizers"}),"to streamline tokenizer conversion"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"nncf",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/compressing-models-during-training.html",testId:"nncf-link",children:"NNCF"}),"for implementing compression algorithms on models"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"ovms",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/ovms_what_is_openvino_model_server.html",testId:"ovms-link",children:"OVMS"}),"for serving models optimized for deployment"]})})]})})}function Sy({state:e}){const t=e.PACKAGE.selected,n=e.DISTRIBUTION.selected,r=e.VERSION.selected,{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),{t:o}=M("common",{keyPrefix:"resources"});let s=m.jsx(m.Fragment,{});if(A.GITHUB===n.key||A.GITEE===n.key){const 
l=n.key===A.GITHUB?t.key===Se.OPENVINO_BASE?o("githubRepository"):o("githubGenAIRepository"):t.key===Se.OPENVINO_BASE?o("giteeRepository"):o("giteeGenAIRepository");s=m.jsxs(m.Fragment,{children:[m.jsx(we,{href:n.data.links.getBuildInstructionsLink(t,r),testId:"install-instructions-link",children:o("installationInstructions")}),m.jsx(we,{href:n.data.links.getRepositoryLink(t,r),testId:"repository-link",children:l})]})}else s=m.jsx(we,{href:n.data.linksSet.installation,testId:"install-instructions-link",children:o("installationInstructions")});return m.jsx(De,{title:i("resources"),testId:"resources",accent:!0,children:m.jsxs("div",{className:"st-resources-links",children:[m.jsxs("div",{children:[s,m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino/releases",testId:"previous-releases-link",children:o("prevReleases")}),m.jsx(we,{href:r.metadata.systemRequirementsLink,testId:"system-requirements-link",children:o("systemRequirements")})]}),m.jsxs("div",{children:[m.jsx(we,{href:r.metadata.getStartedLink,testId:"get-started-link",children:o("getStarted")}),m.jsx(we,{href:r.metadata.troubleshootingLink,testId:"troubleshooting-link",children:o("troubleshooting")})]})]})})}const sn={toggleButton:"spark-button spark-button-size-l spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",toggleButtonGroup:"spark-button-group spark-button-group-orientation-horizontal spark-button-group-align-start spark-button-group-spacing-l",actionButton:"spark-button-action",secondaryButton:"spark-button-secondary",disabledButton:"spark-button-disabled",buttonContent:"spark-button-content",fontXs:"spark-font-25"},xy=({onClick:e,checked:t=!1,disabled:n=!1,title:r,subtitle:i,value:o})=>m.jsx("button",{className:`${sn.toggleButton} ${t?sn.actionButton:sn.secondaryButton} ${n&&sn.disabledButton}`,type:"button",role:"radio","aria-checked":t,onClick:()=>e==null?void 
0:e(),"data-cy":o,"aria-label":r,children:m.jsxs("span",{className:sn.buttonContent,children:[m.jsx("span",{className:"title",children:r}),i&&m.jsx("span",{className:`${sn.fontXs} subtitle`,children:i})]})}),Oy=({children:e,className:t})=>m.jsx("div",{className:`option-button-group ${t||""} ${sn.toggleButtonGroup}`,children:e});function ki({title:e,options:t,level:n}){const r=F.useContext(zd),i=t.map(({level:o,key:s,checked:l,metadata:a})=>m.jsx(xy,{value:`${o}_${s}`,checked:l,title:a.title,subtitle:a.subtitle,onClick:()=>r(o,s)},s));return m.jsx(De,{title:e,testId:n,children:m.jsx(Oy,{children:i})})}function Py({state:e}){const t=e.PACKAGE.nodes,n=e.VERSION.nodes,r=e.OP_SYSTEM.nodes,i=e.DISTRIBUTION.nodes;F.useEffect(()=>He.combinationView(),[e]);const{t:o}=M("translation",{keyPrefix:"selectorForm.titles"});return m.jsxs(m.Fragment,{children:[m.jsx(ki,{title:o("package"),options:t,level:T.PACKAGE}),m.jsx(ki,{title:o("version"),options:n,level:T.VERSION}),m.jsx(ki,{title:o("os"),options:r,level:T.OP_SYSTEM}),m.jsx(ki,{title:o("distribution"),options:i,level:T.DISTRIBUTION})]})}const{SelectorContext:zd,useSelector:Ny}=qv();He.initialize(window.parent);function _y(){const[e,t]=Ny();return m.jsx("div",{className:`st-responsive-container ${Fd?"idz-page":""}`,children:m.jsxs(zd.Provider,{value:t,children:[m.jsx(Py,{state:e}),m.jsx(wy,{state:e}),m.jsx(Sy,{state:e}),m.jsx(ky,{})]})})}ds.createRoot(document.getElementById("root")).render(m.jsx(np.StrictMode,{children:m.jsx(_y,{})})); +EOF`,getMoveRepoFileCommand:e=>`sudo mv /tmp/openvino-${e.metadata.yumYear}.repo ${Zu}`,verifyRepoCommand:"yum repolist | grep -i openvino",getInstallCommand:e=>`sudo yum install openvino-${e.metadata.yumVersion}`};class Sv extends Ae{constructor(t){super({level:T.DISTRIBUTION,key:A.ZYPPER,metadata:{title:"ZYPPER",subtitle:de("distributions.CAPIOnly")}}),this._data=t}get data(){return{...this._data,commands:xv}}}const xv={addRepo:"sudo zypper addrepo 
https://download.opensuse.org/repositories/science/openSUSE_Tumbleweed/science.repo",refresh:"sudo zypper refresh",getInstallCommand:({metadata:e})=>`sudo zypper install openvino-devel-${e.zypperVersion} openvino-sample-${e.zypperVersion}`};class aa extends Xr{constructor(t,n,r){super({level:T.OP_SYSTEM,key:t,metadata:n},r),this._setDefaultOS()}_setDefaultOS(){const t=this._detectOS()||Qe.WINDOWS;this.key===t&&this.default()}_detectOS(){const{userAgent:t}=navigator,n={windows:/(Windows|Win)/g,macOS:/(Macintosh|Mac)/g,linux:/(Linux|X11)/g};return n.windows.test(t)?Qe.WINDOWS:n.macOS.test(t)?Qe.MACOS:n.linux.test(t)?Qe.LINUX:null}}class Zr extends aa{constructor(t){super(Qe.WINDOWS,Mm,t)}}class qr extends aa{constructor(t){super(Qe.MACOS,Bm,t)}}class ei extends aa{constructor(t){super(Qe.LINUX,Km,t)}}const Ov=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Pv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),Nv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new 
Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/latest"}),new _e,new Ne]),_v=new Cd([Nv,Pv,Ov]),Ev=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_apt.html"},os:[re.UBUNTU_18,re.UBUNTU_20,re.UBUNTU_22]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_yum.html"}}),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Cv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/macos"}),new _e,new Ne,new 
rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),jv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_from_archive_windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2023.3/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"},downloadLink:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_docker.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conda.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2023.3/openvino_docs_install_guides_installing_openvino_conan.html"}})]),Iv=new Xm([jv,Cv,Ev]),Lv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-linux.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/linux"}).includesNPUPlugin(),new Id({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-apt.html"},os:[re.UBUNTU_20,re.UBUNTU_22,re.UBUNTU_24]}),new Td({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-yum.html"}}),new _e,new Ne,new 
ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}}),new Sv({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-zypper.html"}}),new yv({linksSet:{installation:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"},downloadLink:"https://docs.openvino.ai/2024/openvino-workflow/deployment-locally/integrate-openvino-with-ubuntu-snap.html"})]),Rv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-macos.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/macos"}),new _e,new Ne,new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new Ld({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-brew.html"}}),new 
la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Tv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-pip.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-archive-windows.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"}),new rr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conda.html"}}),new la({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-vcpkg.html"}}),new nr({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-conan.html"}}),new sa({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-npm.html"}})]),Av=new jd([Tv,Rv,Lv]),Dv=new Gm([Av.default(),_v,Iv]),Uv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new 
Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Fv=new qr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),zv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#pypi-installation"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/nightly/get-started/install-openvino/install-openvino-genai.html#archive-installation"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/nightly/latest"}),new _e,new Ne]),Vv=new Cd([zv,Fv,Uv]),bv=new ei([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/linux"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),$v=new qr([new 
Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/macos"}),new _e,new Ne]),Mv=new Zr([new Ee({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"}},{pythonAPI:!0}).includesNPUPlugin().default(),new Pe({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-genai.html"},downloadLink:"https://storage.openvinotoolkit.org/repositories/openvino_genai/packages/2024.6/windows"}).includesNPUPlugin(),new _e,new Ne,new ir({linksSet:{installation:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"},downloadLink:"https://docs.openvino.ai/2024/get-started/install-openvino/install-openvino-docker-linux.html"})]),Bv=new jd([Mv,$v,bv]),Kv=new Wm([Bv.default(),Vv]),Hv=new Xr({level:T.ROOT,key:_d.ROOT,metadata:{title:"ROOT"}},[Dv.default(),Kv]).default();function Gv(e,t){var i,o;if(t.key===A.DOCKER||!t.footnoteLevel)return e;const n=(i=e[t.footnoteLevel])==null?void 0:i.selected,r=(o=e[t.footnoteLevel])==null?void 0:o.nodes;return!n||!r||((Array.isArray(n)?[...n,...r]:[n]).forEach(s=>s.hasFootnote=!0),r.forEach(s=>s.checked&&(s.hasFootnote=!0))),e}class Wv{constructor(){ze(this,"_root",Hv)}getState(){try{return this._getState()}catch(t){return console.error(t),this._selectDefaults(this._root),this._getState()}}_getState(){const 
t=this._root.children,n=this._get_selected(t),r=n.children,i=this._get_selected(r),{systems:o,system:s}=this._processVersion(i),l=s.children,a=this._get_selected(l),c={[T.PACKAGE]:{nodes:t.map(p=>p.toOption()),selected:n.toOption()},[T.VERSION]:{nodes:r.map(p=>p.toOption()),selected:i.toOption()},[T.OP_SYSTEM]:{nodes:o.map(p=>p.toOption()),selected:s.toOption()},[T.DISTRIBUTION]:{nodes:l.map(p=>p.toOption()),selected:a.toOption()}};return Gv(c,a)}_get_selected(t){t.some(({checked:r})=>r)||this._selectDefaultsForLevel(t[0].level);const n=t.find(({checked:r})=>r);if(!n)throw new Error("Not valid tree");return n}_processVersion(t){const n=t.children,r=this._get_selected(n);return{systems:n,system:r}}setState(t){this._setState(t)}_setState(t,n=this._root){if(!n.children.length)return;const r=n.children[0].level,i=Yv(t[r]);n.children.forEach(o=>o.checked=i.includes(o.key)),n.children.forEach(o=>this._setState(t,o))}select(t,n){return this._select(t,n),this.getState()}_select(t,n,r=this._root){var i;if(((i=r.children[0])==null?void 0:i.level)!==t){r.children.forEach(o=>this._select(t,n,o));return}if(r.childrenSelector){r.childrenSelector(r.children,n);return}r.children.forEach(o=>o.checked=o.key===n)}_selectDefaultsForLevel(t,n=this._root){if(n.children.length){if(n.children[0].level!==t){n.children.forEach(r=>this._selectDefaultsForLevel(t,r));return}this._selectDefaults(n)}}_selectDefaults(t){t.children.forEach(n=>{n.checked=n.isDefault,this._selectDefaults(n)})}}const _n=new Wv;function Yv(e){const t=[];return Array.isArray(e)?t.push(...e):e&&t.push(e),t}function Ad(e,{serializeVersion:t}={serializeVersion:!0}){const n=[[T.PACKAGE,e.PACKAGE.selected.key],[T.VERSION,t?e.VERSION.selected.key:null],[T.OP_SYSTEM,e.OP_SYSTEM.selected.key],[T.DISTRIBUTION,e.DISTRIBUTION.selected.key]],r=new URLSearchParams;for(const[i,o]of n)o&&r.set(i,o);return r}function Dd(e){function t(n,r){const i=e.get(n);if(!i)throw new Error(`Cannot extract value for: ${n}`);if(!r[i])throw new 
Error(`Bad node key for: ${n}`);return r[i]}try{return{[T.PACKAGE]:t(T.PACKAGE,Se),[T.VERSION]:e.has(T.VERSION)?t(T.VERSION,wn):null,[T.OP_SYSTEM]:t(T.OP_SYSTEM,Qe),[T.DISTRIBUTION]:t(T.DISTRIBUTION,A)}}catch(n){return console.log(`Cannot restore state from url due to error "${n}"`),null}}function Qv(){const e=window.parent;if(!e.location.search)return null;const t=new URLSearchParams(e.location.search);return Dd(t)}function Jv(e,t,n,{serializeVersion:r}={serializeVersion:!0}){F.useEffect(()=>{const i=window.parent,o=Ad(t,{serializeVersion:r}).toString(),s=new URL(i.location.toString());if(!s.search){s.search=o,i.history.replaceState(null,"",s);return}s.search.slice(1)!==o&&(s.search=o,i.history.pushState(null,"",s))}),parent.onpopstate=()=>{const i=window.parent,o=new URLSearchParams(i.location.search),s=Dd(o);s&&(e.setState(s),n(e.getState()))}}const os=function(e){let t,n=!1;return function(...r){return n||(t=e(r),n=!0),t}};function Xv(e){var t,n;return typeof((n=(t=e.wap_tms)==null?void 0:t.custom)==null?void 0:n.trackComponentClick)!="function"?null:e.wap_tms.custom.trackComponentClick.bind(e.wap_tms.custom)}class Zv{constructor(){ze(this,"_window");ze(this,"_consoleNotification",{notInitialized:os(()=>console.log("Adobe analytics is not initialized")),notFound:os(()=>console.log("Adobe analytics not found on a page")),devMode:os(()=>console.log("Analytics in dev mode"))});ze(this,"_send",t=>{if(!this._window){this._consoleNotification.notInitialized();return}const n=Ad(_n.getState()).toString(),r=Xv(this._window);if(!r){this._consoleNotification.notFound();return}try{r(t,n)}catch(i){console.error(i)}})}initialize(t){this._window=t}install(){this._send("install")}combinationView(){this._send("combination-view")}}const He=new Zv;function qv(){const e=Qv();e&&_n.setState(e);const t=F.createContext((r,i)=>{_n.select(r,i)});function n(){const[r,i]=F.useState(_n.getState());return Jv(_n,r,i),[r,(o,s)=>i(_n.select(o,s))]}return{SelectorContext:t,useSelector:n}}async 
function ey(e){e&&(navigator.clipboard?await navigator.clipboard.writeText(e):ty(e))}function ty(e){const t=ny(e);document.body.append(t),t.select(),document.execCommand("copy"),t.remove()}function ny(e){const t=document.createElement("textarea");t.style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style.left="-9999px";const n=window.pageYOffset||document.documentElement.scrollTop;return t.style.top=`${n}px`,t.setAttribute("readonly",""),t.value=e,t}function ry(){return m.jsxs("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 205 205",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:[m.jsx("path",{fill:"none",stroke:"currentColor",strokeWidth:"10",d:"M 50 145 a 15 15 0 0 1 -15 -15 v -90 a 15 15 0 0 1 15 -15 h 70 a 15 15 0 0 1 15 15 v 5"}),m.jsx("rect",{x:"65",y:"60",width:"100",height:"120",rx:"15",fill:"none",stroke:"currentColor",strokeWidth:"10"})]})}function iy(){return m.jsx("svg",{version:"1.1",width:"24",height:"24",viewBox:"0 0 200 200",xmlns:"http://www.w3.org/2000/svg",className:"svg-icon",children:m.jsx("path",{strokeLinejoin:"round",strokeLinecap:"round",fill:"none",stroke:"currentColor",strokeWidth:"15",d:"M 40 100 L 90 150 L 170 40"})})}const b=({comment:e,command:t,onCopy:n})=>{const[r,i]=F.useState(!1),o=async()=>{r||(await ey(t),i(!0),setTimeout(()=>i(!1),1500),n==null||n())};return m.jsxs("div",{className:"st-code-snippet","data-cy":"instructions-step",children:[e&&m.jsx(Ud,{children:e}),m.jsxs("div",{"data-cy":"command",children:[t&&m.jsx("code",{className:"st-code-snippet-content",children:t}),t&&m.jsx("button",{className:"copy-button",type:"button","aria-label":"Copy","data-cy":"copy",onClick:o,children:r?m.jsx(iy,{}):m.jsx(ry,{})})]})]})},Ud=({children:e})=>m.jsxs("pre",{className:"st-code-snippet-comment",children:["# 
",e]}),oy=({comment:e,snippets:t})=>m.jsxs("div",{className:"st-code-snippet-multi-line","data-cy":"command",children:[e&&m.jsx(Ud,{children:e}),t.map(n=>m.jsx(b,{...n},n.command))]});function sy(e){return e.host==="docs.openvino.ai"}const ss="production.docs.en",Fd=(ss==null?void 0:ss.includes("idz"))||!1,ls={link:"spark-hyperlink spark-hyperlink-primary spark-hyperlink-standard spark-focus-visible spark-focus-visible-self spark-focus-visible-snap spark-focus-visible-background",button:"spark-button spark-button-action spark-button-size-m spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",buttonContent:"spark-button-content"},we=({href:e,children:t,type:n="link",testId:r="link",onClick:i})=>{const o=!Fd&&sy(new URL(e))?"_parent":"_blank";return n==="link"?m.jsx("a",{href:e,className:ls.link,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t}):m.jsx("span",{className:ls.button,children:m.jsx("span",{className:ls.buttonContent,children:m.jsx("a",{href:e,target:o,rel:"noreferrer noopener","data-cy":r,onClick:()=>i==null?void 0:i(),children:t})})})},ly={heading:"spark-heading spark-font-200"},De=({title:e,accent:t=!1,dashed:n=!1,children:r,testId:i})=>m.jsxs("div",{className:`st-section ${t?"st-section-accent":""} ${n?"st-section-dashed":""}`,"data-cy":i,children:[m.jsx("span",{className:`st-section-title ${ly.heading}`,children:e}),m.jsx("div",{className:"st-section-content",children:F.Children.map(r,o=>m.jsx(ay,{children:o}))})]}),ay=({children:e})=>m.jsx("div",{className:"st-section-content-row",children:e}),uy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.apt.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={comment:m.jsxs(z,{ns:"translation",i18nKey:"distributions.apt.steps.addRepository",children:[m.jsx("b",{children:"Step 3:"})," Add the repository via the following 
command"]}),snippets:i.getAddRepositoryCommands(e,t.data.os).map(({ubuntuVersionNumber:l,command:a})=>({comment:`Ubuntu ${l}`,command:a}))},s={downloadKey:{comment:m.jsxs(z,{t:n,i18nKey:"download",values:{filename:i.keyFilename},children:[m.jsx("b",{children:"Step 1:"})," Download the ",m.jsx(we,{href:i.keyHref,children:i.keyFilename}),". You can also use the following command"]}),command:i.downloadKeyCommand},addKey:{comment:m.jsxs(z,{t:n,i18nKey:"addKey",children:[m.jsx("b",{children:"Step 2:"})," Add this key to the system keyring"]}),command:i.addKeyCommand},addRepository:o,updatePackages:{comment:m.jsxs(z,{t:n,i18nKey:"updateList",children:[m.jsx("b",{children:"Step 4:"})," Update the list of packages via the update command"]}),command:i.updatePackagesCommand},verifyAptCache:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 5:"})," Verify that the APT repository is properly set up. Run the apt-cache command to see a list of all available OpenVINO packages and components"]}),command:i.verifyAptCacheCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 6:"})," Install OpenVINO Runtime"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.downloadKey}),m.jsx(b,{...s.addKey}),m.jsx(oy,{...s.addRepository}),m.jsx(b,{...s.updatePackages}),m.jsx(b,{...s.verifyAptCache}),m.jsx(b,{...s.install})]})},cy=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.brew.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return 
m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},fy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conan.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,{txtFilename:o,cmakeFilename:s}=i,l={createConanFile:{comment:m.jsxs(z,{t:n,i18nKey:"createConanFile",values:{txtFilename:o},children:[m.jsx("b",{children:"Step 1:"})," Create a ",m.jsx("b",{children:o})," file for your OpenVINO project and add “openvino” dependency in there"]}),command:i.conanTXTContent(e)},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",values:{cmakeFilename:s},children:[m.jsx("b",{children:"Step 2:"})," Run the command below to create ",m.jsx("b",{children:s})," file, which will be used to compile your project with OpenVINO"]}),command:i.install,onCopy:()=>He.install()},compile:{comment:m.jsxs(z,{t:n,i18nKey:"compile",children:[m.jsx("b",{children:"Step 3:"})," Configure and compile your project with OpenVINO"]}),command:i.compile}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...l.createConanFile}),m.jsx(b,{...l.install}),m.jsx(b,{...l.compile})]})},dy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.conda.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={createEnv:{comment:m.jsxs(z,{t:n,i18nKey:"createEnv",children:[m.jsx("b",{children:"Step 1:"})," Create the Anaconda environment (Python 3.10 used as an example)"]}),command:i.createEnv},activateEnv:{comment:m.jsxs(z,{t:n,i18nKey:"activateEnv",children:[m.jsx("b",{children:"Step 2:"})," Activate the Anaconda environment"]}),command:i.activateEnv},upgradePip:{comment:m.jsxs(z,{t:n,i18nKey:"update",children:[m.jsx("b",{children:"Step 3:"})," Update the Anaconda to latest version"]}),command:i.update},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and 
install the package"]}),command:i.getInstall(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.createEnv}),m.jsx(b,{...o.activateEnv}),m.jsx(b,{...o.upgradePip}),m.jsx(b,{...o.install})]})},as=({ovPackage:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.download"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),i={[A.ARCHIVE]:e.key===Se.OPENVINO_BASE?n("downloadArchives"):n("downloadArchivesGenAI"),[A.DOCKER]:n("gotoDocker"),[A.SNAP]:n("gotoInstallInstruction")}[t.key],o=m.jsxs(m.Fragment,{children:[n("useFollowingLink"),m.jsx("br",{}),m.jsx("b",{children:m.jsx(we,{href:t.data.downloadLink,testId:"download-button",onClick:()=>He.install(),children:i})})]});return m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{comment:o})})},py=({ovPackage:e,version:t,distribution:n})=>{const{t:r}=M("translation",{keyPrefix:"distributions.githubGitee"}),{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),o={clone:{comment:m.jsxs(z,{t:r,i18nKey:"steps.useGitClone",children:[m.jsx("b",{children:"Step 1:"})," Use Git to clone the OpenVINO toolkit repository"]}),command:n.data.commands.getCloneCommand(e,t),onCopy:()=>He.install()},build:{comment:m.jsxs(z,{t:r,i18nKey:"steps.buildInstructions",children:[m.jsx("b",{children:"Step 2:"})," Follow the ",m.jsx(we,{href:n.data.links.getBuildInstructionsLink(e,t),testId:"build-instructions-link",children:"instructions to build from source"})]})}};return m.jsxs(De,{title:i("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.clone}),m.jsx(b,{...o.build})]})},hy=({distribution:e,version:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.npm.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=e.data,o={install:{comment:m.jsx(z,{t:n,i18nKey:"install",children:"Download and install the package"}),command:i.getInstall(t),onCopy:()=>He.install()}};return 
m.jsx(De,{title:r("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...o.install})})},gy=({ovPackage:e,os:t,version:n,distribution:r})=>{const{t:i}=M("translation",{keyPrefix:"distributions.pip.steps"}),{t:o}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:s}=r.data,l=s.getCreateVenvCommand(t,n),a=s.getActivateVenvCommand(t,n),c=s.getInstallCommand({ovPackage:e,os:t,version:n}),p={createEnv:{comment:m.jsxs(z,{t:i,i18nKey:"createVenv",children:[m.jsx("b",{children:"Step 1:"})," Create virtual environment"]}),command:l},activateEnv:{comment:m.jsxs(z,{t:i,i18nKey:"activateVenv",children:[m.jsx("b",{children:"Step 2:"})," Activate virtual environment"]}),command:a},upgradePip:{comment:m.jsxs(z,{t:i,i18nKey:"upgradePip",children:[m.jsx("b",{children:"Step 3:"})," Upgrade pip to latest version"]}),command:s.upgradeCommand},install:{comment:m.jsxs(z,{t:i,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Download and install the package"]}),command:c,onCopy:()=>He.install()}};return m.jsxs(De,{title:o("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...p.createEnv}),m.jsx(b,{...p.activateEnv}),m.jsx(b,{...p.upgradePip}),m.jsx(b,{...p.install})]})},my=({distribution:e})=>{const{t}=M("translation",{keyPrefix:"distributions.vcpkg.steps"}),{t:n}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:r}=e.data,i={install:{comment:m.jsx(z,{t,i18nKey:"install",children:"Download and install the package"}),command:r.install,onCopy:()=>He.install()}};return m.jsx(De,{title:n("install"),accent:!0,testId:"instructions",children:m.jsx(b,{...i.install})})},vy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.yum.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{yumYear:i}=e.metadata,{commands:o}=t.data,s={createRepo:{comment:m.jsxs(z,{t:n,i18nKey:"createRepoFile",children:[m.jsx("b",{children:"Step 1:"})," Create the YUM repo file in the /tmp directory as a normal 
user"]}),command:o.getCreateRepoCommand(e)},moveRepoFile:{comment:m.jsxs(z,{t:n,i18nKey:"moveFile",values:{year:i,directory:o.directory},children:[m.jsx("b",{children:"Step 2:"})," Move the new openvino-",{year:i},".repo file to the YUM configuration directory ",m.jsx("b",{children:o.directory})]}),command:o.getMoveRepoFileCommand(e)},verifyRepo:{comment:m.jsxs(z,{t:n,i18nKey:"verify",children:[m.jsx("b",{children:"Step 3:"})," Verify that the new repo is properly setup by running the following command"]}),command:o.verifyRepoCommand},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 4:"})," Install OpenVINO Runtime"]}),command:o.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...s.createRepo}),m.jsx(b,{...s.moveRepoFile}),m.jsx(b,{...s.verifyRepo}),m.jsx(b,{...s.install})]})},yy=({version:e,distribution:t})=>{const{t:n}=M("translation",{keyPrefix:"distributions.zypper.steps"}),{t:r}=M("translation",{keyPrefix:"selectorForm.titles"}),{commands:i}=t.data,o={addRepo:{comment:m.jsxs(z,{t:n,i18nKey:"addRepo",children:[m.jsx("b",{children:"Step 1:"})," Create a ZYPPER repository file with the command below"]}),command:i.addRepo},refresh:{comment:m.jsxs(z,{t:n,i18nKey:"refresh",children:[m.jsx("b",{children:"Step 2:"})," Refresh repositories"]}),command:i.refresh},install:{comment:m.jsxs(z,{t:n,i18nKey:"install",children:[m.jsx("b",{children:"Step 3:"})," Install OpenVINO"]}),command:i.getInstallCommand(e),onCopy:()=>He.install()}};return m.jsxs(De,{title:r("install"),accent:!0,testId:"instructions",children:[m.jsx(b,{...o.addRepo}),m.jsx(b,{...o.refresh}),m.jsx(b,{...o.install})]})},wy=({state:e})=>{const t={ovPackage:e.PACKAGE.selected,os:e.OP_SYSTEM.selected,version:e.VERSION.selected,distribution:e.DISTRIBUTION.selected};if(t.distribution.key===A.PIP)return m.jsx(gy,{...t,distribution:t.distribution});if(t.distribution.key===A.ARCHIVE)return 
m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.DOCKER)return m.jsx(as,{...t,distribution:t.distribution});if(t.distribution.key===A.GITHUB||t.distribution.key===A.GITEE)return m.jsx(py,{...t,distribution:t.distribution});if(t.distribution.key===A.APT)return m.jsx(uy,{...t,distribution:t.distribution});if(t.distribution.key===A.YUM)return m.jsx(vy,{...t,distribution:t.distribution});if(t.distribution.key===A.CONDA)return m.jsx(dy,{...t,distribution:t.distribution});if(t.distribution.key===A.BREW)return m.jsx(cy,{...t,distribution:t.distribution});if(t.distribution.key===A.VCPKG)return m.jsx(my,{...t,distribution:t.distribution});if(t.distribution.key===A.CONAN)return m.jsx(fy,{...t,distribution:t.distribution});if(t.distribution.key===A.NPM)return m.jsx(hy,{...t,distribution:t.distribution});if(t.distribution.key===A.ZYPPER)return m.jsx(yy,{...t,distribution:t.distribution});if(t.distribution.key===A.SNAP)return m.jsx(as,{...t,distribution:t.distribution});const n=t.distribution;throw new Error(`${n}`)};function ky(){const{t:e}=M("common",{keyPrefix:"relatedTools"}),{t}=M("translation");return m.jsx(De,{title:t("selectorForm.titles.relatedTools"),testId:"relatedTools",accent:!0,dashed:!0,children:m.jsxs("div",{className:"st-related-tools-links",children:[m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino_notebooks",testId:"notebooks-link",children:e("OpenVINONotebooks")}),m.jsx(we,{href:"https://huggingface.co/docs/optimum/main/intel/openvino/inference",testId:"hf_optimum-link",children:"Hugging Face + Optimum Intel"}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"tokenizers",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html",testId:"openvino_tokenizers-link",children:"OpenVINO Tokenizers"}),"to streamline tokenizer 
conversion"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"nncf",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/openvino-workflow/model-optimization-guide/compressing-models-during-training.html",testId:"nncf-link",children:"NNCF"}),"for implementing compression algorithms on models"]})}),m.jsx("div",{children:m.jsxs(z,{t:e,i18nKey:"ovms",children:[m.jsx(we,{href:"https://docs.openvino.ai/2024/ovms_what_is_openvino_model_server.html",testId:"ovms-link",children:"OVMS"}),"for serving models optimized for deployment"]})})]})})}function Sy({state:e}){const t=e.PACKAGE.selected,n=e.DISTRIBUTION.selected,r=e.VERSION.selected,{t:i}=M("translation",{keyPrefix:"selectorForm.titles"}),{t:o}=M("common",{keyPrefix:"resources"});let s=m.jsx(m.Fragment,{});if(A.GITHUB===n.key||A.GITEE===n.key){const l=n.key===A.GITHUB?t.key===Se.OPENVINO_BASE?o("githubRepository"):o("githubGenAIRepository"):t.key===Se.OPENVINO_BASE?o("giteeRepository"):o("giteeGenAIRepository");s=m.jsxs(m.Fragment,{children:[m.jsx(we,{href:n.data.links.getBuildInstructionsLink(t,r),testId:"install-instructions-link",children:o("installationInstructions")}),m.jsx(we,{href:n.data.links.getRepositoryLink(t,r),testId:"repository-link",children:l})]})}else s=m.jsx(we,{href:n.data.linksSet.installation,testId:"install-instructions-link",children:o("installationInstructions")});return 
m.jsx(De,{title:i("resources"),testId:"resources",accent:!0,children:m.jsxs("div",{className:"st-resources-links",children:[m.jsxs("div",{children:[s,m.jsx(we,{href:"https://github.com/openvinotoolkit/openvino/releases",testId:"previous-releases-link",children:o("prevReleases")}),m.jsx(we,{href:r.metadata.systemRequirementsLink,testId:"system-requirements-link",children:o("systemRequirements")})]}),m.jsxs("div",{children:[m.jsx(we,{href:r.metadata.getStartedLink,testId:"get-started-link",children:o("getStarted")}),m.jsx(we,{href:r.metadata.troubleshootingLink,testId:"troubleshooting-link",children:o("troubleshooting")})]})]})})}const sn={toggleButton:"spark-button spark-button-size-l spark-focus-visible spark-focus-visible-self spark-focus-visible-snap",toggleButtonGroup:"spark-button-group spark-button-group-orientation-horizontal spark-button-group-align-start spark-button-group-spacing-l",actionButton:"spark-button-action",secondaryButton:"spark-button-secondary",disabledButton:"spark-button-disabled",buttonContent:"spark-button-content",fontXs:"spark-font-25"},xy=({onClick:e,checked:t=!1,disabled:n=!1,title:r,subtitle:i,value:o})=>m.jsx("button",{className:`${sn.toggleButton} ${t?sn.actionButton:sn.secondaryButton} ${n&&sn.disabledButton}`,type:"button",role:"radio","aria-checked":t,onClick:()=>e==null?void 0:e(),"data-cy":o,"aria-label":r,children:m.jsxs("span",{className:sn.buttonContent,children:[m.jsx("span",{className:"title",children:r}),i&&m.jsx("span",{className:`${sn.fontXs} subtitle`,children:i})]})}),Oy=({children:e,className:t})=>m.jsx("div",{className:`option-button-group ${t||""} ${sn.toggleButtonGroup}`,children:e});function ki({title:e,options:t,level:n}){const r=F.useContext(zd),i=t.map(({level:o,key:s,checked:l,metadata:a})=>m.jsx(xy,{value:`${o}_${s}`,checked:l,title:a.title,subtitle:a.subtitle,onClick:()=>r(o,s)},s));return m.jsx(De,{title:e,testId:n,children:m.jsx(Oy,{children:i})})}function Py({state:e}){const 
t=e.PACKAGE.nodes,n=e.VERSION.nodes,r=e.OP_SYSTEM.nodes,i=e.DISTRIBUTION.nodes;F.useEffect(()=>He.combinationView(),[e]);const{t:o}=M("translation",{keyPrefix:"selectorForm.titles"});return m.jsxs(m.Fragment,{children:[m.jsx(ki,{title:o("package"),options:t,level:T.PACKAGE}),m.jsx(ki,{title:o("version"),options:n,level:T.VERSION}),m.jsx(ki,{title:o("os"),options:r,level:T.OP_SYSTEM}),m.jsx(ki,{title:o("distribution"),options:i,level:T.DISTRIBUTION})]})}const{SelectorContext:zd,useSelector:Ny}=qv();He.initialize(window.parent);function _y(){const[e,t]=Ny();return m.jsx("div",{className:`st-responsive-container ${Fd?"idz-page":""}`,children:m.jsxs(zd.Provider,{value:t,children:[m.jsx(Py,{state:e}),m.jsx(wy,{state:e}),m.jsx(Sy,{state:e}),m.jsx(ky,{})]})})}ds.createRoot(document.getElementById("root")).render(m.jsx(np.StrictMode,{children:m.jsx(_y,{})})); diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index 94c0332790663a..d0da8fa4244dd6 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -28,7 +28,7 @@ hardware and environments, on-premises and on-device, in the browser or in the c
  • New GenAI API

    Generative AI in only a few lines of code!

    - Check out our guide + Check out our guide
  • OpenVINO models on Hugging Face!

    @@ -194,6 +194,7 @@ Key Features GET STARTED LEARN OPENVINO - OPENVINO WORKFLOW + HOW TO USE - MAIN WORKFLOW + HOW TO USE - GENERATIVE AI WORKFLOW DOCUMENTATION ABOUT OPENVINO \ No newline at end of file diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp index af609088679e14..006a4e22e06304 100644 --- a/src/frontends/tensorflow/src/frontend.cpp +++ b/src/frontends/tensorflow/src/frontend.cpp @@ -471,7 +471,7 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr "provides conversion extension(s): " << unsupported_ops_from_tokenizers << ". Install OpenVINO Tokenizers, refer to the documentation: " - "https://docs.openvino.ai/2024/learn-openvino/llm_inference_guide/ov-tokenizers.html \n"; + "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; } } From 0848f8630aca8e33bfbf56b68809d81c3a906c21 Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Fri, 17 Jan 2025 15:57:06 +0100 Subject: [PATCH 36/97] [PT FE] Improve support for complex data type (#28482) ### Details: - *Remove transformations for FFT* - *Use `ComplexTypeMark` to provide information about a complex type* ### Tickets: - *CVS-159375* --------- Signed-off-by: Maxim Vafin Co-authored-by: Roman Kazantsev --- src/frontends/pytorch/src/frontend.cpp | 9 +- src/frontends/pytorch/src/op/complex.cpp | 84 +++++++ src/frontends/pytorch/src/op/fft.cpp | 208 ++++++++++++++++++ src/frontends/pytorch/src/op/permute.cpp | 35 ++- src/frontends/pytorch/src/op/reshape.cpp | 26 ++- src/frontends/pytorch/src/op/size.cpp | 23 +- src/frontends/pytorch/src/op/stft.cpp | 9 +- src/frontends/pytorch/src/op_table.cpp | 21 +- .../transforms/irfftn_complex_replacer.cpp | 164 -------------- .../transforms/irfftn_complex_replacer.hpp | 24 -- .../src/transforms/rfftn_complex_replacer.cpp | 163 -------------- .../src/transforms/rfftn_complex_replacer.hpp | 24 -- src/frontends/pytorch/src/utils.cpp | 24 +- src/frontends/pytorch/src/utils.hpp | 4 
+- .../layer_tests/pytorch_tests/test_permute.py | 43 ++-- .../layer_tests/pytorch_tests/test_reshape.py | 44 ++-- tests/layer_tests/pytorch_tests/test_size.py | 30 ++- tests/layer_tests/pytorch_tests/test_stft.py | 12 +- 18 files changed, 497 insertions(+), 450 deletions(-) create mode 100644 src/frontends/pytorch/src/op/complex.cpp create mode 100644 src/frontends/pytorch/src/op/fft.cpp delete mode 100644 src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp delete mode 100644 src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp delete mode 100644 src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp delete mode 100644 src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index 04ba9a9c92c281..bb69e8fa313130 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -30,7 +30,6 @@ #include "transforms/dict_resolver.hpp" #include "transforms/einsum_list_construct.hpp" #include "transforms/index_loop_getitem_replacer.hpp" -#include "transforms/irfftn_complex_replacer.hpp" #include "transforms/listconstruct_replacer.hpp" #include "transforms/min_max_prim_list_construct_replacer.hpp" #include "transforms/prim_list_construct_pad.hpp" @@ -40,7 +39,6 @@ #include "transforms/quantized_node_remover.hpp" #include "transforms/remove_packing_ops.hpp" #include "transforms/reverseprop_resolver.hpp" -#include "transforms/rfftn_complex_replacer.hpp" #include "transforms/softmax_reshape_elimination.hpp" #include "transforms/string_equality_replacer.hpp" #include "transforms/torchfx_gptq_pattern_replacer.hpp" @@ -69,6 +67,11 @@ std::map get_unconverted_types_from_model(const std::s if (!unconverted_ops_types.count(op_type_it->second)) { unconverted_ops_types.emplace(op_type_it->second, std::move(exception_msg)); } + } else if (const auto& fw_node = ov::as_type_ptr(node)) { + auto op_type = 
std::string(fw_node->get_type_name()); + if (!unconverted_ops_types.count(op_type)) { + unconverted_ops_types.emplace(op_type, "This is OpenVINO internal type."); + } } if (const auto& fw_node = ov::as_type_ptr(node)) { for (size_t i = 0; i < fw_node->get_internal_subgraphs_size(); ++i) { @@ -283,8 +286,6 @@ void FrontEnd::normalize(const std::shared_ptr& model) const { manager.register_pass(); manager.register_pass(); manager.register_pass(); - manager.register_pass(); - manager.register_pass(); manager.register_pass(); manager.register_pass(); manager.register_pass(); diff --git a/src/frontends/pytorch/src/op/complex.cpp b/src/frontends/pytorch/src/op/complex.cpp new file mode 100644 index 00000000000000..8ec0f5435e358b --- /dev/null +++ b/src/frontends/pytorch/src/op/complex.cpp @@ -0,0 +1,84 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/complex_type_mark.hpp" +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/split.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/unsqueeze.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_complex(const NodeContext& context) { + num_inputs_check(context, 2, 2); + auto real = context.get_input(0); + auto imag = context.get_input(1); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + real = context.mark_node(std::make_shared(real, const_neg_1)); + imag = context.mark_node(std::make_shared(imag, const_neg_1)); + + auto complex = context.mark_node(std::make_shared(OutputVector{real, imag}, -1)); + + return {context.mark_node(std::make_shared(complex, complex->get_element_type()))}; +}; + +OutputVector translate_imag(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto 
complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::imag operation expects complex type tensor on input."); + + complex = complex_type_mark->input_value(0); + auto axis = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto imag = context.mark_node(std::make_shared(complex, axis, 2))->output(1); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + return {context.mark_node(std::make_shared(imag, const_neg_1))}; +}; + +OutputVector translate_real(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::real operation expects complex type tensor on input."); + + complex = complex_type_mark->input_value(0); + auto axis = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto real = context.mark_node(std::make_shared(complex, axis, 2))->output(0); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + return {context.mark_node(std::make_shared(real, const_neg_1))}; +}; + +OutputVector translate_view_as_real(const NodeContext& context) { + num_inputs_check(context, 1, 1, true); + auto complex = context.get_input(0); + + auto complex_type_mark = as_type_ptr(complex.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::real operation expects complex type tensor on input."); + + return {complex_type_mark->input_value(0)}; +}; + +OutputVector translate_view_as_complex(const NodeContext& context) { + num_inputs_check(context, 1, 1); + auto complex = context.get_input(0); + + return {context.mark_node(std::make_shared(complex, complex.get_element_type()))}; +}; + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git 
a/src/frontends/pytorch/src/op/fft.cpp b/src/frontends/pytorch/src/op/fft.cpp new file mode 100644 index 00000000000000..0c2eb17c49d305 --- /dev/null +++ b/src/frontends/pytorch/src/op/fft.cpp @@ -0,0 +1,208 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/complex_type_mark.hpp" +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/divide.hpp" +#include "openvino/op/equal.hpp" +#include "openvino/op/gather.hpp" +#include "openvino/op/irdft.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/range.hpp" +#include "openvino/op/rdft.hpp" +#include "openvino/op/reduce_prod.hpp" +#include "openvino/op/reshape.hpp" +#include "openvino/op/scatter_update.hpp" +#include "openvino/op/select.hpp" +#include "openvino/op/shape_of.hpp" +#include "openvino/op/sqrt.hpp" +#include "openvino/op/squeeze.hpp" +#include "openvino/op/subtract.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov::op; + +OutputVector translate_fft_rfftn(const NodeContext& context) { + // aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + num_inputs_check(context, 1, 4); + auto input = context.get_input(0); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {-1})); + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + + Output input_shape; + Output input_rank_scalar; + std::tie(input_shape, input_rank_scalar) = get_shape_rank(context, input, true); + + Output raw_s; + // Inputs can be either none or List. Check whether input values should be used or should be set to default values. + if (!context.input_is_none(1)) { + // s is provided, load from input. 
+ raw_s = get_input_concat_if_list(context, 1); + raw_s = context.mark_node(std::make_shared(raw_s, element::i32)); + } + Output dim; + // Handle dim parameter containing vector of integers indicating dimensions to be transformed. + if (!context.input_is_none(2)) { + // dim is provided, load from input. + dim = get_input_concat_if_list(context, 2); + dim = context.mark_node(std::make_shared(dim, element::i32)); + } else if (!context.input_is_none(1)) { + // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. + auto s_len = context.mark_node(std::make_shared(raw_s, element::i32)); + auto slice_start = context.mark_node(std::make_shared(input_rank_scalar, s_len)); + auto slice_start_scalar = context.mark_node(std::make_shared(slice_start)); + dim = context.mark_node( + std::make_shared(slice_start_scalar, input_rank_scalar, const_1, element::i32)); + } else { + // Dim and s are set to default, use all of dimensions. + dim = context.mark_node(std::make_shared(const_0, input_rank_scalar, const_1, element::i32)); + } + + Output s; + if (context.input_is_none(1)) { + // Value for s was set to default, use full size for all dimensions. + s = context.mark_node(std::make_shared(input_shape, dim, const_0)); + } else { + // Values for s were provided. Replace -1 values with default full size in given dimension. + auto full_s_cond = context.mark_node(std::make_shared(raw_s, const_neg_1)); + auto full_s_values = context.mark_node(std::make_shared(input_shape, dim, const_0)); + s = context.mark_node(std::make_shared(full_s_cond, full_s_values, raw_s)); + } + + // Handle norm parameter indicating normalization mode to use. Defaults to "backward". 
+ std::string norm = "backward"; + if (!context.input_is_none(3)) { + norm = context.const_input(3); + } + + auto rdft = context.mark_node(std::make_shared(input, dim, s)); + + // Apply normalizations + auto n_int = context.mark_node(std::make_shared(s, const_0)); + auto n = context.mark_node(std::make_shared(n_int, rdft)); + Output normalized_rfftn; + if (norm == "forward") { + // Normalize by 1/n + normalized_rfftn = context.mark_node(std::make_shared(rdft, n)); + } else if (norm == "backward") { + // No normalization + normalized_rfftn = rdft; + } else if (norm == "ortho") { + // Normalize by 1/sqrt(n) + auto sqrt_n = context.mark_node(std::make_shared(n)); + normalized_rfftn = context.mark_node(std::make_shared(rdft, sqrt_n)); + } else { + FRONT_END_THROW( + "aten::fft_rfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); + } + + return {std::make_shared(normalized_rfftn, normalized_rfftn.get_element_type())}; +} + +OutputVector translate_fft_irfftn(const NodeContext& context) { + // aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + num_inputs_check(context, 1, 4, true); + auto input = context.get_input(0); + + auto complex_type_mark = as_type_ptr(input.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(complex_type_mark, "aten::fft_irfftn operation expects complex type tensor on input."); + input = complex_type_mark->input_value(0); + + auto const_neg_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto const_0 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto const_scalar_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + auto const_scalar_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + auto const_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {2})); + + // Input shape of complex number (excluding dimension created by concatenation of real and imag) + auto complex_input_shape = get_complex_shape(context, input); + auto input_rank = context.mark_node(std::make_shared(complex_input_shape, element::i32)); + auto input_rank_scalar = context.mark_node(std::make_shared(input_rank)); + + Output raw_s; + // Inputs can be either none or List. Check whether input values should be used or should be set to default values. + if (!context.input_is_none(1)) { + // s is provided, load from input. + raw_s = get_input_concat_if_list(context, 1); + raw_s = context.mark_node(std::make_shared(raw_s, element::i32)); + } + + // Handle dim parameter containing vector of integers indicating dimensions to be transformed. + Output dim; + if (!context.input_is_none(2)) { + // Dim values is provided, load from input. + dim = get_input_concat_if_list(context, 2); + dim = context.mark_node(std::make_shared(dim, element::i32)); + } else if (!context.input_is_none(1)) { + // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. 
+ auto s_len = context.mark_node(std::make_shared(raw_s, element::i32)); + auto range_start = context.mark_node(std::make_shared(input_rank, s_len)); + auto range_start_scalar = context.mark_node(std::make_shared(range_start)); + dim = context.mark_node( + std::make_shared(range_start_scalar, input_rank_scalar, const_scalar_1, element::i32)); + } else { + // Dim and s are set to default, use all of dimensions. + dim = context.mark_node( + std::make_shared(const_scalar_0, input_rank_scalar, const_scalar_1, element::i32)); + } + + // Calculate default s values. Use full available size except last element, which is set to even value in last + // dimension: s[-1] = 2 * (complex_input_shape[dim[-1]]) + auto default_s_raw = context.mark_node(std::make_shared(complex_input_shape, dim, const_0)); + auto last_s = context.mark_node(std::make_shared(default_s_raw, const_neg_1, const_0)); + auto last_s_m_1 = context.mark_node(std::make_shared(last_s, const_1)); + auto s_upd = context.mark_node(std::make_shared(last_s_m_1, const_2)); + auto s_shape = context.mark_node(std::make_shared(default_s_raw, element::i32)); + auto last_s_idx = context.mark_node(std::make_shared(s_shape, const_1)); + auto default_s = context.mark_node(std::make_shared(default_s_raw, last_s_idx, s_upd, const_0)); + + // Handle s parameter containing vector of intigers indicating signal sizes for dimensions. + Output s; + if (!context.input_is_none(1)) { + // Values for s were provided. Replace -1 values with default full size in given dimension. + auto full_s_cond = context.mark_node(std::make_shared(raw_s, const_neg_1)); + s = context.mark_node(std::make_shared(full_s_cond, default_s, raw_s)); + } else { + // Value for s was set to default. + s = default_s; + } + + // Handle norm parameter indicating normalization mode to use. Defaults to "backward". 
+ std::string norm = "backward"; + if (!context.input_is_none(3)) { + norm = context.const_input(3); + } + + auto irdft = context.mark_node(std::make_shared(input, dim, s)); + + // Apply normalizations. + auto n_int = context.mark_node(std::make_shared(s, const_0)); + auto n = context.mark_node(std::make_shared(n_int, irdft)); + Output normalized_irfftn; + if (norm == "forward") { + normalized_irfftn = context.mark_node(std::make_shared(irdft, n)); + } else if (norm == "backward") { + normalized_irfftn = irdft; + } else if (norm == "ortho") { + auto sqrt_n = context.mark_node(std::make_shared(n)); + normalized_irfftn = context.mark_node(std::make_shared(irdft, sqrt_n)); + } else { + FRONT_END_THROW( + "aten::fft_irfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); + } + return {normalized_irfftn}; +} + +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op/permute.cpp b/src/frontends/pytorch/src/op/permute.cpp index 46016ca8ca16a0..c724e38b8077b2 100644 --- a/src/frontends/pytorch/src/op/permute.cpp +++ b/src/frontends/pytorch/src/op/permute.cpp @@ -3,7 +3,10 @@ // #include "openvino/core/validation_util.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/concat.hpp" +#include "openvino/op/subtract.hpp" #include "openvino/op/transpose.hpp" #include "utils.hpp" @@ -12,17 +15,41 @@ namespace frontend { namespace pytorch { namespace op { +using namespace ov::op; + OutputVector translate_permute(const NodeContext& context) { - num_inputs_check(context, 2, 2); + num_inputs_check(context, 2, 2, true); auto data = context.get_input(0); auto order = get_input_concat_if_list(context, 1); - auto rank = std::get<1>(get_shape_rank(context, data)); - auto rank_converted = context.mark_node(std::make_shared(rank, order)); + + Output rank; + auto complex_type_mark = 
as_type_ptr(data.get_node_shared_ptr()); + if (complex_type_mark) { + data = complex_type_mark->input_value(0); + rank = std::get<1>(get_shape_rank(context, data)); + auto const_1 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {1})); + rank = context.mark_node(std::make_shared(rank, const_1)); + } else { + rank = std::get<1>(get_shape_rank(context, data)); + } + + auto rank_converted = context.mark_node(std::make_shared(rank, order)); auto order_normalized = normalize_axis(context, order, rank_converted); + + if (complex_type_mark) { + auto to_concat = OutputVector{order_normalized, rank_converted}; + order_normalized = context.mark_node(std::make_shared(to_concat, 0)); + } + if (const auto order_const = ov::util::get_constant_from_source(order_normalized)) { order_normalized = order_const; } - return {context.mark_node(std::make_shared(data, order_normalized))}; + auto permute = context.mark_node(std::make_shared(data, order_normalized)); + if (complex_type_mark) { + const auto& complex_dtype = complex_type_mark->get_complex_part_type(); + permute = context.mark_node(std::make_shared(permute, complex_dtype)); + } + return {permute}; } } // namespace op diff --git a/src/frontends/pytorch/src/op/reshape.cpp b/src/frontends/pytorch/src/op/reshape.cpp index 7524d0e3c4aaf4..b9dcfc8d9afc4a 100644 --- a/src/frontends/pytorch/src/op/reshape.cpp +++ b/src/frontends/pytorch/src/op/reshape.cpp @@ -4,6 +4,7 @@ #include "openvino/op/reshape.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/squeeze.hpp" @@ -15,15 +16,34 @@ namespace frontend { namespace pytorch { namespace op { +using namespace ov::op; + OutputVector translate_reshape(const NodeContext& context) { // Translation is used by both aten::view and aten::reshape. 
// Schema: aten::view(Tensor input, int[] shape) -> Tensor // Schema: aten::reshape(Tensor input, int[] shape) -> Tensor // For shape parameter, int[] is converted into single dimensional Tensor. - num_inputs_check(context, 2, 2); + num_inputs_check(context, 2, 2, true); + auto tensor = context.get_input(0); auto shape = get_input_concat_if_list(context, 1); - auto reshape = std::make_shared(context.get_input(0), shape, false); - return {context.mark_node(reshape)}; + + auto complex_type_mark = as_type_ptr(tensor.get_node_shared_ptr()); + if (complex_type_mark) { + tensor = complex_type_mark->input_value(0); + auto const_2 = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {2})); + const_2 = context.mark_node(std::make_shared(const_2, shape)); + + shape = context.mark_node(std::make_shared(OutputVector{shape, const_2}, 0)); + } + + auto reshape = context.mark_node(std::make_shared(tensor, shape, false)); + + if (complex_type_mark) { + const auto& complex_dtype = complex_type_mark->get_complex_part_type(); + return {context.mark_node(std::make_shared(reshape, complex_dtype))}; + } else { + return {reshape}; + } }; } // namespace op diff --git a/src/frontends/pytorch/src/op/size.cpp b/src/frontends/pytorch/src/op/size.cpp index d8f1ee28123c10..2eca5f2707e53d 100644 --- a/src/frontends/pytorch/src/op/size.cpp +++ b/src/frontends/pytorch/src/op/size.cpp @@ -2,10 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/shape_of.hpp" +#include "openvino/op/slice.hpp" #include "utils.hpp" namespace ov { @@ -16,10 +18,25 @@ namespace op { using namespace ov::op; OutputVector translate_size(const NodeContext& context) { - num_inputs_check(context, 1, 2); - auto shape = context.mark_node(std::make_shared(context.get_input(0), element::i64)); + num_inputs_check(context, 1, 
2, true); + auto data = context.get_input(0); + Output shape; + + auto complex_type_mark = as_type_ptr(data.get_node_shared_ptr()); + if (complex_type_mark) { + data = complex_type_mark->input_value(0); + shape = context.mark_node(std::make_shared(data, element::i64)); + + auto zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0})); + auto stop = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {-1})); + auto step = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1})); + shape = context.mark_node(std::make_shared(shape, zero, stop, step, zero)); + } else { + shape = context.mark_node(std::make_shared(data, element::i64)); + } + if (context.input_is_none(1)) { - return shape->outputs(); + return {shape}; } else { auto axis_0 = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0})); return {context.mark_node(std::make_shared(shape, context.get_input(1), axis_0))}; diff --git a/src/frontends/pytorch/src/op/stft.cpp b/src/frontends/pytorch/src/op/stft.cpp index 8e478835fdcdd6..678f44dcbe1edf 100644 --- a/src/frontends/pytorch/src/op/stft.cpp +++ b/src/frontends/pytorch/src/op/stft.cpp @@ -4,6 +4,7 @@ #include "openvino/op/stft.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/node_context.hpp" #include "openvino/op/broadcast.hpp" #include "openvino/op/constant.hpp" @@ -78,8 +79,6 @@ OutputVector translate_stft(const NodeContext& context) { if (!context.input_is_none(7)) { return_complex = context.const_input(7); } - PYTORCH_OP_CONVERSION_CHECK(!return_complex, - "aten::stft conversion is currently supported with return_complex=False only."); // Perform STFT constexpr bool transpose_frames = true; @@ -88,8 +87,10 @@ OutputVector translate_stft(const NodeContext& context) { if (normalized) { const auto nfft_convert = context.mark_node(std::make_shared(n_fft, stft)); const auto divisor = context.mark_node(std::make_shared(nfft_convert)); - const auto norm_stft = 
context.mark_node(std::make_shared(stft, divisor)); - return {norm_stft}; + stft = context.mark_node(std::make_shared(stft, divisor)); + } + if (return_complex) { + return {context.mark_node(std::make_shared(stft, stft->get_element_type()))}; } else { return {stft}; } diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index fe4e84bd47d45e..f00391e08e2a32 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -59,6 +59,7 @@ OP_CONVERTER(translate_celu); OP_CONVERTER(translate_channel_shuffle); OP_CONVERTER(translate_clamp); OP_CONVERTER(translate_col2im); +OP_CONVERTER(translate_complex); OP_CONVERTER(translate_constant); OP_CONVERTER(translate_conv_transposend); OP_CONVERTER(translate_convnd); @@ -86,6 +87,8 @@ OP_CONVERTER(translate_expm1); OP_CONVERTER(translate_eye); OP_CONVERTER(translate_fake_quantize_per_channel_affine); OP_CONVERTER(translate_fake_quantize_per_tensor_affine); +OP_CONVERTER(translate_fft_irfftn); +OP_CONVERTER(translate_fft_rfftn); OP_CONVERTER(translate_fill); OP_CONVERTER(translate_fill_diagonal); OP_CONVERTER(translate_flatten); @@ -108,6 +111,7 @@ OP_CONVERTER(translate_hann_window); OP_CONVERTER(translate_hardtanh); OP_CONVERTER(translate_if); OP_CONVERTER(translate_im2col); +OP_CONVERTER(translate_imag); OP_CONVERTER(translate_index); OP_CONVERTER(translate_index_add); OP_CONVERTER(translate_index_copy_); @@ -192,6 +196,7 @@ OP_CONVERTER(translate_randn); OP_CONVERTER(translate_randint); OP_CONVERTER(translate_rand_like); OP_CONVERTER(translate_randn_like); +OP_CONVERTER(translate_real); OP_CONVERTER(translate_reciprocal); OP_CONVERTER(translate_relu6); OP_CONVERTER(translate_remainder); @@ -246,6 +251,8 @@ OP_CONVERTER(translate_upsample_nearest3d); OP_CONVERTER(translate_upsample_trilinear3d); OP_CONVERTER(translate_var); OP_CONVERTER(translate_var_mean); +OP_CONVERTER(translate_view_as_complex); +OP_CONVERTER(translate_view_as_real); 
OP_CONVERTER(translate_weight_norm); OP_CONVERTER(translate_where); OP_CONVERTER(translate_zeros); @@ -423,7 +430,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::clip", op::translate_clamp}, {"aten::clone", op::skip_node}, // ignore clone operators that are inserted by PyTorch autograd {"aten::col2im", op::translate_col2im}, - // aten::complex - Supported in limited set of patterns + {"aten::complex", op::translate_complex}, {"aten::concat", op::translate_cat}, {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, // we assume all tensors are contiguous @@ -468,8 +475,8 @@ const std::unordered_map get_supported_ops_ts() { {"aten::fake_quantize_per_channel_affine", op::translate_fake_quantize_per_channel_affine}, {"aten::fake_quantize_per_tensor_affine", op::translate_fake_quantize_per_tensor_affine}, {"aten::feature_dropout", op::skip_node}, - // aten::fft_irfftn - Supported in limited set of patterns - // aten::fft_rfftn - Supported in limited set of patterns + {"aten::fft_irfftn", op::translate_fft_irfftn}, + {"aten::fft_rfftn", op::translate_fft_rfftn}, {"aten::fill", op::translate_fill}, {"aten::fill_diagonal", op::translate_fill_diagonal}, {"aten::flatten", op::quantizable_op}, @@ -496,7 +503,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::hardswish", op::quantizable_op>}, {"aten::hardtanh", op::quantizable_op}, {"aten::im2col", op::translate_im2col}, - // aten::imag - Supported in limited set of patterns + {"aten::imag", op::translate_imag}, // aten::index - Supported in limited set of patterns {"aten::index_copy_", op::inplace_op}, {"aten::index_fill_", op::inplace_op}, @@ -604,7 +611,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::randint", op::translate_randint}, {"aten::randn", op::translate_randn}, {"aten::randn_like", op::translate_randn_like}, - // aten::real - Supported in limited set of patterns + {"aten::real", op::translate_real}, 
{"aten::reciprocal", op::optional_out}, {"aten::reciprocal_", op::inplace_op}, // aten::reflection_pad2d - Supported in limited set of patterns @@ -696,6 +703,8 @@ const std::unordered_map get_supported_ops_ts() { {"aten::var_mean", op::translate_var_mean}, {"aten::view", op::quantizable_op}, {"aten::view_as", op::translate_reshape_as}, + {"aten::view_as_complex", op::translate_view_as_complex}, + {"aten::view_as_real", op::translate_view_as_real}, {"aten::wait", op::skip_node}, {"aten::where", op::translate_where}, {"aten::zero", op::translate_zeros_like}, @@ -979,6 +988,8 @@ const std::unordered_map get_supported_ops_fx() { {"aten.var.correction", op::translate_var_fx}, {"aten.var_mean.correction", op::translate_var_mean_fx}, {"aten.view.default", op::translate_reshape}, + {"aten.view_as_complex.default", op::translate_view_as_complex}, + {"aten.view_as_real.default", op::translate_view_as_real}, {"aten.where.self", op::translate_where}, {"aten.zeros.default", op::translate_zeros_fx}, {"aten.zeros.names", op::translate_zeros_fx}, diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp deleted file mode 100644 index cb80987e4511ae..00000000000000 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "irfftn_complex_replacer.hpp" - -#include "openvino/core/rt_info.hpp" -#include "openvino/op/concat.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/convert_like.hpp" -#include "openvino/op/equal.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/irdft.hpp" -#include "openvino/op/multiply.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/reduce_prod.hpp" -#include "openvino/op/scatter_update.hpp" -#include "openvino/op/select.hpp" -#include "openvino/op/shape_of.hpp" -#include 
"openvino/op/sqrt.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/unsqueeze.hpp" -#include "openvino/op/util/framework_node.hpp" -#include "openvino/pass/pattern/matcher.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -using namespace ov::pass; -using namespace ov::op; - -IRFFTNComplexReplacer::IRFFTNComplexReplacer() { - // Transformation used to replace combination of aten::complex -> aten::fft_irfftn torch operators. - // Pattern: aten::complex -> aten::fft_irfftn - auto fft_op = pattern::wrap_type(); - - ov::matcher_pass_callback irfftn_callback = [](pattern::Matcher& m) { - // "aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" - auto irfftn_op = cast_fw_node(m.get_match_root(), "aten::fft_irfftn"); - if (!irfftn_op) { - return false; - } - auto const_neg_1 = v0::Constant::create(element::i32, Shape{1}, {-1}); - auto const_0 = v0::Constant::create(element::i32, Shape{1}, {0}); - auto const_scalar_0 = v0::Constant::create(element::i32, Shape{}, {0}); - auto const_1 = v0::Constant::create(element::i32, Shape{1}, {1}); - auto const_scalar_1 = v0::Constant::create(element::i32, Shape{}, {1}); - auto const_2 = v0::Constant::create(element::i32, Shape{1}, {2}); - - // Check whether input node being aten::complex. - auto fw_node_complex_input = cast_fw_node(irfftn_op->input_value(0).get_node_shared_ptr(), "aten::complex"); - if (!fw_node_complex_input) { - return false; - } - - // Concatenate real and imag parts over additional, last dimension. 
- auto real = std::make_shared(fw_node_complex_input->input_value(0), const_neg_1); - auto imag = std::make_shared(fw_node_complex_input->input_value(1), const_neg_1); - NodeVector complex = {real, imag}; - auto input = std::make_shared(complex, -1); - - // Input shape of complex number (excluding dimension created by concatenation of real and imag) - auto complex_input_shape = std::make_shared(fw_node_complex_input->input_value(0), element::i32); - auto input_rank = std::make_shared(complex_input_shape, element::i32); - auto input_rank_scalar = std::make_shared(input_rank); - - // Inputs can be either none or ListConstruct. Check whether input values should be used or should be set to - // default values. - bool dim_use_default = is_none_node(irfftn_op->input_value(2)); - bool s_use_default = is_none_node(irfftn_op->input_value(1)); - // Can be None constant, when used check s_use_default. - auto raw_s_input_maybe = concat_list_construct(irfftn_op->input_value(1)); - raw_s_input_maybe = std::make_shared(raw_s_input_maybe, element::i32); - - // Handle dim parameter containing vector of integers indicating dimensions to be transformed. - std::shared_ptr dim; - if (!dim_use_default) { - // Dim values is provided, load from input. - dim = std::make_shared(concat_list_construct(irfftn_op->input_value(2)), element::i32); - } else if (!s_use_default) { - // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. - auto s_len = std::make_shared(raw_s_input_maybe, element::i32); - auto range_start = std::make_shared(input_rank, s_len); - auto range_start_scalar = std::make_shared(range_start); - dim = std::make_shared(range_start_scalar, input_rank_scalar, const_scalar_1, element::i32); - } else { - // Dim and s are set to default, use all of dimensions. - dim = std::make_shared(const_scalar_0, input_rank_scalar, const_scalar_1, element::i32); - } - - // Calculate default s values. 
Use full available size except last element, which is set to even value in last - // dimension: s[-1] = 2 * (complex_input_shape[dim[-1]]) - auto default_s_raw = std::make_shared(complex_input_shape, dim, const_0); - auto last_s = std::make_shared(default_s_raw, const_neg_1, const_0); - auto last_s_m_1 = std::make_shared(last_s, const_1); - auto s_upd = std::make_shared(last_s_m_1, const_2); - auto s_shape = std::make_shared(default_s_raw, element::i32); - auto last_s_idx = std::make_shared(s_shape, const_1); - auto default_s = std::make_shared(default_s_raw, last_s_idx, s_upd, const_0); - - // Handle s parameter containing vector of intigers indicating signal sizes for dimensions. - std::shared_ptr s; - if (!s_use_default) { - // Values for s were provided. Replace -1 values with default full size in given dimension. - auto full_s_cond = std::make_shared(raw_s_input_maybe, const_neg_1); - s = std::make_shared(full_s_cond, default_s, raw_s_input_maybe); - } else { - // Value for s was set to default. - s = default_s; - } - - // Handle norm parameter indicating normalization mode to use. Defaults to "backward". - std::string norm; - if (const auto& fw_node_mode = - ov::as_type_ptr(irfftn_op->input_value(3).get_node_shared_ptr())) { - const auto& attrs = fw_node_mode->get_attrs(); - if (attrs.find("string_value") != attrs.end()) { - norm = attrs.at("string_value"); - } else { - norm = "backward"; - } - } else { - add_exception_to_fw_node(irfftn_op, "aten::fft_irfftn: could not retrive value for norm attribute."); - return false; - } - - auto irdft = std::make_shared(input, dim, s); - - // Apply normalizations. 
- auto n_int = std::make_shared(s, const_0); - auto n = std::make_shared(n_int, irdft); - std::shared_ptr normalized_irfftn; - if (norm == "forward") { - normalized_irfftn = std::make_shared(irdft, n); - } else if (norm == "backward") { - normalized_irfftn = irdft; - } else if (norm == "ortho") { - auto sqrt_n = std::make_shared(n); - normalized_irfftn = std::make_shared(irdft, sqrt_n); - } else { - add_exception_to_fw_node( - irfftn_op, - "aten::fft_irfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); - return false; - } - - copy_runtime_info({irfftn_op, fw_node_complex_input}, normalized_irfftn); - normalized_irfftn->set_friendly_name(irfftn_op->get_friendly_name()); - replace_node(irfftn_op, normalized_irfftn); - return true; - }; - auto m = std::make_shared(fft_op, "ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); - this->register_matcher(m, irfftn_callback); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp deleted file mode 100644 index c75c6e51f92571..00000000000000 --- a/src/frontends/pytorch/src/transforms/irfftn_complex_replacer.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" -#include "openvino/pass/pass.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -class IRFFTNComplexReplacer : public ov::pass::MatcherPass { -public: - OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::IRFFTNComplexReplacer"); - IRFFTNComplexReplacer(); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp 
b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp deleted file mode 100644 index b90e3121930c71..00000000000000 --- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.cpp +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "rfftn_complex_replacer.hpp" - -#include "openvino/core/rt_info.hpp" -#include "openvino/op/convert.hpp" -#include "openvino/op/convert_like.hpp" -#include "openvino/op/divide.hpp" -#include "openvino/op/equal.hpp" -#include "openvino/op/gather.hpp" -#include "openvino/op/range.hpp" -#include "openvino/op/rdft.hpp" -#include "openvino/op/reduce_prod.hpp" -#include "openvino/op/select.hpp" -#include "openvino/op/shape_of.hpp" -#include "openvino/op/slice.hpp" -#include "openvino/op/split.hpp" -#include "openvino/op/sqrt.hpp" -#include "openvino/op/squeeze.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/util/framework_node.hpp" -#include "openvino/pass/pattern/matcher.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" -#include "utils.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -using namespace ov::pass; -using namespace ov::op; - -RFFTNComplexReplacer::RFFTNComplexReplacer() { - // Transformation used to replace combination of aten::fft_rfftn -> {aten::real, aten::imag} torch operators. - // Pattern: aten::fft_rfftn -> {aten::real, aten::imag} - auto fft_op = pattern::wrap_type(); - ov::matcher_pass_callback rfftn_callback = [](pattern::Matcher& m) { - // Schema: "aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor" - auto rfftn_op = cast_fw_node(m.get_match_root(), "aten::fft_rfftn"); - if (!rfftn_op) { - return false; - } - auto const_neg_1 = v0::Constant::create(element::i32, Shape{}, {-1}); - auto const_0 = v0::Constant::create(element::i32, Shape{}, {0}); - auto const_1 = v0::Constant::create(element::i32, Shape{}, {1}); - - auto input = rfftn_op->input_value(0); - auto input_shape = std::make_shared(input, element::i32); - auto input_rank = std::make_shared(input_shape, element::i32); - auto input_rank_scalar = std::make_shared(input_rank); - - // Inputs can be either none or ListConstruct. Check whether input values should be used or should be set to - // default values. - bool dim_use_default = is_none_node(rfftn_op->input_value(2)); - bool s_use_default = is_none_node(rfftn_op->input_value(1)); - // Can be None constant, when used check s_use_default. - auto raw_s_input_maybe = concat_list_construct(rfftn_op->input_value(1)); - raw_s_input_maybe = std::make_shared(raw_s_input_maybe, element::i32); - - // Handle dim parameter containing vector of intigers indicating dimensions to be transformed. - std::shared_ptr dim; - if (!dim_use_default) { - // Dim values is provided, load from input. - dim = std::make_shared(concat_list_construct(rfftn_op->input_value(2)), element::i32); - } else if (!s_use_default) { - // If dim is default and s is provided, use last s_len dimensions where s_len is length of s. - auto s_len = std::make_shared(raw_s_input_maybe, element::i32); - auto slice_start = std::make_shared(input_rank, s_len); - auto slice_start_scalar = std::make_shared(slice_start); - dim = std::make_shared(slice_start_scalar, input_rank_scalar, const_1, element::i32); - } else { - // Dim and s are set to default, use all of dimensions. - dim = std::make_shared(const_0, input_rank_scalar, const_1, element::i32); - } - - // Handle s parameter containing vector of intigers indicating signal sizes for dimensions. 
- std::shared_ptr s; - if (!s_use_default) { - // Values for s were provided. Replace -1 values with default full size in given dimension. - auto full_s_cond = std::make_shared(raw_s_input_maybe, const_neg_1); - auto full_s_values = std::make_shared(input_shape, dim, const_0); - s = std::make_shared(full_s_cond, full_s_values, raw_s_input_maybe); - } else { - // Value for s was set to default, use full size for all dimensions. - s = std::make_shared(input_shape, dim, const_0); - } - - // Handle norm parameter indicating normalization mode to use. Defaults to "backward". - std::string norm; - if (const auto& fw_node_mode = - ov::as_type_ptr(rfftn_op->input_value(3).get_node_shared_ptr())) { - const auto& attrs = fw_node_mode->get_attrs(); - if (attrs.find("string_value") != attrs.end()) { - norm = attrs.at("string_value"); - } else { - norm = "backward"; - } - } else { - add_exception_to_fw_node(rfftn_op, "aten::fft_rfftn: could not retrive value for norm attribute."); - return false; - } - - auto rdft = std::make_shared(input, dim, s); - - // Apply normalizations - auto n_int = std::make_shared(s, const_0); - auto n = std::make_shared(n_int, rdft); - std::shared_ptr normalized_rfftn; - if (norm == "forward") { - // Normalize by 1/n - normalized_rfftn = std::make_shared(rdft, n); - } else if (norm == "backward") { - // No normalization - normalized_rfftn = rdft; - } else if (norm == "ortho") { - // Normalize by 1/sqrt(n) - auto sqrt_n = std::make_shared(n); - normalized_rfftn = std::make_shared(rdft, sqrt_n); - } else { - add_exception_to_fw_node( - rfftn_op, - "aten::fft_rfftn: unrecognized normalization mode. Only forward, backward and ortho are supported."); - return false; - } - - // Replace outputs that are either torch operators aten::real or aten::imag. Apply squeeze to remove last - // dimension used to concatenate. 
- auto normalized_rfftn_splitted = std::make_shared(normalized_rfftn, const_neg_1, 2); - auto rfftn_outs = rfftn_op->get_users(); - bool rval = false; - for (auto& out : rfftn_outs) { - if (auto real_op = cast_fw_node(out, "aten::real")) { - auto squeezed = std::make_shared(normalized_rfftn_splitted->output(0), const_neg_1); - copy_runtime_info({rfftn_op, real_op}, squeezed); - squeezed->set_friendly_name(real_op->get_friendly_name()); - replace_node(real_op, squeezed); - rval = true; - } - if (auto imag_op = cast_fw_node(out, "aten::imag")) { - auto squeezed = std::make_shared(normalized_rfftn_splitted->output(1), const_neg_1); - copy_runtime_info({rfftn_op, imag_op}, squeezed); - squeezed->set_friendly_name(imag_op->get_friendly_name()); - replace_node(imag_op, squeezed); - rval = true; - } - } - add_exception_to_fw_node( - rfftn_op, - "aten::fft_rfftn: Unsupported output node. Only aten::real and aten::imag are supported."); - return rval; - }; - - auto m = std::make_shared(fft_op, "ov::frontend::pytorch::pass::RFFTNComplexReplacer"); - this->register_matcher(m, rfftn_callback); -}; - -} // namespace pass -} // namespace pytorch -} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp b/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp deleted file mode 100644 index 5420b7c9a01a04..00000000000000 --- a/src/frontends/pytorch/src/transforms/rfftn_complex_replacer.hpp +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/pass/graph_rewrite.hpp" -#include "openvino/pass/pass.hpp" - -namespace ov { -namespace frontend { -namespace pytorch { -namespace pass { - -class RFFTNComplexReplacer : public ov::pass::MatcherPass { -public: - OPENVINO_MATCHER_PASS_RTTI("ov::frontend::pytorch::pass::RFFTNComplexReplacer"); - RFFTNComplexReplacer(); -}; - -} // namespace pass -} // namespace pytorch 
-} // namespace frontend -} // namespace ov diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index da0b5c5cd24d61..70ba4171770fbd 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -7,6 +7,7 @@ #include "op_table.hpp" #include "openvino/core/rt_info.hpp" #include "openvino/core/validation_util.hpp" +#include "openvino/frontend/complex_type_mark.hpp" #include "openvino/frontend/pytorch/decoder.hpp" #include "openvino/op/add.hpp" #include "openvino/op/broadcast.hpp" @@ -40,15 +41,24 @@ namespace pytorch { using namespace ov::op; -void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs) { +void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs, bool allow_complex) { auto num_inputs = context.get_input_size(); FRONT_END_OP_CONVERSION_CHECK(num_inputs >= min_inputs, "Got less inputs ", num_inputs, " than expected ", min_inputs); + if (!allow_complex) { + // verify that no input is complex + for (int i = 0; i < static_cast(std::min(num_inputs, max_inputs)); ++i) { + auto input = context.get_input(i); + auto complex_type_mark = as_type_ptr(input.get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(!complex_type_mark, "The operation doesn't allow complex type."); + } + } + // Check that additional inputs are all None, otherwise raise exception for (auto i = max_inputs; i < num_inputs; i++) { - FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected."); + FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(i), "Got more inputs than expected: ", i + 1); } } @@ -836,6 +846,16 @@ bool index_tensor_on_list(ov::pass::NodeRegistry& rg, return true; } +Output get_complex_shape(const NodeContext& context, const Output& complex_input) { + auto input_shape = context.mark_node(std::make_shared(complex_input, element::i32)); + + auto zero = v0::Constant::create(element::i32, Shape{1}, {0}); + auto stop = 
v0::Constant::create(element::i32, Shape{1}, {-1}); + auto step = v0::Constant::create(element::i32, Shape{1}, {1}); + // Removing last dim from shape + return context.mark_node(std::make_shared(input_shape, zero, stop, step, zero)); +} + } // namespace pytorch } // namespace frontend } // namespace ov diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index 5eb3f4aa4f64c0..ece73b3ea86ea1 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -35,7 +35,7 @@ const std::string& get_pytorch_prefix(); OPENVINO_ASSERT_HELPER(::ov::frontend::OpConversionFailure, "", (COND), get_pytorch_prefix(), __VA_ARGS__) #endif -void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs); +void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_inputs, bool allow_complex = false); Output make_optional_bias(const Output& base_op, const NodeContext& context, @@ -136,6 +136,8 @@ bool index_tensor_on_list(ov::pass::NodeRegistry& rg, Output& new_output, bool& use_input_as_output); +Output get_complex_shape(const NodeContext& context, const Output& complex_input); + namespace op { template OutputVector inplace_op(const NodeContext& context) { diff --git a/tests/layer_tests/pytorch_tests/test_permute.py b/tests/layer_tests/pytorch_tests/test_permute.py index d8fb94145bada7..efbd77d371eb89 100644 --- a/tests/layer_tests/pytorch_tests/test_permute.py +++ b/tests/layer_tests/pytorch_tests/test_permute.py @@ -11,46 +11,54 @@ def _prepare_input(self): import numpy as np return (np.random.randn(1, 3, 224, 224).astype(np.float32),) - def create_model(self, order): + def create_model(self, order, complex_type): import torch class aten_permute(torch.nn.Module): - def __init__(self, order): - super(aten_permute, self).__init__() + def __init__(self, order, complex_type): + super().__init__() self.order = order + self.complex_type = complex_type def forward(self, x): - return 
torch.permute(x, self.order) - - ref_net = None - - return aten_permute(order), ref_net, "aten::permute" - - @pytest.mark.parametrize("order", [[0, 2, 3, 1], [0, 3, 1, 2], [0, -1, 1, -2]]) + if self.complex_type: + x = torch.reshape(x, x.shape[:-1] + (-1, 2)) + x = torch.view_as_complex(x) + res = torch.permute(x, self.order) + if self.complex_type: + res = torch.view_as_real(res) + return res + + return aten_permute(order, complex_type), None, "aten::permute" + + @pytest.mark.parametrize("order", [[0, 2, 3, 1], + [0, 3, 1, 2], + [0, -1, 1, -2]]) + @pytest.mark.parametrize("complex_type", [True, False]) @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_torch_export - def test_permute(self, order, ie_device, precision, ir_version): - self._test(*self.create_model(order), ie_device, precision, ir_version) + def test_permute(self, order, complex_type, ie_device, precision, ir_version): + self._test(*self.create_model(order, complex_type), ie_device, precision, ir_version) class TestPermuteList(PytorchLayerTest): def _prepare_input(self, permute_shape): import numpy as np - return (np.random.randn(1, 3, 224, 224).astype(np.float32), np.random.randn(*permute_shape).astype(np.float32)) + return (np.random.randn(1, 3, 224, 224).astype(np.float32), + np.random.randn(*permute_shape).astype(np.float32)) def create_model(self): import torch - class aten_permute(torch.nn.Module): - + class aten_permute_list(torch.nn.Module): def forward(self, x, y): y_shape = y.shape return torch.permute(x, [y_shape[0] - 1, y_shape[1] - 1, y_shape[2] - 1, y_shape[3] - 1]) ref_net = None - return aten_permute(), ref_net, ["aten::permute", "prim::ListConstruct"] + return aten_permute_list(), ref_net, ["aten::permute", "prim::ListConstruct"] @pytest.mark.parametrize("order", [[1, 3, 4, 2], [1, 4, 2, 3]]) @pytest.mark.nightly @@ -58,4 +66,5 @@ def forward(self, x, y): @pytest.mark.precommit_torch_export def test_permute_list(self, order, ie_device, precision, ir_version): 
self._test(*self.create_model(), ie_device, precision, ir_version, - kwargs_to_prepare_input={"permute_shape": order}, dynamic_shapes=ie_device != "GPU") + kwargs_to_prepare_input={"permute_shape": order}, + dynamic_shapes=ie_device != "GPU") diff --git a/tests/layer_tests/pytorch_tests/test_reshape.py b/tests/layer_tests/pytorch_tests/test_reshape.py index 7174d6022b4ca1..5266e8e00c5c1d 100644 --- a/tests/layer_tests/pytorch_tests/test_reshape.py +++ b/tests/layer_tests/pytorch_tests/test_reshape.py @@ -1,31 +1,38 @@ # Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 +import random import numpy as np import pytest -import random from pytorch_layer_test_class import PytorchLayerTest class TestReshape(PytorchLayerTest): - def _prepare_input(self): - return (np.random.uniform(0, 50, (1, 12, 12, 24)).astype(np.float32)) + def _prepare_input(self, complex_type): + shape = (1, 12, 12, 24) + if complex_type: + shape += (2,) + return (np.random.uniform(0, 50, shape).astype(np.float32)) - def create_model(self, shape): + def create_model(self, shape, complex_type): import torch class aten_reshape(torch.nn.Module): - def __init__(self, shape): - super(aten_reshape, self).__init__() + def __init__(self, shape, complex_type): + super().__init__() self.shape = shape + self.complex_type = complex_type def forward(self, x): - return torch.reshape(x, self.shape) + if self.complex_type: + x = torch.view_as_complex(x) + res = torch.reshape(x, self.shape) + if self.complex_type: + res = torch.view_as_real(res) + return res - ref_net = None - - return aten_reshape(shape), ref_net, "aten::reshape" + return aten_reshape(shape, complex_type), None, "aten::reshape" @pytest.mark.parametrize(("shape"), [ [-1, 6], @@ -37,16 +44,20 @@ def forward(self, x): [24, 1, -1, 12], [24, 1, 1, -1, 12], ]) + @pytest.mark.parametrize("complex_type", [True, False]) @pytest.mark.nightly @pytest.mark.precommit @pytest.mark.precommit_torch_export 
@pytest.mark.precommit_fx_backend - def test_reshape(self, shape, ie_device, precision, ir_version): - self._test(*self.create_model(shape), ie_device, precision, ir_version) + def test_reshape(self, shape, complex_type, ie_device, precision, ir_version): + self._test(*self.create_model(shape, complex_type), + ie_device, precision, ir_version, + kwargs_to_prepare_input={"complex_type": complex_type}) + class TestDynamicReshape(PytorchLayerTest): def _prepare_input(self): - last_dym = random.randint(1,2) + last_dym = random.randint(1, 2) return (np.random.uniform(0, 50, (1, 12, 12, 24)).astype(np.float32), last_dym) def create_model(self, shape): @@ -54,17 +65,14 @@ def create_model(self, shape): class aten_reshape(torch.nn.Module): def __init__(self, shape): - super(aten_reshape, self).__init__() + super().__init__() self.shape = shape def forward(self, x, dym): - #return torch.reshape(x, self.shape) dym2 = int(torch.ops.aten.sym_size(x, 3)/dym) return torch.reshape(x, [12, 12, dym2, dym]) - ref_net = None - - return aten_reshape(shape), ref_net, "aten::reshape" + return aten_reshape(shape), None, "aten::reshape" @pytest.mark.parametrize(("shape"), [ [12, 12, 24, 1], diff --git a/tests/layer_tests/pytorch_tests/test_size.py b/tests/layer_tests/pytorch_tests/test_size.py index 050d1d818df1b2..f3e0e98dccb327 100644 --- a/tests/layer_tests/pytorch_tests/test_size.py +++ b/tests/layer_tests/pytorch_tests/test_size.py @@ -7,24 +7,38 @@ class TestSize(PytorchLayerTest): - def _prepare_input(self, input_shape): + def _prepare_input(self, input_shape, complex_type): import numpy as np + if complex_type: + input_shape += [2] return (np.random.randn(*input_shape).astype(np.float32),) - def create_model(self): + def create_model(self, complex_type): import torch class aten_size(torch.nn.Module): + def __init__(self, complex_type): + super().__init__() + self.complex_type = complex_type + def forward(self, x): + if self.complex_type: + x = torch.view_as_complex(x) return 
torch.tensor(x.shape) - ref_net = None + op = aten_size(complex_type) - op = aten_size() + return op, None, "aten::size" - return op, ref_net, "aten::size" @pytest.mark.nightly @pytest.mark.precommit - @pytest.mark.parametrize("input_shape", [[1,], [1, 2], [1, 2, 3], [1, 2, 3, 4], [1, 2, 3, 4, 5]]) - def test_size(self, input_shape, ie_device, precision, ir_version): - self._test(*self.create_model(), ie_device, precision, ir_version, kwargs_to_prepare_input={"input_shape": input_shape}) + @pytest.mark.parametrize("input_shape", [[1,], + [1, 2], + [1, 2, 3], + [1, 2, 3, 4], + [1, 2, 3, 4, 5]]) + @pytest.mark.parametrize("complex_type", [True, False]) + def test_size(self, input_shape, complex_type, ie_device, precision, ir_version): + self._test(*self.create_model(complex_type), ie_device, precision, ir_version, + kwargs_to_prepare_input={"input_shape": input_shape, + "complex_type": complex_type}) diff --git a/tests/layer_tests/pytorch_tests/test_stft.py b/tests/layer_tests/pytorch_tests/test_stft.py index f90962e5f1daa7..a2097b1f1fe453 100644 --- a/tests/layer_tests/pytorch_tests/test_stft.py +++ b/tests/layer_tests/pytorch_tests/test_stft.py @@ -98,7 +98,7 @@ def __init__(self, n_fft, hop_length, win_length, center, pad_mode, normalized, self.return_complex = return_complex def forward(self, x): - return torch.stft( + stft = torch.stft( x, self.n_fft, hop_length=self.hop_length, @@ -110,6 +110,10 @@ def forward(self, x): onesided=self.onesided, return_complex=self.return_complex, ) + if self.return_complex: + return torch.view_as_real(stft) + else: + return stft ref_net = None @@ -128,9 +132,9 @@ def forward(self, x): [16, None, None, False, "reflect", False, True, False], # hop & win length None [16, 4, None, False, "reflect", False, True, False], # win_length None [16, 4, 16, False, "reflect", True, True, False], # normalized True + [16, 4, 16, False, "reflect", False, True, True], # return_complex True # Unsupported cases: [16, 4, 16, False, "reflect", False, 
False, False], # onesided False - [16, 4, 16, False, "reflect", False, True, True], # reutrn_complex True ]) def test_stft_not_supported_attrs(self, n_fft, hop_length, win_length, center, pad_mode, normalized, onesided, return_complex, ie_device, precision, ir_version, trace_model): if ie_device == "GPU": @@ -144,9 +148,5 @@ def test_stft_not_supported_attrs(self, n_fft, hop_length, win_length, center, p pytest.xfail( reason="aten::stft conversion is currently supported with onesided=True only") - if return_complex is True: - pytest.xfail( - reason="aten::stft conversion is currently supported with return_complex=False only") - self._test(*self.create_model_with_attrs(n_fft, hop_length, win_length, center, pad_mode, normalized, onesided, return_complex), ie_device, precision, ir_version, kwargs_to_prepare_input={}, trace_model=trace_model) From 775ae1adb7ea3b104959a73f84bd0613ee3135fa Mon Sep 17 00:00:00 2001 From: Alina Kladieva Date: Fri, 17 Jan 2025 22:58:35 +0100 Subject: [PATCH 37/97] Temporarily remove TF layer tests & TF models tests from required (#28533) ### Tickets: - CVS-160803 --------- Signed-off-by: Alina Kladieva --- .github/workflows/linux_arm64.yml | 2 +- .github/workflows/ubuntu_22.yml | 4 ++-- .github/workflows/ubuntu_24.yml | 2 +- .github/workflows/windows_vs2019_release.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index ca1ca6e056e23d..16a1f745ba413a 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -250,7 +250,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_linux_arm64 needs: [Smart_CI, Build, Debian_Packages, Samples, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, - TensorFlow_Models_Tests, PyTorch_Models_Tests, Openvino_tokenizers, TensorFlow_Layer_Tests, Pytorch_Layer_Tests] + TensorFlow_Models_Tests, PyTorch_Models_Tests, Openvino_tokenizers, Pytorch_Layer_Tests] if: ${{ 
always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ubuntu_22.yml b/.github/workflows/ubuntu_22.yml index e5c7d25003de1e..2a10877a07ab7e 100644 --- a/.github/workflows/ubuntu_22.yml +++ b/.github/workflows/ubuntu_22.yml @@ -557,8 +557,8 @@ jobs: Overall_Status: name: ci/gha_overall_status - needs: [Smart_CI, Build, Debian_Packages, Samples, Conformance, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, TensorFlow_Layer_Tests, Pytorch_Layer_Tests, - CPU_Functional_Tests, TensorFlow_Models_Tests_Precommit, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU] + needs: [Smart_CI, Build, Debian_Packages, Samples, Conformance, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, Pytorch_Layer_Tests, + CPU_Functional_Tests, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ubuntu_24.yml b/.github/workflows/ubuntu_24.yml index beac15bfbda97d..6fb0051dcf3e11 100644 --- a/.github/workflows/ubuntu_24.yml +++ b/.github/workflows/ubuntu_24.yml @@ -190,7 +190,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_ubuntu_24 - needs: [Smart_CI, Build, Debian_Packages, Samples, Python_Unit_Tests, Pytorch_Layer_Tests, TensorFlow_Layer_Tests, Openvino_tokenizers] + needs: [Smart_CI, Build, Debian_Packages, Samples, Python_Unit_Tests, Pytorch_Layer_Tests, Openvino_tokenizers] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 5708b529f25acc..025df08bf4b032 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -614,7 +614,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_windows - needs: [ Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, Openvino_tokenizers, TensorFlow_Layer_Tests, Pytorch_Layer_Tests ] + 
needs: [ Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, Openvino_tokenizers, Pytorch_Layer_Tests ] if: ${{ always() }} runs-on: ubuntu-latest steps: From 2ac2b5070819e45ee1b648f61a74d9b467983647 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Sat, 18 Jan 2025 08:28:27 +0100 Subject: [PATCH 38/97] [CLANG_FORMAT] Enable clang-format for TPP adaptation source code (#28522) --- .github/workflows/code_style.yml | 12 +- .../emitters/tpp/x64/jit_brgemm_emitter.cpp | 46 ++++---- .../emitters/tpp/x64/jit_scalar_emitter.cpp | 1 + .../emitters/tpp/x64/jit_scalar_emitter.hpp | 15 ++- .../src/transformations/tpp/x64/op/brgemm.cpp | 58 ++++++---- .../src/transformations/tpp/x64/op/brgemm.hpp | 40 ++++--- .../transformations/tpp/x64/op/descriptor.cpp | 104 +++++++++--------- .../transformations/tpp/x64/op/descriptor.hpp | 36 +++--- .../transformations/tpp/x64/op/eltwise.cpp | 93 ++++++++-------- .../transformations/tpp/x64/op/eltwise.hpp | 30 ++--- .../transformations/tpp/x64/op/equation.cpp | 20 ++-- .../transformations/tpp/x64/op/equation.hpp | 14 ++- .../transformations/tpp/x64/op/factory.cpp | 46 ++++---- .../transformations/tpp/x64/op/factory.hpp | 11 +- .../transformations/tpp/x64/op/modifiers.hpp | 28 ++--- .../src/transformations/tpp/x64/op/reduce.cpp | 16 +-- .../src/transformations/tpp/x64/op/reduce.hpp | 16 +-- .../src/transformations/tpp/x64/op/scalar.cpp | 13 ++- .../src/transformations/tpp/x64/op/scalar.hpp | 10 +- .../tpp/x64/pass/brgemm_to_brgemm_tpp.cpp | 57 ++++++---- .../tpp/x64/pass/brgemm_to_brgemm_tpp.hpp | 3 +- .../tpp/x64/pass/eltwise_to_eltwise_tpp.cpp | 21 ++-- .../tpp/x64/pass/eltwise_to_eltwise_tpp.hpp | 3 +- .../tpp/x64/pass/fuse_tpp_to_equations.cpp | 28 +++-- .../tpp/x64/pass/fuse_tpp_to_equations.hpp | 3 +- .../x64/pass/lowered/brgemm_tpp_blocking.cpp | 28 +++-- .../x64/pass/lowered/brgemm_tpp_blocking.hpp | 9 +- .../x64/pass/lowered/set_tpp_leading_dim.cpp | 74 ++++++------- 
.../x64/pass/lowered/set_tpp_leading_dim.hpp | 5 +- .../tpp/x64/pass/scalar_to_scalar_tpp.cpp | 17 ++- .../tpp/x64/pass/scalar_to_scalar_tpp.hpp | 3 +- 31 files changed, 464 insertions(+), 396 deletions(-) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 3969da2b97c5a1..97b399b1abf48d 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -24,9 +24,11 @@ jobs: sudo apt update sudo apt --assume-yes install clang-format-15 - # Run cmake with -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT in order to enable codestyle check for ITT collector + # Run cmake with extra options to cover as much source code as possible: + # - -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT to enable codestyle check for ITT collector + # - -DENABLE_SNIPPETS_LIBXSMM_TPP to cover snippets TPP adaptation - name: CMake configure - run: cmake -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -B build + run: cmake -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DENABLE_SNIPPETS_LIBXSMM_TPP=ON -B build - name: Create code style diff run: cmake --build build --target clang_format_fix_all -j8 @@ -54,9 +56,11 @@ jobs: sudo apt update sudo apt --assume-yes install binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu g++-aarch64-linux-gnu scons clang-format-15 - # Run cmake with -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT in order to enable codestyle check for ITT collector + # Run cmake with extra options to cover as much source code as possible: + # - -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT to enable codestyle check for ITT collector + # - -DENABLE_SNIPPETS_LIBXSMM_TPP to cover snippets TPP adaptation - name: CMake configure - run: cmake -DENABLE_CLANG_FORMAT=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DCMAKE_TOOLCHAIN_FILE=cmake/arm64.toolchain.cmake -B build_arm64 + run: cmake 
-DENABLE_CLANG_FORMAT=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT -DENABLE_SNIPPETS_LIBXSMM_TPP=ON -DCMAKE_TOOLCHAIN_FILE=cmake/arm64.toolchain.cmake -B build_arm64 - name: Create code style diff run: cmake --build build_arm64 --target clang_format_fix_all -j8 diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.cpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.cpp index 0fcb394a8a5bde..e873d7f7aa98eb 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_brgemm_emitter.cpp @@ -3,6 +3,7 @@ // #include "jit_brgemm_emitter.hpp" + #include "emitters/snippets/x64/jit_snippets_emitters.hpp" #include "transformations/tpp/x64/op/brgemm.hpp" @@ -28,18 +29,15 @@ BrgemmTppEmitter::BrgemmTppEmitter(jit_generator* h, cpu_isa_t isa, const Expres const auto& input_1_desc = expr->get_input_port_descriptor(1); const auto& output_desc = expr->get_output_port_descriptor(0); - std::vector leading_dimensions {brgemm_node->get_input_stride(0), - brgemm_node->get_input_stride(1), - brgemm_node->get_output_stride(0)}; + std::vector leading_dimensions{brgemm_node->get_input_stride(0), + brgemm_node->get_input_stride(1), + brgemm_node->get_output_stride(0)}; auto in_0_prec = ov_to_xsmm_dtype(brgemm_node->get_input_element_type(0)); auto in_1_prec = ov_to_xsmm_dtype(brgemm_node->get_input_element_type(1)); - exec_dtype = in_0_prec == LIBXSMM_DATATYPE_I8 || in_0_prec == LIBXSMM_DATATYPE_U8 ? - LIBXSMM_DATATYPE_I32 : - LIBXSMM_DATATYPE_F32; - auto out_0_prec = exec_dtype == LIBXSMM_DATATYPE_I32 ? - LIBXSMM_DATATYPE_I32 : - LIBXSMM_DATATYPE_F32; + exec_dtype = in_0_prec == LIBXSMM_DATATYPE_I8 || in_0_prec == LIBXSMM_DATATYPE_U8 ? LIBXSMM_DATATYPE_I32 + : LIBXSMM_DATATYPE_F32; + auto out_0_prec = exec_dtype == LIBXSMM_DATATYPE_I32 ? 
LIBXSMM_DATATYPE_I32 : LIBXSMM_DATATYPE_F32; const auto beta = brgemm_node->get_beta(); OV_CPU_JIT_EMITTER_ASSERT(beta == 0 || beta == 1, "Detected unsupported beta value: " + std::to_string(beta)); @@ -54,18 +52,14 @@ BrgemmTppEmitter::BrgemmTppEmitter(jit_generator* h, cpu_isa_t isa, const Expres const auto N = static_cast(*subtensor_in1.rbegin()); const bool is_f32_gemm = in_0_prec == in_1_prec && in_0_prec == LIBXSMM_DATATYPE_F32; - const bool is_bf16_gemm = in_0_prec == in_1_prec && in_0_prec == LIBXSMM_DATATYPE_BF16; + const bool is_bf16_gemm = in_0_prec == in_1_prec && in_0_prec == LIBXSMM_DATATYPE_BF16; const bool is_i8_gemm = in_0_prec == LIBXSMM_DATATYPE_U8 || in_0_prec == LIBXSMM_DATATYPE_I8; - OV_CPU_JIT_EMITTER_ASSERT(is_f32_gemm || - (is_bf16_gemm && K % 2 == 0) || - (is_i8_gemm && K % 4 == 0), + OV_CPU_JIT_EMITTER_ASSERT(is_f32_gemm || (is_bf16_gemm && K % 2 == 0) || (is_i8_gemm && K % 4 == 0), "Unsupported parameter combination for kernel configuration"); - m_compile_flags = is_f32_gemm ? - LIBXSMM_GEMM_FLAGS('N', 'N') : - LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') | - LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | - LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG; + m_compile_flags = is_f32_gemm ? 
LIBXSMM_GEMM_FLAGS('N', 'N') + : LIBXSMM_GEMM_VNNI_FLAGS('N', 'N', 'V', 'N') | + LIBXSMM_GEMM_FLAG_NO_SETUP_TILECONFIG | LIBXSMM_GEMM_FLAG_NO_RESET_TILECONFIG; if (beta == 0) m_compile_flags |= LIBXSMM_GEMM_FLAG_BETA_0; @@ -79,9 +73,15 @@ BrgemmTppEmitter::BrgemmTppEmitter(jit_generator* h, cpu_isa_t isa, const Expres m_compile_flags |= LIBXSMM_GEMM_FLAG_B_UNSIGNED; } - m_shape = libxsmm_create_gemm_shape(N, M, K, - io_strides[1], io_strides[0], io_strides[2], - in_1_prec, in_0_prec, out_0_prec, + m_shape = libxsmm_create_gemm_shape(N, + M, + K, + io_strides[1], + io_strides[0], + io_strides[2], + in_1_prec, + in_0_prec, + out_0_prec, exec_dtype); m_prefetching_flags = LIBXSMM_GEMM_PREFETCH_NONE; } @@ -91,7 +91,7 @@ std::set> BrgemmTppEmitter::get_supported_precisions( return {{element::f32, element::f32}}; } -void BrgemmTppEmitter::validate_arguments(const std::vector &in, const std::vector &out) const { +void BrgemmTppEmitter::validate_arguments(const std::vector& in, const std::vector& out) const { OV_CPU_JIT_EMITTER_ASSERT(in.size() == 2, "Expects 2 input regs, got" + std::to_string(in.size())); OV_CPU_JIT_EMITTER_ASSERT(out.size() == 1, "Expects 1 output reg, got" + std::to_string(out.size())); } @@ -100,7 +100,7 @@ const uintptr_t BrgemmTppEmitter::get_compiled_kernel_ptr() const { return COMPILE_TPP_KERNEL(libxsmm_dispatch_gemm(m_shape, m_compile_flags, m_prefetching_flags)); } -void BrgemmTppEmitter::execute_brgemm_kernel(libxsmm_gemmfunction brg_kernel, void *in0, void *in1, void *out0) { +void BrgemmTppEmitter::execute_brgemm_kernel(libxsmm_gemmfunction brg_kernel, void* in0, void* in1, void* out0) { libxsmm_gemm_param gemm_p; gemm_p.a.primary = in1; gemm_p.b.primary = in0; diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.cpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.cpp index 5b156100073534..fa38eacb4e870b 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.cpp +++ 
b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.cpp @@ -3,6 +3,7 @@ // #include "jit_scalar_emitter.hpp" + #include "emitters/snippets/x64/jit_snippets_emitters.hpp" using namespace Xbyak; diff --git a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.hpp b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.hpp index e8235f6b86e88c..c59153fdb3aaec 100644 --- a/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.hpp +++ b/src/plugins/intel_cpu/src/emitters/tpp/x64/jit_scalar_emitter.hpp @@ -3,8 +3,8 @@ // #pragma once -#include "snippets/lowered/expression.hpp" #include "emitters/plugin/x64/jit_emitter.hpp" +#include "snippets/lowered/expression.hpp" namespace ov { namespace intel_cpu { @@ -13,11 +13,16 @@ class ScalarTppEmitter : public jit_emitter { ScalarTppEmitter(dnnl::impl::cpu::x64::jit_generator* h, dnnl::impl::cpu::x64::cpu_isa_t isa, const ov::snippets::lowered::ExpressionPtr& expr); - size_t get_inputs_num() const override {return 0;} - size_t aux_gprs_count() const override {return 1;} + size_t get_inputs_num() const override { + return 0; + } + size_t aux_gprs_count() const override { + return 1; + } + private: void emit_impl(const std::vector& in, const std::vector& out) const override; }; -} // namespace intel_cpu -} // namespace ov +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp index d9f0bc947db958..4c5f2925ef0735 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp @@ -3,50 +3,60 @@ // #include "brgemm.hpp" + #include "snippets/itt.hpp" -#include "snippets/utils/utils.hpp" #include "snippets/lowered/port_descriptor.hpp" +#include "snippets/utils/utils.hpp" #include "utils/general_utils.h" - namespace ov { namespace intel_cpu { namespace tpp { namespace op { 
-BrgemmTPP::BrgemmTPP(const Output& A, const Output& B, - const size_t offset_a, const size_t offset_b, const size_t offset_c, - std::vector layout_a, std::vector layout_b, std::vector layout_c, +BrgemmTPP::BrgemmTPP(const Output& A, + const Output& B, + const size_t offset_a, + const size_t offset_b, + const size_t offset_c, + std::vector layout_a, + std::vector layout_b, + std::vector layout_c, const float beta) : MemoryAccess(std::set{0, 1}, std::set{0}), modifier::TensorProcessingPrimitive(), - Brgemm(A, B, - offset_a, offset_b, offset_c, - std::move(layout_a), std::move(layout_b), std::move(layout_c)) { + Brgemm(A, B, offset_a, offset_b, offset_c, std::move(layout_a), std::move(layout_b), std::move(layout_c)) { set_beta(beta); } -BrgemmTPP::BrgemmTPP(const Output& A, const Output& B, - const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, - std::vector layout_a, std::vector layout_b, std::vector layout_c, +BrgemmTPP::BrgemmTPP(const Output& A, + const Output& B, + const PortDescriptor& desc_a, + const PortDescriptor& desc_b, + const PortDescriptor& desc_c, + std::vector layout_a, + std::vector layout_b, + std::vector layout_c, const float beta) : MemoryAccess(PortMap{{0, desc_a}, {1, desc_b}}, PortMap{{0, desc_c}}), modifier::TensorProcessingPrimitive(), - Brgemm(A, B, - desc_a, desc_b, desc_c, - std::move(layout_a), std::move(layout_b), std::move(layout_c)) { + Brgemm(A, B, desc_a, desc_b, desc_c, std::move(layout_a), std::move(layout_b), std::move(layout_c)) { set_beta(beta); } std::shared_ptr BrgemmTPP::clone_with_new_inputs(const OutputVector& new_args) const { INTERNAL_OP_SCOPE(BrgemmTPP_clone_with_new_inputs); check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), new_args.at(1), - get_input_port_descriptor(0), get_input_port_descriptor(1), get_output_port_descriptor(0), - snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(0))->get_layout(), - 
snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(1))->get_layout(), - snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(output(0))->get_layout(), - m_beta); + return std::make_shared( + new_args.at(0), + new_args.at(1), + get_input_port_descriptor(0), + get_input_port_descriptor(1), + get_output_port_descriptor(0), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(0))->get_layout(), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(input(1))->get_layout(), + snippets::lowered::PortDescriptorUtils::get_port_descriptor_ptr(output(0))->get_layout(), + m_beta); } bool BrgemmTPP::visit_attributes(AttributeVisitor& visitor) { @@ -55,7 +65,7 @@ bool BrgemmTPP::visit_attributes(AttributeVisitor& visitor) { return Brgemm::visit_attributes(visitor); } -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp index c9199c3c7f82df..4a147f79b2a37e 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp @@ -4,8 +4,8 @@ #pragma once -#include "transformations/snippets/x64/op/brgemm_cpu.hpp" #include "modifiers.hpp" +#include "transformations/snippets/x64/op/brgemm_cpu.hpp" namespace ov { namespace intel_cpu { @@ -22,13 +22,23 @@ class BrgemmTPP : virtual public modifier::TensorProcessingPrimitive, public sni public: OPENVINO_OP("Brgemm", "TppOpset", snippets::op::Brgemm); - BrgemmTPP(const Output& A, const Output& B, - size_t offset_a = 0, size_t offset_b = 0, size_t offset_c = 0, - std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, + BrgemmTPP(const Output& A, + const Output& B, + size_t offset_a = 0, + size_t offset_b = 0, + size_t 
offset_c = 0, + std::vector layout_a = {}, + std::vector layout_b = {}, + std::vector layout_c = {}, float beta = 1); - BrgemmTPP(const Output& A, const Output& B, - const PortDescriptor& desc_a, const PortDescriptor& desc_b, const PortDescriptor& desc_c, - std::vector layout_a = {}, std::vector layout_b = {}, std::vector layout_c = {}, + BrgemmTPP(const Output& A, + const Output& B, + const PortDescriptor& desc_a, + const PortDescriptor& desc_b, + const PortDescriptor& desc_c, + std::vector layout_a = {}, + std::vector layout_b = {}, + std::vector layout_c = {}, float beta = 1); BrgemmTPP() = default; @@ -36,14 +46,18 @@ class BrgemmTPP : virtual public modifier::TensorProcessingPrimitive, public sni bool visit_attributes(AttributeVisitor& visitor) override; - float get_beta() const { return m_beta; } - void set_beta(float beta) { m_beta = beta; } + float get_beta() const { + return m_beta; + } + void set_beta(float beta) { + m_beta = beta; + } private: float m_beta = 0.f; }; -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.cpp index a5f297a491af8b..7cfbba2aeb5be3 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.cpp @@ -11,63 +11,63 @@ namespace op { std::ostream& operator<<(std::ostream& os, const OpDescTPP& od) { switch (od.m_arity) { - case OpDescTPP::ARITY::ZERO: - os << "ARG#" << static_cast(od.m_value); + case OpDescTPP::ARITY::ZERO: + os << "ARG#" << static_cast(od.m_value); + break; + case OpDescTPP::ARITY::UNARY: + switch (static_cast(od.m_value)) { + case LIBXSMM_MELTW_TYPE_UNARY_EXP: + os << "EXP"; break; - case OpDescTPP::ARITY::UNARY: - switch (static_cast(od.m_value)) { - case 
LIBXSMM_MELTW_TYPE_UNARY_EXP: - os << "EXP"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_X2: - os << "SQ"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_SQRT: - os << "SQRT"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_RELU: - os << "RELU"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_RECIPROCAL: - os << "RECIPROCAL"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD: - os << "REDUCE_ADD"; - break; - case LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_MAX: - os << "REDUCE_MAX"; - break; - default: - OPENVINO_THROW("Unsupported TPP Unary op type for serialization"); - } + case LIBXSMM_MELTW_TYPE_UNARY_X2: + os << "SQ"; break; - case OpDescTPP::ARITY::BINARY: - switch (static_cast(od.m_value)) { - case LIBXSMM_MELTW_TYPE_BINARY_ADD: - os << "ADD"; - break; - case LIBXSMM_MELTW_TYPE_BINARY_SUB: - os << "SUB"; - break; - case LIBXSMM_MELTW_TYPE_BINARY_MUL: - os << "MUL"; - break; - case LIBXSMM_MELTW_TYPE_BINARY_DIV: - os << "DIV"; - break; - default: - OPENVINO_THROW("Unsupported TPP Binary op type for serialization"); - } + case LIBXSMM_MELTW_TYPE_UNARY_SQRT: + os << "SQRT"; break; - case OpDescTPP::ARITY::UNDEFINED: - os << "Undefined"; + case LIBXSMM_MELTW_TYPE_UNARY_RELU: + os << "RELU"; + break; + case LIBXSMM_MELTW_TYPE_UNARY_RECIPROCAL: + os << "RECIPROCAL"; + break; + case LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD: + os << "REDUCE_ADD"; + break; + case LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_MAX: + os << "REDUCE_MAX"; + break; + default: + OPENVINO_THROW("Unsupported TPP Unary op type for serialization"); + } + break; + case OpDescTPP::ARITY::BINARY: + switch (static_cast(od.m_value)) { + case LIBXSMM_MELTW_TYPE_BINARY_ADD: + os << "ADD"; + break; + case LIBXSMM_MELTW_TYPE_BINARY_SUB: + os << "SUB"; + break; + case LIBXSMM_MELTW_TYPE_BINARY_MUL: + os << "MUL"; + break; + case LIBXSMM_MELTW_TYPE_BINARY_DIV: + os << "DIV"; break; default: - OPENVINO_THROW("Unhandled ARITY"); + OPENVINO_THROW("Unsupported TPP Binary op type for serialization"); + } + break; + case 
OpDescTPP::ARITY::UNDEFINED: + os << "Undefined"; + break; + default: + OPENVINO_THROW("Unhandled ARITY"); } return os; } -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.hpp index e6aac272a905b7..f205c5f58bde46 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/descriptor.hpp @@ -14,16 +14,20 @@ namespace op { class OpDescTPP { public: // Note: zero arity represent equation arguments - enum class ARITY {UNDEFINED, UNARY, BINARY, ZERO}; + enum class ARITY { UNDEFINED, UNARY, BINARY, ZERO }; OpDescTPP() = default; // Note: for zero arity op_type is interpreted as the argument index (op inputs and args have different order) OpDescTPP(ARITY arity, int arg_idx) : m_arity(arity), m_value{arg_idx}, m_flags{0} { OPENVINO_ASSERT(m_arity == ARITY::ZERO, "Only zero-arity op descs could be created directly"); } - explicit OpDescTPP(libxsmm_meltw_binary_type op_type, libxsmm_bitfield flags = LIBXSMM_MELTW_FLAG_BINARY_NONE) : - m_arity{ARITY::BINARY}, m_value{op_type}, m_flags{flags} {} - explicit OpDescTPP(libxsmm_meltw_unary_type op_type, libxsmm_bitfield flags = LIBXSMM_MELTW_FLAG_UNARY_NONE) : - m_arity{ARITY::UNARY}, m_value{op_type}, m_flags{flags} {} + explicit OpDescTPP(libxsmm_meltw_binary_type op_type, libxsmm_bitfield flags = LIBXSMM_MELTW_FLAG_BINARY_NONE) + : m_arity{ARITY::BINARY}, + m_value{op_type}, + m_flags{flags} {} + explicit OpDescTPP(libxsmm_meltw_unary_type op_type, libxsmm_bitfield flags = LIBXSMM_MELTW_FLAG_UNARY_NONE) + : m_arity{ARITY::UNARY}, + m_value{op_type}, + m_flags{flags} {} operator libxsmm_meltw_binary_type() const { OPENVINO_ASSERT(m_arity == ARITY::BINARY, "Unsupported TPP OpDesc 
conversion"); return static_cast(m_value); @@ -36,17 +40,21 @@ class OpDescTPP { OPENVINO_ASSERT(m_arity == ARITY::ZERO, "Unsupported TPP OpDesc conversion"); return m_value; } - ARITY get_arity() const { return m_arity; } - libxsmm_bitfield get_flags() const { return m_flags; } + ARITY get_arity() const { + return m_arity; + } + libxsmm_bitfield get_flags() const { + return m_flags; + } friend std::ostream& operator<<(std::ostream& os, const OpDescTPP& od); private: - const ARITY m_arity {ARITY::UNDEFINED}; - const int m_value {-1}; - const libxsmm_bitfield m_flags {0}; + const ARITY m_arity{ARITY::UNDEFINED}; + const int m_value{-1}; + const libxsmm_bitfield m_flags{0}; }; -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp index 1df57fdd5a8f4f..44aaf251bc201f 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp @@ -9,31 +9,28 @@ namespace intel_cpu { namespace tpp { namespace op { -#define GENERAL_AUX_METHODS(OP, OP_TYPE, ...) \ - std::shared_ptr OP::clone_with_new_inputs(const OutputVector& new_args) const {\ - check_new_args_count(this, new_args);\ - const auto& new_op = std::make_shared(__VA_ARGS__);\ - new_op->clone_memory_access_ports(*this);\ - return new_op;\ -} \ - bool OP::visit_attributes(AttributeVisitor& visitor) {\ - return OP_TYPE::visit_attributes(visitor);\ -}\ +#define GENERAL_AUX_METHODS(OP, OP_TYPE, ...) 
\ + std::shared_ptr OP::clone_with_new_inputs(const OutputVector& new_args) const { \ + check_new_args_count(this, new_args); \ + const auto& new_op = std::make_shared(__VA_ARGS__); \ + new_op->clone_memory_access_ports(*this); \ + return new_op; \ + } \ + bool OP::visit_attributes(AttributeVisitor& visitor) { return OP_TYPE::visit_attributes(visitor); } // Note: Unary Ops don't require broadcasting flags update => no need to override validate_and_infer_types -#define BINARY_AUX_METHODS(BINARY_OP, OV_OP) GENERAL_AUX_METHODS(BINARY_OP, BinaryEltwiseTPP, new_args.at(0), new_args.at(1), this->get_autob())\ -void BINARY_OP::validate_and_infer_types() {\ - OV_OP::validate_and_infer_types();\ - m_flags = get_broadcasting_flags(get_input_partial_shape(0), get_input_partial_shape(1));\ -} +#define BINARY_AUX_METHODS(BINARY_OP, OV_OP) \ + GENERAL_AUX_METHODS(BINARY_OP, BinaryEltwiseTPP, new_args.at(0), new_args.at(1), this->get_autob()) \ + void BINARY_OP::validate_and_infer_types() { \ + OV_OP::validate_and_infer_types(); \ + m_flags = get_broadcasting_flags(get_input_partial_shape(0), get_input_partial_shape(1)); \ + } #define UNARY_AUX_METHODS(UNARY_OP) GENERAL_AUX_METHODS(UNARY_OP, UnaryEltwiseTPP, new_args.at(0)) bool EltwiseTPP::is_supported(const std::shared_ptr& node) { - return ov::is_type(node) || - ov::is_type(node) || - ov::is_type(node) || - ov::is_type(node); + return ov::is_type(node) || ov::is_type(node) || + ov::is_type(node) || ov::is_type(node); } bool EltwiseTPP::visit_attributes(AttributeVisitor& visitor) { @@ -46,13 +43,14 @@ BinaryEltwiseTPP::BinaryEltwiseTPP(libxsmm_meltw_binary_type op_type) : EltwiseT ctor_initialize(std::set{0, 1}, std::set{0}); } - - -libxsmm_bitfield BinaryEltwiseTPP::get_broadcasting_flags(const ov::PartialShape& pshape_0, const ov::PartialShape& pshape_1) { - return get_broadcasting_flags(snippets::utils::pshape_to_vdims(pshape_0), snippets::utils::pshape_to_vdims(pshape_1)); +libxsmm_bitfield 
BinaryEltwiseTPP::get_broadcasting_flags(const ov::PartialShape& pshape_0, + const ov::PartialShape& pshape_1) { + return get_broadcasting_flags(snippets::utils::pshape_to_vdims(pshape_0), + snippets::utils::pshape_to_vdims(pshape_1)); } -libxsmm_bitfield BinaryEltwiseTPP::get_broadcasting_flags(const snippets::VectorDims& shape_0, const snippets::VectorDims& shape_1) { +libxsmm_bitfield BinaryEltwiseTPP::get_broadcasting_flags(const snippets::VectorDims& shape_0, + const snippets::VectorDims& shape_1) { auto get_subshape = [](const snippets::VectorDims& shape) { snippets::VectorDims subshape(2, 1); for (size_t i = 0; i < std::min(subshape.size(), shape.size()); i++) { @@ -63,8 +61,7 @@ libxsmm_bitfield BinaryEltwiseTPP::get_broadcasting_flags(const snippets::Vector snippets::VectorDims subshape_0 = get_subshape(shape_0); snippets::VectorDims subshape_1 = get_subshape(shape_1); - if (snippets::utils::is_dynamic_vdims(subshape_0) || - snippets::utils::is_dynamic_vdims(subshape_1)) + if (snippets::utils::is_dynamic_vdims(subshape_0) || snippets::utils::is_dynamic_vdims(subshape_1)) return LIBXSMM_MELTW_FLAG_BINARY_NONE; if (subshape_0 == subshape_1) { return LIBXSMM_MELTW_FLAG_BINARY_NONE; @@ -99,62 +96,64 @@ UnaryEltwiseTPP::UnaryEltwiseTPP(libxsmm_meltw_unary_type op_type) : EltwiseTPP( } Add::Add(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) -: BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_ADD), ov::op::v1::Add(arg0, arg1, auto_broadcast) { + : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_ADD), + ov::op::v1::Add(arg0, arg1, auto_broadcast) { m_flags = get_broadcasting_flags(arg0.get_partial_shape(), arg1.get_partial_shape()); } BINARY_AUX_METHODS(Add, ov::op::v1::Add) Subtract::Subtract(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) - : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_SUB), ov::op::v1::Subtract(arg0, arg1, auto_broadcast) { + : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_SUB), + 
ov::op::v1::Subtract(arg0, arg1, auto_broadcast) { m_flags = get_broadcasting_flags(arg0.get_partial_shape(), arg1.get_partial_shape()); } BINARY_AUX_METHODS(Subtract, ov::op::v1::Subtract) Multiply::Multiply(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) - : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_MUL), ov::op::v1::Multiply(arg0, arg1, auto_broadcast) { + : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_MUL), + ov::op::v1::Multiply(arg0, arg1, auto_broadcast) { m_flags = get_broadcasting_flags(arg0.get_partial_shape(), arg1.get_partial_shape()); } BINARY_AUX_METHODS(Multiply, ov::op::v1::Multiply) Divide::Divide(const Output& arg0, const Output& arg1, const AutoBroadcastSpec& auto_broadcast) - : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_DIV), ov::op::v1::Divide(arg0, arg1, auto_broadcast) { + : BinaryEltwiseTPP(LIBXSMM_MELTW_TYPE_BINARY_DIV), + ov::op::v1::Divide(arg0, arg1, auto_broadcast) { m_flags = get_broadcasting_flags(arg0.get_partial_shape(), arg1.get_partial_shape()); } BINARY_AUX_METHODS(Divide, ov::op::v1::Divide) -Exp::Exp(const Output& arg0) : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_EXP), ov::op::v0::Exp(arg0) { -} +Exp::Exp(const Output& arg0) : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_EXP), ov::op::v0::Exp(arg0) {} UNARY_AUX_METHODS(Exp) -Relu::Relu(const Output& arg0) : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_RELU), ov::op::v0::Relu(arg0) { -} +Relu::Relu(const Output& arg0) : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_RELU), ov::op::v0::Relu(arg0) {} UNARY_AUX_METHODS(Relu) -Reciprocal::Reciprocal(const Output& arg) : - UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_RECIPROCAL), ov::snippets::op::PowerStatic(arg, -1.f) { -} +Reciprocal::Reciprocal(const Output& arg) + : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_RECIPROCAL), + ov::snippets::op::PowerStatic(arg, -1.f) {} UNARY_AUX_METHODS(Reciprocal) -Square::Square(const Output& arg) : - UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_X2), ov::snippets::op::PowerStatic(arg, 2.f) 
{ -} +Square::Square(const Output& arg) + : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_X2), + ov::snippets::op::PowerStatic(arg, 2.f) {} UNARY_AUX_METHODS(Square) -SquareRoot::SquareRoot(const Output& arg) : - UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_SQRT), ov::snippets::op::PowerStatic(arg, 0.5f) { -} +SquareRoot::SquareRoot(const Output& arg) + : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_SQRT), + ov::snippets::op::PowerStatic(arg, 0.5f) {} UNARY_AUX_METHODS(SquareRoot) -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp index a61668c2a04328..7338450ff8257d 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.hpp @@ -4,18 +4,17 @@ #pragma once +#include "descriptor.hpp" #include "modifiers.hpp" #include "openvino/op/add.hpp" -#include "openvino/op/subtract.hpp" -#include "openvino/op/multiply.hpp" #include "openvino/op/divide.hpp" #include "openvino/op/exp.hpp" +#include "openvino/op/multiply.hpp" #include "openvino/op/relu.hpp" +#include "openvino/op/subtract.hpp" #include "snippets/op/powerstatic.hpp" #include "snippets/utils/utils.hpp" -#include "descriptor.hpp" - namespace ov { namespace intel_cpu { namespace tpp { @@ -27,17 +26,20 @@ class EltwiseTPP : public modifier::TensorProcessingPrimitive { public: static bool is_supported(const std::shared_ptr& node); bool visit_attributes(AttributeVisitor& visitor); - virtual OpDescTPP get_op_desc() const = 0; + virtual OpDescTPP get_op_desc() const = 0; }; class BinaryEltwiseTPP : public EltwiseTPP { public: BinaryEltwiseTPP(libxsmm_meltw_binary_type op_type); - OpDescTPP get_op_desc() const override { return OpDescTPP(m_op_type, m_flags); } + OpDescTPP get_op_desc() const 
override { + return OpDescTPP(m_op_type, m_flags); + } protected: static libxsmm_bitfield get_broadcasting_flags(const ov::PartialShape& pshape_0, const ov::PartialShape& pshape_1); - static libxsmm_bitfield get_broadcasting_flags(const snippets::VectorDims& pshape_0, const snippets::VectorDims& pshape_1); + static libxsmm_bitfield get_broadcasting_flags(const snippets::VectorDims& pshape_0, + const snippets::VectorDims& pshape_1); libxsmm_bitfield m_flags; libxsmm_meltw_binary_type m_op_type; }; @@ -45,7 +47,10 @@ class BinaryEltwiseTPP : public EltwiseTPP { class UnaryEltwiseTPP : public EltwiseTPP { public: UnaryEltwiseTPP(libxsmm_meltw_unary_type op_type); - OpDescTPP get_op_desc() const override { return OpDescTPP(m_op_type); } + OpDescTPP get_op_desc() const override { + return OpDescTPP(m_op_type); + } + private: libxsmm_meltw_unary_type m_op_type; }; @@ -110,7 +115,6 @@ class Reciprocal : public UnaryEltwiseTPP, public ov::snippets::op::PowerStatic bool visit_attributes(AttributeVisitor& visitor) override; }; - class Square : public UnaryEltwiseTPP, public ov::snippets::op::PowerStatic { public: OPENVINO_OP("Square", "TppOpset", snippets::op::PowerStatic); @@ -127,7 +131,7 @@ class SquareRoot : public UnaryEltwiseTPP, public ov::snippets::op::PowerStatic bool visit_attributes(AttributeVisitor& visitor) override; }; -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.cpp index 8a22aa400cc16c..04306ca8f8b6c5 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.cpp @@ -9,9 +9,10 @@ namespace intel_cpu { namespace tpp { namespace op { -EquationTPP::EquationTPP(const OutputVector& arguments, std::vector op_descs) : 
- modifier::TensorProcessingPrimitive(), ov::op::Op(arguments), - m_op_descs(std::move(op_descs)) { +EquationTPP::EquationTPP(const OutputVector& arguments, std::vector op_descs) + : modifier::TensorProcessingPrimitive(), + ov::op::Op(arguments), + m_op_descs(std::move(op_descs)) { // Initialize input/output ports as memory access ports std::set ma_iport_idx; for (size_t i = 0; i < get_input_size(); i++) @@ -43,13 +44,14 @@ void EquationTPP::validate_and_infer_types() { for (size_t i = 1; i < get_input_size(); i++) { OPENVINO_ASSERT(element::Type::merge(etype, etype, get_input_element_type(i)), "Incompatible element types in TPP equation"); - OPENVINO_ASSERT(ov::PartialShape::broadcast_merge_into(shape, get_input_partial_shape(i), ov::op::AutoBroadcastType::NUMPY), - "Incompatible element types in TPP equation"); + OPENVINO_ASSERT( + ov::PartialShape::broadcast_merge_into(shape, get_input_partial_shape(i), ov::op::AutoBroadcastType::NUMPY), + "Incompatible element types in TPP equation"); } set_output_type(0, etype, shape); } -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.hpp index 4ba53393336ad4..bf16f149b415de 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/equation.hpp @@ -4,9 +4,9 @@ #pragma once +#include "descriptor.hpp" #include "modifiers.hpp" #include "openvino/op/op.hpp" -#include "descriptor.hpp" namespace ov { namespace intel_cpu { @@ -20,13 +20,15 @@ class EquationTPP : public modifier::TensorProcessingPrimitive, public ov::op::O std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; void validate_and_infer_types() override; bool visit_attributes(AttributeVisitor& visitor) 
override; - const std::vector& get_op_descs() { return m_op_descs; } + const std::vector& get_op_descs() { + return m_op_descs; + } private: std::vector m_op_descs; }; -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp index 3fdcd30e7c4eb6..e0e890a347a026 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp @@ -3,10 +3,11 @@ // #include "factory.hpp" + #include "eltwise.hpp" -#include "reduce.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "ov_ops/type_relaxed.hpp" +#include "reduce.hpp" namespace ov { namespace intel_cpu { @@ -38,37 +39,39 @@ struct CustomPowerStaticBuilder : public NodeFactory::TPPCustomBuilder { } }; -} // namespace -#define CREATE_UNARY_TPP_NODE(tpp_node_type) \ - [](const std::shared_ptr& node) -> std::shared_ptr { \ +} // namespace +#define CREATE_UNARY_TPP_NODE(tpp_node_type) \ + [](const std::shared_ptr& node) -> std::shared_ptr { \ return std::make_shared(node->get_input_source_output(0)); \ } -#define CREATE_BINARY_TPP_NODE(tpp_node_type) \ - [](const std::shared_ptr& node) -> std::shared_ptr { \ - return std::make_shared(node->get_input_source_output(0), node->get_input_source_output(1), node->get_autob()); \ +#define CREATE_BINARY_TPP_NODE(tpp_node_type) \ + [](const std::shared_ptr& node) -> std::shared_ptr { \ + return std::make_shared(node->get_input_source_output(0), \ + node->get_input_source_output(1), \ + node->get_autob()); \ } -#define CREATE_REDUCE_TPP_NODE(tpp_node_type) \ - [](const std::shared_ptr& node) -> std::shared_ptr { \ - const auto& reduce = ov::as_type_ptr(node); \ - OPENVINO_ASSERT(reduce, "Attempt to create TPP Reduce from invalid node"); \ 
+#define CREATE_REDUCE_TPP_NODE(tpp_node_type) \ + [](const std::shared_ptr& node) -> std::shared_ptr { \ + const auto& reduce = ov::as_type_ptr(node); \ + OPENVINO_ASSERT(reduce, "Attempt to create TPP Reduce from invalid node"); \ return std::make_shared(reduce->get_input_source_output(0), reduce->get_axis()); \ } -std::unordered_map NodeFactory::m_direct_mapping { +std::unordered_map NodeFactory::m_direct_mapping{ {ov::op::v1::Add::get_type_info_static(), CREATE_BINARY_TPP_NODE(Add)}, {ov::op::v1::Subtract::get_type_info_static(), CREATE_BINARY_TPP_NODE(Subtract)}, {ov::op::v1::Multiply::get_type_info_static(), CREATE_BINARY_TPP_NODE(Multiply)}, {ov::op::v1::Divide::get_type_info_static(), CREATE_BINARY_TPP_NODE(Divide)}, {ov::op::v0::Exp::get_type_info_static(), CREATE_UNARY_TPP_NODE(Exp)}, {ov::op::v0::Relu::get_type_info_static(), CREATE_UNARY_TPP_NODE(Relu)}, - // Note that we don't support conversion from ngraph ops here, since they have a broader semantics (e.g. multiple axis provided at a secont input) + // Note that we don't support conversion from ngraph ops here, since they have a broader semantics (e.g. 
multiple + // axis provided at a secont input) {ov::snippets::op::ReduceMax::get_type_info_static(), CREATE_REDUCE_TPP_NODE(ReduceMax)}, {ov::snippets::op::ReduceSum::get_type_info_static(), CREATE_REDUCE_TPP_NODE(ReduceSum)}, }; - std::vector NodeFactory::m_custom_mapping{CustomPowerStaticBuilder()}; std::shared_ptr NodeFactory::create(const std::shared_ptr& n) { @@ -95,13 +98,16 @@ bool NodeFactory::is_supported(const std::shared_ptr& n) { // Note: verify that TypeRelaxed property is maintained (mismatched input precisions) // after low precisions are enabled (ticket: 132328) const auto& ins = n->inputs(); - auto is_fp32_input = [](const ov::Input& in){ return in.get_element_type() == element::f32; }; + auto is_fp32_input = [](const ov::Input& in) { + return in.get_element_type() == element::f32; + }; const bool all_inputs_fp32 = std::all_of(ins.begin(), ins.end(), is_fp32_input); return (m_direct_mapping.count(n->get_type_info()) || - std::any_of(m_custom_mapping.begin(), m_custom_mapping.end(), matches)) && all_inputs_fp32; + std::any_of(m_custom_mapping.begin(), m_custom_mapping.end(), matches)) && + all_inputs_fp32; } -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp index b3090ebdec47e2..9cfcc2f6226205 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp @@ -4,8 +4,8 @@ #pragma once -#include "openvino/op/op.hpp" #include "openvino/core/type.hpp" +#include "openvino/op/op.hpp" namespace ov { namespace intel_cpu { @@ -21,11 +21,12 @@ class NodeFactory { tpp_matcher matcher; tpp_builder builder; }; + private: static std::unordered_map m_direct_mapping; static std::vector m_custom_mapping; }; -} // namespace 
op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp index 507276a1c2c898..d9ecc3629f2430 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp @@ -16,19 +16,19 @@ namespace modifier { * @ingroup snippets */ class TensorProcessingPrimitive : virtual public snippets::modifier::MemoryAccess { - public: - void clone_memory_access_ports(const TensorProcessingPrimitive& other) { - m_input_ports = other.m_input_ports; - m_output_ports = other.m_output_ports; - } - static bool visit_attributes(AttributeVisitor& visitor) { - std::string modifier{"TPP"}; - visitor.on_attribute("modifier", modifier); - return true; - } +public: + void clone_memory_access_ports(const TensorProcessingPrimitive& other) { + m_input_ports = other.m_input_ports; + m_output_ports = other.m_output_ports; + } + static bool visit_attributes(AttributeVisitor& visitor) { + std::string modifier{"TPP"}; + visitor.on_attribute("modifier", modifier); + return true; + } }; -} // namespace modifier -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace modifier +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp index 63119623856bc7..11fc73b949a55c 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp @@ -10,8 +10,8 @@ namespace tpp { namespace op { ReduceMax::ReduceMax(const Output& arg, size_t axis) - : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_MAX), ov::snippets::op::ReduceMax(arg, 
axis) { -} + : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_MAX), + ov::snippets::op::ReduceMax(arg, axis) {} std::shared_ptr ReduceMax::clone_with_new_inputs(const OutputVector& new_args) const { check_new_args_count(this, new_args); @@ -26,8 +26,8 @@ bool ReduceMax::visit_attributes(AttributeVisitor& visitor) { } ReduceSum::ReduceSum(const Output& arg, size_t axis) - : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD), ov::snippets::op::ReduceSum(arg, axis) { -} + : UnaryEltwiseTPP(LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD), + ov::snippets::op::ReduceSum(arg, axis) {} std::shared_ptr ReduceSum::clone_with_new_inputs(const OutputVector& new_args) const { check_new_args_count(this, new_args); @@ -41,7 +41,7 @@ bool ReduceSum::visit_attributes(AttributeVisitor& visitor) { return UnaryEltwiseTPP::visit_attributes(visitor); } -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp index 9542c4ec90b0b6..f66e913f85b6e7 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp @@ -4,12 +4,10 @@ #pragma once -#include "modifiers.hpp" #include "eltwise.hpp" -#include "snippets/op/reduce.hpp" - - #include "libxsmm_typedefs.h" +#include "modifiers.hpp" +#include "snippets/op/reduce.hpp" namespace ov { namespace intel_cpu { @@ -24,6 +22,7 @@ class ReduceMax : public UnaryEltwiseTPP, public ov::snippets::op::ReduceMax { ReduceMax(const Output& arg, size_t axis); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; bool visit_attributes(AttributeVisitor& visitor) override; + private: libxsmm_meltw_binary_type m_op_type; }; @@ -34,11 +33,12 @@ class ReduceSum : public UnaryEltwiseTPP, public 
ov::snippets::op::ReduceSum { ReduceSum(const Output& arg, size_t axis); std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; bool visit_attributes(AttributeVisitor& visitor) override; + private: libxsmm_meltw_binary_type m_op_type; }; -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp index 566a2a5afde658..98a107380aa7d4 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp @@ -3,6 +3,7 @@ // #include "scalar.hpp" + #include "modifiers.hpp" namespace ov { @@ -21,11 +22,11 @@ std::shared_ptr Scalar::clone_with_new_inputs(const OutputVector& new_args bool Scalar::visit_attributes(AttributeVisitor& visitor) { modifier::TensorProcessingPrimitive::visit_attributes(visitor); - return snippets::op::Scalar::visit_attributes(visitor);; + return snippets::op::Scalar::visit_attributes(visitor); + ; } - -} // namespace op -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp index f9578c20fb13f5..c619d7b6ab1937 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp @@ -4,8 +4,8 @@ #pragma once -#include "modifiers.hpp" #include "eltwise.hpp" +#include "modifiers.hpp" #include "snippets/op/reduce.hpp" namespace ov { @@ -26,7 +26,7 @@ class Scalar : public ov::snippets::op::Scalar { bool visit_attributes(AttributeVisitor& visitor) override; }; -} // namespace op -} // namespace 
tpp -} // namespace intel_cpu -} // namespace ov +} // namespace op +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp index 53992b1e67da9c..571e292104d132 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp @@ -2,22 +2,18 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "snippets/itt.hpp" - #include "brgemm_to_brgemm_tpp.hpp" -#include "snippets/utils/utils.hpp" -#include "snippets/op/brgemm.hpp" -#include "transformations/tpp/x64/op/brgemm.hpp" - +#include "cpu_shape.h" #include "openvino/core/rt_info.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/pattern/matcher.hpp" - -#include "cpu_shape.h" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "snippets/itt.hpp" +#include "snippets/op/brgemm.hpp" +#include "snippets/utils/utils.hpp" +#include "transformations/tpp/x64/op/brgemm.hpp" #include "utils/general_utils.h" - namespace ov { namespace intel_cpu { namespace tpp { @@ -27,13 +23,15 @@ using namespace snippets::lowered; bool BrgemmToBrgemmTPP::is_supported_brgemm_configuration(const std::vector>& layouts, const ov::element::TypeVector& precisions) { - OPENVINO_ASSERT(layouts.size() == 3 && precisions.size() == 3, "snippets::op::Brgemm must have 2 inputs and 1 output"); + OPENVINO_ASSERT(layouts.size() == 3 && precisions.size() == 3, + "snippets::op::Brgemm must have 2 inputs and 1 output"); const bool supported_layouts = std::all_of(layouts.begin(), layouts.end(), [](const std::vector& layout) { return layout.empty() || layout.back() == layout.size() - 1; }); - const bool supported_precisions = std::all_of(precisions.begin(), precisions.end(), [](const ov::element::Type& et) { - return et == 
ov::element::f32; - }); + const bool supported_precisions = + std::all_of(precisions.begin(), precisions.end(), [](const ov::element::Type& et) { + return et == ov::element::f32; + }); return supported_layouts && supported_precisions; } @@ -79,17 +77,28 @@ BrgemmToBrgemmTPP::BrgemmToBrgemmTPP() { if (precision_a == ov::element::f32) { brgemm_tpp = std::make_shared(brgemm->input_value(0), brgemm->input_value(1), - offset_a, offset_b, offset_c, - layout_a, layout_b, layout_c); + offset_a, + offset_b, + offset_c, + layout_a, + layout_b, + layout_c); } OPENVINO_ASSERT(brgemm_tpp, "Failed to create BrgemmTPP node in the BrgemmToBrgemmTPP pass"); brgemm_tpp->set_friendly_name(brgemm->get_friendly_name()); ov::replace_node(brgemm, brgemm_tpp); - // Set FULL_DIM tensors on ports to avoid automatic loop markup (blocked loops will be inserted in a separate transformation) - PortDescriptorUtils::set_port_descriptor(brgemm_tpp->input(0), brgemm_in0_desc->get_subtensor(), brgemm_in0_desc->get_layout()); - PortDescriptorUtils::set_port_descriptor(brgemm_tpp->input(1), brgemm_in1_desc->get_subtensor(), brgemm_in1_desc->get_layout()); - PortDescriptorUtils::set_port_descriptor(brgemm_tpp->output(0), brgemm_out_desc->get_subtensor(), brgemm_out_desc->get_layout()); + // Set FULL_DIM tensors on ports to avoid automatic loop markup (blocked loops will be inserted in a separate + // transformation) + PortDescriptorUtils::set_port_descriptor(brgemm_tpp->input(0), + brgemm_in0_desc->get_subtensor(), + brgemm_in0_desc->get_layout()); + PortDescriptorUtils::set_port_descriptor(brgemm_tpp->input(1), + brgemm_in1_desc->get_subtensor(), + brgemm_in1_desc->get_layout()); + PortDescriptorUtils::set_port_descriptor(brgemm_tpp->output(0), + brgemm_out_desc->get_subtensor(), + brgemm_out_desc->get_layout()); // need to run validate_and_infer_types manually: either input shapes were updated or // output Layout was updated (out shape will be updated in validate_and_infer_types()) @@ -101,7 +110,7 
@@ BrgemmToBrgemmTPP::BrgemmToBrgemmTPP() { auto m = std::make_shared(m_brgemm, matcher_name); register_matcher(m, callback); } -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp index 2b73104d1e1335..6e1d9f110c6aec 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp @@ -16,7 +16,7 @@ namespace pass { * @brief Converts Snippets Brgemm to BrgemmTPP operation, if possible. Only fp32 Brgemms are currently converted. * @ingroup snippets */ -class BrgemmToBrgemmTPP: public ov::pass::MatcherPass { +class BrgemmToBrgemmTPP : public ov::pass::MatcherPass { public: OPENVINO_MATCHER_PASS_RTTI("BrgemmToBrgemmTPP"); BrgemmToBrgemmTPP(); @@ -25,7 +25,6 @@ class BrgemmToBrgemmTPP: public ov::pass::MatcherPass { const ov::element::TypeVector& precisions); }; - } // namespace pass } // namespace tpp } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp index da83038f5455f8..63dd44ca133fa0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp @@ -2,17 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "snippets/itt.hpp" -#include "snippets/utils/utils.hpp" #include "eltwise_to_eltwise_tpp.hpp" -#include "openvino/pass/pattern/op/wrap_type.hpp" - -#include "transformations/tpp/x64/op/factory.hpp" #include "openvino/op/util/binary_elementwise_arithmetic.hpp" #include 
"openvino/op/util/unary_elementwise_arithmetic.hpp" +#include "openvino/pass/pattern/op/wrap_type.hpp" +#include "snippets/itt.hpp" #include "snippets/lowered/port_descriptor.hpp" #include "snippets/op/reduce.hpp" +#include "snippets/utils/utils.hpp" +#include "transformations/tpp/x64/op/factory.hpp" namespace ov { namespace intel_cpu { @@ -29,7 +28,6 @@ EltwiseToEltwiseTPP::EltwiseToEltwiseTPP() { ov::op::util::BinaryElementwiseArithmetic, ov::snippets::op::ReduceBase>(is_supported_by_tpp); - auto callback = [=](ov::pass::pattern::Matcher& m) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "ov::intel_cpu::pass::EltwiseToEltwiseTPP") const auto node = m.get_match_root(); @@ -41,7 +39,8 @@ EltwiseToEltwiseTPP::EltwiseToEltwiseTPP() { OPENVINO_ASSERT(tpp_eltwise, "Failed to create TPP node"); const size_t M_block = 32; - const size_t N_block = ov::is_type(node) ? ov::snippets::utils::get_full_dim_value() : 64; + const size_t N_block = + ov::is_type(node) ? ov::snippets::utils::get_full_dim_value() : 64; ov::replace_node_update_name(node, tpp_eltwise); for (size_t i = 0; i < node->get_input_size(); i++) ov::snippets::lowered::PortDescriptorUtils::set_port_descriptor(tpp_eltwise->input(i), {M_block, N_block}); @@ -54,7 +53,7 @@ EltwiseToEltwiseTPP::EltwiseToEltwiseTPP() { auto m = std::make_shared(supported_eltwise, matcher_name); register_matcher(m, callback); } -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp index f0bdab120c3498..0b68074c657c15 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp @@ -16,13 +16,12 @@ namespace 
pass { * @brief Converts elementwise operations supported by the TPP backend to the dedicated TPP opset * @ingroup snippets */ -class EltwiseToEltwiseTPP: public ov::pass::MatcherPass { +class EltwiseToEltwiseTPP : public ov::pass::MatcherPass { public: OPENVINO_MATCHER_PASS_RTTI("EltwiseToEltwiseTPP"); EltwiseToEltwiseTPP(); }; - } // namespace pass } // namespace tpp } // namespace intel_cpu diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp index 885ff753843588..b64522154adc9e 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp @@ -3,25 +3,25 @@ // #include "fuse_tpp_to_equations.hpp" -#include "transformations/tpp/x64/op/eltwise.hpp" -#include "transformations/tpp/x64/op/equation.hpp" -#include "snippets/utils/utils.hpp" #include "snippets/itt.hpp" #include "snippets/lowered/port_descriptor.hpp" +#include "snippets/utils/utils.hpp" +#include "transformations/tpp/x64/op/eltwise.hpp" +#include "transformations/tpp/x64/op/equation.hpp" namespace ov { namespace intel_cpu { namespace tpp { namespace pass { -using snippets::lowered::ExpressionPtr; using snippets::lowered::ExpressionPort; +using snippets::lowered::ExpressionPtr; using NodePtr = std::shared_ptr; bool FuseTPPToEquations::fuse_from_root(const NodePtr& root, const std::shared_ptr& m) { using snippets::lowered::PortDescriptorUtils; OutputVector eq_ivals; std::vector op_descs; - std::unordered_map node_replace_map; + std::unordered_map node_replace_map; // Only ops with one out are supported due to Equations restrictions auto supported_num_out = [](const Output& out) { const auto& n = out.get_node_shared_ptr(); @@ -30,10 +30,10 @@ bool FuseTPPToEquations::fuse_from_root(const NodePtr& root, const std::shared_p auto get_tpp_op = [](const NodePtr& n) { auto 
tpp = std::dynamic_pointer_cast(n); bool not_supported_op = - // ticket: 152532 - ov::is_type(n) || - // ticket: 152510 - ov::is_type(n); + // ticket: 152532 + ov::is_type(n) || + // ticket: 152510 + ov::is_type(n); return not_supported_op ? nullptr : tpp; }; @@ -78,7 +78,6 @@ bool FuseTPPToEquations::fuse_from_root(const NodePtr& root, const std::shared_p } } - auto equation = std::make_shared(eq_ivals, op_descs); for (auto& kv : node_replace_map) @@ -110,8 +109,7 @@ bool FuseTPPToEquations::run_on_model(const std::shared_ptr& m) { return modified; } - -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp index a99330845d443d..326766d000f69a 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.hpp @@ -16,11 +16,12 @@ namespace pass { * @brief Converts a group of elementwise operations into a fused TPP Equation node * @ingroup snippets */ -class FuseTPPToEquations: public ov::pass::ModelPass { +class FuseTPPToEquations : public ov::pass::ModelPass { public: OPENVINO_MODEL_PASS_RTTI("FuseTPPToEquations"); FuseTPPToEquations() = default; bool run_on_model(const std::shared_ptr& m) override; + private: static bool fuse_from_root(const std::shared_ptr&, const std::shared_ptr& m); }; diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.cpp index fa545c26dbb53e..d9485b1c6b7b9d 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.cpp @@ -11,7 +11,6 @@ #include "snippets/utils/utils.hpp" #include "transformations/tpp/x64/op/brgemm.hpp" - namespace ov { namespace intel_cpu { namespace tpp { @@ -28,28 +27,35 @@ bool BrgemmTPPBlocking::SetBrgemmBeta::run(ov::snippets::lowered::LinearIR& line return true; } -std::shared_ptr BrgemmTPPBlocking::SetBrgemmBeta::merge(const std::shared_ptr& other) { +std::shared_ptr BrgemmTPPBlocking::SetBrgemmBeta::merge( + const std::shared_ptr& other) { return !other || ov::is_type(other) ? std::make_shared() : nullptr; } -std::tuple BrgemmTPPBlocking::get_blocking_params(const ov::snippets::lowered::ExpressionPtr& brgemm_expr) const { +std::tuple BrgemmTPPBlocking::get_blocking_params( + const ov::snippets::lowered::ExpressionPtr& brgemm_expr) const { size_t m, n, k; std::tie(m, n, k) = get_brgemm_dimensions(brgemm_expr); - OPENVINO_ASSERT(!is_dynamic_value(m) && !is_dynamic_value(n) && !is_dynamic_value(n), "BrgemmTPP doesn't support dynamic shapes"); + OPENVINO_ASSERT(!is_dynamic_value(m) && !is_dynamic_value(n) && !is_dynamic_value(n), + "BrgemmTPP doesn't support dynamic shapes"); size_t m_blk, n_blk, k_blk; std::tie(m_blk, n_blk, k_blk) = BrgemmBlockingBase::get_blocking_params(brgemm_expr); - auto get_projected_blk = [](const size_t dim, const size_t blk) { return ov::snippets::utils::is_full_dim_value(blk) ? dim : blk; }; + auto get_projected_blk = [](const size_t dim, const size_t blk) { + return ov::snippets::utils::is_full_dim_value(blk) ? 
dim : blk; + }; return std::make_tuple(get_projected_blk(m, m_blk), get_projected_blk(n, n_blk), get_projected_blk(k, k_blk)); } -ov::snippets::lowered::SpecificIterationHandlers BrgemmTPPBlocking::get_k_loop_handlers(size_t work_amount, size_t block_size) const { - ov::snippets::lowered::SpecificIterationHandlers handlers = ov::snippets::lowered::pass::BrgemmBlockingBase::get_k_loop_handlers(work_amount, block_size); +ov::snippets::lowered::SpecificIterationHandlers BrgemmTPPBlocking::get_k_loop_handlers(size_t work_amount, + size_t block_size) const { + ov::snippets::lowered::SpecificIterationHandlers handlers = + ov::snippets::lowered::pass::BrgemmBlockingBase::get_k_loop_handlers(work_amount, block_size); handlers.register_pass(); return handlers; } -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.hpp index 908d12087175aa..31f4bfeadc8979 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/brgemm_tpp_blocking.hpp @@ -36,12 +36,15 @@ class BrgemmTPPBlocking : public ov::snippets::lowered::pass::BrgemmBlocking merge(const std::shared_ptr& other) override; + std::shared_ptr merge( + const std::shared_ptr& other) override; }; private: - std::tuple get_blocking_params(const ov::snippets::lowered::ExpressionPtr& brgemm_expr) const override; - ov::snippets::lowered::SpecificIterationHandlers get_k_loop_handlers(size_t work_amount, size_t block_size) const override; + std::tuple get_blocking_params( + const ov::snippets::lowered::ExpressionPtr& brgemm_expr) const override; + ov::snippets::lowered::SpecificIterationHandlers get_k_loop_handlers(size_t 
work_amount, + size_t block_size) const override; }; } // namespace pass diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp index dcd97fdd74b638..42c30bb112263c 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp @@ -2,13 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "snippets/itt.hpp" -#include "snippets/op/buffer.hpp" -#include "transformations/tpp/x64/op/modifiers.hpp" #include "set_tpp_leading_dim.hpp" -#include "snippets/op/brgemm.hpp" + +#include "snippets/itt.hpp" #include "snippets/lowered/loop_manager.hpp" +#include "snippets/op/brgemm.hpp" +#include "snippets/op/buffer.hpp" #include "snippets/utils/utils.hpp" +#include "transformations/tpp/x64/op/modifiers.hpp" namespace ov { namespace intel_cpu { @@ -24,7 +25,7 @@ using LoopPort = snippets::lowered::LoopPort; bool has_directly_connected_buffer(const ExpressionPort& port, const snippets::lowered::LoopManagerPtr& loop_mngr) { auto accepted_loops = [&loop_mngr, &port](const std::vector& orig, const std::vector& connect) { size_t connect_idx = 0; - auto pred = [&port](const LoopPort& loop_port ) { + auto pred = [&port](const LoopPort& loop_port) { return *loop_port.get_expr_port() == port; }; for (const auto orig_loop : orig) { @@ -36,9 +37,8 @@ bool has_directly_connected_buffer(const ExpressionPort& port, const snippets::l // as long as the port is the loop entry/exit, and it is not incremented. // This is the case for Brgemm K-blocking loops, for example. const auto loop_info = loop_mngr->get_loop_info(orig_loop); - const auto& border_points = port.get_type() == ExpressionPort::Type::Input ? 
- loop_info->get_input_ports() : - loop_info->get_output_ports(); + const auto& border_points = port.get_type() == ExpressionPort::Type::Input ? loop_info->get_input_ports() + : loop_info->get_output_ports(); const auto& found = std::find_if(border_points.begin(), border_points.end(), pred); if (found == border_points.end() || found->is_incremented()) return false; @@ -87,36 +87,35 @@ size_t get_leading_dim(ExpressionPort port, const snippets::lowered::LoopManager } OPENVINO_ASSERT(layout.empty() || (layout.back() == layout.size() - 1 && layout.size() == shape.size()), - "get_leading_dim detected invalid layout values: check shape + layout combination"); + "get_leading_dim detected invalid layout values: check shape + layout combination"); const auto dim = [&]() -> size_t { - switch (port.get_type()) { - // Input shape is original, so we need to correctly read this data by order - // Example: - // Original shape (shape) = [1, 49, 2, 23] - // Layout (transpose order) = [2, 0, 1, 3] - // Transposed shape = [2, 1, 49, 23] - // The leading dimension is equal to stride of shape[layout[3]] = 2 x 23 - case ExpressionPort::Type::Input : - return snippets::utils::get_input_dim_idx(layout, 1); // `1` in example - // Output shape is already transposed, we need to correctly write the data with original shape by the order - // Example: - // Original transposed shape (shape) = [49, 2, 7, 39] - // Layout (transpose order) = [2, 0, 1, 3] - // Before leading dimension with index 3 there is dimension with index 2 in planar layout. - // Since we have non-planar layout, we have to find this before LD dim in transposed order. 
- // In layout 2nd idx is first element, it means, that the leading dimension is equal to stride of shape[0] - case ExpressionPort::Type::Output : - return snippets::utils::get_output_dim_idx(layout, 1); // 0 in the example: shape[0] = 49 - default: - OPENVINO_THROW("Unsupported Expression port type"); + switch (port.get_type()) { + // Input shape is original, so we need to correctly read this data by order + // Example: + // Original shape (shape) = [1, 49, 2, 23] + // Layout (transpose order) = [2, 0, 1, 3] + // Transposed shape = [2, 1, 49, 23] + // The leading dimension is equal to stride of shape[layout[3]] = 2 x 23 + case ExpressionPort::Type::Input: + return snippets::utils::get_input_dim_idx(layout, 1); // `1` in example + // Output shape is already transposed, we need to correctly write the data with original shape by the order + // Example: + // Original transposed shape (shape) = [49, 2, 7, 39] + // Layout (transpose order) = [2, 0, 1, 3] + // Before leading dimension with index 3 there is dimension with index 2 in planar layout. + // Since we have non-planar layout, we have to find this before LD dim in transposed order. + // In layout 2nd idx is first element, it means, that the leading dimension is equal to stride of shape[0] + case ExpressionPort::Type::Output: + return snippets::utils::get_output_dim_idx(layout, 1); // 0 in the example: shape[0] = 49 + default: + OPENVINO_THROW("Unsupported Expression port type"); } }; - return layout.size() == 1 ? - shape.back() : - std::accumulate(shape.cbegin() + dim() + 1, shape.cend(), 1, std::multiplies()); + return layout.size() == 1 ? 
shape.back() + : std::accumulate(shape.cbegin() + dim() + 1, shape.cend(), 1, std::multiplies()); } -} // namespace +} // namespace SetTPPLeadingDim::SetTPPLeadingDim() : RangedPass() {} @@ -151,8 +150,7 @@ bool SetTPPLeadingDim::run(snippets::lowered::LinearIR& linear_ir, return modified; } - -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp index d755e4813dde8e..6be200c30b7c1c 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp @@ -15,10 +15,11 @@ namespace pass { * @interface SetTPPLeadingDim * @brief TPP leading dimension depends on the operation it is connected to. If it's a Parameter or Result * we can compute LD based on shape, if it's a Buffer - we need to consider allocation shape. - * This transformation should be performed before InsertTailLoop because it may change graph connectivity for 1st and last iterations. + * This transformation should be performed before InsertTailLoop because it may change graph connectivity for 1st and + * last iterations. 
* @ingroup snippets */ -class SetTPPLeadingDim: public snippets::lowered::pass::RangedPass { +class SetTPPLeadingDim : public snippets::lowered::pass::RangedPass { public: OPENVINO_RTTI("SetTPPLeadingDim", "0", snippets::lowered::pass::RangedPass); SetTPPLeadingDim(); diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp index 0b9f41d47aa0da..06ca575f314b4b 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp @@ -2,14 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "snippets/itt.hpp" #include "scalar_to_scalar_tpp.hpp" + #include "openvino/pass/pattern/op/wrap_type.hpp" +#include "snippets/itt.hpp" +#include "snippets/lowered/port_connector.hpp" #include "snippets/op/scalar.hpp" -#include "transformations/tpp/x64/op/scalar.hpp" #include "transformations/tpp/x64/op/modifiers.hpp" -#include "snippets/lowered/port_connector.hpp" - +#include "transformations/tpp/x64/op/scalar.hpp" namespace ov { namespace intel_cpu { @@ -21,7 +21,6 @@ ScalarToScalarTPP::ScalarToScalarTPP() { auto snippets_scalar = ov::pass::pattern::wrap_type(); - auto callback = [=](ov::pass::pattern::Matcher& m) { OV_ITT_SCOPED_TASK(ov::pass::itt::domains::SnippetsTransform, "ov::intel_cpu::pass::ScalarToScalarTPP") const auto node = ov::as_type_ptr(m.get_match_root()); @@ -51,7 +50,7 @@ ScalarToScalarTPP::ScalarToScalarTPP() { auto m = std::make_shared(snippets_scalar, matcher_name); register_matcher(m, callback); } -} // namespace pass -} // namespace tpp -} // namespace intel_cpu -} // namespace ov +} // namespace pass +} // namespace tpp +} // namespace intel_cpu +} // namespace ov diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp 
b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp index a56e23363067e2..2a7e712ab1baea 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp @@ -16,13 +16,12 @@ namespace pass { * @brief Converts snippets::op::Scalar to tpp::op::Scalar, since TPP operations require a dedicated emitter * @ingroup snippets */ -class ScalarToScalarTPP: public ov::pass::MatcherPass { +class ScalarToScalarTPP : public ov::pass::MatcherPass { public: OPENVINO_MATCHER_PASS_RTTI("ScalarToScalarTPP"); ScalarToScalarTPP(); }; - } // namespace pass } // namespace tpp } // namespace intel_cpu From 6ff0e26a562540216fecf0c133141962fb8f7a3d Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Sat, 18 Jan 2025 13:49:08 +0400 Subject: [PATCH 39/97] Revert "Temporarily remove TF layer tests & TF models tests from required" (#28534) Reverts openvinotoolkit/openvino#28533 --- .github/workflows/linux_arm64.yml | 2 +- .github/workflows/ubuntu_22.yml | 4 ++-- .github/workflows/ubuntu_24.yml | 2 +- .github/workflows/windows_vs2019_release.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/linux_arm64.yml b/.github/workflows/linux_arm64.yml index 16a1f745ba413a..ca1ca6e056e23d 100644 --- a/.github/workflows/linux_arm64.yml +++ b/.github/workflows/linux_arm64.yml @@ -250,7 +250,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_linux_arm64 needs: [Smart_CI, Build, Debian_Packages, Samples, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, - TensorFlow_Models_Tests, PyTorch_Models_Tests, Openvino_tokenizers, Pytorch_Layer_Tests] + TensorFlow_Models_Tests, PyTorch_Models_Tests, Openvino_tokenizers, TensorFlow_Layer_Tests, Pytorch_Layer_Tests] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ubuntu_22.yml b/.github/workflows/ubuntu_22.yml index 
2a10877a07ab7e..e5c7d25003de1e 100644 --- a/.github/workflows/ubuntu_22.yml +++ b/.github/workflows/ubuntu_22.yml @@ -557,8 +557,8 @@ jobs: Overall_Status: name: ci/gha_overall_status - needs: [Smart_CI, Build, Debian_Packages, Samples, Conformance, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, Pytorch_Layer_Tests, - CPU_Functional_Tests, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU] + needs: [Smart_CI, Build, Debian_Packages, Samples, Conformance, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, TensorFlow_Layer_Tests, Pytorch_Layer_Tests, + CPU_Functional_Tests, TensorFlow_Models_Tests_Precommit, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/ubuntu_24.yml b/.github/workflows/ubuntu_24.yml index 6fb0051dcf3e11..beac15bfbda97d 100644 --- a/.github/workflows/ubuntu_24.yml +++ b/.github/workflows/ubuntu_24.yml @@ -190,7 +190,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_ubuntu_24 - needs: [Smart_CI, Build, Debian_Packages, Samples, Python_Unit_Tests, Pytorch_Layer_Tests, Openvino_tokenizers] + needs: [Smart_CI, Build, Debian_Packages, Samples, Python_Unit_Tests, Pytorch_Layer_Tests, TensorFlow_Layer_Tests, Openvino_tokenizers] if: ${{ always() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 025df08bf4b032..5708b529f25acc 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -614,7 +614,7 @@ jobs: Overall_Status: name: ci/gha_overall_status_windows - needs: [ Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, Openvino_tokenizers, Pytorch_Layer_Tests ] + needs: [ Smart_CI, Build, Samples, CXX_Unit_Tests, Python_Unit_Tests, CPU_Functional_Tests, Openvino_tokenizers, TensorFlow_Layer_Tests, 
Pytorch_Layer_Tests ] if: ${{ always() }} runs-on: ubuntu-latest steps: From 6daaa8b50217d53800bc204e1f4b02f48e92c836 Mon Sep 17 00:00:00 2001 From: Anastasia Kuporosova Date: Sun, 19 Jan 2025 21:01:58 +0100 Subject: [PATCH 40/97] remove contrains file from tools (#28496) ### Details: - Looks like it isn't needed anymore ### Tickets: - *ticket-id* --- tools/constraints.txt | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 tools/constraints.txt diff --git a/tools/constraints.txt b/tools/constraints.txt deleted file mode 100644 index 73d244545aa499..00000000000000 --- a/tools/constraints.txt +++ /dev/null @@ -1,16 +0,0 @@ -# EXCEPTIONS -# some package versions need to be specified in respective requirements.txt -# files because the version differs between them: -# tensorflow, numpy -h5py>=3.1.0,<3.11.0 -onnx>=1.8.1,<=1.17.0 -pytest>=5.0,<8.4 -protobuf>=3.18.1,<4.0.0 -defusedxml>=0.7.1 -requests>=2.25.1 -coverage>=4.4.2,<=7.0.5 -astroid>=2.9.0 -pylint>=2.7.0 -pyenchant>=3.0.0 -urllib3>=1.26.4 -openvino-telemetry>=2023.2.1 From f9354766af07de5d662c6a7572cce0a53c34c082 Mon Sep 17 00:00:00 2001 From: Artemy Skrebkov Date: Sun, 19 Jan 2025 21:27:55 +0000 Subject: [PATCH 41/97] Fixes dynamic shapes (#27776) ### Details: * Aligns fixes in dynamic shape serializing with https://github.com/intel-innersource/applications.ai.vpu-accelerators.vpux-plugin/pull/14490 * Fix parsing of shapes for SIT ### Tickets: - E147314 - E147315 --------- Signed-off-by: Skrebkov, Artemy Co-authored-by: Pawel Raasz --- samples/cpp/benchmark_app/main.cpp | 17 ++++++------ .../src/backend/src/zero_infer_request.cpp | 5 ++-- .../src/ze_graph_ext_wrappers.cpp | 18 ++++++++++--- .../tools/single-image-test/main.cpp | 26 ++++++++++++------- 4 files changed, 41 insertions(+), 25 deletions(-) diff --git a/samples/cpp/benchmark_app/main.cpp b/samples/cpp/benchmark_app/main.cpp index 2b51b6f1f87251..4050f54f867969 100644 --- a/samples/cpp/benchmark_app/main.cpp +++ 
b/samples/cpp/benchmark_app/main.cpp @@ -523,9 +523,7 @@ int main(int argc, char* argv[]) { } } auto result = std::find_if(config.begin(), config.end(), [&](const std::pair& item) { - if (device_name.find(item.first) == 0) - return true; - return false; + return device_name.find(item.first) == 0; }); ov::AnyMap device_config = {}; if (result != config.end()) @@ -548,6 +546,11 @@ int main(int argc, char* argv[]) { } bool isDynamicNetwork = false; + auto areNetworkInputsDynamic = [](const benchmark_app::InputsInfo& input_info) { + return std::any_of(input_info.begin(), input_info.end(), [](const auto& info) { + return info.second.partialShape.is_dynamic(); + }); + }; if (FLAGS_load_from_file && !isNetworkCompiled) { if (!FLAGS_mean_values.empty() || !FLAGS_scale_values.empty()) { @@ -722,12 +725,7 @@ int main(int argc, char* argv[]) { model = preproc.build(); // Check if network has dynamic shapes - auto input_info = app_inputs_info[0]; - isDynamicNetwork = std::any_of(input_info.begin(), - input_info.end(), - [](const std::pair& i) { - return i.second.partialShape.is_dynamic(); - }); + isDynamicNetwork = areNetworkInputsDynamic(app_inputs_info.at(0)); topology_name = model->get_friendly_name(); @@ -789,6 +787,7 @@ int main(int argc, char* argv[]) { FLAGS_scale_values, FLAGS_mean_values, compiledModel.inputs()); + isDynamicNetwork = areNetworkInputsDynamic(app_inputs_info.at(0)); batchSize = get_batch_size(app_inputs_info.at(0)); warn_if_no_batch(app_inputs_info.at(0)); diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp index 008e2bdd6d39de..b7049f62af6d31 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp @@ -54,9 +54,8 @@ void check_level_zero_attributes_match(const IODescriptor& ioDescriptor, const A '\n' + "Given: " + std::to_string(ovDimensions.size())); for (size_t index = 0; index < 
ovDimensions.size(); ++index) { - OPENVINO_ASSERT( - ioDescriptor.shapeFromCompiler.is_dynamic() || ovDimensions[index] == zeDescriptor.info.dims[index], - "Shape mismatch for input/output named " + ioDescriptor.nameFromCompiler); + OPENVINO_ASSERT(ovDimensions[index] == zeDescriptor.info.dims[index], + "Shape mismatch for input/output named " + ioDescriptor.nameFromCompiler); } for (size_t index = ovDimensions.size(); index < ZE_MAX_GRAPH_ARGUMENT_DIMENSIONS_SIZE; ++index) { OPENVINO_ASSERT(zeDescriptor.info.dims[index] == 0 || zeDescriptor.info.dims[index] == 1, diff --git a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp index 2f6eded512ab8e..a3626a79475dcd 100644 --- a/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp +++ b/src/plugins/intel_npu/src/compiler_adapter/src/ze_graph_ext_wrappers.cpp @@ -12,7 +12,9 @@ #include "intel_npu/utils/zero/zero_api.hpp" #include "intel_npu/utils/zero/zero_result.hpp" #include "intel_npu/utils/zero/zero_wrappers.hpp" +#include "openvino/core/dimension.hpp" #include "openvino/core/model.hpp" +#include "openvino/core/partial_shape.hpp" #define NotSupportQuery(T) (T <= ZE_GRAPH_EXT_VERSION_1_2) @@ -400,7 +402,8 @@ ze_graph_handle_t ZeGraphExtWrappers::getGraphHandle(const std::vector& static IODescriptor getIODescriptor(const ze_graph_argument_properties_3_t& arg, const std::optional& metadata) { ov::element::Type_t precision = toOVElementType(arg.devicePrecision); - ov::Shape shapeFromCompiler, shapeFromIRModel; + ov::Shape shapeFromCompiler; + ov::PartialShape shapeFromIRModel; std::unordered_set outputTensorNames; for (uint32_t id = 0; id < arg.associated_tensor_names_count; id++) { @@ -410,8 +413,17 @@ static IODescriptor getIODescriptor(const ze_graph_argument_properties_3_t& arg, shapeFromCompiler.push_back(arg.dims[id]); } if (metadata.has_value()) { + const auto dynamicDim = 
std::numeric_limits::max(); + shapeFromIRModel.reserve(metadata->shape_size); for (uint32_t id = 0; id < metadata->shape_size; id++) { - shapeFromIRModel.push_back(metadata->shape[id]); + if (metadata->shape[id] != dynamicDim) { + shapeFromIRModel.push_back(metadata->shape[id]); + } else { + // lower bound is ignored, so we set it to 1 just to satisfy the Dimension constructor, + // upper bound is set to the value from shapeFromCompiler as it is filled with upper bounds + // in case of dynamic dimensions + shapeFromIRModel.push_back(ov::Dimension(1, shapeFromCompiler[id])); + } } } @@ -433,7 +445,7 @@ static IODescriptor getIODescriptor(const ze_graph_argument_properties_3_t& arg, return {std::move(nameFromCompiler), precision, - std::move(shapeFromCompiler), + shapeFromCompiler, isStateInput, isStateOutput, isShapeTensor, diff --git a/src/plugins/intel_npu/tools/single-image-test/main.cpp b/src/plugins/intel_npu/tools/single-image-test/main.cpp index 699e252eacf181..3188075fc58148 100644 --- a/src/plugins/intel_npu/tools/single-image-test/main.cpp +++ b/src/plugins/intel_npu/tools/single-image-test/main.cpp @@ -1569,8 +1569,8 @@ std::pair runInfer(ov::InferRequest& inferRequest, ov::Compi TensorMap out; for (const auto& outputInfo : compiledModel.outputs()) { - const std::string layer_name = outputInfo.get_any_name(); - out.insert({layer_name, inferRequest.get_tensor(layer_name)}); + const std::string layerName = outputInfo.get_any_name(); + out.insert({layerName, inferRequest.get_tensor(layerName)}); } ProfVec profData{}; @@ -1807,11 +1807,17 @@ bool testMeanIoU(const TensorMap& outputs, const TensorMap& references, const La } static ov::Shape parseDataShape(const std::string& dataShapeStr) { - std::vector dataShape; - std::istringstream ss(dataShapeStr); - std::string token; - while (std::getline(ss, token, ',')) { - dataShape.push_back(std::stoul(token)); + std::vector dataShape; + std::stringstream ss(dataShapeStr); + + char ch; // To discard non-numeric 
characters + int64_t dim; + while (ss >> ch) { + if (std::isdigit(ch)) { + ss.putback(ch); + ss >> dim; + dataShape.push_back(dim); + } } return ov::Shape(dataShape); } @@ -1906,11 +1912,11 @@ static int runSingleImageTest() { auto model = core.read_model(FLAGS_network); nameIOTensors(model); - auto inputs_info = std::const_pointer_cast(model)->inputs(); - InputsInfo info_map; + auto inputsInfo = std::const_pointer_cast(model)->inputs(); + InputsInfo infoMap; std::cout << "Performing reshape" << std::endl; - reshape(std::move(inputs_info), info_map, model, FLAGS_shape, + reshape(std::move(inputsInfo), infoMap, model, FLAGS_shape, FLAGS_override_model_batch_size, FLAGS_device); ov::preprocess::PrePostProcessor ppp(model); From 5fba4415e2db090284b907c8ca8888f80f0a419c Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Mon, 20 Jan 2025 06:05:05 +0100 Subject: [PATCH 42/97] [PT FE] Support different aliases of existing operations (#28531) ### Details: - *Support: `aten::equal`, `aten::index_put`, `aten::logsumexp`, `prim::abs`* ### Tickets: - *ticket-id* Signed-off-by: Maxim Vafin --- src/frontends/pytorch/src/op/index_put_.cpp | 2 +- src/frontends/pytorch/src/op/log.cpp | 8 +++-- src/frontends/pytorch/src/op_table.cpp | 7 ++-- .../pytorch_tests/test_logsumexp.py | 34 +++++++++++++++++++ .../pytorch_tests/test_unary_ops.py | 27 +++++++++++++-- 5 files changed, 70 insertions(+), 8 deletions(-) create mode 100644 tests/layer_tests/pytorch_tests/test_logsumexp.py diff --git a/src/frontends/pytorch/src/op/index_put_.cpp b/src/frontends/pytorch/src/op/index_put_.cpp index 1b5725a8a95bb3..4591862d8f04c1 100644 --- a/src/frontends/pytorch/src/op/index_put_.cpp +++ b/src/frontends/pytorch/src/op/index_put_.cpp @@ -10,7 +10,7 @@ namespace frontend { namespace pytorch { namespace op { -OutputVector translate_index_put_(const NodeContext& context) { +OutputVector translate_index_put(const NodeContext& context) { // Pass as PtFrameworkNode to register as `inplace_op`. 
Conversion to OV operators is done as transformation. auto node = std::make_shared(context.get_decoder(), context.inputs()); return {context.mark_node(node)}; diff --git a/src/frontends/pytorch/src/op/log.cpp b/src/frontends/pytorch/src/op/log.cpp index e932538c86520e..dbda6329deeb4f 100644 --- a/src/frontends/pytorch/src/op/log.cpp +++ b/src/frontends/pytorch/src/op/log.cpp @@ -77,7 +77,7 @@ OutputVector translate_log10(const NodeContext& context) { }; OutputVector translate_logsumexp(const NodeContext& context) { - num_inputs_check(context, 1, 2); + num_inputs_check(context, 1, 3); auto input = context.get_input(0); ov::Output dim; if (!context.input_is_none(1)) { @@ -85,8 +85,12 @@ OutputVector translate_logsumexp(const NodeContext& context) { } else { dim = context.mark_node(get_axes_range(context, 0)); } + bool keepdim = false; + if (!context.input_is_none(2)) { + keepdim = context.const_input(2); + } auto exp = context.mark_node(std::make_shared(input)); - auto sum = context.mark_node(std::make_shared(exp, dim, false)); + auto sum = context.mark_node(std::make_shared(exp, dim, keepdim)); auto log = context.mark_node(std::make_shared(sum)); return {log}; }; diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index f00391e08e2a32..27dd55f77955e0 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -116,7 +116,7 @@ OP_CONVERTER(translate_index); OP_CONVERTER(translate_index_add); OP_CONVERTER(translate_index_copy_); OP_CONVERTER(translate_index_fill_); -OP_CONVERTER(translate_index_put_); +OP_CONVERTER(translate_index_put); OP_CONVERTER(translate_index_select); OP_CONVERTER(translate_instance_norm); OP_CONVERTER(translate_int); @@ -464,6 +464,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::empty", op::translate_empty}, {"aten::empty_like", op::translate_empty_like}, {"aten::eq", op::translate_1to1_match_2_inputs_align_types}, + {"aten::equal", 
op::translate_1to1_match_2_inputs_align_types}, {"aten::erf", op::translate_erf}, {"aten::erfc", op::translate_erfc}, {"aten::exp", op::optional_out, 1>}, @@ -507,7 +508,7 @@ const std::unordered_map get_supported_ops_ts() { // aten::index - Supported in limited set of patterns {"aten::index_copy_", op::inplace_op}, {"aten::index_fill_", op::inplace_op}, - {"aten::index_put_", op::inplace_op}, + {"aten::index_put", op::translate_index_put}, {"aten::index_add", op::translate_index_add}, {"aten::index_select", op::translate_index_select}, {"aten::instance_norm", op::translate_instance_norm}, @@ -550,6 +551,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::log2_", op::inplace_op}, {"aten::log10", op::optional_out}, {"aten::log10_", op::inplace_op}, + {"aten::logsumexp", op::translate_logsumexp}, {"aten::lstm", op::translate_lstm}, {"aten::lt", op::translate_1to1_match_2_inputs_align_types}, {"aten::masked_fill", op::translate_masked_fill}, @@ -714,6 +716,7 @@ const std::unordered_map get_supported_ops_ts() { {"ov_ext::embedding", op::translate_embedding_ext}, {"ov_ext::conv1d", op::translate_conv1d_ext}, {"ov_ext::linear", op::translate_linear}, + {"prim::abs", op::translate_1to1_match_1_inputs}, {"prim::Constant", op::translate_constant}, {"prim::device", op::translate_constant}, // prim::DictConstruct - Supported in limited set of patterns diff --git a/tests/layer_tests/pytorch_tests/test_logsumexp.py b/tests/layer_tests/pytorch_tests/test_logsumexp.py new file mode 100644 index 00000000000000..806e3b80540d5a --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_logsumexp.py @@ -0,0 +1,34 @@ +# Copyright (C) 2018-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class aten_logsumexp(torch.nn.Module): + def __init__(self, dim, keepdim) -> None: + super().__init__() + self.dim = dim + self.keepdim = keepdim + + def forward(self, 
input_tensor): + return torch.logsumexp(input_tensor, dim=self.dim, keepdim=self.keepdim) + + +class TestLogsumexp(PytorchLayerTest): + def _prepare_input(self): + return (np.random.randn(2, 5, 9, 7),) + + @pytest.mark.parametrize("dim", [ + 0, 1, 2, 3, -1, -2, -3, -4 + ]) + @pytest.mark.parametrize("keepdim", [True, False]) + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_fx_backend + def test_logsumexp(self, dim, keepdim, ie_device, precision, ir_version): + self._test(aten_logsumexp(dim, keepdim), None, "aten::logsumexp", + ie_device, precision, ir_version) diff --git a/tests/layer_tests/pytorch_tests/test_unary_ops.py b/tests/layer_tests/pytorch_tests/test_unary_ops.py index 9807343080043c..584a80fe4ce254 100644 --- a/tests/layer_tests/pytorch_tests/test_unary_ops.py +++ b/tests/layer_tests/pytorch_tests/test_unary_ops.py @@ -75,7 +75,7 @@ class unary_op_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_op_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -87,7 +87,7 @@ def forward(self, x): class unary_op_out_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_op_out_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -101,7 +101,7 @@ def forward(self, x): class unary_func_op_inplace_net(torch.nn.Module): def __init__(self, op, dtype): - super(unary_func_op_inplace_net, self).__init__() + super().__init__() self.dtype = dtype self.op = op @@ -111,6 +111,17 @@ def forward(self, x): return y, x1 +class prim_abs_net(torch.nn.Module): + def __init__(self, dtype): + super().__init__() + self.dtype = dtype + + def forward(self, x): + x1 = x.to(self.dtype) + y = abs(x1) + return y, x1 + + class TestUnaryOp(PytorchLayerTest): def _prepare_input(self): # random number in range [1, 11) @@ -265,3 +276,13 @@ def test_unary_func_op_inplace(self, op_type, dtype, ie_device, precision, ir_ve self.dtype = dtype self._test(unary_func_op_inplace_net(OPS[op_type], dtype), 
None, op_type + "_", ie_device, precision, ir_version) + + @pytest.mark.nightly + @pytest.mark.precommit + @pytest.mark.precommit_torch_export + @pytest.mark.precommit_fx_backend + @pytest.mark.parametrize("dtype", [torch.float32, torch.float64, torch.int8, torch.uint8, torch.int32, torch.int64]) + def test_prim_abs(self, dtype, ie_device, precision, ir_version): + self.dtype = dtype + self._test(prim_abs_net(dtype), None, "prim::abs", + ie_device, precision, ir_version) From d757efd7fb3415a3dbda10941b3dae0ace0ac16e Mon Sep 17 00:00:00 2001 From: Maxim Vafin Date: Mon, 20 Jan 2025 07:32:00 +0100 Subject: [PATCH 43/97] [PT FE] Support aten::concatenate (#28518) ### Details: - *Support `aten::concatenate`* ### Tickets: - *CVS-160777* Signed-off-by: Maxim Vafin --- src/frontends/pytorch/src/op_table.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 27dd55f77955e0..00e3a55b0bc327 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -432,6 +432,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::col2im", op::translate_col2im}, {"aten::complex", op::translate_complex}, {"aten::concat", op::translate_cat}, + {"aten::concatenate", op::translate_cat}, {"aten::contiguous", op::skip_node}, // In openvino how tensors are stored in memory is internal plugin detail, // we assume all tensors are contiguous {"aten::conv_transpose1d", op::translate_conv_transposend}, From 78a1d1b907cc336e93df0c599202af76f09cb20c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 15:36:13 +0400 Subject: [PATCH 44/97] Bump paddlepaddle from 2.6.1 to 2.6.2 in /tests (#28547) Bumps [paddlepaddle](https://github.com/paddlepaddle/paddle) from 2.6.1 to 2.6.2.
    Commits

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=paddlepaddle&package-manager=pip&previous-version=2.6.1&new-version=2.6.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index 30ba701095ecf4..a806b7dfb47c18 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -13,7 +13,7 @@ defusedxml>=0.7.1 tensorflow>=2.5,<2.19.0 requests>=2.25.1 opencv-python>=4.5 -paddlepaddle==2.6.1 +paddlepaddle==2.6.2 protobuf>=3.18.1,<6.0.0 py>=1.9.0 pytest>=5.0,<8.4 From ace5379eb62846d6167bca15e9ff17cceaf6a4e1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:02:38 +0000 Subject: [PATCH 45/97] Bump pytest-xdist from 2.1.0 to 3.6.1 in /tests (#28548) Bumps [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) from 2.1.0 to 3.6.1.
    Changelog

    Sourced from pytest-xdist's changelog.

    pytest-xdist 3.6.1 (2024-04-28)

    Bug Fixes

    • [#1071](https://github.com/pytest-dev/pytest-xdist/issues/1071) <https://github.com/pytest-dev/pytest-xdist/issues/1071>_: Add backward compatibility for deadlock issue with the execnet new main_thread_only "execmodel" triggered when pytest-cov accesses rinfo.

    pytest-xdist 3.6.0 (2024-04-19)

    This release was YANKED due to a regression fixed in 3.6.1.

    Features

    • [#1027](https://github.com/pytest-dev/pytest-xdist/issues/1027) <https://github.com/pytest-dev/pytest-xdist/pull/1027>_:pytest-xdist workers now always execute the tests in the main thread. Previously some tests might end up executing in a separate thread other than main in the workers, due to some internal execnet`` details. This can cause problems specially with async frameworks where the event loop is running in the ``main`` thread (for example #620 pytest-dev/pytest-xdist#620`__).

    Bug Fixes

    • [#1024](https://github.com/pytest-dev/pytest-xdist/issues/1024) <https://github.com/pytest-dev/pytest-xdist/issues/1024>_: Added proper handling of shouldstop (such as set by --max-fail) and shouldfail conditions in workers. Previously, a worker might have continued executing further tests before the controller could terminate the session.

    • [#1028](https://github.com/pytest-dev/pytest-xdist/issues/1028) <https://github.com/pytest-dev/pytest-xdist/issues/1028>_: Fixed compatibility issue between looponfail and editable installs.

    • [#620](https://github.com/pytest-dev/pytest-xdist/issues/620) <https://github.com/pytest-dev/pytest-xdist/issues/620>_: Use the new main_thread_only execnet "execmodel" so that code which expects to only run in the main thread will now work as expected.

    • [#937](https://github.com/pytest-dev/pytest-xdist/issues/937) <https://github.com/pytest-dev/pytest-xdist/issues/937>_: Fixed a bug where plugin would raise an incompatibility error with --pdb despite using -n0.

    Removals

    • [#1053](https://github.com/pytest-dev/pytest-xdist/issues/1053) <https://github.com/pytest-dev/pytest-xdist/issues/1053>_: Dropped support for Python 3.7.

    • [#1057](https://github.com/pytest-dev/pytest-xdist/issues/1057) <https://github.com/pytest-dev/pytest-xdist/issues/1057>_: pytest>=7.0.0 is now required.

      execnet>=2.1.0 is now required.

    Trivial Changes

    • [#1020](https://github.com/pytest-dev/pytest-xdist/issues/1020) <https://github.com/pytest-dev/pytest-xdist/issues/1020>_: pytest-xdist's setup.py file is removed.

      If you relied on this file, e.g. to install pytest using setup.py install, please see Why you shouldn't invoke setup.py directly <https://blog.ganssle.io/articles/2021/10/setup-py-deprecated.html#summary>_ for alternatives.

    ... (truncated)

    Commits
    • 4dd2978 Release 3.6.1
    • b397288 Merge pull request #1072 from zmedico/gateway-cache-rinfo
    • 12b3cce Cache execnet gateway rinfo during WorkerController setup
    • c93a106 build(deps): bump hynek/build-and-inspect-python-package (#1066)
    • 52e2022 [pre-commit.ci] pre-commit autoupdate (#1073)
    • 699f939 Merge pull request #1070 from pytest-dev/release-3.6.0
    • 80bc0b8 Release 3.6.0
    • 20e3ac7 Use execnet main_thread_only execmodel (#1027)
    • 0a4238f Merge pull request #1067 from pytest-dev/pre-commit-ci-update-config
    • 0686279 [pre-commit.ci] pre-commit autoupdate
    • Additional commits viewable in compare view

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-xdist&package-manager=pip&previous-version=2.1.0&new-version=3.6.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/e2e_tests/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/e2e_tests/requirements.txt b/tests/e2e_tests/requirements.txt index 934a5bcbc90888..a9d7bb0861ddd2 100644 --- a/tests/e2e_tests/requirements.txt +++ b/tests/e2e_tests/requirements.txt @@ -26,7 +26,7 @@ pytest-cov==2.11.1 pytest-html pytest-json-report==1.5.0 # pytest-metadata==1.7.0 -pytest-xdist==2.1.0 +pytest-xdist==3.6.1 pytest-timeout==2.3.1 # for common utils, e2e_tests From 3e8bc27b226049f5d0d5395e1edea2af704e02e0 Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Mon, 20 Jan 2025 13:05:27 +0100 Subject: [PATCH 46/97] [CPU] Replace custom THROW_ERROR macros usage with THROW_CPU_NODE_ERR (#28510) ### Details: Replace custom THROW_ERROR macros usage for error reporting in nodes implementation with THROW_CPU_NODE_ERR to unify error handling infrastructure in CPU plugin ### Tickets: - 160275 --- .../intel_cpu/src/nodes/depth_to_space.cpp | 24 +++--- src/plugins/intel_cpu/src/nodes/eye.cpp | 2 - src/plugins/intel_cpu/src/nodes/gather.cpp | 20 ++--- .../intel_cpu/src/nodes/gather_elements.cpp | 10 +-- src/plugins/intel_cpu/src/nodes/gather_nd.cpp | 22 +++-- .../intel_cpu/src/nodes/grid_sample.cpp | 2 - .../intel_cpu/src/nodes/interaction.cpp | 4 +- src/plugins/intel_cpu/src/nodes/mha.cpp | 20 ++--- src/plugins/intel_cpu/src/nodes/normalize.cpp | 19 ++--- src/plugins/intel_cpu/src/nodes/priorbox.cpp | 8 +- .../intel_cpu/src/nodes/space_to_depth.cpp | 24 +++--- src/plugins/intel_cpu/src/nodes/split.cpp | 18 ++-- .../intel_cpu/src/nodes/tensoriterator.cpp | 82 +++++++++---------- src/plugins/intel_cpu/src/nodes/unique.cpp | 14 ++-- 14 files changed, 123 insertions(+), 146 deletions(-) diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp index bf0823885ebc71..ed8f1776d6c974 
100644 --- a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp @@ -14,8 +14,6 @@ #include "openvino/opsets/opset1.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) OPENVINO_THROW("DepthToSpace layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl::impl; namespace ov { @@ -73,11 +71,11 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 1 || outputShapes.size() != 1) - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto depthToSpace = ov::as_type_ptr(op); if (!depthToSpace) - THROW_ERROR("supports only opset1"); + THROW_CPU_NODE_ERR("supports only opset1"); const auto modeNgraph = depthToSpace->get_mode(); if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST) { @@ -85,22 +83,22 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte } else if (modeNgraph == ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST) { attrs.mode = Mode::DEPTH_FIRST; } else { - THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph)); + THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph)); } attrs.blockSize = depthToSpace->get_block_size(); if (attrs.blockSize == 0) - THROW_ERROR("has incorrect block_size parameter is zero!"); + THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!"); const size_t srcRank = getInputShapeAtPort(0).getRank(); const size_t dstRank = getOutputShapeAtPort(0).getRank(); if (srcRank < 3) - THROW_ERROR("has incorrect number of input dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input dimensions"); if (srcRank > 5) - THROW_ERROR("doesn't support dimensions with rank greater than 5"); + THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5"); if (srcRank != dstRank) - THROW_ERROR("has 
incorrect number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); const size_t nSpatialDims = srcRank - 2; attrs.blockStep = static_cast(std::pow(attrs.blockSize, nSpatialDims)); @@ -164,11 +162,11 @@ void DepthToSpace::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); if (!dstMemPtr) - THROW_ERROR("has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (!srcMemPtr) - THROW_ERROR("has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const auto& memoryDesc = srcMemPtr->getDesc(); attrs.dataSize = memoryDesc.getPrecision().size(); @@ -305,7 +303,7 @@ void DepthToSpace::DepthToSpaceExecutor::exec(const MemoryPtr& srcMemPtr, const void DepthToSpace::execute(const dnnl::stream& strm) { if (!execPtr) { - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); } int MB = getSrcMemoryAtPort(0)->getStaticDims()[0]; diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index 873d07673c8990..ef4995a87fd492 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -12,8 +12,6 @@ #include "shape_inference/shape_inference.hpp" #include "utils/bfloat16.hpp" -#define THROW_ERROR(...) 
OPENVINO_THROW(NameFromType(getType()), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index e72901d7d43e62..f349990f56f620 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -24,8 +24,6 @@ using namespace dnnl::impl::cpu; -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -69,7 +67,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co if (one_of(op->get_input_size(), 4u, 5u) && op->get_output_size() == 1u) { compressed = true; } else if (op->get_input_size() != 3 || op->get_output_size() != 1) { - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); } const auto& dataShape = getInputShapeAtPort(GATHER_DATA); @@ -80,7 +78,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co isIdxShapeStat = idxShape.isStatic(); const auto indicesRank = idxShape.getRank(); if (dataSrcRank == 0lu || indicesRank == 0lu) - THROW_ERROR("has incorrect input parameters ranks."); + THROW_CPU_NODE_ERR("has incorrect input parameters ranks."); if (ov::is_type(op)) { batchDims = static_cast(ov::as_type_ptr(op)->get_batch_dims()); @@ -104,7 +102,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co if (batchDims < 0) batchDims += indicesRank; if (batchDims < 0 || batchDims > std::min(static_cast(dataSrcRank), static_cast(indicesRank))) - THROW_ERROR("has incorrect batch_dims ", batchDims, "!"); + THROW_CPU_NODE_ERR("has incorrect batch_dims ", batchDims, "!"); if (ov::is_type(op->get_input_node_ptr(GATHER_AXIS))) { isAxisInputConst = true; @@ -112,7 +110,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& 
co if (axis < 0) axis += dataSrcRank; if (axis < 0 || axis >= dataSrcRank || batchDims > axis) - THROW_ERROR("has incorrect input parameter axis value: ", axis); + THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis); } if (auto indices = ov::as_type(op->get_input_node_ptr(GATHER_INDICES))) { @@ -339,12 +337,12 @@ bool Gather::needPrepareParams() const { void Gather::prepareParams() { auto dataMemPtr = getSrcMemoryAtPort(GATHER_DATA); if (!dataMemPtr || !dataMemPtr->isDefined()) - THROW_ERROR(" has undefined input data memory."); + THROW_CPU_NODE_ERR("has undefined input data memory."); auto idxMemPtr = getSrcMemoryAtPort(GATHER_INDICES); if (!idxMemPtr || !idxMemPtr->isDefined()) - THROW_ERROR(" has undefined input indices memory."); + THROW_CPU_NODE_ERR("has undefined input indices memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); // short 1D vector fast execution impl (typical in shape infer subgraph) canOptimize1DCase = false; @@ -363,7 +361,7 @@ void Gather::prepareParams() { if (axis < 0) axis += dataSrcRank; if (axis < 0 || axis >= dataSrcRank || batchDims > axis) - THROW_ERROR("has incorrect input parameter axis value: ", axis); + THROW_CPU_NODE_ERR("has incorrect input parameter axis value: ", axis); } if (!isDataShapeStat || !isAxisInputConst) { @@ -553,7 +551,7 @@ void Gather::executeDynamicImpl(const dnnl::stream& strm) { void Gather::initShortParams(threadExecParams& p, const uint64_t start) { if (!jitKernel) - THROW_ERROR("has uninitialized kernel in function initShortParams."); + THROW_CPU_NODE_ERR("has uninitialized kernel in function initShortParams."); const uint64_t idxElPerVec = jitKernel->getIdxElPerVec(); if (afterAxisSize == 1) { // Elementwise gather. 
diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp index 7a494d184ce9c1..29bc32370d03de 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp @@ -38,19 +38,19 @@ GatherElements::GatherElements(const std::shared_ptr& op, const GraphC OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 2 || outputShapes.size() != 1) - THROW_CPU_NODE_ERR(" has invalid number of input/output edges."); + THROW_CPU_NODE_ERR("has invalid number of input/output edges."); const auto dataRank = getInputShapeAtPort(dataIndex_).getRank(); const auto indicesRank = getInputShapeAtPort(indicesIndex_).getRank(); if (dataRank != indicesRank) - THROW_CPU_NODE_ERR(" has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks."); + THROW_CPU_NODE_ERR("has invalid input shapes. Inputs 'Data' and 'Indices' must have equal ranks."); auto gatherElementsOp = ov::as_type_ptr(op); auto axis = gatherElementsOp->get_axis(); if (axis < 0) axis += dataRank; if (axis < 0 || axis >= static_cast(dataRank)) - THROW_CPU_NODE_ERR(" has invalid axis attribute: ", axis); + THROW_CPU_NODE_ERR("has invalid axis attribute: ", axis); axis_ = axis; } @@ -78,12 +78,12 @@ void GatherElements::initSupportedPrimitiveDescriptors() { sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { - THROW_CPU_NODE_ERR(" has unsupported 'inputData' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'inputData' input precision: ", inDataPrecision); } ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_); if (!one_of(indicesPrecision, ov::element::i32, ov::element::i64)) { - THROW_CPU_NODE_ERR(" has unsupported 'indices' input precision: ", indicesPrecision); + THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision); } 
dataTypeSize_ = inDataPrecision.size(); diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp index 1124bec41632b8..8df99882adc9cf 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp @@ -14,8 +14,6 @@ #include "openvino/core/parallel.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) OPENVINO_THROW("GatherND layer with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -43,7 +41,7 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr } if (inputShapes.size() != 2 && outputShapes.size() != 1) - THROW_ERROR("has invalid number of input/output edges."); + THROW_CPU_NODE_ERR("has invalid number of input/output edges."); const size_t dataInputRank = getInputShapeAtPort(GATHERND_DATA).getRank(); const size_t indicesInputRank = getInputShapeAtPort(GATHERND_INDEXES).getRank(); @@ -53,10 +51,10 @@ GatherND::GatherND(const std::shared_ptr& op, const GraphContext::CPtr } else if (auto gatherNdOp = ov::as_type_ptr(op)) { attrs.batchDims = gatherNdOp->get_batch_dims(); } else { - THROW_ERROR("has support only opset5."); + THROW_CPU_NODE_ERR("has support only opset5."); } if (attrs.batchDims >= std::min(dataInputRank, indicesInputRank)) - THROW_ERROR("has invalid batch_dims attribute: ", attrs.batchDims); + THROW_CPU_NODE_ERR("has invalid batch_dims attribute: ", attrs.batchDims); } void GatherND::initSupportedPrimitiveDescriptors() { @@ -68,7 +66,7 @@ void GatherND::initSupportedPrimitiveDescriptors() { sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { - THROW_ERROR("has unsupported 'data' input precision: ", inDataPrecision); + THROW_CPU_NODE_ERR("has unsupported 'data' input precision: ", inDataPrecision); } attrs.dataSize = inDataPrecision.size(); @@ -80,7 +78,7 @@ void 
GatherND::initSupportedPrimitiveDescriptors() { ov::element::u16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported 'indices' input precision: ", indicesPrecision); + THROW_CPU_NODE_ERR("has unsupported 'indices' input precision: ", indicesPrecision); } addSupportedPrimDesc({{LayoutType::ncsp, inDataPrecision}, {LayoutType::ncsp, ov::element::i32}}, @@ -93,13 +91,13 @@ void GatherND::prepareParams() { auto idxMemPtr = getSrcMemoryAtPort(GATHERND_INDEXES); auto dstMemPtr = getDstMemoryAtPort(0); if (!srcMemPtr || !srcMemPtr->isDefined()) - THROW_ERROR(" has undefined input memory of 'data'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'data'."); if (!idxMemPtr || !idxMemPtr->isDefined()) - THROW_ERROR(" has undefined input memory of 'indices'."); + THROW_CPU_NODE_ERR("has undefined input memory of 'indices'."); if (!dstMemPtr || !dstMemPtr->isDefined()) - THROW_ERROR(" has undefined output memory."); + THROW_CPU_NODE_ERR("has undefined output memory."); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); attrs.srcDims = srcMemPtr->getStaticDims(); attrs.srcStrides = srcMemPtr->getDescWithType()->getStrides(); @@ -141,7 +139,7 @@ GatherND::GatherNDExecutor::GatherNDExecutor(const GatherNDAttributes& attrs) void GatherND::execute(const dnnl::stream& strm) { if (!execPtr) - THROW_ERROR("has not compiled executor."); + THROW_CPU_NODE_ERR("has not compiled executor."); execPtr->exec(getSrcMemoryAtPort(GATHERND_DATA), getSrcMemoryAtPort(GATHERND_INDEXES), getDstMemoryAtPort(0)); } diff --git a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp index 0e25c64acfe534..7a8eb1088453c7 100644 --- a/src/plugins/intel_cpu/src/nodes/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/grid_sample.cpp @@ -14,8 +14,6 @@ using namespace ov::intel_cpu::node; using 
namespace dnnl::impl::cpu; #endif // OPENVINO_ARCH_X86_64 -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - bool GridSample::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (!ov::is_type(op)) { diff --git a/src/plugins/intel_cpu/src/nodes/interaction.cpp b/src/plugins/intel_cpu/src/nodes/interaction.cpp index 13c846da6e2bea..d1ffcb3546754a 100644 --- a/src/plugins/intel_cpu/src/nodes/interaction.cpp +++ b/src/plugins/intel_cpu/src/nodes/interaction.cpp @@ -28,8 +28,6 @@ namespace ov { namespace intel_cpu { namespace node { -#define THROW_ERROR(...) OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - #if defined(OPENVINO_ARCH_X86_64) template @@ -346,7 +344,7 @@ void Interaction::prepareParams() { moveFeatureKernel->create_ker(); moveInteractKernel->create_ker(); } else { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } #ifdef CPU_DEBUG_CAPS if (prim) { diff --git a/src/plugins/intel_cpu/src/nodes/mha.cpp b/src/plugins/intel_cpu/src/nodes/mha.cpp index e1f4a774011dc9..43867cd99b2b01 100644 --- a/src/plugins/intel_cpu/src/nodes/mha.cpp +++ b/src/plugins/intel_cpu/src/nodes/mha.cpp @@ -25,8 +25,6 @@ using namespace dnnl::impl::cpu::x64; using namespace dnnl::impl::cpu::x64::matmul; using namespace Xbyak; -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -879,7 +877,7 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne ctx.K, &strides); if (status != dnnl_success) { - THROW_ERROR("cannot be executed due to invalid brgconv params"); + THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params"); } ctx.is_with_amx = use_amx; @@ -893,11 +891,11 @@ void MHA::init_brgemm(brgemmCtx& ctx, std::unique_ptr& brgKerne brgemm_kernel_t* brgKernel_ = nullptr; status = brgemm_kernel_create(&brgKernel_, brgDesc); if (status != dnnl_success) { - THROW_ERROR("cannot be executed due to invalid brgconv params"); + THROW_CPU_NODE_ERR("cannot be executed due to invalid brgconv params"); } brgKernel.reset(brgKernel_); #else - THROW_ERROR("is not supported on non-x86_64"); + THROW_CPU_NODE_ERR("is not supported on non-x86_64"); #endif // OPENVINO_ARCH_X86_64 } @@ -972,7 +970,7 @@ void MHA::init_brgemm_copy_b(std::unique_ptr& brgCop #if defined(OPENVINO_ARCH_X86_64) auto ret = create_brgemm_matmul_copy_b(brgCopyKernel, &brgCopyKernelConf); if (ret != dnnl::impl::status_t::dnnl_success) - THROW_ERROR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret); + THROW_CPU_NODE_ERR("cannot create_brgemm_matmul_copy_b kernel, dnnl_status: ", ret); #endif // OPENVINO_ARCH_X86_64 } @@ -1204,7 +1202,7 @@ void MHA::prepareParams() { } #endif // OPENVINO_ARCH_X86_64 if (!mulAddSoftmaxKernel) { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1228,7 +1226,7 @@ void MHA::prepareParams() { } #endif // OPENVINO_ARCH_X86_64 if (!convertReorderKernel) { - THROW_ERROR("cannot create jit eltwise kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1255,7 +1253,7 @@ void MHA::prepareParams() { #endif // OPENVINO_ARCH_X86_64 if (!convertTransposeKernel) { - THROW_ERROR("cannot create jit eltwise 
kernel"); + THROW_CPU_NODE_ERR("cannot create jit eltwise kernel"); } } @@ -1312,7 +1310,7 @@ void MHA::callBrgemm(brgemmCtx& ctx, brgemm_kernel_execute(brgKernel.get(), 1, pin0, pin1, nullptr, pout, wsp); } #else - THROW_ERROR("is not supported on non-x64 platforms"); + THROW_CPU_NODE_ERR("is not supported on non-x64 platforms"); #endif // OPENVINO_ARCH_X86_64 } @@ -1547,7 +1545,7 @@ void MHA::execute(const dnnl::stream& strm) { } else if (inputPrecisions[1] == ov::element::i8) { mhaImpl(); } else { - THROW_ERROR("doesn't support provided input precisions"); + THROW_CPU_NODE_ERR("doesn't support provided input precisions"); } } diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index e416781cdf69a2..13322254ab4ee1 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -35,7 +35,6 @@ using namespace Xbyak; #if defined(OPENVINO_ARCH_X86_64) # define GET_OFF(field) offsetof(jit_normalize_call_args, field) #endif -#define THROW_ERROR(...) OPENVINO_THROW("NormalizeL2 layer with name '", getName(), "' ", __VA_ARGS__) namespace ov { namespace intel_cpu { @@ -782,10 +781,10 @@ NormalizeL2::NormalizeL2(const std::shared_ptr& op, const GraphContext } if (inputShapes.size() != 2 || outputShapes.size() != 1) - THROW_ERROR(" has incorrect number of input/output edges"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges"); if (getInputShapeAtPort(DATA).getRank() > 4 || getInputShapeAtPort(DATA).getRank() < 2) { - THROW_ERROR("has invalid input shape. Normalize supports from 2D to 4D blobs."); + THROW_CPU_NODE_ERR("has invalid input shape. 
Normalize supports from 2D to 4D blobs."); } auto norm = ov::as_type_ptr(op); @@ -825,7 +824,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { ov::element::f16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported input precision: ", inputPrecision); + THROW_CPU_NODE_ERR("has unsupported input precision: ", inputPrecision); } if (!one_of(outputPrecision, ov::element::f32, @@ -833,7 +832,7 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { ov::element::f16, ov::element::i8, ov::element::u8)) { - THROW_ERROR("has unsupported output precision: ", outputPrecision); + THROW_CPU_NODE_ERR("has unsupported output precision: ", outputPrecision); } attrs.input_prec = inputPrecision; @@ -914,11 +913,11 @@ void NormalizeL2::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(DATA); auto srcMemPtr = getSrcMemoryAtPort(DATA); if (!dstMemPtr) - THROW_ERROR("can't get destination memory"); + THROW_CPU_NODE_ERR("can't get destination memory"); if (!srcMemPtr) - THROW_ERROR("can't get input memory"); + THROW_CPU_NODE_ERR("can't get input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has nullable preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has nullable preferable primitive descriptor"); if (!attrs.cornerCase) { if (srcMemPtr->getDesc().hasLayoutType(LayoutType::ncsp)) { @@ -930,7 +929,7 @@ void NormalizeL2::createPrimitive() { } else if (srcMemPtr->getDesc().hasLayoutType(LayoutType::nspc)) { attrs.layout = LayoutType::nspc; } else { - THROW_ERROR("has selected layout which is not supported"); + THROW_CPU_NODE_ERR("has selected layout which is not supported"); } } @@ -972,7 +971,7 @@ void NormalizeL2::executeDynamicImpl(const dnnl::stream& strm) { void NormalizeL2::execute(const dnnl::stream& strm) { if (!execPtr) - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); const uint8_t* src_ptr = getSrcDataAtPortAs(DATA); uint8_t* dst_ptr = 
getDstDataAtPortAs(DATA); diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp index d1a2acd05d1a7a..3bf6a47797e044 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp @@ -14,8 +14,6 @@ #include "openvino/opsets/opset1.hpp" #include "shape_inference/custom/priorbox.hpp" -#define THROW_ERROR(...) OPENVINO_THROW("PriorBox layer with name '", getName(), "': ", __VA_ARGS__) - namespace ov { namespace intel_cpu { namespace node { @@ -69,7 +67,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr exist = false; if (std::fabs(aspect_ratio_item) < std::numeric_limits::epsilon()) { - THROW_ERROR("Aspect_ratio param can't be equal to zero"); + THROW_CPU_NODE_ERR("has aspect_ratio param can't be equal to zero"); } for (float _aspect_ratio : aspect_ratio) { @@ -94,7 +92,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr if (attrs.variance.size() == 1 || attrs.variance.size() == 4) { for (float i : attrs.variance) { if (i < 0) { - THROW_ERROR("Variance must be > 0."); + THROW_CPU_NODE_ERR("variance must be > 0."); } variance.push_back(i); @@ -102,7 +100,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const GraphContext::CPtr } else if (attrs.variance.empty()) { variance.push_back(0.1f); } else { - THROW_ERROR("Wrong number of variance values. Not less than 1 and more than 4 variance values."); + THROW_CPU_NODE_ERR("has wrong number of variance values. Not less than 1 and more than 4 variance values."); } } diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp index 859944161d48b9..0384dabc63d73c 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp @@ -15,8 +15,6 @@ #include "openvino/util/pp.hpp" #include "utils/general_utils.h" -#define THROW_ERROR(...) 
OPENVINO_THROW("SpaceToDepth layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl; using namespace dnnl::impl; @@ -76,11 +74,11 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } if (inputShapes.size() != 1 || outputShapes.size() != 1) - THROW_ERROR("has incorrect number of input/output edges!"); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges!"); auto spaceToDepth = ov::as_type_ptr(op); if (!spaceToDepth) - THROW_ERROR("supports only opset1"); + THROW_CPU_NODE_ERR("supports only opset1"); const auto modeNgraph = spaceToDepth->get_mode(); if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST) { @@ -88,21 +86,21 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte } else if (modeNgraph == ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST) { attrs.mode = Mode::DEPTH_FIRST; } else { - THROW_ERROR("doesn't support mode: ", ov::as_string(modeNgraph)); + THROW_CPU_NODE_ERR("doesn't support mode: ", ov::as_string(modeNgraph)); } attrs.blockSize = spaceToDepth->get_block_size(); if (attrs.blockSize == 0) - THROW_ERROR("has incorrect block_size parameter is zero!"); + THROW_CPU_NODE_ERR("has incorrect block_size parameter is zero!"); const size_t srcRank = getInputShapeAtPort(0).getRank(); const size_t dstRank = getOutputShapeAtPort(0).getRank(); if (srcRank < 3) - THROW_ERROR("has incorrect number of input dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input dimensions"); if (srcRank > 5) - THROW_ERROR("doesn't support dimensions with rank greater than 5"); + THROW_CPU_NODE_ERR("doesn't support dimensions with rank greater than 5"); if (srcRank != dstRank) - THROW_ERROR("has incorrect number of input/output dimensions"); + THROW_CPU_NODE_ERR("has incorrect number of input/output dimensions"); attrs.nSpatialDims = srcRank - 2; attrs.blockStep = static_cast(std::pow(attrs.blockSize, attrs.nSpatialDims)); } @@ 
-164,11 +162,11 @@ void SpaceToDepth::createPrimitive() { auto dstMemPtr = getDstMemoryAtPort(0); auto srcMemPtr = getSrcMemoryAtPort(0); if (!dstMemPtr) - THROW_ERROR("has null destination memory"); + THROW_CPU_NODE_ERR("has null destination memory"); if (!srcMemPtr) - THROW_ERROR("has null input memory"); + THROW_CPU_NODE_ERR("has null input memory"); if (getSelectedPrimitiveDescriptor() == nullptr) - THROW_ERROR("has unidentified preferable primitive descriptor"); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor"); const auto& memoryDesc = srcMemPtr->getDesc(); attrs.dataSize = memoryDesc.getPrecision().size(); @@ -301,7 +299,7 @@ void SpaceToDepth::SpaceToDepthExecutor::exec(const uint8_t* srcData, uint8_t* d void SpaceToDepth::execute(const dnnl::stream& strm) { if (!execPtr) { - THROW_ERROR("doesn't have a compiled executor."); + THROW_CPU_NODE_ERR("doesn't have a compiled executor."); } const uint8_t* srcData = getSrcDataAtPortAs(0); uint8_t* dstData = getDstDataAtPortAs(0); diff --git a/src/plugins/intel_cpu/src/nodes/split.cpp b/src/plugins/intel_cpu/src/nodes/split.cpp index 59ab2776ba884b..af8295cbe98a9e 100644 --- a/src/plugins/intel_cpu/src/nodes/split.cpp +++ b/src/plugins/intel_cpu/src/nodes/split.cpp @@ -19,8 +19,6 @@ #include "utils/general_utils.h" #include "utils/ngraph_utils.hpp" -#define THROW_ERROR(...) 
OPENVINO_THROW("Split layer with name '", getName(), "' ", __VA_ARGS__) - using namespace dnnl; namespace ov { @@ -74,7 +72,7 @@ Split::Split(const std::shared_ptr& op, const GraphContext::CPtr& cont axis += inRank; } if (axis >= static_cast(inRank)) { - THROW_ERROR("Split node with name '", op->get_friendly_name(), "' has invalid value of axis parameter: ", axis); + THROW_CPU_NODE_ERR("has invalid value of axis parameter: ", axis); } this->axis = axis; } @@ -92,14 +90,14 @@ void Split::initSupportedPrimitiveDescriptors() { for (size_t i = 0; i < outputShapes.size(); i++) { const auto& o_Dims = outputShapes[i].getDims(); if (dstFirstDims.size() != o_Dims.size()) { - THROW_ERROR("only supports output blobs with equal number of dimensions"); + THROW_CPU_NODE_ERR("only supports output blobs with equal number of dimensions"); } for (size_t j = 0; j < dstFirstDims.size(); j++) { if (j == axis) continue; if (!dimsEqualWeak(o_Dims[j], dstFirstDims[j])) - THROW_ERROR("has incorrect output dimensions"); + THROW_CPU_NODE_ERR("has incorrect output dimensions"); } } @@ -256,7 +254,7 @@ void Split::createPrimitive() { void Split::prepareParams() { const auto& srcMemPtr = getSrcMemoryAtPort(0); if (!srcMemPtr || !srcMemPtr->isDefined()) { - THROW_ERROR("has undefined input memory"); + THROW_CPU_NODE_ERR("has undefined input memory"); } if (!constSplitLengths) { @@ -271,7 +269,7 @@ void Split::prepareParams() { for (size_t port = 0; port < outputShapes.size(); ++port) { const auto& outMemPtr = this->getDstMemoryAtPort(port); if (!outMemPtr || !outMemPtr->isDefined()) { - THROW_ERROR("has undefined destination memory"); + THROW_CPU_NODE_ERR("has undefined destination memory"); } if (outMemPtr->getShape().hasZeroDims()) { @@ -301,7 +299,7 @@ void Split::execute(const dnnl::stream& strm) { } if (dstMemPtrs.empty()) - THROW_ERROR("Output data pointers have not been initialized."); + THROW_CPU_NODE_ERR("Output data pointers have not been initialized."); const auto& srcMem = 
getParentEdgeAt(0)->getMemory(); @@ -323,7 +321,7 @@ void Split::initOptimalPrimitiveDescriptor() { Node::initOptimalPrimitiveDescriptor(); auto selected_pd = getSelectedPrimitiveDescriptor(); if (selected_pd == nullptr) - THROW_ERROR("Preferable primitive descriptor is not set."); + THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set."); auto config = selected_pd->getConfig(); canUseOptimizedNspc2Ncsp = false; @@ -487,7 +485,7 @@ std::vector Split::getRawDstMemPtrs() const { for (size_t i = 0; i < dstMemPtrs.size(); ++i) { result[i] = dstMemPtrs[i].second->getDataAs(); if (!result[i]) { - THROW_ERROR("can't get child edge indx ", dstMemPtrs[i].first, " data."); + THROW_CPU_NODE_ERR("can't get child edge indx ", dstMemPtrs[i].first, " data."); } } return result; diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp index fbd6361eca53fc..cffde3a81d23dd 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp @@ -25,8 +25,6 @@ namespace ov { namespace intel_cpu { namespace node { -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " layer with name '", getName(), "' ", __VA_ARGS__) - static NodeConfig make_plain_config(const std::shared_ptr& op) { NodeConfig config; @@ -435,7 +433,7 @@ TensorIterator::TensorIterator(const std::shared_ptr& op, const GraphC void TensorIterator::getSupportedDescriptors() { auto tiOp = ov::as_type_ptr(ngraphOp); if (!tiOp) { - THROW_ERROR("cannot be cast to ov::op::util::SubGraphOp"); + THROW_CPU_NODE_ERR("cannot be cast to ov::op::util::SubGraphOp"); } const std::shared_ptr body = tiOp->get_function(); sub_graph.CreateGraph(body, context); @@ -519,7 +517,7 @@ void TensorIterator::getSupportedDescriptors() { -1, 1}); } else { - THROW_ERROR("has incorrect type of the input description."); + THROW_CPU_NODE_ERR("has incorrect type of the input description."); } } @@ -537,7 +535,7 @@ void TensorIterator::getSupportedDescriptors() { } else if (auto ti = ov::as_type_ptr(ngraphOp)) { algorithm = Algorithm::TensorIteratorCommon; } else { - THROW_ERROR("isn't supported!"); + THROW_CPU_NODE_ERR("isn't supported!"); } } @@ -894,11 +892,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto getNumIterations = [this](const PortMap& rule, const std::vector& dimensions) -> int { const auto axis = rule.axis; if (axis < 0 || static_cast(axis) >= dimensions.size()) { - THROW_ERROR(": Invalid \"axis\" value in an iteration component: ", - rule.axis, - ", dimensions number = ", - dimensions.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"axis\" value in an iteration component: ", + rule.axis, + ", dimensions number = ", + dimensions.size(), + " (out of range)"); } const auto space = dimensions[axis]; const int start = static_cast((rule.start < 0 ? 
(space + 1) : 0) + rule.start); @@ -906,7 +904,9 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto stride = rule.stride; if (stride == 0) { - THROW_ERROR(": Invalid \"stride\" value in an iteration component: ", rule.stride, " (infinite loop)"); + THROW_CPU_NODE_ERR(": Invalid \"stride\" value in an iteration component: ", + rule.stride, + " (infinite loop)"); } const auto step = std::abs(stride); @@ -914,21 +914,21 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, const auto dst = stride < 0 ? start : end; const auto length = dst - src; if (src < 0 || src >= dst || dst > static_cast(space) || length < step) { - THROW_ERROR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component", - ": \"start\" = ", - rule.start, - ", \"stride\" = ", - rule.stride, - ", \"end\" = ", - rule.end); + THROW_CPU_NODE_ERR(": Invalid \"start\",\"stride\",\"end\" values in an iteration component", + ": \"start\" = ", + rule.start, + ", \"stride\" = ", + rule.stride, + ", \"end\" = ", + rule.end); } if (length % step != 0) { - THROW_ERROR(": Each iteration must be the same size: length (", - length, - ") is not divisible by step (", - step, - ")"); + THROW_CPU_NODE_ERR(": Each iteration must be the same size: length (", + length, + ") is not divisible by step (", + step, + ")"); } return static_cast(length / step); @@ -943,11 +943,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, } if (rule.from < 0 || rule.from >= static_cast(inputShapes.size())) { - THROW_ERROR(": Invalid \"from\" value: \"from\" = ", - rule.from, - " inputs number = ", - inputShapes.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ", + rule.from, + " inputs number = ", + inputShapes.size(), + " (out of range)"); } const auto currentNumIterations = getNumIterations(rule, dims); @@ -955,10 +955,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, isDefault = false; 
numIterations = currentNumIterations; } else if (numIterations != currentNumIterations) { - THROW_ERROR(": There are at least two different iterations numbers: ", - numIterations, - " and ", - currentNumIterations); + THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ", + numIterations, + " and ", + currentNumIterations); } } @@ -972,11 +972,11 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, continue; if (rule.from < 0 || rule.from >= static_cast(outputShapes.size())) { - THROW_ERROR(": Invalid \"from\" value: \"from\" = ", - rule.from, - " inputs number = ", - outputShapes.size(), - " (out of range)"); + THROW_CPU_NODE_ERR(": Invalid \"from\" value: \"from\" = ", + rule.from, + " inputs number = ", + outputShapes.size(), + " (out of range)"); } const auto currentNumIterations = getNumIterations(rule, dims); @@ -984,10 +984,10 @@ int TensorIterator::getNumIteration(const std::vector& inputPortMap, isDefault = false; numIterations = currentNumIterations; } else if (numIterations != currentNumIterations) { - THROW_ERROR(": There are at least two different iterations numbers: ", - numIterations, - " and ", - currentNumIterations); + THROW_CPU_NODE_ERR(": There are at least two different iterations numbers: ", + numIterations, + " and ", + currentNumIterations); } } diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp index 391e1967a8c682..5a5888090ef6ee 100644 --- a/src/plugins/intel_cpu/src/nodes/unique.cpp +++ b/src/plugins/intel_cpu/src/nodes/unique.cpp @@ -14,8 +14,6 @@ using namespace ov::intel_cpu; using namespace ov::intel_cpu::node; -#define THROW_ERROR(...) 
OPENVINO_THROW(getTypeStr(), " node with name '", getName(), "' ", __VA_ARGS__) - bool Unique::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { if (!ov::is_type(op)) { @@ -41,7 +39,7 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co } if (!one_of(op->get_input_size(), 1u, 2u) || op->get_output_size() != 4) - THROW_ERROR("has incorrect number of input/output edges."); + THROW_CPU_NODE_ERR("has incorrect number of input/output edges."); for (int i = 0; i < 4; i++) { definedOutputs[i] = !op->get_output_target_inputs(i).empty(); @@ -55,8 +53,8 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co axis += op->get_input_partial_shape(IN_DATA).rank().get_length(); } if (axis < 0 || axis >= op->get_input_partial_shape(IN_DATA).rank().get_length()) { - THROW_ERROR("has invalid axis value: ", - ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]); + THROW_CPU_NODE_ERR("has invalid axis value: ", + ov::as_type(op->get_input_node_ptr(AXIS))->cast_vector()[0]); } } else { flattened = true; @@ -93,18 +91,18 @@ void Unique::createPrimitive() { void Unique::prepareParams() { auto dataMemPtr = getSrcMemoryAtPort(IN_DATA); if (!dataMemPtr) { - THROW_ERROR(" has null input data memory."); + THROW_CPU_NODE_ERR("has null input data memory."); } for (int i = 0; i < 4; i++) { if (definedOutputs[i]) { auto dstMemPtr = getDstMemoryAtPort(i); if (!dstMemPtr) { - THROW_ERROR(" has null output memory at port ", i); + THROW_CPU_NODE_ERR("has null output memory at port ", i); } } } if (getSelectedPrimitiveDescriptor() == nullptr) { - THROW_ERROR(" has unidentified preferable primitive descriptor."); + THROW_CPU_NODE_ERR("has unidentified preferable primitive descriptor."); } size_t srcLen = 1; From f46e3e9d143a18316e14f6d632fde318e329607f Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Mon, 20 Jan 2025 20:15:57 +0800 Subject: [PATCH 47/97] [Hetro][Func Test] only the nightly tests can use hw plugin 
(#28545) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* Signed-off-by: Zhai, Xuejun --- .../behavior/ov_plugin/core_threading_tests.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp index 39dc277f25a11e..b0152a06b8ab0f 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/core_threading_tests.cpp @@ -7,7 +7,7 @@ namespace { const Params params[] = { std::tuple{ov::test::utils::DEVICE_HETERO, - {{ov::device::priorities.name(), ov::test::utils::DEVICE_CPU}}}, + {{ov::device::priorities.name(), ov::test::utils::DEVICE_TEMPLATE}}}, }; } // namespace @@ -19,4 +19,4 @@ INSTANTIATE_TEST_SUITE_P(nightly_HETERO, INSTANTIATE_TEST_SUITE_P(HETERO_Streams, CoreThreadingTestsWithIter, testing::Combine(testing::ValuesIn(params), testing::Values(4), testing::Values(50)), - CoreThreadingTestsWithIter::getTestCaseName); \ No newline at end of file + CoreThreadingTestsWithIter::getTestCaseName); From 96c22330d5aa953752c22942a00e3032e4b1c9f0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:24:33 +0000 Subject: [PATCH 48/97] Bump pytest-dependency from 0.5.1 to 0.6.0 in /tests (#28549) Bumps [pytest-dependency](https://github.com/RKrahl/pytest-dependency) from 0.5.1 to 0.6.0.
    Changelog

    Sourced from pytest-dependency's changelog.

    0.6.0 (2023-12-31)

    
    Documentation
    -------------
    
    • [#39](https://github.com/RKrahl/pytest-dependency/issues/39), [#41](https://github.com/RKrahl/pytest-dependency/issues/41), [#59](https://github.com/RKrahl/pytest-dependency/issues/59)_: Review documentation

    Incompatible changes

    • Drop support for Python 2.

    Bug fixes and minor changes

    • [#40](https://github.com/RKrahl/pytest-dependency/issues/40)_: add logging.
    • [#50](https://github.com/RKrahl/pytest-dependency/issues/50), [#51](https://github.com/RKrahl/pytest-dependency/issues/51): test suite incompatibility with pytest 6.2.0.
    • [#58](https://github.com/RKrahl/pytest-dependency/issues/58)_: declare the type of automark_dependency ini-option correctly as bool.

    Internal

    • [#75](https://github.com/RKrahl/pytest-dependency/issues/75)_: review build tool chain.

    .. _#39: RKrahl/pytest-dependency#39 .. _#40: RKrahl/pytest-dependency#40 .. _#41: RKrahl/pytest-dependency#41 .. _#50: RKrahl/pytest-dependency#50 .. _#51: RKrahl/pytest-dependency#51 .. _#58: RKrahl/pytest-dependency#58 .. _#59: RKrahl/pytest-dependency#59 .. _#75: RKrahl/pytest-dependency#75

    Commits
    • 2cae589 Merge branch 'develop'
    • def647e Prepare release 0.6.0
    • 2baac9b Merge branch 'doc' into develop
    • 38baf8c Update changelog
    • e2edf54 Explicitely set language to 'en'
    • f11cf56 Rewrite introduction to the debugging guide
    • 346a344 Move the changelog to the end, after the API reference
    • 463227e Review README and bump copyright year
    • eb48f32 Fixup 695ea27: trailing whitespace
    • 695ea27 Update install instructions
    • Additional commits viewable in compare view

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pytest-dependency&package-manager=pip&previous-version=0.5.1&new-version=0.6.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tests/constraints.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/constraints.txt b/tests/constraints.txt index a806b7dfb47c18..45aac9051f2fd2 100644 --- a/tests/constraints.txt +++ b/tests/constraints.txt @@ -17,7 +17,7 @@ paddlepaddle==2.6.2 protobuf>=3.18.1,<6.0.0 py>=1.9.0 pytest>=5.0,<8.4 -pytest-dependency==0.5.1 +pytest-dependency==0.6.0 pytest-html==4.1.1 pytest-timeout==2.3.1 kornia==0.8.0 From 0fce5f3a17fc0d782e6468d5e048d6c449caa453 Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Mon, 20 Jan 2025 13:34:55 +0100 Subject: [PATCH 49/97] [cpu] Remove custom shape inference factories (#27924) ### Details: - Remove custom shape inference factories CPU nodes. ### Related PR - #27770 ### Tickets: - CVS-118704 --------- Signed-off-by: Raasz, Pawel Co-authored-by: Michal Lukaszewski Co-authored-by: Maksim Kutakov --- src/frontends/tensorflow/src/frontend.cpp | 11 +++-- src/plugins/intel_cpu/src/nodes/deconv.cpp | 37 ++++++++++++++--- src/plugins/intel_cpu/src/nodes/eye.cpp | 16 +------- src/plugins/intel_cpu/src/nodes/reference.cpp | 30 +++++++------- src/plugins/intel_cpu/src/nodes/reference.h | 1 + .../src/shape_inference/shape_inference.cpp | 41 ++++--------------- .../src/shape_inference/shape_inference.hpp | 1 - 7 files changed, 61 insertions(+), 76 deletions(-) diff --git a/src/frontends/tensorflow/src/frontend.cpp b/src/frontends/tensorflow/src/frontend.cpp index 006a4e22e06304..e4e35c42b08b35 100644 --- a/src/frontends/tensorflow/src/frontend.cpp +++ b/src/frontends/tensorflow/src/frontend.cpp @@ -466,12 +466,11 @@ std::shared_ptr FrontEnd::convert(const ov::frontend::InputModel::Ptr // recommend to use openvino-tokenizers if some unconverted operations from tokenizers are met if (unsupported_ops_from_tokenizers.size() > 0) { - exception_message - << "\nEncountered unconverted operation(s) for which 
openvino-tokenizers package " - "provides conversion extension(s): " - << unsupported_ops_from_tokenizers - << ". Install OpenVINO Tokenizers, refer to the documentation: " - "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; + exception_message << "\nEncountered unconverted operation(s) for which openvino-tokenizers package " + "provides conversion extension(s): " + << unsupported_ops_from_tokenizers + << ". Install OpenVINO Tokenizers, refer to the documentation: " + "https://docs.openvino.ai/2024/openvino-workflow-generative/ov-tokenizers.html \n"; } } diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index 886497bd57cc29..4090244a17ec32 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -125,16 +125,43 @@ bool DeconvKey::operator==(const DeconvKey& rhs) const { * input. Since in case it exists, plugin should pass the input data to the shape inference function. * */ -class DeconfolutionShapeInferFactory : public ShapeInferFactory { +class DeconvolutionShapeInferFactory : public ShapeInferFactory { public: - DeconfolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} + DeconvolutionShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} ShapeInferPtr makeShapeInfer() const override { - const auto port_mask = (m_op->get_input_size() > 2) ? PortMask(2) : EMPTY_PORT_MASK; - return make_shape_inference(m_op, port_mask); + return std::make_shared(m_op); } private: + class DeconvolutionShapeInfer : public IShapeInfer { + public: + DeconvolutionShapeInfer(const std::shared_ptr& op) + : m_shape_infer(make_shape_inference(op)), + m_port_mask((op->get_input_size() > 2) ? 
PortMask(2) : EMPTY_PORT_MASK) {} + + Result infer(const std::vector>& input_shapes, + const std::unordered_map& data_dependency) override { + return m_shape_infer->infer(input_shapes, data_dependency); + } + + const ov::CoordinateDiff& get_pads_begin() override { + return m_shape_infer->get_pads_begin(); + } + + const ov::CoordinateDiff& get_pads_end() override { + return m_shape_infer->get_pads_end(); + } + + port_mask_t get_port_mask() const override { + return m_port_mask; + }; + + private: + ShapeInferPtr m_shape_infer; + const port_mask_t m_port_mask; + }; + std::shared_ptr m_op; }; } // namespace @@ -165,7 +192,7 @@ bool Deconvolution::isSupportedOperation(const std::shared_ptr& } Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphContext::CPtr& context) - : Node(op, context, DeconfolutionShapeInferFactory(op)) { + : Node(op, context, DeconvolutionShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index ef4995a87fd492..411a77260aa7d6 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -29,22 +29,8 @@ bool Eye::isSupportedOperation(const std::shared_ptr& op, std::s return true; } -namespace { -class EyeShapeInferFactory : public ShapeInferFactory { -public: - EyeShapeInferFactory(std::shared_ptr op) : m_op(std::move(op)) {} - ShapeInferPtr makeShapeInfer() const override { - return (m_op->get_input_size() == 4) ? 
make_shape_inference(m_op) - : make_shape_inference(m_op, PortMask(Eye::ROWS_NUM, Eye::COLS_NUM)); - } - -private: - std::shared_ptr m_op; -}; -} // namespace - Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr& context) - : Node(op, context, EyeShapeInferFactory(op)) { + : Node(op, context, NgraphShapeInferFactory(op)) { std::string errorMessage; if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); diff --git a/src/plugins/intel_cpu/src/nodes/reference.cpp b/src/plugins/intel_cpu/src/nodes/reference.cpp index c7f1bbe30ff574..3283f7a43253ab 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.cpp +++ b/src/plugins/intel_cpu/src/nodes/reference.cpp @@ -12,22 +12,10 @@ namespace ov { namespace intel_cpu { -class ReferenceShapeInferFactory : public ShapeInferFactory { -public: - ReferenceShapeInferFactory(std::shared_ptr op) : m_op{std::move(op)} {} - - ShapeInferPtr makeShapeInfer() const override { - return make_shape_inference(m_op, FULL_PORT_MASK); - } - -private: - std::shared_ptr m_op; -}; - namespace node { Reference::Reference(const std::shared_ptr& op, const GraphContext::CPtr& context, std::string errorMessage) - : Node(op, context, ReferenceShapeInferFactory(op)), + : Node(op, context, NgraphShapeInferFactory(op)), ovCoreNode(op), additionalErrorMessage(std::move(errorMessage)) { if (!op->has_evaluate()) { @@ -61,7 +49,9 @@ void Reference::initSupportedPrimitiveDescriptors() { addSupportedPrimDesc(inputConfigurators, outputConfigurators, impl_desc_type::ref); } -void Reference::createPrimitive() {} +void Reference::createPrimitive() { + hasOutputShapeDataDependency = isDynamicNode() && outputShapeDataDependency(); +} void Reference::execute(const dnnl::stream& strm) { auto inputs = prepareInputs(); @@ -72,6 +62,14 @@ void Reference::execute(const dnnl::stream& strm) { } void Reference::executeDynamicImpl(const dnnl::stream& strm) { + if (!hasOutputShapeDataDependency) { + // if there is no data dependency 
for the output shape, we can execute the operation as is, similar to the + // static case, since the shapes are already calculated + execute(strm); + return; + } + + // if there is data dependency, we need to perform shape inference first auto inputs = prepareInputs(); ov::TensorVector outputs; auto result = Node::shapeInfer(); @@ -125,7 +123,9 @@ bool Reference::created() const { } bool Reference::needShapeInfer() const { - return false; + // If there is data dependency for the output shape, let's assume the node has internal dynamism (in general case), + // so we postpone the shape inference until the actual execution + return !hasOutputShapeDataDependency && Node::needShapeInfer(); } ov::TensorVector Reference::prepareInputs() const { diff --git a/src/plugins/intel_cpu/src/nodes/reference.h b/src/plugins/intel_cpu/src/nodes/reference.h index 782c55716506a8..f0a37ae6529f5f 100644 --- a/src/plugins/intel_cpu/src/nodes/reference.h +++ b/src/plugins/intel_cpu/src/nodes/reference.h @@ -36,6 +36,7 @@ class Reference : public Node { private: const std::shared_ptr ovCoreNode; const std::string additionalErrorMessage; + bool hasOutputShapeDataDependency = false; // flag to cache the output shape data dependency check result }; } // namespace node diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index 5ba7e7173792fd..ba7832aef71fab 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -234,8 +234,7 @@ class ShapeInferFallback : public ShapeInferBase { ov::optional> infer(const std::vector& input_shapes, const ov::ITensorAccessor& tensor_accessor) override { - auto op = m_node.get(); - std::vector output_shapes; + const auto op = m_node.get(); std::shared_ptr local_op; ov::OutputVector new_inputs; @@ -252,7 +251,7 @@ class ShapeInferFallback : public ShapeInferBase { local_op = 
op->clone_with_new_inputs(new_inputs); local_op->validate_and_infer_types(); - output_shapes.resize(local_op->get_output_size()); + std::vector output_shapes(local_op->get_output_size()); for (size_t i = 0; i < output_shapes.size(); ++i) { const auto& partial_shape = local_op->get_output_partial_shape(i); @@ -265,6 +264,11 @@ class ShapeInferFallback : public ShapeInferBase { return {std::move(output_shapes)}; } + + port_mask_t get_port_mask() const override { + // For fallback return full port mask to try get data for all node's inputs + return FULL_PORT_MASK; + } }; template @@ -610,34 +614,6 @@ const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{ #undef _OV_OP_SHAPE_INFER_MASK_REG #undef _OV_OP_SHAPE_INFER_VA_REG -class ShapeInferCustomMask : public IShapeInfer { -public: - ShapeInferCustomMask(ShapeInferPtr shape_infer, port_mask_t port_mask) - : m_shape_infer{std::move(shape_infer)}, - m_port_mask{port_mask} {} - - Result infer(const std::vector>& input_shapes, - const std::unordered_map& data_dependency) override { - return m_shape_infer->infer(input_shapes, data_dependency); - } - - const ov::CoordinateDiff& get_pads_begin() override { - return m_shape_infer->get_pads_begin(); - } - - const ov::CoordinateDiff& get_pads_end() override { - return m_shape_infer->get_pads_end(); - } - - port_mask_t get_port_mask() const override { - return m_port_mask; - } - -private: - const ShapeInferPtr m_shape_infer; - const port_mask_t m_port_mask; -}; - std::shared_ptr make_shape_inference(std::shared_ptr op) { if (auto shape_infer = IStaticShapeInferFactory::make(op->get_type_info(), op)) { return shape_infer; @@ -652,8 +628,5 @@ std::shared_ptr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask) { - return std::make_shared(make_shape_inference(std::move(op)), port_mask); -} } // namespace intel_cpu } // namespace ov diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp 
b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp index 21b36e76ddd9a7..cb937127b219f0 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.hpp @@ -32,6 +32,5 @@ class IStaticShapeInfer : public IShapeInfer { }; std::shared_ptr make_shape_inference(std::shared_ptr op); -ShapeInferPtr make_shape_inference(std::shared_ptr op, IShapeInfer::port_mask_t port_mask); } // namespace intel_cpu } // namespace ov From 1025c76d098c435972bb42ff43d3262a0d82c7cf Mon Sep 17 00:00:00 2001 From: Michal Miotk Date: Mon, 20 Jan 2025 15:02:31 +0100 Subject: [PATCH 50/97] [GPU] added missing info about conv autopad (#28552) ### Details: - fix yolov3 dynamic inference ### Tickets: - CVS-157866 --- .../src/graph/graph_optimizer/prepare_primitive_fusing.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp index 2120a1308ea290..ce5333f95a1b59 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp @@ -399,7 +399,8 @@ void prepare_primitive_fusing::fuse_bias(program &p) { desc->padding_begin, desc->padding_end, desc->grouped_weights_shape, - conv.get_output_layout().data_type); + conv.get_output_layout().data_type, + desc->auto_pad); // Copy transposed flag to new prim as convolution node might be produced by deconv -> conv replacement before this pass conv_with_bias_prim->transposed = desc->transposed; From 2999477ad77cad3de4aadb5f56996bf2f7f5dd43 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Mon, 20 Jan 2025 20:52:25 +0100 Subject: [PATCH 51/97] [GHA] Save JS artifacts (#28521) ### Details: - JS package is needed to build extensions using provider action - ### Tickets: - *ticket-id* --- 
.github/workflows/job_build_linux.yml | 9 ++++++++- .github/workflows/job_build_windows.yml | 15 ++++++++++++++- .github/workflows/job_openvino_js.yml | 9 +++++++-- .github/workflows/windows_vs2019_release.yml | 11 ++++++++--- 4 files changed, 37 insertions(+), 7 deletions(-) diff --git a/.github/workflows/job_build_linux.yml b/.github/workflows/job_build_linux.yml index c56de5872cc2df..d1dfd0504ae194 100644 --- a/.github/workflows/job_build_linux.yml +++ b/.github/workflows/job_build_linux.yml @@ -234,6 +234,11 @@ jobs: -DENABLE_WHEEL=OFF cmake --build ${BUILD_DIR} --parallel $(nproc) cmake --install ${BUILD_DIR} --prefix ${INSTALL_DIR_JS} + + - name: Pack openvino_js_package + if: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} + run: tar -cvf - * | pigz > ${BUILD_DIR}/openvino_js_package.tar.gz + working-directory: ${{ env.INSTALL_DIR_JS }} - name: Build RPM packages if: ${{ inputs.build-rpm-packages }} @@ -279,7 +284,7 @@ jobs: uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_js_package - path: ${{ env.INSTALL_DIR_JS }} + path: ${{ env.BUILD_DIR }}/openvino_js_package.tar.gz if-no-files-found: 'error' - name: Upload openvino developer package @@ -333,8 +338,10 @@ jobs: ${{ env.BUILD_DIR }}/openvino_tests.tar.gz ${{ env.BUILD_DIR }}/deb ${{ env.MANIFEST_PATH }} + ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.tar.gz', env.BUILD_DIR) || '' }} ${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }} storage_dir: ${{ env.PRODUCT_TYPE }} storage_root: ${{ env.ARTIFACTS_SHARE }} env: STORE_WHEELS: ${{ inputs.os != 'debian_10' && inputs.arch != 'arm' }} + STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index d5d42ffcfea8d2..f0c150c4ac4db4 100644 --- a/.github/workflows/job_build_windows.yml +++ 
b/.github/workflows/job_build_windows.yml @@ -265,6 +265,17 @@ jobs: -DENABLE_WHEEL=OFF cmake --build ${{ env.BUILD_DIR }} --parallel $ENV:NUMBER_OF_PROCESSORS cmake --install ${{ env.BUILD_DIR }} --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_DIR_JS }} + + - name: Pack JS Artifacts + if: ${{ fromJSON(inputs.affected-components).JS_API }} + run: | + $file = Get-ChildItem -Path "${{ env.INSTALL_DIR_JS }}" + $compress = @{ + Path = $file + CompressionLevel = "Optimal" + DestinationPath = "${{ env.BUILD_DIR }}/openvino_js_package.zip" + } + Compress-Archive @compress # # Upload build artifacts and logs @@ -297,7 +308,7 @@ jobs: uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_js_package - path: ${{ env.INSTALL_DIR_JS }} + path: ${{ env.BUILD_DIR }}/openvino_js_package.zip if-no-files-found: 'error' - name: Store artifacts to a shared drive @@ -309,8 +320,10 @@ jobs: ${{ env.BUILD_DIR }}/openvino_package.zip ${{ env.BUILD_DIR }}/openvino_tests.zip ${{ env.MANIFEST_PATH }} + ${{ env.STORE_JS == 'true' && format('{0}/openvino_js_package.zip', env.BUILD_DIR) || '' }} ${{ env.STORE_WHEELS == 'true' && format('{0}/wheels', env.INSTALL_WHEELS_DIR) || '' }} storage_dir: ${{ env.PRODUCT_TYPE }} storage_root: ${{ env.ARTIFACTS_SHARE }} env: STORE_WHEELS: ${{ inputs.build-type != 'Debug' }} + STORE_JS: ${{ fromJSON(inputs.affected-components).JS_API && inputs.build-js }} diff --git a/.github/workflows/job_openvino_js.yml b/.github/workflows/job_openvino_js.yml index fd04d8842daae7..dbee8511c4187b 100644 --- a/.github/workflows/job_openvino_js.yml +++ b/.github/workflows/job_openvino_js.yml @@ -45,11 +45,16 @@ jobs: echo "OPENVINO_JS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js" >> "$GITHUB_ENV" echo "OPENVINO_JS_LIBS_DIR=$GITHUB_WORKSPACE/openvino/src/bindings/js/node/bin" >> "$GITHUB_ENV" - - name: Download OpenVINO JS package + - name: Download OpenVINO artifacts (JS) uses: 
actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: openvino_js_package + pattern: openvino_[js]* path: ${{ env.OPENVINO_JS_LIBS_DIR }} + merge-multiple: true + + - name: Extract OpenVINO packages + run: pigz -dc openvino_js_package.tar.gz | tar -xf - -C ${OPENVINO_JS_LIBS_DIR} + working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }} - name: Setup Node ${{ env.NODE_VERSION }} if: runner.os != 'Linux' # Node is already installed in the Docker image diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 5708b529f25acc..92d826de1d8394 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -192,12 +192,17 @@ jobs: sparse-checkout: | src/bindings/js path: 'openvino' - - - name: Download OpenVINO js package + + - name: Download OpenVINO artifacts (JS) uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: - name: openvino_js_package + pattern: openvino_[js]* path: ${{ env.OPENVINO_JS_LIBS_DIR }} + merge-multiple: true + + - name: Extract OpenVINO packages + run: Expand-Archive openvino_js_package.zip -DestinationPath . 
+ working-directory: ${{ env.OPENVINO_JS_LIBS_DIR }} - name: Setup Node ${{ env.NODE_VERSION }} uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 From bb78f44476bb1701c4982423588f4472382dc140 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 20 Jan 2025 20:55:35 +0100 Subject: [PATCH 52/97] [LPT] Fix medium static code analyzer issues (#28483) ### Tickets: - *CVS-1521493* - *CVS-130703* - *CVS-121616* - *CVS-121618* --- .../common/fake_quantize_dequantization.hpp | 3 ++- .../quantization_granularity_attribute.hpp | 2 +- .../src/assign_and_read_value.cpp | 17 ++++------------- .../low_precision_transformations/src/clamp.cpp | 3 ++- .../src/eliminate_fake_quantize.cpp | 2 +- .../src/fake_quantize_dequantization.cpp | 3 --- .../src/markup_quantization_granularity.cpp | 6 +++--- .../src/network_helper.cpp | 1 + .../src/pull_reshape_through_dequantization.cpp | 6 +++--- .../pull_transpose_through_dequantization.cpp | 2 +- 10 files changed, 18 insertions(+), 27 deletions(-) diff --git a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp index 1035e88ed1d0f0..0d16dbba891b61 100644 --- a/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp +++ b/src/common/low_precision_transformations/include/low_precision/common/fake_quantize_dequantization.hpp @@ -50,7 +50,8 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDequantization { const std::shared_ptr& elementwise, std::shared_ptr& constant); - size_t channelDimIndex; + // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1 + size_t channelDimIndex = 1ul; Output data; std::shared_ptr convert; std::shared_ptr subtract; diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp 
b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp index e74f601f4bd4de..c43d061fb455b3 100644 --- a/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp +++ b/src/common/low_precision_transformations/include/low_precision/rt_info/quantization_granularity_attribute.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API QuantizationGranularityAttribute : public ov::Runti }; QuantizationGranularityAttribute() : granularity(Granularity::PerChannel) {} - QuantizationGranularityAttribute(const Granularity granularity) : granularity(granularity) {} + QuantizationGranularityAttribute(const Granularity& granularity) : granularity(granularity) {} bool operator==(const QuantizationGranularityAttribute& attribute) const { return this->granularity == attribute.granularity; diff --git a/src/common/low_precision_transformations/src/assign_and_read_value.cpp b/src/common/low_precision_transformations/src/assign_and_read_value.cpp index 27b79e4d347102..e65e35890c0600 100644 --- a/src/common/low_precision_transformations/src/assign_and_read_value.cpp +++ b/src/common/low_precision_transformations/src/assign_and_read_value.cpp @@ -20,31 +20,22 @@ namespace low_precision { AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params) : LayerTransformation(params), model(model) { MATCHER_SCOPE(AssignAndReadValueTransformation); - auto assign3 = pattern::wrap_type({ pattern::wrap_type() }); - auto assign6 = pattern::wrap_type({ pattern::wrap_type() }); + auto assign_m = pattern::wrap_type({ pattern::wrap_type() }); ov::graph_rewrite_callback callback = [OV_CAPTURE_CPY_AND_THIS](pattern::Matcher& m) { - const auto& opsMap = m.get_pattern_value_map(); - auto op = m.get_match_root(); - auto assignIt = opsMap.find(assign3); - if (assignIt == opsMap.end()) { - assignIt = opsMap.find(assign6); - } - const auto assign = 
assignIt->second.get_node_shared_ptr(); + const auto assign = m.get_match_root(); // check that we have ReadValue as the first dependency if (assign->get_control_dependencies().empty()) { return false; } - if (transformation_callback(op)) { + if (transformation_callback(assign)) { return false; } return transform(*context, m); }; - auto m = std::make_shared( - std::make_shared(OutputVector{ assign3, assign6 }), - matcher_name); + auto m = std::make_shared(assign_m, matcher_name); this->register_matcher(m, callback); } diff --git a/src/common/low_precision_transformations/src/clamp.cpp b/src/common/low_precision_transformations/src/clamp.cpp index 80748f549bf1ba..89150e81470bce 100644 --- a/src/common/low_precision_transformations/src/clamp.cpp +++ b/src/common/low_precision_transformations/src/clamp.cpp @@ -72,7 +72,8 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa replace_node_update_name(newClamp, replacement); - element::Type outputClampType = dequantization.multiply ? + OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration"); + const auto outputClampType = dequantization.multiply ? 
dequantization.multiply->get_output_element_type(0) : dequantization.subtract->get_output_element_type(0); ov::pass::low_precision::NetworkHelper::setOutDataPrecision(replacement, outputClampType); diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp index cb5d9270a43768..1a09d9914de3bf 100644 --- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp @@ -51,7 +51,7 @@ bool check_interval(const std::shared_ptr& fq, const std::shared_ptr& constant, const float value, const float max_diff, - const bool exact_comparison) noexcept { + const bool exact_comparison) { bool need_to_check_intervals = false; const auto& constant_values = constant->cast_vector(); for (const auto constant_value : constant_values) { diff --git a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp index a96a5032b5fef9..7246c9869ce7d8 100644 --- a/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize_dequantization.cpp @@ -32,9 +32,6 @@ FakeQuantizeDequantization::FakeQuantizeDequantization( subtractConstant(subtractConstant), multiply(multiply), multiplyConstant(multiplyConstant) { - // for most node with layout NC, NCHW, NCDWH, index of channel dimension is 1 - channelDimIndex = 1ul; - const auto rank = data.get_partial_shape().rank(); if (rank.is_static()) { std::string data_src_type = data.get_node()->get_type_name(); diff --git a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp index b9d5ac2ec4dead..f59aca3498c9f0 100644 --- 
a/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp +++ b/src/common/low_precision_transformations/src/markup_quantization_granularity.cpp @@ -30,7 +30,7 @@ ov::pass::low_precision::MarkupQuantizationGranularity::MarkupQuantizationGranul bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const std::shared_ptr& f) { RUN_ON_FUNCTION_SCOPE(MarkupPerTensorQuantization); auto setRestriction = [](const std::shared_ptr& node, const std::vector& restrictedPorts) { - auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity granularity){ + auto createAttribute = [](Input& input, const QuantizationGranularityAttribute::Granularity& granularity){ auto &rt = input.get_rt_info(); rt.emplace(QuantizationGranularityAttribute::get_type_info_static(), QuantizationGranularityAttribute(granularity)); }; @@ -43,14 +43,14 @@ bool ov::pass::low_precision::MarkupQuantizationGranularity::run_on_model(const } } else { // markup specific ports - for (const auto item : restrictedPorts) { + for (const auto& item : restrictedPorts) { Input input = node->input(item.port); createAttribute(input, item.granularity); } } }; - for (const std::shared_ptr& node : f->get_ordered_ops()) { + for (const auto& node : f->get_ordered_ops()) { if (node->get_input_size() == 0) { continue; } diff --git a/src/common/low_precision_transformations/src/network_helper.cpp b/src/common/low_precision_transformations/src/network_helper.cpp index e57fdcfb1b8e81..afb7e19c13e7ad 100644 --- a/src/common/low_precision_transformations/src/network_helper.cpp +++ b/src/common/low_precision_transformations/src/network_helper.cpp @@ -622,6 +622,7 @@ std::shared_ptr NetworkHelper::separateInStandaloneBranch(std::shared_ parent = multiply->output(0); } + OPENVINO_ASSERT(dequantization.multiply != nullptr || dequantization.subtract != nullptr, "incorrect dequantization ops configuration"); const auto originalParent = dequantization.multiply ? 
dequantization.multiply->shared_from_this() : dequantization.subtract->shared_from_this(); diff --git a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp index 157a204af3a089..6e33afc09461f2 100644 --- a/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp +++ b/src/common/low_precision_transformations/src/pull_reshape_through_dequantization.cpp @@ -101,7 +101,7 @@ std::shared_ptr moveThroughConvert(const std::shared_ptr& reshape, c void fuseConstant(const std::shared_ptr& reshape, const std::shared_ptr& constant) { ov::OutputVector result(1); - reshape->constant_fold(result, { constant, reshape->input_value(1) }); + OPENVINO_ASSERT(reshape->constant_fold(result, { constant, reshape->input_value(1) }), "Reshape constant folding failed"); const auto newConstant = result[0].get_node_shared_ptr(); replace_node(reshape, newConstant); copy_runtime_info({ constant, reshape }, newConstant); @@ -139,7 +139,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq return false; } - while (reshape != nullptr) { + do { const auto parent = reshape->get_input_node_shared_ptr(0); if (ov::is_type(parent) || ov::is_type(parent)) { reshape = pull_reshape_through_dequantization::moveThroughElementwise(reshape, parent); @@ -151,7 +151,7 @@ ov::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroughDeq } else { THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type"; } - } + } while (reshape != nullptr); return true; }; diff --git a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp index a4557288c74f23..3f3533f12a7da7 100644 --- a/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp +++ 
b/src/common/low_precision_transformations/src/pull_transpose_through_dequantization.cpp @@ -110,7 +110,7 @@ ov::pass::low_precision::PullTransposeThroughDequantization::PullTransposeThroug ov::matcher_pass_callback callback = [=](ov::pass::pattern::Matcher & m) -> bool { const auto& opsMap = m.get_pattern_value_map(); - auto transpose = opsMap.find(matcherTranspose)->second.get_node()->shared_from_this(); + auto transpose = opsMap.at(matcherTranspose).get_node_shared_ptr(); while (transpose != nullptr) { const auto parent = transpose->get_input_node_shared_ptr(0); From 155f6968b00e5931506e079b17c2820d164be6f8 Mon Sep 17 00:00:00 2001 From: Ekaterina Shiryaeva Date: Mon, 20 Jan 2025 21:07:34 +0100 Subject: [PATCH 53/97] [NPUW] Fix scales processing in CWAI for nf4 (#28523) ### Tickets: - *E-149709* --- .../intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp index 93a43c9b82570a..a4a03dea982438 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/partitioning/patterns/dcoff.cpp @@ -890,7 +890,8 @@ CWAI3::CWAI3(CWAI3::Results scales) { auto matched_valueA = std::static_pointer_cast(matched_nodeA); auto matched_valueC = std::static_pointer_cast(matched_nodeC); - if (ov::element::i4 == matched_valueA->get_element_type() && + if ((ov::element::i4 == matched_valueA->get_element_type() || + ov::element::nf4 == matched_valueA->get_element_type()) && (ov::element::f16 == matched_valueC->get_element_type() || ov::element::f32 == matched_valueC->get_element_type())) { LOG_DEBUG("Matched: " << matched_valueC); From 1ad48635dc3bd31407c0a6aff93fcf9aedfa266a Mon Sep 17 00:00:00 2001 From: Tomasz Jankowski Date: Mon, 20 Jan 2025 21:40:45 +0100 Subject: [PATCH 54/97] [RTTI] Use OV dynamic cast on 
Android only (#28519) ### Details: OV dynamic casting causes issue in external software with badly formed OV RTTI definitions, so it's replaced with standard dynamic casting, except for Android. ### Tickets: - CVS-160749 --------- Signed-off-by: Tomasz Jankowski Co-authored-by: Ilya Lavrenov --- src/core/include/openvino/core/type.hpp | 12 ++++++ src/core/tests/rtti.cpp | 56 ++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/src/core/include/openvino/core/type.hpp b/src/core/include/openvino/core/type.hpp index 4877b9ce02b251..812208855fa7f3 100644 --- a/src/core/include/openvino/core/type.hpp +++ b/src/core/include/openvino/core/type.hpp @@ -77,6 +77,10 @@ struct OPENVINO_API DiscreteTypeInfo { OPENVINO_API std::ostream& operator<<(std::ostream& s, const DiscreteTypeInfo& info); +#if defined(__ANDROID__) || defined(ANDROID) +# define OPENVINO_DYNAMIC_CAST +#endif + /// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a /// Type*/shared_ptr template @@ -93,7 +97,11 @@ template typename std::enable_if(std::declval())), Type*>::value, Type*>::type as_type(Value value) { +#ifdef OPENVINO_DYNAMIC_CAST return ov::is_type(value) ? 
static_cast(value) : nullptr; +#else + return dynamic_cast(value); +#endif } namespace util { @@ -114,7 +122,11 @@ struct AsTypePtr> { /// Type, nullptr otherwise template auto as_type_ptr(const U& value) -> decltype(::ov::util::AsTypePtr::template call(value)) { +#ifdef OPENVINO_DYNAMIC_CAST return ::ov::util::AsTypePtr::template call(value); +#else + return std::dynamic_pointer_cast(value); +#endif } } // namespace ov diff --git a/src/core/tests/rtti.cpp b/src/core/tests/rtti.cpp index 1fd8787ee60f38..9cfa225f4a3010 100644 --- a/src/core/tests/rtti.cpp +++ b/src/core/tests/rtti.cpp @@ -5,10 +5,12 @@ #include "common_test_utils/test_tools.hpp" #include "gtest/gtest.h" #include "openvino/op/op.hpp" +#include "openvino/pass/matcher_pass.hpp" -using namespace ov; using namespace std; +namespace ov::test { + class OpType : public ov::op::Op { public: OPENVINO_OP("OpType"); @@ -88,3 +90,55 @@ TEST(rtti, op_with_type_version_parent_old) { ASSERT_NE(type_info.parent, nullptr); ASSERT_EQ(*type_info.parent, OpType::get_type_info_static()); } + +#if !defined(__ANDROID__) && !defined(ANDROID) + +class IncompleteRtti : public pass::MatcherPass { +public: + OPENVINO_RTTI("IncompleteRtti", "rtti_test"); +}; + +class DerivedIncompleteRtti : public IncompleteRtti { +public: + OPENVINO_RTTI("DerivedIncompleteRtti", "rtti_test", IncompleteRtti); +}; + +// Assert backward compatibility of RTTI definition without parent but casted with as_type or as_type_ptr pointer work. 
+TEST(rtti, assert_casting_without_parent) { + { + IncompleteRtti incomplete; + DerivedIncompleteRtti derived; + + auto pass_A = as_type(&incomplete); + auto pass_B = as_type(&derived); + auto pass_C = as_type(&derived); + + EXPECT_NE(nullptr, pass_A); + EXPECT_NE(nullptr, pass_B); + EXPECT_NE(nullptr, pass_C); + + EXPECT_NE(nullptr, as_type(pass_A)); + EXPECT_NE(nullptr, as_type(pass_B)); + EXPECT_NE(nullptr, as_type(pass_B)); + EXPECT_NE(nullptr, as_type(pass_C)); + } + { + auto incomplete = std::make_shared(); + auto derived = std::make_shared(); + + auto pass_A = as_type_ptr(incomplete); + auto pass_B = as_type_ptr(derived); + auto pass_C = as_type_ptr(derived); + + EXPECT_NE(nullptr, pass_A); + EXPECT_NE(nullptr, pass_B); + EXPECT_NE(nullptr, pass_C); + + EXPECT_NE(nullptr, as_type_ptr(pass_A)); + EXPECT_NE(nullptr, as_type_ptr(pass_B)); + EXPECT_NE(nullptr, as_type_ptr(pass_B)); + EXPECT_NE(nullptr, as_type_ptr(pass_C)); + } +} +#endif // ANDROID +} // namespace ov::test From 08be7ae090cb1871490cd7ec521a8e80422152e9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:24:34 +0000 Subject: [PATCH 55/97] Bump reviewdog/action-shellcheck from 1.27.0 to 1.29.0 (#28571) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [reviewdog/action-shellcheck](https://github.com/reviewdog/action-shellcheck) from 1.27.0 to 1.29.0.
    Release notes

    Sourced from reviewdog/action-shellcheck's releases.

    Release v1.29.0

    What's Changed

    New Contributors

    Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.28.0...v1.29.0

    Release v1.28.0

    What's Changed

    New Contributors

    Full Changelog: https://github.com/reviewdog/action-shellcheck/compare/v1.27.0...v1.28.0

    Commits
    • 6e0e63d Merge pull request #70 from reviewdog/depup/reviewdog/reviewdog
    • 958d9e1 Merge pull request #71 from abitrolly/patch-1
    • 44addb0 Show shellcheck version after install
    • fff8e91 chore(deps): update reviewdog/reviewdog to 0.20.3
    • 22f96e3 Merge pull request #69 from reviewdog/add_fail_level
    • e48fb59 Add line break
    • d394b4f Add fail_level and deduplicate fail_on_error
    • See full diff in compare view

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=reviewdog/action-shellcheck&package-manager=github_actions&previous-version=1.27.0&new-version=1.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/code_style.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml index 97b399b1abf48d..89fb4e64670d8d 100644 --- a/.github/workflows/code_style.yml +++ b/.github/workflows/code_style.yml @@ -98,7 +98,7 @@ jobs: # always provide suggestions even for skipped scripts in ov_shellcheck tagret - name: ShellCheck action if: always() - uses: reviewdog/action-shellcheck@ccaafec556ffa154f112bfcb7b9c9574190b7091 # v1.27.0 + uses: reviewdog/action-shellcheck@6e0e63d1750d02d761b3df0f2c5ba9f9ac4a9ed7 # v1.29.0 with: level: style reporter: github-pr-review From 0efe897a15ce6470b3eb78ef119b3b620966ab2f Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Mon, 20 Jan 2025 22:55:07 +0100 Subject: [PATCH 56/97] [LPT] Cleanup base LayerTransformation class from legacy TransformationContext (#28327) ### Details: - *`TransformationContext` is not used anywhere and `LayerTransformation::context` always equal to `nullptr`* - *This PR completely removes `TransformationContext`* - *Also, `LayerTransformation` class is cleaned up from legacy methods which are not used anywhere* ### Tickets: - *N\A* --- .../include/low_precision/add.hpp | 4 +- .../low_precision/assign_and_read_value.hpp | 4 +- .../include/low_precision/avg_pool.hpp | 4 +- .../include/low_precision/batch_to_space.hpp | 4 +- .../include/low_precision/broadcast.hpp | 2 +- .../include/low_precision/clamp.hpp | 4 +- .../low_precision/cleanup_transformation.hpp | 2 +- .../include/low_precision/concat.hpp | 4 +- .../include/low_precision/convert.hpp | 2 +- .../include/low_precision/convolution.hpp | 2 +- .../convolution_backprop_data.hpp | 4 +- .../include/low_precision/depth_to_space.hpp | 2 +- .../low_precision/eliminate_fake_quantize.hpp | 4 +- .../eltwise_base_transformation.hpp | 2 +- 
.../include/low_precision/fake_quantize.hpp | 9 +- .../fake_quantize_decomposition.hpp | 2 +- .../include/low_precision/fold_convert.hpp | 4 +- .../low_precision/fold_fake_quantize.hpp | 4 +- .../include/low_precision/fuse_convert.hpp | 4 +- .../fuse_elementwise_to_fake_quantize.hpp | 2 +- .../fuse_multiply_to_fake_quantize.hpp | 2 +- .../fuse_subtract_to_fake_quantize.hpp | 2 +- .../include/low_precision/gather.hpp | 4 +- .../low_precision/group_convolution.hpp | 2 +- .../include/low_precision/interpolate.hpp | 4 +- .../low_precision/layer_transformation.hpp | 67 +++----------- .../include/low_precision/mat_mul.hpp | 4 +- .../include/low_precision/max_pool.hpp | 4 +- .../low_precision/move_fake_quantize.hpp | 4 +- .../include/low_precision/multiply.hpp | 2 +- .../low_precision/multiply_partial.hpp | 4 +- .../multiply_to_group_convolution.hpp | 4 +- .../include/low_precision/mvn.hpp | 4 +- .../include/low_precision/network_helper.hpp | 1 - .../include/low_precision/normalize_l2.hpp | 4 +- .../include/low_precision/pad.hpp | 4 +- .../include/low_precision/prelu.hpp | 4 +- .../include/low_precision/recurrent_cell.hpp | 6 +- .../reduce_base_transformation.hpp | 4 +- .../include/low_precision/reduce_max.hpp | 2 +- .../include/low_precision/reduce_mean.hpp | 2 +- .../include/low_precision/reduce_min.hpp | 2 +- .../include/low_precision/reduce_sum.hpp | 2 +- .../include/low_precision/relu.hpp | 4 +- .../include/low_precision/reshape.hpp | 4 +- .../low_precision/shuffle_channels.hpp | 4 +- .../include/low_precision/slice.hpp | 4 +- .../include/low_precision/space_to_batch.hpp | 4 +- .../include/low_precision/split.hpp | 9 +- .../include/low_precision/squeeze.hpp | 4 +- .../include/low_precision/strided_slice.hpp | 4 +- .../include/low_precision/subtract.hpp | 2 +- .../low_precision/transformation_context.hpp | 39 -------- .../transparent_base_transformation.hpp | 4 +- .../include/low_precision/transpose.hpp | 4 +- .../include/low_precision/unsqueeze.hpp | 4 +- 
.../weightable_layer_transformation.hpp | 13 +-- .../low_precision_transformations/src/add.cpp | 12 +-- .../src/assign_and_read_value.cpp | 12 +-- .../src/avg_pool.cpp | 12 +-- .../src/batch_to_space.cpp | 12 +-- .../src/broadcast.cpp | 6 +- .../src/clamp.cpp | 12 +-- .../src/cleanup_transformation.cpp | 2 +- .../src/concat.cpp | 10 +-- .../src/convert.cpp | 6 +- .../src/convolution.cpp | 10 +-- .../src/convolution_backprop_data.cpp | 14 +-- .../src/depth_to_space.cpp | 6 +- .../src/eliminate_fake_quantize.cpp | 10 +-- .../src/eltwise_base_transformation.cpp | 4 +- .../src/fake_quantize.cpp | 7 +- .../src/fake_quantize_decomposition.cpp | 6 +- .../src/fold_convert.cpp | 12 +-- .../src/fold_fake_quantize.cpp | 8 +- .../src/fuse_convert.cpp | 10 +-- .../src/fuse_elementwise_to_fake_quantize.cpp | 4 +- .../src/fuse_multiply_to_fake_quantize.cpp | 8 +- .../src/fuse_subtract_to_fake_quantize.cpp | 8 +- .../src/gather.cpp | 12 +-- .../src/group_convolution.cpp | 11 +-- .../src/interpolate.cpp | 12 +-- .../src/layer_transformation.cpp | 88 ++----------------- .../src/mat_mul.cpp | 12 +-- .../src/max_pool.cpp | 12 +-- .../src/move_fake_quantize.cpp | 12 +-- .../src/multiply.cpp | 10 +-- .../src/multiply_partial.cpp | 12 +-- .../src/multiply_to_group_convolution.cpp | 10 +-- .../low_precision_transformations/src/mvn.cpp | 12 +-- .../src/normalize_l2.cpp | 12 +-- .../low_precision_transformations/src/pad.cpp | 12 +-- .../src/prelu.cpp | 12 +-- .../src/recurrent_cell.cpp | 18 ++-- .../src/reduce_base_transformation.cpp | 8 +- .../src/reduce_max.cpp | 6 +- .../src/reduce_mean.cpp | 6 +- .../src/reduce_min.cpp | 6 +- .../src/reduce_sum.cpp | 6 +- .../src/relu.cpp | 12 +-- .../src/reshape.cpp | 12 +-- .../src/shuffle_channels.cpp | 12 +-- .../src/slice.cpp | 12 +-- .../src/space_to_batch.cpp | 12 +-- .../src/split.cpp | 14 ++- .../src/squeeze.cpp | 12 +-- .../src/strided_slice.cpp | 10 +-- .../src/subtract.cpp | 6 +- .../src/transformation_context.cpp | 18 ---- 
.../src/transparent_base_transformation.cpp | 8 +- .../src/transpose.cpp | 12 +-- .../src/unsqueeze.cpp | 12 +-- .../src/variadic_split.cpp | 2 +- .../src/weightable_layer_transformation.cpp | 11 +-- .../tests/layer_transformation.hpp | 1 - .../simple_low_precision_transformer.cpp | 1 - 116 files changed, 382 insertions(+), 575 deletions(-) delete mode 100644 src/common/low_precision_transformations/include/low_precision/transformation_context.hpp delete mode 100644 src/common/low_precision_transformations/src/transformation_context.cpp diff --git a/src/common/low_precision_transformations/include/low_precision/add.hpp b/src/common/low_precision_transformations/include/low_precision/add.hpp index 2c97087696d2f7..55efbf940e94b7 100644 --- a/src/common/low_precision_transformations/include/low_precision/add.hpp +++ b/src/common/low_precision_transformations/include/low_precision/add.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API AddTransformation : public EltwiseBaseTransformatio public: OPENVINO_RTTI("AddTransformation", "0", EltwiseBaseTransformation); AddTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp index edef4d63aa134a..9134293d5512dd 100644 --- a/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp +++ b/src/common/low_precision_transformations/include/low_precision/assign_and_read_value.hpp @@ -15,8 +15,8 @@ class LP_TRANSFORMATIONS_API AssignAndReadValueTransformation : public 
LayerTran public: OPENVINO_RTTI("AssignAndReadValueTransformation", "0", LayerTransformation); AssignAndReadValueTransformation(const std::shared_ptr model, const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; private: std::shared_ptr model; diff --git a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp index ac8b91aeb57504..7dfac41beffb06 100644 --- a/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/avg_pool.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API AvgPoolTransformation : public LayerTransformation public: OPENVINO_RTTI("AvgPoolTransformation", "0", LayerTransformation); AvgPoolTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp index 7859a29ec3a046..b729eb1fc956d3 100644 --- a/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/batch_to_space.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API BatchToSpaceTransformation : public LayerTransforma public: OPENVINO_RTTI("BatchToSpaceTransformation", "0", LayerTransformation); BatchToSpaceTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp index 05f7cadb88e888..75096e322a6571 100644 --- a/src/common/low_precision_transformations/include/low_precision/broadcast.hpp +++ b/src/common/low_precision_transformations/include/low_precision/broadcast.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API BroadcastTransformation : public TransparentBaseTra public: OPENVINO_RTTI("BroadcastTransformation", "0", TransparentBaseTransformation); BroadcastTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/clamp.hpp b/src/common/low_precision_transformations/include/low_precision/clamp.hpp index d79a6ad159e21b..c41d80939bca8f 100644 --- a/src/common/low_precision_transformations/include/low_precision/clamp.hpp +++ b/src/common/low_precision_transformations/include/low_precision/clamp.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API ClampTransformation : public LayerTransformation { 
public: OPENVINO_RTTI("ClampTransformation", "0", LayerTransformation); ClampTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp index 503c519ea60f22..52de352c0bb5d9 100644 --- a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp @@ -19,7 +19,7 @@ class LP_TRANSFORMATIONS_API CleanupTransformation : public LayerTransformation CleanupTransformation(const Params& params); virtual ~CleanupTransformation() = default; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; static bool canBeTransformedStatic( const std::shared_ptr& layer, const std::vector& defaultPrecisions = precision_set::get_int8_support()); diff --git a/src/common/low_precision_transformations/include/low_precision/concat.hpp b/src/common/low_precision_transformations/include/low_precision/concat.hpp index c082e30dfa1ecd..a4511ef0f7c099 100644 --- a/src/common/low_precision_transformations/include/low_precision/concat.hpp +++ b/src/common/low_precision_transformations/include/low_precision/concat.hpp @@ -31,9 +31,9 @@ class LP_TRANSFORMATIONS_API ConcatTransformation : public LayerTransformation { public: OPENVINO_RTTI("ConcatTransformation", "0", LayerTransformation); 
ConcatTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; static bool isQuantizedStatic(const std::shared_ptr& layer); }; diff --git a/src/common/low_precision_transformations/include/low_precision/convert.hpp b/src/common/low_precision_transformations/include/low_precision/convert.hpp index 7cbd79be03bb2b..edfb58076c9d20 100644 --- a/src/common/low_precision_transformations/include/low_precision/convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convert.hpp @@ -15,7 +15,7 @@ class LP_TRANSFORMATIONS_API ConvertTransformation : public LayerTransformation public: OPENVINO_RTTI("ConvertTransformation", "0", LayerTransformation); ConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/convolution.hpp b/src/common/low_precision_transformations/include/low_precision/convolution.hpp index 428a8adf00ca17..74a61817c15b18 100644 --- a/src/common/low_precision_transformations/include/low_precision/convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convolution.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API ConvolutionTransformation : public WeightableLayerT public: OPENVINO_RTTI("ConvolutionTransformation", "0", WeightableLayerTransformation); ConvolutionTransformation(const Params& params = Params()); - bool 
transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, const std::vector&defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp index 6221a75aca5fb2..9b1e2580e59193 100644 --- a/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp +++ b/src/common/low_precision_transformations/include/low_precision/convolution_backprop_data.hpp @@ -21,8 +21,8 @@ namespace low_precision { class LP_TRANSFORMATIONS_API ConvolutionBackpropDataTransformation : public WeightableLayerTransformation { public: ConvolutionBackpropDataTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isQuantized(const std::shared_ptr& layer, const std::vector&defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp index e86a2de2941b3c..1ace395ac8331d 100644 --- a/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp +++ b/src/common/low_precision_transformations/include/low_precision/depth_to_space.hpp @@ -22,7 +22,7 @@ class LP_TRANSFORMATIONS_API DepthToSpaceTransformation : public TransparentBase public: OPENVINO_RTTI("DepthToSpaceTransformation", 
"0", TransparentBaseTransformation); DepthToSpaceTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp index bfaa0c3b3a2b1b..190d146a741151 100644 --- a/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eliminate_fake_quantize.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API EliminateFakeQuantizeTransformation : public Cleanu public: OPENVINO_RTTI("EliminateFakeQuantizeTransformation", "0", CleanupTransformation); EliminateFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp index 5d3361e7283eb9..9c3c5d1c3b2a5d 100644 --- a/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/eltwise_base_transformation.hpp @@ -19,7 +19,7 @@ namespace low_precision { class LP_TRANSFORMATIONS_API EltwiseBaseTransformation : public LayerTransformation { public: 
EltwiseBaseTransformation(const Params& params) : LayerTransformation(params) {} - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; static bool isBroadcasted(const PartialShape& shape); diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp index 640814dc15cabb..8f5c67dbc0bcc4 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fake_quantize.hpp @@ -23,16 +23,15 @@ class LP_TRANSFORMATIONS_API FakeQuantizeTransformation : public LayerTransforma public: OPENVINO_RTTI("FakeQuantizeTransformation", "0", LayerTransformation); FakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; static bool checkElementwise(const std::shared_ptr& eltwise); static std::shared_ptr fuseElementwise( - TransformationContext& context, - MatcherPass* matcherPass, - const std::shared_ptr& fakeQuantize, - const bool updatePrecisions); + MatcherPass* matcherPass, + const std::shared_ptr& fakeQuantize, + const bool updatePrecisions); }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp index 4d2ee8d88fadaf..8289a9ea5493f7 100644 --- a/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/fake_quantize_decomposition.hpp @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API FakeQuantizeDecompositionTransformation : public La public: OPENVINO_RTTI("FakeQuantizeDecompositionTransformation", "0", LayerTransformation); FakeQuantizeDecompositionTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp index bc5342b5cca4f1..d0d864835c8f98 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fold_convert.hpp @@ -25,8 +25,8 @@ class LP_TRANSFORMATIONS_API FoldConvertTransformation : public CleanupTransform public: OPENVINO_RTTI("FoldConvertTransformation", "0", CleanupTransformation); FoldConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp index c47c39a78ef081..b345ce5edbd80a 100644 --- a/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp +++ 
b/src/common/low_precision_transformations/include/low_precision/fold_fake_quantize.hpp @@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API FoldFakeQuantizeTransformation : public LayerTransf public: OPENVINO_RTTI("FoldFakeQuantizeTransformation", "0", LayerTransformation); FoldFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; bool isConstantOutput(std::shared_ptr op) const; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp index 0ff0dc60821486..06d252961e2c26 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_convert.hpp @@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API FuseConvertTransformation : public CleanupTransform public: OPENVINO_RTTI("FuseConvertTransformation", "0", CleanupTransformation); FuseConvertTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp 
b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp index ab1a589845aa10..13b73a1112f4c5 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp @@ -21,7 +21,7 @@ class LP_TRANSFORMATIONS_API FuseElementwiseToFakeQuantizeTransformation : publi FuseElementwiseToFakeQuantizeTransformation(const Params& params); virtual ~FuseElementwiseToFakeQuantizeTransformation() = default; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp index 67471a56a4a6b8..1933a07bbb881b 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_multiply_to_fake_quantize.hpp @@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseMultiplyToFakeQuantizeTransformation : public F public: OPENVINO_RTTI("FuseMultiplyToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseMultiplyToFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp index 
c5dd8994e2a512..644aafb740d8ff 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_subtract_to_fake_quantize.hpp @@ -24,7 +24,7 @@ class LP_TRANSFORMATIONS_API FuseSubtractToFakeQuantizeTransformation : public F public: OPENVINO_RTTI("FuseSubtractToFakeQuantizeTransformation", "0", FuseElementwiseToFakeQuantizeTransformation); FuseSubtractToFakeQuantizeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp index 6aebd3fb094e0a..980ec8f1e9b992 100644 --- a/src/common/low_precision_transformations/include/low_precision/gather.hpp +++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp @@ -15,9 +15,9 @@ class LP_TRANSFORMATIONS_API GatherTransformation : public LayerTransformation { public: OPENVINO_RTTI("GatherTransformation", "0", LayerTransformation); GatherTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp index 6551a929339830..f1e0bb44bddad8 
100644 --- a/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/group_convolution.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API GroupConvolutionTransformation : public Convolution public: OPENVINO_RTTI("GroupConvolutionTransformation", "0", ConvolutionTransformation); GroupConvolutionTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isQuantized(const std::shared_ptr& layer, const std::vector& defaultPrecisions) const override; static bool isQuantizedStatic(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp index 634d422dc2b09b..d715a24cc73e5d 100644 --- a/src/common/low_precision_transformations/include/low_precision/interpolate.hpp +++ b/src/common/low_precision_transformations/include/low_precision/interpolate.hpp @@ -22,9 +22,9 @@ class LP_TRANSFORMATIONS_API InterpolateTransformation : public LayerTransformat public: OPENVINO_RTTI("InterpolateTransformation", "0", LayerTransformation); InterpolateTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index 
c675ade19b516b..b3c7aaa16ea33a 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -12,27 +12,15 @@ #include #include "openvino/pass/matcher_pass.hpp" -#include "transformation_context.hpp" #include "quantization_details.hpp" #include "low_precision/common/ie_lpt_exception.hpp" #include "common/fake_quantize_dequantization.hpp" /***************************************************** * Debug capability - * - ORIGINAL_MODEL_PATH : Specify with existing folder name - * to serialize original model into it (XML & BIN extensions were added) - * - TRANSFORMED_MODEL_PATH : Specify with existing folder name - * to serialize original model into it (XML & BIN extensions were added) - * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable - * dequantization layers printing - * - LPT_DISPLAY_PRECISION : Define it to to display precision info - * during low precision transformations - * + * - LPT_PRINT_DEQUANTIZATION_INFO : Define it to enable dequantization info printing: scales, shifts, etc. 
*****************************************************/ -// #define LPT_ORIGINAL_MODEL_PATH "/localdisk/orig.model" -// #define LPT_TRANSFORMED_MODEL_PATH "/localdisk/transformed.model" // #define LPT_PRINT_DEQUANTIZATION_INFO -// #define LPT_DISPLAY_PRECISION namespace ov { namespace pass { @@ -301,15 +289,9 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass LayerTransformation(const Params& params); virtual ~LayerTransformation() = default; - virtual bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) = 0; + virtual bool transform(ov::pass::pattern::Matcher &m) = 0; - void setContext(TransformationContext* context) noexcept; - - void setUpdatePrecisions(const bool updatePrecisions); - - void setDefaultPrecisions(const std::vector& defaultPrecisions); - - virtual bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const; + virtual bool canBeTransformed(const std::shared_ptr& layer) const; static bool canBeTransformedStatic(const std::shared_ptr& layer, const std::vector& defaultPrecisions = precision_set::get_int8_support()); @@ -352,59 +334,32 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass const std::vector& dequantizationShifts); #endif - bool updatePrecisions; - element::Type deqPrecision; - std::vector defaultPrecisions; - bool reshapeIgnorePerTensorQuantizationCheck; - bool scalingMode; + const bool updatePrecisions; + const element::Type deqPrecision; + const std::vector defaultPrecisions; + const bool reshapeIgnorePerTensorQuantizationCheck; + const bool scalingMode; static constexpr char originalLayerPostfix[] = "_original"; - TransformationContext* context; protected: std::shared_ptr moveDequantizationAfter( - TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool updateOutputPrecision = true, const bool moveSubtract = true) const; std::shared_ptr 
moveDequantizationBefore( - TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool moveSubtract = true) const; - bool updateOutput( - TransformationContext &context, - std::shared_ptr lastNode, - std::shared_ptr originalNode) const; - - void updateOutput( - TransformationContext& context, - std::shared_ptr lastNode, - std::string originalName) const; - - void addPattern(ov::pass::GraphRewrite& pass, TransformationContext& context, std::shared_ptr patternRoot); - - //TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations - bool canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const; + bool updateOutput(const std::shared_ptr& lastNode, const std::shared_ptr& originalNode) const; - template - void addSingleNodePattern(ov::pass::GraphRewrite& pass, TransformationContext& context) const { - using namespace ov; - - auto is_op_type = [](std::shared_ptr n) { - return !!as_type_ptr(n); - }; - auto p_node = std::make_shared(element::f32, Shape{}, is_op_type); - - addPattern(pass, context, p_node); - } + // TODO: replace with canBeTransformed when quantization by special dimension is supported for all transformations + bool canBeTransformedSpatialDimension(const std::shared_ptr& layer) const; }; -typedef std::shared_ptr LayerTransformationPtr; - } // namespace low_precision } // namespace pass } // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp index 0b6115e9345b0e..910154fe0e16e0 100644 --- a/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mat_mul.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API MatMulTransformation : public LayerTransformation { public: 
OPENVINO_RTTI("MatMulTransformation", "0", LayerTransformation); MatMulTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp index 7d499c9ec254f3..f6307ed69cbfbe 100644 --- a/src/common/low_precision_transformations/include/low_precision/max_pool.hpp +++ b/src/common/low_precision_transformations/include/low_precision/max_pool.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API MaxPoolTransformation : public LayerTransformation public: OPENVINO_RTTI("MaxPoolTransformation", "0", LayerTransformation); MaxPoolTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp index 628c88b38992e4..96a344cc4620fe 100644 --- a/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/move_fake_quantize.hpp @@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API 
MoveFakeQuantize : public LayerTransformation { public: OPENVINO_RTTI("MoveFakeQuantize", "0", LayerTransformation); MoveFakeQuantize(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply.hpp b/src/common/low_precision_transformations/include/low_precision/multiply.hpp index fd51b8cac07f35..5658a5bf71bedf 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API MultiplyTransformation : public WeightableLayerTran public: OPENVINO_RTTI("MultiplyTransformation", "0", WeightableLayerTransformation); MultiplyTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; protected: size_t getInputChannels(const std::shared_ptr op) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp index 3bee03cfb1a265..7f05baeaf3b12e 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_partial.hpp @@ -23,8 +23,8 @@ class LP_TRANSFORMATIONS_API MultiplyPartialTransformation : public EltwiseBaseT public: OPENVINO_RTTI("MultiplyPartialTransformation", "0", EltwiseBaseTransformation); 
MultiplyPartialTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp index 45252777252fc6..3d6fc228331b13 100644 --- a/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp +++ b/src/common/low_precision_transformations/include/low_precision/multiply_to_group_convolution.hpp @@ -27,8 +27,8 @@ class LP_TRANSFORMATIONS_API MultiplyToGroupConvolutionTransformation : public C const Params& params = Params(), const PrecisionsRestriction::PrecisionsByPorts& restrictions = {}); ~MultiplyToGroupConvolutionTransformation() override {} - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; bool isQuantized(const std::shared_ptr& layer, const std::vector& defaultPrecisions) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/mvn.hpp b/src/common/low_precision_transformations/include/low_precision/mvn.hpp index cd73075ad5740b..061cca9917c43f 100644 --- a/src/common/low_precision_transformations/include/low_precision/mvn.hpp +++ b/src/common/low_precision_transformations/include/low_precision/mvn.hpp @@ -22,8 +22,8 @@ 
class LP_TRANSFORMATIONS_API MVNTransformation : public LayerTransformation { public: OPENVINO_RTTI("MVNTransformation", "0", LayerTransformation); MVNTransformation(const Params& params = Params()); - bool transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp index 40f2973b0701df..d4a3ba6d429044 100644 --- a/src/common/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/src/common/low_precision_transformations/include/low_precision/network_helper.hpp @@ -16,7 +16,6 @@ #include "rt_info/precisions_attribute.hpp" #include "rt_info/quantization_granularity_attribute.hpp" #include "rt_info/intervals_alignment_attribute.hpp" -#include "transformation_context.hpp" #include "quantization_details.hpp" #include "transformations/utils/utils.hpp" #include "common/fake_quantize_dequantization.hpp" diff --git a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp index c2777ca0652a07..8d16867982e5fe 100644 --- a/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp +++ b/src/common/low_precision_transformations/include/low_precision/normalize_l2.hpp @@ -22,8 +22,8 @@ class LP_TRANSFORMATIONS_API NormalizeL2Transformation : public LayerTransformat public: OPENVINO_RTTI("NormalizeL2Transformation", "0", LayerTransformation); NormalizeL2Transformation(const Params& params = Params()); - bool 
transform(TransformationContext &context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/pad.hpp b/src/common/low_precision_transformations/include/low_precision/pad.hpp index 49012e19a604e8..595d7b02dbd77e 100644 --- a/src/common/low_precision_transformations/include/low_precision/pad.hpp +++ b/src/common/low_precision_transformations/include/low_precision/pad.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API PadTransformation : public LayerTransformation { public: OPENVINO_RTTI("PadTransformation", "0", LayerTransformation); PadTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/prelu.hpp b/src/common/low_precision_transformations/include/low_precision/prelu.hpp index df64677b861dbb..12af2f536b28f2 100644 --- a/src/common/low_precision_transformations/include/low_precision/prelu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/prelu.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API PReluTransformation : public LayerTransformation { public: OPENVINO_RTTI("PReluTransformation", "0", LayerTransformation); PReluTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) 
override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp index fc0401b08dd74e..9cb8ed91c4b70b 100644 --- a/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp +++ b/src/common/low_precision_transformations/include/low_precision/recurrent_cell.hpp @@ -16,8 +16,8 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform public: OPENVINO_RTTI("RecurrentCellTransformation", "0", LayerTransformation); RecurrentCellTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; void propagateSkipCleanupAttribute(std::shared_ptr dequantization_multiply); static std::shared_ptr wrap_fake_quantize(const std::shared_ptr parameter); @@ -25,7 +25,7 @@ class LP_TRANSFORMATIONS_API RecurrentCellTransformation : public LayerTransform static std::shared_ptr wrap_dequantization(const std::shared_ptr parameter, const bool with_subtract); private: - void propagate(TransformationContext& context, const std::shared_ptr node); + void propagate(const std::shared_ptr node); }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp 
b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp index 4a42edd60d80c8..c91a8364f71c08 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_base_transformation.hpp @@ -22,8 +22,8 @@ namespace low_precision { class LP_TRANSFORMATIONS_API ReduceBaseTransformation : public LayerTransformation { public: ReduceBaseTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: virtual void changeDequantizationValues( diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp index 33f685ba8ca74c..f4e824a43fdec7 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_max.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMaxTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceMaxTransformation", "0", ReduceBaseTransformation); ReduceMaxTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp 
b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp index c5f9d7d0a5e239..4a689cc4007317 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_mean.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMeanTransformation : public ReduceBaseTransfo OPENVINO_RTTI("ReduceMeanTransformation", "0", ReduceBaseTransformation); ReduceMeanTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp index a229b441b8b6da..f41630989de361 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_min.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceMinTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceMinTransformation", "0", ReduceBaseTransformation); ReduceMinTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: bool getUpdatePrecision(const std::shared_ptr& reduce) const override; diff --git a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp index 
aba35e0f793c83..0efd79b2472624 100644 --- a/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reduce_sum.hpp @@ -27,7 +27,7 @@ class LP_TRANSFORMATIONS_API ReduceSumTransformation : public ReduceBaseTransfor OPENVINO_RTTI("ReduceSumTransformation", "0", ReduceBaseTransformation); ReduceSumTransformation(const Params& params = Params()); bool isPrecisionPreserved(std::shared_ptr reduce) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const override; + bool canBeTransformed(const std::shared_ptr& reduce) const override; protected: void changeDequantizationValues( diff --git a/src/common/low_precision_transformations/include/low_precision/relu.hpp b/src/common/low_precision_transformations/include/low_precision/relu.hpp index 936d4f3f8fc9a8..cc92ea72c40f49 100644 --- a/src/common/low_precision_transformations/include/low_precision/relu.hpp +++ b/src/common/low_precision_transformations/include/low_precision/relu.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API ReluTransformation : public LayerTransformation { public: OPENVINO_RTTI("ReluTransformation", "0", LayerTransformation); ReluTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/reshape.hpp b/src/common/low_precision_transformations/include/low_precision/reshape.hpp index 43858e67cce21a..b9857f6928a6aa 100644 --- 
a/src/common/low_precision_transformations/include/low_precision/reshape.hpp +++ b/src/common/low_precision_transformations/include/low_precision/reshape.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API ReshapeTransformation : public LayerTransformation public: OPENVINO_RTTI("ReshapeTransformation", "0", LayerTransformation); ReshapeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; static bool canBeTransformed( const ov::Shape& subtractShape, diff --git a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp index 12a60b128d707d..999c052a3108ad 100644 --- a/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp +++ b/src/common/low_precision_transformations/include/low_precision/shuffle_channels.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API ShuffleChannelsTransformation : public LayerTransfo public: OPENVINO_RTTI("ShuffleChannelsTransformation", "0", LayerTransformation); ShuffleChannelsTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; + bool transform(ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/slice.hpp 
b/src/common/low_precision_transformations/include/low_precision/slice.hpp index c00028f0d71169..6b73536cf06c98 100644 --- a/src/common/low_precision_transformations/include/low_precision/slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/slice.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API SliceTransformation : public LayerTransformation { public: OPENVINO_RTTI("SliceTransformation", "0", LayerTransformation); SliceTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp index e05353aaf24d1c..48cda7b4c305fb 100644 --- a/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp +++ b/src/common/low_precision_transformations/include/low_precision/space_to_batch.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API SpaceToBatchTransformation : public LayerTransforma public: OPENVINO_RTTI("SpaceToBatchTransformation", "0", LayerTransformation); SpaceToBatchTransformation(const Params& params = Params()); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git 
a/src/common/low_precision_transformations/include/low_precision/split.hpp b/src/common/low_precision_transformations/include/low_precision/split.hpp index bd3294fba7e691..bb48628a832372 100644 --- a/src/common/low_precision_transformations/include/low_precision/split.hpp +++ b/src/common/low_precision_transformations/include/low_precision/split.hpp @@ -25,13 +25,10 @@ class LP_TRANSFORMATIONS_API SplitTransformation : public LayerTransformation { public: OPENVINO_RTTI("SplitTransformation", "0", LayerTransformation); SplitTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; + bool transform(ov::pass::pattern::Matcher& m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; - void updateOutputs( - TransformationContext& context, - std::vector> lastNodes, - std::shared_ptr originalNode) const; + bool canBeTransformed(const std::shared_ptr& layer) const override; + void updateOutputs(std::vector> lastNodes, std::shared_ptr originalNode) const; }; } // namespace low_precision } // namespace pass diff --git a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp index b19a676c6de1fd..599b9e2f2eadb3 100644 --- a/src/common/low_precision_transformations/include/low_precision/squeeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/squeeze.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API SqueezeTransformation : public LayerTransformation public: OPENVINO_RTTI("SqueezeTransformation", "0", LayerTransformation); SqueezeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool 
isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp index 5081903c751dfb..a5bbaf983e0b07 100644 --- a/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp +++ b/src/common/low_precision_transformations/include/low_precision/strided_slice.hpp @@ -24,8 +24,8 @@ class LP_TRANSFORMATIONS_API StridedSliceTransformation : public LayerTransforma public: OPENVINO_RTTI("StridedSliceTransformation", "0", LayerTransformation); StridedSliceTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher& m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool transform(ov::pass::pattern::Matcher& m) override; + bool canBeTransformed(const std::shared_ptr& op) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/subtract.hpp b/src/common/low_precision_transformations/include/low_precision/subtract.hpp index a7c6bfbe888ca7..c020480c1e314d 100644 --- a/src/common/low_precision_transformations/include/low_precision/subtract.hpp +++ b/src/common/low_precision_transformations/include/low_precision/subtract.hpp @@ -23,7 +23,7 @@ class LP_TRANSFORMATIONS_API SubtractTransformation : public LayerTransformation public: OPENVINO_RTTI("SubtractTransformation", "0", LayerTransformation); SubtractTransformation(const Params& params); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool 
transform(ov::pass::pattern::Matcher &m) override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp b/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp deleted file mode 100644 index ec46224f8d88ae..00000000000000 --- a/src/common/low_precision_transformations/include/low_precision/transformation_context.hpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include -#include -#include "low_precision/quantization_details.hpp" - -namespace ov { -namespace pass { -namespace low_precision { - -/** - * @ingroup ov_transformation_common_api - * @brief TransformationContext instance is used to pass model transformation context data between transformations. - */ -class LP_TRANSFORMATIONS_API TransformationContext { -public: - TransformationContext(); - explicit TransformationContext(std::shared_ptr model); - std::shared_ptr model; - - // Used to store handled FakeQuantize operations. - // ConcatTransformation and FakeQuantizeTransformation handle FakeQuantize operations. ConcatTransformation handles FakeQuantize operation first. - // If updatePrecision transformation option is set to False then there are no FakeQuantize operation attributes to identify that the operation - // have been handled by ConcatTransformation already: - // - output precision is original (FP32), - // - intervals are changed but not equal to precision boundaries, - // - quantization level can be or can be not changed. - // To avoid FakeQuantize operation double handling by FakeQuantizeTransformation after ConcatTransformation, FakeQuantizeTransformation - // has to use this member. 
- std::unordered_set quantizedFakeQuantizeNames; -}; - -} // namespace low_precision -} // namespace pass -} // namespace ov diff --git a/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp index 792c749f525b53..c0350fe186942a 100644 --- a/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/transparent_base_transformation.hpp @@ -20,8 +20,8 @@ class LP_TRANSFORMATIONS_API TransparentBaseTransformation : public LayerTransfo public: TransparentBaseTransformation(const Params& params) : LayerTransformation(params) {} ~TransparentBaseTransformation() override {}; - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool transform(ov::pass::pattern::Matcher &m) override; + bool canBeTransformed(const std::shared_ptr& layer) const override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; }; diff --git a/src/common/low_precision_transformations/include/low_precision/transpose.hpp b/src/common/low_precision_transformations/include/low_precision/transpose.hpp index f2cedb31e2e5a7..a2fc1d4fa58598 100644 --- a/src/common/low_precision_transformations/include/low_precision/transpose.hpp +++ b/src/common/low_precision_transformations/include/low_precision/transpose.hpp @@ -24,9 +24,9 @@ class LP_TRANSFORMATIONS_API TransposeTransformation : public LayerTransformatio public: OPENVINO_RTTI("TransposeTransformation", "0", LayerTransformation); TransposeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool 
isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr op) const override; + bool canBeTransformed(const std::shared_ptr& op) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp index 98152eaf919524..41728d1acf289e 100644 --- a/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp +++ b/src/common/low_precision_transformations/include/low_precision/unsqueeze.hpp @@ -23,9 +23,9 @@ class LP_TRANSFORMATIONS_API UnsqueezeTransformation : public LayerTransformatio public: OPENVINO_RTTI("UnsqueezeTransformation", "0", LayerTransformation); UnsqueezeTransformation(const Params& params = Params()); - bool transform(TransformationContext& context, ov::pass::pattern::Matcher &m) override; + bool transform(ov::pass::pattern::Matcher &m) override; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; + bool canBeTransformed(const std::shared_ptr& layer) const override; }; } // namespace low_precision diff --git a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp index dfb75067ff426b..7b5c5b782d9a65 100644 --- a/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/weightable_layer_transformation.hpp @@ -5,7 +5,6 @@ #pragma once #include -#include "transformation_context.hpp" #include "layer_transformation.hpp" #include "openvino/opsets/opset1.hpp" @@ -42,17 +41,11 @@ class LP_TRANSFORMATIONS_API WeightableLayerTransformation 
: public LayerTransfo WeightableLayerTransformation(const Params& params, const CanBeTransformedParams& canBeTransformedParams = {}); - bool canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const override; - bool canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, - const std::vector& defaultPrecisions) const; + bool canBeTransformed(const std::shared_ptr& layer) const override; + bool canConvolutionBeTransformed(const std::shared_ptr& layer, + const ov::element::TypeVector& defaultPrecisions) const; bool isPrecisionPreserved(std::shared_ptr layer) const noexcept override; - static bool checkPrecisionOnActivation( - const std::shared_ptr& node, - const std::vector& supportedPrecisionsOnActivations) { - return true; - } - static bool isQuantizedStatic(const std::shared_ptr& layer, const bool reshapeIsRequired, const std::vector& defaultPrecisions = precision_set::get_int8_support()); diff --git a/src/common/low_precision_transformations/src/add.cpp b/src/common/low_precision_transformations/src/add.cpp index b895d3325377de..e55577fda4ce3a 100644 --- a/src/common/low_precision_transformations/src/add.cpp +++ b/src/common/low_precision_transformations/src/add.cpp @@ -95,16 +95,16 @@ AddTransformation::AddTransformation(const Params& params) : EltwiseBaseTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool AddTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool AddTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr op = ov::as_type_ptr(m.get_match_root()); - if ((op == nullptr) || (!canBeTransformed(context, op))) { + if ((op == nullptr) || (!canBeTransformed(op))) { return false; } @@ -229,7 +229,7 @@ bool AddTransformation::transform(TransformationContext& context, 
ov::pass::patt ov::copy_runtime_info({ add, newMultiply }, newMultiply); } - updateOutput(context, newMultiply, newAddOrSubtract); + updateOutput(newMultiply, newAddOrSubtract); if (fullPathIndex != -1) { std::shared_ptr node = add; @@ -240,7 +240,7 @@ bool AddTransformation::transform(TransformationContext& context, ov::pass::patt return true; } -bool AddTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool AddTransformation::canBeTransformed(const std::shared_ptr& layer) const { const FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); if (dequantization1.multiplyHasZeroOrDenormal()) { return false; @@ -251,7 +251,7 @@ bool AddTransformation::canBeTransformed(const TransformationContext& context, s return false; } - return EltwiseBaseTransformation::canBeTransformed(context, layer); + return EltwiseBaseTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/assign_and_read_value.cpp b/src/common/low_precision_transformations/src/assign_and_read_value.cpp index e65e35890c0600..0b3f775c57ad22 100644 --- a/src/common/low_precision_transformations/src/assign_and_read_value.cpp +++ b/src/common/low_precision_transformations/src/assign_and_read_value.cpp @@ -32,15 +32,15 @@ AssignAndReadValueTransformation::AssignAndReadValueTransformation(const std::sh if (transformation_callback(assign)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(assign_m, matcher_name); this->register_matcher(m, callback); } -bool AssignAndReadValueTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool AssignAndReadValueTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } 
@@ -90,13 +90,13 @@ bool AssignAndReadValueTransformation::transform(TransformationContext& context, return true; } - FakeQuantizeTransformation::fuseElementwise(context, this, fakeQuantize, updatePrecisions); + FakeQuantizeTransformation::fuseElementwise(this, fakeQuantize, updatePrecisions); return true; } -bool AssignAndReadValueTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool AssignAndReadValueTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/avg_pool.cpp b/src/common/low_precision_transformations/src/avg_pool.cpp index c9bfa67cfc1cfb..7a38834efbdb0d 100644 --- a/src/common/low_precision_transformations/src/avg_pool.cpp +++ b/src/common/low_precision_transformations/src/avg_pool.cpp @@ -27,28 +27,28 @@ AvgPoolTransformation::AvgPoolTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool AvgPoolTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool AvgPoolTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr pooling = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); const bool updatePrecision = isPrecisionPreserved(pooling); - const auto newOperation = moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions), updatePrecision); + const auto newOperation = moveDequantizationAfter(pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions), 
updatePrecision); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool AvgPoolTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool AvgPoolTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/batch_to_space.cpp b/src/common/low_precision_transformations/src/batch_to_space.cpp index 6745227e848f7b..b231c7110d3d29 100644 --- a/src/common/low_precision_transformations/src/batch_to_space.cpp +++ b/src/common/low_precision_transformations/src/batch_to_space.cpp @@ -26,15 +26,15 @@ BatchToSpaceTransformation::BatchToSpaceTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool BatchToSpaceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool BatchToSpaceTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -46,13 +46,13 @@ bool BatchToSpaceTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool BatchToSpaceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool BatchToSpaceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = 
moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/broadcast.cpp b/src/common/low_precision_transformations/src/broadcast.cpp index 5e78ca0ef50996..e59d9de3c3e5d9 100644 --- a/src/common/low_precision_transformations/src/broadcast.cpp +++ b/src/common/low_precision_transformations/src/broadcast.cpp @@ -35,15 +35,15 @@ BroadcastTransformation::BroadcastTransformation(const Params& params) : Transpa if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool BroadcastTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool BroadcastTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/clamp.cpp b/src/common/low_precision_transformations/src/clamp.cpp index 89150e81470bce..440cee10adc3a3 100644 --- a/src/common/low_precision_transformations/src/clamp.cpp +++ b/src/common/low_precision_transformations/src/clamp.cpp @@ -24,15 +24,15 @@ ClampTransformation::ClampTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ClampTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool 
ClampTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -45,7 +45,7 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa return false; } - const auto newClamp = ov::as_type_ptr(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract)); + const auto newClamp = ov::as_type_ptr(moveDequantizationAfter(clamp, dequantization, false, moveSubtract)); std::shared_ptr replacement; { @@ -82,8 +82,8 @@ bool ClampTransformation::transform(TransformationContext& context, ov::pass::pa return true; } -bool ClampTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ClampTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/cleanup_transformation.cpp b/src/common/low_precision_transformations/src/cleanup_transformation.cpp index 3a7cb0da5d5c36..e3c363818013b5 100644 --- a/src/common/low_precision_transformations/src/cleanup_transformation.cpp +++ b/src/common/low_precision_transformations/src/cleanup_transformation.cpp @@ -13,7 +13,7 @@ namespace low_precision { CleanupTransformation::CleanupTransformation(const Params& params) : LayerTransformation(params) { } -bool CleanupTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool CleanupTransformation::canBeTransformed(const std::shared_ptr& layer) const { return canBeTransformedStatic(layer); } diff --git a/src/common/low_precision_transformations/src/concat.cpp b/src/common/low_precision_transformations/src/concat.cpp index 05b1aa940c9191..fe39ed8d4f65b2 100644 --- a/src/common/low_precision_transformations/src/concat.cpp +++ b/src/common/low_precision_transformations/src/concat.cpp @@ -32,15 +32,15 @@ 
ConcatTransformation::ConcatTransformation(const Params& params) : LayerTransfor return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ConcatTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -193,7 +193,7 @@ bool ConcatTransformation::transform(TransformationContext& context, ov::pass::p NetworkHelper::insertDequantizationAfter(concat, lastDequantization, newConcat); NetworkHelper::copyInfo(concat, newConcat); - updateOutput(context, lastDequantization, newConcat); + updateOutput(lastDequantization, newConcat); OPENVINO_DEBUG("LPT: done: ", newConcat); return true; @@ -203,7 +203,7 @@ bool ConcatTransformation::isPrecisionPreserved(std::shared_ptr) const noe return true; } -bool ConcatTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) const { std::shared_ptr concat = ov::as_type_ptr(layer); if (concat == nullptr) { return false; diff --git a/src/common/low_precision_transformations/src/convert.cpp b/src/common/low_precision_transformations/src/convert.cpp index 4b773fc67c52c1..f1a7ae83e1dd73 100644 --- a/src/common/low_precision_transformations/src/convert.cpp +++ b/src/common/low_precision_transformations/src/convert.cpp @@ -31,20 +31,20 @@ ConvertTransformation::ConvertTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ConvertTransformation::transform(TransformationContext& context, 
ov::pass::pattern::Matcher &m) { +bool ConvertTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr convert = ov::as_type_ptr(m.get_match_root()); if (!convert) { return false; } - if (!canBeTransformed(context, convert)) { + if (!canBeTransformed(convert)) { return false; } diff --git a/src/common/low_precision_transformations/src/convolution.cpp b/src/common/low_precision_transformations/src/convolution.cpp index ebf75e450f6384..ab9ed1e133d4b3 100644 --- a/src/common/low_precision_transformations/src/convolution.cpp +++ b/src/common/low_precision_transformations/src/convolution.cpp @@ -39,7 +39,7 @@ ConvolutionTransformation::ConvolutionTransformation(const Params& params) : Wei if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -62,10 +62,10 @@ size_t ConvolutionTransformation::getInputChannels(const std::shared_ptrget_input_node_shared_ptr(1); const auto reshapeFromWeights = ov::as_type_ptr(weightInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? 
@@ -97,7 +97,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ov::pa auto newFQ = std::get<1>(res_tuple); auto dequantize = std::get<2>(res_tuple); if (newFQ != nullptr && dequantize != nullptr) - updateOutput(context, dequantize, newFQ); + updateOutput(dequantize, newFQ); if (updatePrecisions && !fqOnWeightsWasDecomposed) { return false; @@ -338,7 +338,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ov::pa const auto finalDequantization = NetworkHelper::optimizeMultipliesAfter(newMultiplyAfter); ov::copy_runtime_info({ convolution, finalDequantization }, finalDequantization); - updateOutput(context, finalDequantization, convolution); + updateOutput(finalDequantization, convolution); const auto onActiviation = convolution->get_input_node_shared_ptr(0); if (ov::is_type(onActiviation)) { diff --git a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp index 25abd4061ca4d3..5017abd3486071 100644 --- a/src/common/low_precision_transformations/src/convolution_backprop_data.cpp +++ b/src/common/low_precision_transformations/src/convolution_backprop_data.cpp @@ -51,7 +51,7 @@ ConvolutionBackpropDataTransformation::ConvolutionBackpropDataTransformation(con if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -74,10 +74,10 @@ size_t ConvolutionBackpropDataTransformation::getInputChannels(const std::shared return channels.get_length(); } -bool ConvolutionBackpropDataTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool ConvolutionBackpropDataTransformation::transform(ov::pass::pattern::Matcher &m) { auto convolutionBackpropData = m.get_match_root(); - if (!canBeTransformed(context, convolutionBackpropData)) { + if (!canBeTransformed(convolutionBackpropData)) { auto 
weightsInput = convolutionBackpropData->get_input_node_shared_ptr(1); std::shared_ptr reshapeFromWeights = ov::as_type_ptr(weightsInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? @@ -149,7 +149,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con auto newFQ = std::get<1>(res_tuple); auto dequantize = std::get<2>(res_tuple); if (newFQ != nullptr && dequantize != nullptr) - updateOutput(context, dequantize, newFQ); + updateOutput(dequantize, newFQ); dequantization = NetworkHelper::getDequantization(convolutionBackpropData, defaultPrecisions, 1ul); @@ -225,7 +225,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con const auto finalDequantization = NetworkHelper::optimizeMultipliesAfter(newMultiplyAfter); ov::copy_runtime_info({ convolutionBackpropData, finalDequantization }, finalDequantization); - updateOutput(context, finalDequantization, convolutionBackpropData); + updateOutput(finalDequantization, convolutionBackpropData); const auto onActiviation = convolutionBackpropData->get_input_node_shared_ptr(0); if (ov::is_type(onActiviation)) { @@ -245,8 +245,8 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con return true; } -bool ConvolutionBackpropDataTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - return canConvolutionBeTransformed(context, op, defaultPrecisions); +bool ConvolutionBackpropDataTransformation::canBeTransformed(const std::shared_ptr& op) const { + return canConvolutionBeTransformed(op, defaultPrecisions); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/depth_to_space.cpp b/src/common/low_precision_transformations/src/depth_to_space.cpp index eb518d62202840..941ac308c0b5b9 100644 --- a/src/common/low_precision_transformations/src/depth_to_space.cpp +++ b/src/common/low_precision_transformations/src/depth_to_space.cpp @@ -20,15 
+20,15 @@ DepthToSpaceTransformation::DepthToSpaceTransformation(const Params& params) : T if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool DepthToSpaceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool DepthToSpaceTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp index 1a09d9914de3bf..88e544aa238714 100644 --- a/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/eliminate_fake_quantize.cpp @@ -30,16 +30,16 @@ EliminateFakeQuantizeTransformation::EliminateFakeQuantizeTransformation(const P if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; const auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool EliminateFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool EliminateFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher& m) { const auto root = m.get_match_root(); - if (!canBeTransformed(context, root)) { + if (!canBeTransformed(root)) { return false; } @@ -115,8 +115,8 @@ bool check_intervals(const std::shared_ptr& fakeQuanti } } // namespace -bool EliminateFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool EliminateFakeQuantizeTransformation::canBeTransformed(const 
std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp b/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp index f7cf7033543b40..f5594fc9fcf8cb 100644 --- a/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/eltwise_base_transformation.cpp @@ -33,8 +33,8 @@ bool EltwiseBaseTransformation::isBroadcasted(const PartialShape& shape) { return true; } -bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool EltwiseBaseTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/fake_quantize.cpp b/src/common/low_precision_transformations/src/fake_quantize.cpp index 8f3f8835ece8b0..4bfb24a57abd65 100644 --- a/src/common/low_precision_transformations/src/fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize.cpp @@ -28,14 +28,14 @@ FakeQuantizeTransformation::FakeQuantizeTransformation(const Params& params) : L return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto layer = ov::as_type_ptr(m.get_match_root()); if (!layer || !QuantizationDetails::outputLayoutIsSupported(layer)) { return false; @@ -44,7 +44,7 @@ bool FakeQuantizeTransformation::transform(TransformationContext& context, ov::p bool wasHandled = false; 
std::shared_ptr fakeQuantize = layer; do { - fakeQuantize = fuseElementwise(context, this, fakeQuantize, updatePrecisions); + fakeQuantize = fuseElementwise(this, fakeQuantize, updatePrecisions); wasHandled = wasHandled || (fakeQuantize != nullptr); } while (fakeQuantize != nullptr); @@ -158,7 +158,6 @@ bool FakeQuantizeTransformation::checkElementwise(const std::shared_ptr& e } std::shared_ptr FakeQuantizeTransformation::fuseElementwise( - TransformationContext& context, MatcherPass* matcherPass, const std::shared_ptr& fakeQuantize, const bool updatePrecisions) { diff --git a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp index d4345aef1aaccd..32040b06f80fba 100644 --- a/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp +++ b/src/common/low_precision_transformations/src/fake_quantize_decomposition.cpp @@ -32,7 +32,7 @@ FakeQuantizeDecompositionTransformation::FakeQuantizeDecompositionTransformation return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -276,7 +276,7 @@ std::tuple, std::shared_ptr> decomposeFakeQuantize( } // namespace } // namespace fq_decomposition -bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool FakeQuantizeDecompositionTransformation::transform(ov::pass::pattern::Matcher& m) { auto node = ov::as_type_ptr(m.get_match_root()); if (!node || !NetworkHelper::isQuantizeSupported(node)) { return false; @@ -427,7 +427,7 @@ bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& c return rewritten; } - updateOutput(context, dequantize, newFakeQuantize); + updateOutput(dequantize, newFakeQuantize); if (precisionsAttribute.value().size() != 1ul) { precisionsAttribute.value() = { dataPrecision.precision }; diff --git 
a/src/common/low_precision_transformations/src/fold_convert.cpp b/src/common/low_precision_transformations/src/fold_convert.cpp index 2308bcc936e220..e5e3a361c2f483 100644 --- a/src/common/low_precision_transformations/src/fold_convert.cpp +++ b/src/common/low_precision_transformations/src/fold_convert.cpp @@ -24,15 +24,15 @@ FoldConvertTransformation::FoldConvertTransformation(const Params& params) : Cle if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; this->register_matcher(matcher, callback); } -bool FoldConvertTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FoldConvertTransformation::transform(ov::pass::pattern::Matcher &m) { const auto subtract = m.get_match_root(); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { return false; } @@ -46,7 +46,7 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ov::pa assert(ov::is_type(resultConstant)); replace_node(convert, resultConstant); - updateOutput(context, resultConstant, convert); + updateOutput(resultConstant, convert); }; foldConvert(0ul); @@ -55,9 +55,9 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ov::pa return true; } -bool FoldConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { +bool FoldConvertTransformation::canBeTransformed(const std::shared_ptr& operation) const { return - CleanupTransformation::canBeTransformed(context, operation) && + CleanupTransformation::canBeTransformed(operation) && ((ov::is_type(operation->get_input_node_ptr(1)) && ov::is_type(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) || (ov::is_type(operation->get_input_node_ptr(0)) && diff --git a/src/common/low_precision_transformations/src/fold_fake_quantize.cpp b/src/common/low_precision_transformations/src/fold_fake_quantize.cpp index 2f275ccb995c4f..3963c1eea20ef1 
100644 --- a/src/common/low_precision_transformations/src/fold_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fold_fake_quantize.cpp @@ -26,20 +26,20 @@ FoldFakeQuantizeTransformation::FoldFakeQuantizeTransformation(const Params& par if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(fakeQuantize, matcher_name); this->register_matcher(m, callback); } -bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FoldFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto fakeQuantize = ov::as_type_ptr(m.get_match_root()); if (fakeQuantize == nullptr) { return false; } - if (!canBeTransformed(context, fakeQuantize)) { + if (!canBeTransformed(fakeQuantize)) { return false; } @@ -76,7 +76,7 @@ bool FoldFakeQuantizeTransformation::isConstantOutput(std::shared_ptr return vecLow == vecHigh; } -bool FoldFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { +bool FoldFakeQuantizeTransformation::canBeTransformed(const std::shared_ptr& op) const { if (!NetworkHelper::isConstantPath(op) && !isConstantOutput(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_convert.cpp b/src/common/low_precision_transformations/src/fuse_convert.cpp index bda3cd8b3d38c8..889233c03236b6 100644 --- a/src/common/low_precision_transformations/src/fuse_convert.cpp +++ b/src/common/low_precision_transformations/src/fuse_convert.cpp @@ -40,7 +40,7 @@ FuseConvertTransformation::FuseConvertTransformation(const Params& params) : Cle if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; this->register_matcher(matcher, callback); @@ -68,9 +68,9 @@ std::shared_ptr removeConvertIfPossibleForSubtract( } // namespace -bool 
FuseConvertTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseConvertTransformation::transform(ov::pass::pattern::Matcher &m) { const auto op = m.get_match_root(); - if (!canBeTransformed(context, op)) { + if (!canBeTransformed(op)) { return false; } @@ -114,8 +114,8 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ov::pa return true; } -bool FuseConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!CleanupTransformation::canBeTransformed(context, op)) { +bool FuseConvertTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!CleanupTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp index e9418da055c929..5f8d9be15eb20b 100644 --- a/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_elementwise_to_fake_quantize.cpp @@ -15,8 +15,8 @@ namespace low_precision { FuseElementwiseToFakeQuantizeTransformation::FuseElementwiseToFakeQuantizeTransformation(const Params& params) : CleanupTransformation(params) { } -bool FuseElementwiseToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool FuseElementwiseToFakeQuantizeTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp index 56d67cb2edcbab..6b77e42f581af0 100644 --- 
a/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp @@ -25,16 +25,16 @@ FuseMultiplyToFakeQuantizeTransformation::FuseMultiplyToFakeQuantizeTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseMultiplyToFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -86,7 +86,7 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext& newFakeQuantize->set_levels(intervalAlignment.as().levels); } - updateOutput(context, newFakeQuantize, multiply); + updateOutput(newFakeQuantize, multiply); return true; } diff --git a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp index 61603cc8826713..73862fc856a944 100644 --- a/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp @@ -25,16 +25,16 @@ FuseSubtractToFakeQuantizeTransformation::FuseSubtractToFakeQuantizeTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool FuseSubtractToFakeQuantizeTransformation::transform(ov::pass::pattern::Matcher &m) { 
const auto subtract = m.get_match_root(); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { return false; } @@ -81,7 +81,7 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext& replace_node(subtract, newFakeQuantize); NetworkHelper::copyInfo(fakeQuantize, newFakeQuantize); - updateOutput(context, newFakeQuantize, subtract); + updateOutput(newFakeQuantize, subtract); return true; } diff --git a/src/common/low_precision_transformations/src/gather.cpp b/src/common/low_precision_transformations/src/gather.cpp index 1a8aa377cff2aa..4c5959d5c373e0 100644 --- a/src/common/low_precision_transformations/src/gather.cpp +++ b/src/common/low_precision_transformations/src/gather.cpp @@ -95,16 +95,16 @@ GatherTransformation::GatherTransformation(const Params& params) : LayerTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(gather, matcher_name); this->register_matcher(m, callback); } -bool GatherTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool GatherTransformation::transform(ov::pass::pattern::Matcher &m) { auto node = m.get_match_root(); - if (!canBeTransformed(context, m.get_match_root())) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -120,14 +120,14 @@ bool GatherTransformation::transform(TransformationContext& context, ov::pass::p replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, gather, NetworkHelper::getDequantization(gather, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(gather, NetworkHelper::getDequantization(gather, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool GatherTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if 
(!LayerTransformation::canBeTransformed(context, operation)) { +bool GatherTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/group_convolution.cpp b/src/common/low_precision_transformations/src/group_convolution.cpp index 6e2f48cd10e734..feabc004f05144 100644 --- a/src/common/low_precision_transformations/src/group_convolution.cpp +++ b/src/common/low_precision_transformations/src/group_convolution.cpp @@ -25,7 +25,7 @@ GroupConvolutionTransformation::GroupConvolutionTransformation(const Params& par if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -37,15 +37,12 @@ bool GroupConvolutionTransformation::isQuantized(const std::shared_ptr& layer, diff --git a/src/common/low_precision_transformations/src/interpolate.cpp b/src/common/low_precision_transformations/src/interpolate.cpp index f1d9a2d505788a..5559de793500e6 100644 --- a/src/common/low_precision_transformations/src/interpolate.cpp +++ b/src/common/low_precision_transformations/src/interpolate.cpp @@ -46,7 +46,7 @@ InterpolateTransformation::InterpolateTransformation(const Params& params) : Lay if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto matcher = std::make_shared( @@ -56,13 +56,13 @@ InterpolateTransformation::InterpolateTransformation(const Params& params) : Lay this->register_matcher(matcher, callback); } -bool InterpolateTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool InterpolateTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr interpolate = m.get_match_root(); - if (!canBeTransformed(context, m.get_match_root())) { + if (!canBeTransformed(m.get_match_root())) { return false; } interpolate = 
NetworkHelper::separateInStandaloneBranch(interpolate, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(interpolate, NetworkHelper::getDequantization(interpolate, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -84,8 +84,8 @@ bool InterpolateTransformation::isPrecisionPreserved(std::shared_ptr layer return false; } -bool InterpolateTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformed(context, layer)) { +bool InterpolateTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/layer_transformation.cpp b/src/common/low_precision_transformations/src/layer_transformation.cpp index fa014a078a22d3..e7a1af82d95614 100644 --- a/src/common/low_precision_transformations/src/layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/layer_transformation.cpp @@ -45,22 +45,9 @@ LayerTransformation::LayerTransformation(const Params& params) : deqPrecision(params.deqPrecision), defaultPrecisions(params.defaultPrecisions), reshapeIgnorePerTensorQuantizationCheck(params.reshapeIgnorePerTensorQuantizationCheck), - scalingMode(params.scalingMode), - context(nullptr) {} + scalingMode(params.scalingMode) {} -void LayerTransformation::setContext(TransformationContext* context) noexcept { - this->context = context; -} - -void LayerTransformation::setUpdatePrecisions(const bool updatePrecisions) { - this->updatePrecisions = updatePrecisions; -} - -void LayerTransformation::setDefaultPrecisions(const std::vector& defaultPrecisions) { - this->defaultPrecisions = defaultPrecisions; -} - -bool LayerTransformation::canBeTransformed(const 
TransformationContext& context, std::shared_ptr layer) const { +bool LayerTransformation::canBeTransformed(const std::shared_ptr& layer) const { if (!isQuantized(layer, defaultPrecisions)) { return false; } @@ -126,7 +113,7 @@ bool LayerTransformation::canBeTransformedStatic(const std::shared_ptr& la return true; } -bool LayerTransformation::canBeTransformedSpatialDimension(const TransformationContext& context, std::shared_ptr layer) const { +bool LayerTransformation::canBeTransformedSpatialDimension(const std::shared_ptr& layer) const { if (!isQuantized(layer, defaultPrecisions)) { OPENVINO_DEBUG("LPT: early exit: not quantized"); return false; @@ -397,7 +384,6 @@ DataPrecision LayerTransformation::getDataPrecision( } std::shared_ptr LayerTransformation::moveDequantizationAfter( - TransformationContext &context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool updateOutputPrecision, @@ -408,31 +394,28 @@ std::shared_ptr LayerTransformation::moveDequantizationAfter( updateOutputPrecision, moveSubtract, defaultPrecisions); - updateOutput(context, result.lastDequantization, result.newOperation); + updateOutput(result.lastDequantization, result.newOperation); return result.newOperation; } std::shared_ptr LayerTransformation::moveDequantizationBefore( - TransformationContext& context, const std::shared_ptr& operation, const FakeQuantizeDequantization& dequantization, const bool moveSubtract) const { const auto result = ov::pass::low_precision::NetworkHelper::moveDequantizationBefore(operation, dequantization, moveSubtract); - updateOutput(context, result.newOperation, result.lastDequantization); + updateOutput(result.newOperation, result.lastDequantization); return result.newOperation; } -bool LayerTransformation::updateOutput( - TransformationContext &context, - std::shared_ptr lastNode, - std::shared_ptr originalNode) const { +bool LayerTransformation::updateOutput(const std::shared_ptr& lastNode, + const std::shared_ptr& 
originalNode) const { bool was_updated = false; for (auto output : lastNode->outputs()) { for (auto input : output.get_target_inputs()) { if (ov::is_type(input.get_node())) { - const std::string originalName = originalNode->get_friendly_name(); + const auto originalName = originalNode->get_friendly_name(); originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix); lastNode->set_friendly_name(originalName); was_updated = true; @@ -442,61 +425,6 @@ bool LayerTransformation::updateOutput( } return was_updated; } - -void LayerTransformation::updateOutput( - TransformationContext& context, - std::shared_ptr lastNode, - std::string originalName) const { - const size_t outputSize = context.model->get_output_size(); - for (size_t i = 0; i < outputSize; ++i) { - std::shared_ptr result = context.model->get_output_op(i); - std::shared_ptr outputNode = result->get_input_node_shared_ptr(0); - if (outputNode.get() == lastNode.get()) { - lastNode->set_friendly_name(originalName); - break; - } - } -} - -void LayerTransformation::addPattern(ov::pass::GraphRewrite& pass, TransformationContext& context, std::shared_ptr patternRoot) { - MATCHER_SCOPE(SingleNodeMatcher); - ov::graph_rewrite_callback internal_callback = [this, &context](ov::pass::pattern::Matcher &m) { - const bool result = transform(context, m); - (void)result; -#ifdef LPT_DISPLAY_PRECISION - if (result) { - auto operationNode = m.get_match_root(); - std::cout << "Operation was transformed: " << - operationNode->get_type_name() << ", " << - operationNode->get_friendly_name() << ", output operation precision: " << - ((operationNode->get_output_size() == 1u) ? operationNode->get_output_element_type(0) : ov::element::Type()) << - std::endl; - } -#endif - return false; - }; - // TODO: better name for matcher? required? 
- auto m = std::make_shared(patternRoot, matcher_name); - auto match_pass = std::make_shared( - m->get_name(), - m, - [m, internal_callback](const std::shared_ptr& node) -> bool { - OPENVINO_DEBUG("Running matcher ", m->get_name(), " on ", node); - OV_PASS_CALLBACK(m); - if (std::dynamic_pointer_cast(m)->match(node->output(0))) { - OPENVINO_DEBUG("Matcher ", m->get_name(), " matched ", node); - bool status = internal_callback(*m.get()); - // explicitly clear Matcher state because it holds pointers to matched nodes - m->clear_state(); - return status; - } - m->clear_state(); - return false; - }, - ov::pass::PassProperty::CHANGE_DYNAMIC_STATE); - pass.add_matcher(match_pass); -} - } // namespace low_precision } // namespace pass } // namespace ov diff --git a/src/common/low_precision_transformations/src/mat_mul.cpp b/src/common/low_precision_transformations/src/mat_mul.cpp index f2d471bb222739..b153173d264a6e 100644 --- a/src/common/low_precision_transformations/src/mat_mul.cpp +++ b/src/common/low_precision_transformations/src/mat_mul.cpp @@ -32,16 +32,16 @@ MatMulTransformation::MatMulTransformation(const Params& params) : LayerTransfor if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MatMulTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool MatMulTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr matMul = ov::as_type_ptr(m.get_match_root()); - if ((matMul == nullptr) || !canBeTransformed(context, matMul)) { + if ((matMul == nullptr) || !canBeTransformed(matMul)) { return false; } @@ -174,7 +174,7 @@ bool MatMulTransformation::transform(TransformationContext &context, ov::pass::p NetworkHelper::insertDequantizationAfter(matMul, newMultiply, newMatMul); copy_runtime_info({ newMultiply, matMul }, newMultiply); - updateOutput(context, newMultiply, 
newMatMul); + updateOutput(newMultiply, newMatMul); OPENVINO_DEBUG("LPT: done: ", newMatMul); return true; @@ -184,8 +184,8 @@ bool MatMulTransformation::isPrecisionPreserved(std::shared_ptr layer) con return false; } -bool MatMulTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, layer)) { +bool MatMulTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(layer)) { return false; } diff --git a/src/common/low_precision_transformations/src/max_pool.cpp b/src/common/low_precision_transformations/src/max_pool.cpp index d9b06644037847..ef0a508fac3a65 100644 --- a/src/common/low_precision_transformations/src/max_pool.cpp +++ b/src/common/low_precision_transformations/src/max_pool.cpp @@ -26,15 +26,15 @@ MaxPoolTransformation::MaxPoolTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MaxPoolTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool MaxPoolTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -51,13 +51,13 @@ bool MaxPoolTransformation::canBeTransformed(const TransformationContext& contex return true; } -bool MaxPoolTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool MaxPoolTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr pooling = 
NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(pooling, NetworkHelper::getDequantization(pooling, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/move_fake_quantize.cpp b/src/common/low_precision_transformations/src/move_fake_quantize.cpp index abee9cbd081a0f..54b54a332db561 100644 --- a/src/common/low_precision_transformations/src/move_fake_quantize.cpp +++ b/src/common/low_precision_transformations/src/move_fake_quantize.cpp @@ -46,7 +46,7 @@ MoveFakeQuantize::MoveFakeQuantize(const Params& params) : LayerTransformation(p return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared( @@ -55,9 +55,9 @@ MoveFakeQuantize::MoveFakeQuantize(const Params& params) : LayerTransformation(p this->register_matcher(m, callback); } -bool MoveFakeQuantize::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MoveFakeQuantize::transform(ov::pass::pattern::Matcher& m) { const auto fq = m.get_match_root(); - if (!canBeTransformed(context, fq)) { + if (!canBeTransformed(fq)) { return false; } @@ -156,16 +156,16 @@ bool MoveFakeQuantize::transform(TransformationContext& context, ov::pass::patte newConcat->set_friendly_name(concat->get_friendly_name()); NetworkHelper::copyInfo(concat, newConcat); if (!dequantization.empty()) { - moveDequantizationBefore(context, newConcat, dequantization); + moveDequantizationBefore(newConcat, dequantization); return true; } replace_node(fq, newConcat); - updateOutput(context, newConcat, fq); + updateOutput(newConcat, fq); return true; } -bool MoveFakeQuantize::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool 
MoveFakeQuantize::canBeTransformed(const std::shared_ptr& layer) const { auto operation = layer->get_input_node_shared_ptr(0); std::shared_ptr concat; if (is_type(operation)) { diff --git a/src/common/low_precision_transformations/src/multiply.cpp b/src/common/low_precision_transformations/src/multiply.cpp index 4c1f3c073febcf..8dd6cce059d96e 100644 --- a/src/common/low_precision_transformations/src/multiply.cpp +++ b/src/common/low_precision_transformations/src/multiply.cpp @@ -33,16 +33,16 @@ MultiplyTransformation::MultiplyTransformation(const Params& params) : if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MultiplyTransformation::transform(ov::pass::pattern::Matcher& m) { auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -100,7 +100,7 @@ bool MultiplyTransformation::transform(TransformationContext& context, ov::pass: ov::op::TemporaryReplaceOutputType(in2, deqPrecision).get()); replace_node(multiply, new_multiply); - updateOutput(context, new_multiply, multiply); + updateOutput(new_multiply, multiply); return true; } @@ -128,7 +128,7 @@ bool MultiplyTransformation::transform(TransformationContext& context, ov::pass: multiply->get_output_element_type(0)); replace_node(multiply, new_scales); - const auto was_updated = updateOutput(context, new_scales, multiply); + const auto was_updated = updateOutput(new_scales, multiply); NetworkHelper::copyInfo(multiply, new_multiply, !was_updated); return true; diff --git a/src/common/low_precision_transformations/src/multiply_partial.cpp b/src/common/low_precision_transformations/src/multiply_partial.cpp index aea1bf49b8ffc1..e01b09324712ef 100644 --- 
a/src/common/low_precision_transformations/src/multiply_partial.cpp +++ b/src/common/low_precision_transformations/src/multiply_partial.cpp @@ -32,16 +32,16 @@ MultiplyPartialTransformation::MultiplyPartialTransformation(const Params& param if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyPartialTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool MultiplyPartialTransformation::transform(ov::pass::pattern::Matcher& m) { auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -168,7 +168,7 @@ bool MultiplyPartialTransformation::transform(TransformationContext& context, ov } replace_node(multiply, newMultiply); - updateOutput(context, newMultiply, multiply); + updateOutput(newMultiply, multiply); if (fullPathIndex != -1) { NetworkHelper::foldDequantization(newMultiply, fullPathIndex, defaultPrecisions); @@ -178,7 +178,7 @@ bool MultiplyPartialTransformation::transform(TransformationContext& context, ov return true; } -bool MultiplyPartialTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool MultiplyPartialTransformation::canBeTransformed(const std::shared_ptr& layer) const { FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 0ul); FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, defaultPrecisions, 1ul); @@ -193,7 +193,7 @@ bool MultiplyPartialTransformation::canBeTransformed(const TransformationContext return false; } - return EltwiseBaseTransformation::canBeTransformed(context, layer); + return EltwiseBaseTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git 
a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp index 466b7ad6c75e5d..8e52eb38ee8ee1 100644 --- a/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp +++ b/src/common/low_precision_transformations/src/multiply_to_group_convolution.cpp @@ -24,16 +24,16 @@ MultiplyToGroupConvolutionTransformation::MultiplyToGroupConvolutionTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool MultiplyToGroupConvolutionTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = m.get_match_root(); - if (!canBeTransformed(context, multiply)) { + if (!canBeTransformed(multiply)) { return false; } @@ -142,8 +142,8 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext& return true; } -bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!CleanupTransformation::canBeTransformed(context, operation)) { +bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!CleanupTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/mvn.cpp b/src/common/low_precision_transformations/src/mvn.cpp index 4c848b69b82661..22dfc2f9816ed0 100644 --- a/src/common/low_precision_transformations/src/mvn.cpp +++ b/src/common/low_precision_transformations/src/mvn.cpp @@ -52,15 +52,15 @@ MVNTransformation::MVNTransformation(const Params& params) : LayerTransformation if (transformation_callback(op)) { return false; } - return 
transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool MVNTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool MVNTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } @@ -117,9 +117,9 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s return false; } -bool MVNTransformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool MVNTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr operation = m.get_match_root(); - if (!canBeTransformed(context, operation)) { + if (!canBeTransformed(operation)) { return false; } @@ -167,7 +167,7 @@ bool MVNTransformation::transform(TransformationContext &context, ov::pass::patt NetworkHelper::insertDequantizationAfter(mvn, newMultiply, newMVN); - updateOutput(context, newMultiply, newMVN); + updateOutput(newMultiply, newMVN); OPENVINO_DEBUG("LPT: done: ", newMVN); return true; diff --git a/src/common/low_precision_transformations/src/normalize_l2.cpp b/src/common/low_precision_transformations/src/normalize_l2.cpp index c9f1cc6a7fe8af..9c30456f84afba 100644 --- a/src/common/low_precision_transformations/src/normalize_l2.cpp +++ b/src/common/low_precision_transformations/src/normalize_l2.cpp @@ -47,15 +47,15 @@ NormalizeL2Transformation::NormalizeL2Transformation(const Params& params) : Lay if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if 
(!LayerTransformation::canBeTransformed(context, operation)) { +bool NormalizeL2Transformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } @@ -97,9 +97,9 @@ bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& co return true; } -bool NormalizeL2Transformation::transform(TransformationContext &context, ov::pass::pattern::Matcher &m) { +bool NormalizeL2Transformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr operation = m.get_match_root(); - if (!canBeTransformed(context, operation)) { + if (!canBeTransformed(operation)) { return false; } @@ -146,7 +146,7 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ov::pa NetworkHelper::insertDequantizationAfter(normalize, newMultiply, newNormalize); ov::copy_runtime_info({ normalize, newMultiply }, newMultiply); - updateOutput(context, newMultiply, newNormalize); + updateOutput(newMultiply, newNormalize); OPENVINO_DEBUG("LPT: done: ", newNormalize); return true; diff --git a/src/common/low_precision_transformations/src/pad.cpp b/src/common/low_precision_transformations/src/pad.cpp index 12310ec5724f6c..c023ee9d14d9ee 100644 --- a/src/common/low_precision_transformations/src/pad.cpp +++ b/src/common/low_precision_transformations/src/pad.cpp @@ -31,7 +31,7 @@ PadTransformation::PadTransformation(const Params& params) : LayerTransformation if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -50,8 +50,8 @@ namespace { } } // namespace -bool PadTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool PadTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -164,14 +164,14 @@ bool 
PadTransformation::transform(TransformationContext& context, ov::pass::patt const auto convertedZero = ov::opset1::Constant::create(dequantization.data.get_element_type(), Shape{}, { padConstantValue }); pad->set_argument(3, convertedZero); - const auto newOperation = moveDequantizationAfter(context, pad, dequantization); + const auto newOperation = moveDequantizationAfter(pad, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool PadTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, op)) { +bool PadTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/prelu.cpp b/src/common/low_precision_transformations/src/prelu.cpp index 46e0d692f0faca..e30bb6fa041074 100644 --- a/src/common/low_precision_transformations/src/prelu.cpp +++ b/src/common/low_precision_transformations/src/prelu.cpp @@ -28,22 +28,22 @@ PReluTransformation::PReluTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool PReluTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool PReluTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr prelu = m.get_match_root(); - if (!canBeTransformed(context, prelu)) { + if (!canBeTransformed(prelu)) { return false; } prelu = NetworkHelper::separateInStandaloneBranch(prelu, defaultPrecisions); const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(prelu, defaultPrecisions, 0); - const auto newOperation = moveDequantizationAfter(context, prelu, dequantization, false, false); + 
const auto newOperation = moveDequantizationAfter(prelu, dequantization, false, false); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -53,8 +53,8 @@ bool PReluTransformation::isPrecisionPreserved(std::shared_ptr op) const n return false; } -bool PReluTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool PReluTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/recurrent_cell.cpp b/src/common/low_precision_transformations/src/recurrent_cell.cpp index cec96044502596..34d851d6a2b464 100644 --- a/src/common/low_precision_transformations/src/recurrent_cell.cpp +++ b/src/common/low_precision_transformations/src/recurrent_cell.cpp @@ -43,7 +43,7 @@ RecurrentCellTransformation::RecurrentCellTransformation(const Params& params) : return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared( @@ -116,7 +116,7 @@ std::vector> get_supported_precisions(std::shar } // namespace -void RecurrentCellTransformation::propagate(TransformationContext& context, const std::shared_ptr node) { +void RecurrentCellTransformation::propagate(const std::shared_ptr node) { if (!isSupportedForPerChannelQuantization(node)) { return; } @@ -126,7 +126,7 @@ void RecurrentCellTransformation::propagate(TransformationContext& context, cons if (dequantization.empty()) { return; } - const auto& new_node = moveDequantizationAfter(context, normalized_node, dequantization); + const auto& new_node = moveDequantizationAfter(normalized_node, dequantization); const auto& new_dequantization = NetworkHelper::getDequantizationBelow(new_node); if (new_dequantization.empty()) { @@ -136,12 +136,12 @@ void RecurrentCellTransformation::propagate(TransformationContext& context, cons for (auto output : 
new_dequantization.multiply->outputs()) { for (auto input : output.get_target_inputs()) { auto child = input.get_node()->shared_from_this(); - propagate(context, child); + propagate(child); } } } -bool RecurrentCellTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { +bool RecurrentCellTransformation::transform(ov::pass::pattern::Matcher& m) { const auto lstm = m.get_match_root(); const auto inputs = get_supported_precisions(lstm); for (const auto& input : inputs) { @@ -179,13 +179,13 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: for (const auto& output : multiply->outputs()) { for (const auto& input : output.get_target_inputs()) { const auto input_node = input.get_node(); - propagate(context, input_node->shared_from_this()); + propagate(input_node->shared_from_this()); } } } } - if (!canBeTransformed(context, lstm)) { + if (!canBeTransformed(lstm)) { return false; } @@ -228,7 +228,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: propagateSkipCleanupAttribute(deq_multiply); this->register_new_node(new_fq); - updateOutput(context, deq_multiply, new_fq); + updateOutput(deq_multiply, new_fq); } else { continue; } @@ -245,7 +245,7 @@ bool RecurrentCellTransformation::transform(TransformationContext& context, ov:: return true; } -bool RecurrentCellTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr lstm) const { +bool RecurrentCellTransformation::canBeTransformed(const std::shared_ptr& lstm) const { const auto inputs = get_supported_precisions(lstm); for (const auto& index : inputs) { const auto& input = lstm->get_input_node_ptr(index.first); diff --git a/src/common/low_precision_transformations/src/reduce_base_transformation.cpp b/src/common/low_precision_transformations/src/reduce_base_transformation.cpp index 5fe679d8c997bf..c39681bc660f21 100644 --- 
a/src/common/low_precision_transformations/src/reduce_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/reduce_base_transformation.cpp @@ -16,8 +16,8 @@ namespace low_precision { ReduceBaseTransformation::ReduceBaseTransformation(const Params& params) : LayerTransformation(params) {} -bool ReduceBaseTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ReduceBaseTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -29,13 +29,13 @@ bool ReduceBaseTransformation::transform(TransformationContext& context, ov::pas // updatePrecision depends on type and parameters of the reduce const bool updatePrecision = getUpdatePrecision(reduce); - const auto newOperation = moveDequantizationAfter(context, reduce, dequantization, updatePrecision); + const auto newOperation = moveDequantizationAfter(reduce, dequantization, updatePrecision); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool ReduceBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceBaseTransformation::canBeTransformed(const std::shared_ptr& reduce) const { const auto dequantization = NetworkHelper::getDequantization(reduce, defaultPrecisions); if (dequantization.empty()) { return false; diff --git a/src/common/low_precision_transformations/src/reduce_max.cpp b/src/common/low_precision_transformations/src/reduce_max.cpp index 4cf9c2ed2100aa..65d021accf3452 100644 --- a/src/common/low_precision_transformations/src/reduce_max.cpp +++ b/src/common/low_precision_transformations/src/reduce_max.cpp @@ -23,19 +23,19 @@ ReduceMaxTransformation::ReduceMaxTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, 
matcher_name); this->register_matcher(m, callback); } -bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceMaxTransformation::canBeTransformed(const std::shared_ptr& reduce) const { if (!ov::is_type(reduce)) { return false; } - if (!ReduceBaseTransformation::canBeTransformed(context, reduce)) { + if (!ReduceBaseTransformation::canBeTransformed(reduce)) { return false; } diff --git a/src/common/low_precision_transformations/src/reduce_mean.cpp b/src/common/low_precision_transformations/src/reduce_mean.cpp index 451a1d4c3804df..55f080587290b9 100644 --- a/src/common/low_precision_transformations/src/reduce_mean.cpp +++ b/src/common/low_precision_transformations/src/reduce_mean.cpp @@ -23,15 +23,15 @@ ReduceMeanTransformation::ReduceMeanTransformation(const Params& params) : Reduc if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceMeanTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - return ov::is_type(reduce) ? ReduceBaseTransformation::canBeTransformed(context, reduce) : false; +bool ReduceMeanTransformation::canBeTransformed(const std::shared_ptr& reduce) const { + return ov::is_type(reduce) ? 
ReduceBaseTransformation::canBeTransformed(reduce) : false; } bool ReduceMeanTransformation::isPrecisionPreserved(std::shared_ptr reduce) const noexcept { diff --git a/src/common/low_precision_transformations/src/reduce_min.cpp b/src/common/low_precision_transformations/src/reduce_min.cpp index d7433c322718c0..6ad0cfea259b38 100644 --- a/src/common/low_precision_transformations/src/reduce_min.cpp +++ b/src/common/low_precision_transformations/src/reduce_min.cpp @@ -22,19 +22,19 @@ ReduceMinTransformation::ReduceMinTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceMinTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceMinTransformation::canBeTransformed(const std::shared_ptr& reduce) const { if (!ov::is_type(reduce)) { return false; } - if (!ReduceBaseTransformation::canBeTransformed(context, reduce)) { + if (!ReduceBaseTransformation::canBeTransformed(reduce)) { return false; } diff --git a/src/common/low_precision_transformations/src/reduce_sum.cpp b/src/common/low_precision_transformations/src/reduce_sum.cpp index 1bc8bf75d27a7f..d28fbdc9c559e7 100644 --- a/src/common/low_precision_transformations/src/reduce_sum.cpp +++ b/src/common/low_precision_transformations/src/reduce_sum.cpp @@ -23,16 +23,16 @@ ReduceSumTransformation::ReduceSumTransformation(const Params& params) : ReduceB if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReduceSumTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { +bool ReduceSumTransformation::canBeTransformed(const std::shared_ptr& reduce) const { const auto reduceSum 
= ov::as_type_ptr(reduce); - if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(context, reduceSum)) { + if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(reduceSum)) { return false; } diff --git a/src/common/low_precision_transformations/src/relu.cpp b/src/common/low_precision_transformations/src/relu.cpp index 6d39cccc0e0260..ca2de4e05f0b84 100644 --- a/src/common/low_precision_transformations/src/relu.cpp +++ b/src/common/low_precision_transformations/src/relu.cpp @@ -28,22 +28,22 @@ ReluTransformation::ReluTransformation(const Params& params) : LayerTransformati if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ReluTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool ReluTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr relu = m.get_match_root(); - if (!canBeTransformed(context, relu)) { + if (!canBeTransformed(relu)) { return false; } relu = NetworkHelper::separateInStandaloneBranch(relu, defaultPrecisions); const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(relu, defaultPrecisions, 0); - const auto newOperation = moveDequantizationAfter(context, relu, dequantization); + const auto newOperation = moveDequantizationAfter(relu, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -53,8 +53,8 @@ bool ReluTransformation::isPrecisionPreserved(std::shared_ptr op) const no return true; } -bool ReluTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ReluTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/reshape.cpp 
b/src/common/low_precision_transformations/src/reshape.cpp index 4d9b9c53a782f6..cf24edc18953b4 100644 --- a/src/common/low_precision_transformations/src/reshape.cpp +++ b/src/common/low_precision_transformations/src/reshape.cpp @@ -48,7 +48,7 @@ ReshapeTransformation::ReshapeTransformation(const Params& params) : LayerTransf } } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -146,19 +146,19 @@ void reshapeDequantizationConstant(const std::shared_ptr& r } // namespace -bool ReshapeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool ReshapeTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr reshape = ov::as_type_ptr(m.get_match_root()); if (NetworkHelper::isConstantPath(reshape)) { return false; } - if (!canBeTransformed(context, reshape)) { + if (!canBeTransformed(reshape)) { return false; } reshape = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(reshape, defaultPrecisions)); reshapeDequantizationConstant(reshape, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0)); + const auto newOperation = moveDequantizationAfter(reshape, NetworkHelper::getDequantization(reshape, defaultPrecisions, 0)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -188,8 +188,8 @@ inline size_t getFirstChangedDimension(const PartialShape& shape1, const Partial return i; } -bool ReshapeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool ReshapeTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/shuffle_channels.cpp b/src/common/low_precision_transformations/src/shuffle_channels.cpp 
index ab170ea28572e2..67abf9b28db708 100644 --- a/src/common/low_precision_transformations/src/shuffle_channels.cpp +++ b/src/common/low_precision_transformations/src/shuffle_channels.cpp @@ -26,15 +26,15 @@ ShuffleChannelsTransformation::ShuffleChannelsTransformation(const Params& param if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool ShuffleChannelsTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool ShuffleChannelsTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -73,14 +73,14 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ov replace_node(dequantization.multiplyConstant, shuffledMulConst); dequantization.multiplyConstant = shuffledMulConst; - const auto newOperation = moveDequantizationAfter(context, shuffleChannels, dequantization); + const auto newOperation = moveDequantizationAfter(shuffleChannels, dequantization); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool ShuffleChannelsTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformedSpatialDimension(context, op)) { +bool ShuffleChannelsTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformedSpatialDimension(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/slice.cpp b/src/common/low_precision_transformations/src/slice.cpp index 99c51f4e3f5ac3..082c00f207a37a 100644 --- a/src/common/low_precision_transformations/src/slice.cpp +++ b/src/common/low_precision_transformations/src/slice.cpp @@ -26,27 +26,27 @@ SliceTransformation::SliceTransformation(const Params& 
params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SliceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!SliceTransformation::canBeTransformed(context, m.get_match_root())) { +bool SliceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!SliceTransformation::canBeTransformed(m.get_match_root())) { return false; } const auto strided_slice = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool SliceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { - if (!LayerTransformation::canBeTransformed(context, operation)) { +bool SliceTransformation::canBeTransformed(const std::shared_ptr& operation) const { + if (!LayerTransformation::canBeTransformed(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/space_to_batch.cpp b/src/common/low_precision_transformations/src/space_to_batch.cpp index 2f0373ffec1068..20a5d1b863a18a 100644 --- a/src/common/low_precision_transformations/src/space_to_batch.cpp +++ b/src/common/low_precision_transformations/src/space_to_batch.cpp @@ -26,15 +26,15 @@ SpaceToBatchTransformation::SpaceToBatchTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool 
SpaceToBatchTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool SpaceToBatchTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } @@ -46,13 +46,13 @@ bool SpaceToBatchTransformation::canBeTransformed(const TransformationContext& c return dequantization.isPerTensor(); } -bool SpaceToBatchTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool SpaceToBatchTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!canBeTransformed(m.get_match_root())) { return false; } const std::shared_ptr op = NetworkHelper::separateInStandaloneBranch(m.get_match_root(), defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; diff --git a/src/common/low_precision_transformations/src/split.cpp b/src/common/low_precision_transformations/src/split.cpp index 88deb9f62e444b..35b1ede004730e 100644 --- a/src/common/low_precision_transformations/src/split.cpp +++ b/src/common/low_precision_transformations/src/split.cpp @@ -24,15 +24,15 @@ SplitTransformation::SplitTransformation(const Params& params) : LayerTransforma if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SplitTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool SplitTransformation::transform(ov::pass::pattern::Matcher& m) { + if 
(!canBeTransformed(m.get_match_root())) { return false; } @@ -120,7 +120,7 @@ bool SplitTransformation::transform(TransformationContext& context, ov::pass::pa } } - updateOutputs(context, lastNodes, newSplit); + updateOutputs(lastNodes, newSplit); OPENVINO_DEBUG("LPT: done: ", newSplit); return true; @@ -128,12 +128,10 @@ bool SplitTransformation::transform(TransformationContext& context, ov::pass::pa void SplitTransformation::updateOutputs( - TransformationContext& context, std::vector> lastNodes, std::shared_ptr originalNode) const { - //TODO: LPT: during refactoring update is not tested if (lastNodes.size() == 1ul) { - updateOutput(context, lastNodes[0], originalNode); + updateOutput(lastNodes[0], originalNode); } else { const std::string originalName = originalNode->get_friendly_name(); for (size_t i = 0; i < lastNodes.size(); ++i) { @@ -155,7 +153,7 @@ bool SplitTransformation::isPrecisionPreserved(std::shared_ptr layer) cons return true; } -bool SplitTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool SplitTransformation::canBeTransformed(const std::shared_ptr& layer) const { return !NetworkHelper::getDequantization(layer, defaultPrecisions).empty() && layer->get_input_partial_shape(0).rank().is_static(); } diff --git a/src/common/low_precision_transformations/src/squeeze.cpp b/src/common/low_precision_transformations/src/squeeze.cpp index 04bdf62362bddd..2ddef0b81be120 100644 --- a/src/common/low_precision_transformations/src/squeeze.cpp +++ b/src/common/low_precision_transformations/src/squeeze.cpp @@ -26,15 +26,15 @@ SqueezeTransformation::SqueezeTransformation(const Params& params) : LayerTransf if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool SqueezeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if 
(!canBeTransformed(context, m.get_match_root())) { +bool SqueezeTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -66,7 +66,7 @@ bool SqueezeTransformation::transform(TransformationContext& context, ov::pass:: replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(squeeze, NetworkHelper::getDequantization(squeeze, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -76,8 +76,8 @@ bool SqueezeTransformation::isPrecisionPreserved(std::shared_ptr layer) co return true; } -bool SqueezeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(context, layer); +bool SqueezeTransformation::canBeTransformed(const std::shared_ptr& layer) const { + return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(layer); } } // namespace low_precision diff --git a/src/common/low_precision_transformations/src/strided_slice.cpp b/src/common/low_precision_transformations/src/strided_slice.cpp index 5d9939e1fe943a..046a1d65af6e50 100644 --- a/src/common/low_precision_transformations/src/strided_slice.cpp +++ b/src/common/low_precision_transformations/src/strided_slice.cpp @@ -107,15 +107,15 @@ StridedSliceTransformation::StridedSliceTransformation(const Params& params) : L if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool StridedSliceTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher& m) { 
- if (!StridedSliceTransformation::canBeTransformed(context, m.get_match_root())) { +bool StridedSliceTransformation::transform(ov::pass::pattern::Matcher& m) { + if (!StridedSliceTransformation::canBeTransformed(m.get_match_root())) { return false; } @@ -132,13 +132,13 @@ bool StridedSliceTransformation::transform(TransformationContext& context, ov::p replace_node(dequantization.multiplyConstant, new_mul_const); dequantization.multiplyConstant = new_mul_const; - const auto newOperation = moveDequantizationAfter(context, strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(strided_slice, NetworkHelper::getDequantization(strided_slice, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool StridedSliceTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { +bool StridedSliceTransformation::canBeTransformed(const std::shared_ptr& operation) const { if (!ov::is_type(operation)) { return false; } diff --git a/src/common/low_precision_transformations/src/subtract.cpp b/src/common/low_precision_transformations/src/subtract.cpp index d67a8dc1e0e288..b19add9fca1570 100644 --- a/src/common/low_precision_transformations/src/subtract.cpp +++ b/src/common/low_precision_transformations/src/subtract.cpp @@ -34,16 +34,16 @@ SubtractTransformation::SubtractTransformation(const Params& params) : LayerTran if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(subtract, matcher_name); this->register_matcher(m, callback); } -bool SubtractTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool SubtractTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr subtract = ov::as_type_ptr(m.get_match_root()); - if (!canBeTransformed(context, subtract)) { + if (!canBeTransformed(subtract)) { 
return false; } diff --git a/src/common/low_precision_transformations/src/transformation_context.cpp b/src/common/low_precision_transformations/src/transformation_context.cpp deleted file mode 100644 index 7cef253f0e3f3f..00000000000000 --- a/src/common/low_precision_transformations/src/transformation_context.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2018-2025 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "low_precision/transformation_context.hpp" - -namespace ov { -namespace pass { -namespace low_precision { - -TransformationContext::TransformationContext() : model(nullptr) {} - -TransformationContext::TransformationContext(std::shared_ptr model) : model(model) { -} - -} // namespace low_precision -} // namespace pass -} // namespace ov diff --git a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp index f5efd99e008b86..8b7a94f5ef966b 100644 --- a/src/common/low_precision_transformations/src/transparent_base_transformation.cpp +++ b/src/common/low_precision_transformations/src/transparent_base_transformation.cpp @@ -14,20 +14,20 @@ using namespace ov; using namespace ov::pass; using namespace ov::pass::low_precision; -bool TransparentBaseTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool TransparentBaseTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr op = m.get_match_root(); - if (!canBeTransformed(context, op)) { + if (!canBeTransformed(op)) { return false; } op = NetworkHelper::separateInStandaloneBranch(op, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, op, NetworkHelper::getDequantization(op, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(op, NetworkHelper::getDequantization(op, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; } -bool 
TransparentBaseTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { +bool TransparentBaseTransformation::canBeTransformed(const std::shared_ptr& layer) const { return true; } diff --git a/src/common/low_precision_transformations/src/transpose.cpp b/src/common/low_precision_transformations/src/transpose.cpp index 4d8577e40643ff..b210920ab5bc65 100644 --- a/src/common/low_precision_transformations/src/transpose.cpp +++ b/src/common/low_precision_transformations/src/transpose.cpp @@ -26,7 +26,7 @@ TransposeTransformation::TransposeTransformation(const Params& params) : LayerTr if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); @@ -83,15 +83,15 @@ void transposeDequantizationConstant(std::shared_ptr& transpose, const std } // namespace -bool TransposeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { +bool TransposeTransformation::transform(ov::pass::pattern::Matcher &m) { std::shared_ptr transpose = m.get_match_root(); - if (!canBeTransformed(context, transpose)) { + if (!canBeTransformed(transpose)) { return false; } transpose = NetworkHelper::separateInStandaloneBranch(transpose, defaultPrecisions); transposeDequantizationConstant(transpose, defaultPrecisions); - const auto newOperation = moveDequantizationAfter(context, transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0)); + const auto newOperation = moveDequantizationAfter(transpose, NetworkHelper::getDequantization(transpose, defaultPrecisions, 0)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -101,8 +101,8 @@ bool TransposeTransformation::isPrecisionPreserved(std::shared_ptr op) con return true; } -bool TransposeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - if (!LayerTransformation::canBeTransformed(context, op)) { +bool 
TransposeTransformation::canBeTransformed(const std::shared_ptr& op) const { + if (!LayerTransformation::canBeTransformed(op)) { return false; } diff --git a/src/common/low_precision_transformations/src/unsqueeze.cpp b/src/common/low_precision_transformations/src/unsqueeze.cpp index 3ba7a951950a5b..32e3f89ab01e69 100644 --- a/src/common/low_precision_transformations/src/unsqueeze.cpp +++ b/src/common/low_precision_transformations/src/unsqueeze.cpp @@ -26,15 +26,15 @@ UnsqueezeTransformation::UnsqueezeTransformation(const Params& params) : LayerTr if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); this->register_matcher(m, callback); } -bool UnsqueezeTransformation::transform(TransformationContext& context, ov::pass::pattern::Matcher &m) { - if (!canBeTransformed(context, m.get_match_root())) { +bool UnsqueezeTransformation::transform(ov::pass::pattern::Matcher &m) { + if (!canBeTransformed(m.get_match_root())) { return false; } @@ -68,7 +68,7 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ov::pass replace_node(dequantization.subtractConstant, newConstant); } - const auto newOperation = moveDequantizationAfter(context, unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions)); + const auto newOperation = moveDequantizationAfter(unsqueeze, NetworkHelper::getDequantization(unsqueeze, defaultPrecisions)); OPENVINO_DEBUG("LPT: done: ", newOperation); return true; @@ -78,8 +78,8 @@ bool UnsqueezeTransformation::isPrecisionPreserved(std::shared_ptr layer) return true; } -bool UnsqueezeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - return (!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(context, layer); +bool UnsqueezeTransformation::canBeTransformed(const std::shared_ptr& layer) const { + return 
(!NetworkHelper::getDequantization(layer, defaultPrecisions).empty()) && LayerTransformation::canBeTransformed(layer); } diff --git a/src/common/low_precision_transformations/src/variadic_split.cpp b/src/common/low_precision_transformations/src/variadic_split.cpp index fd719cb5fcdf05..e381fd2e58bd48 100644 --- a/src/common/low_precision_transformations/src/variadic_split.cpp +++ b/src/common/low_precision_transformations/src/variadic_split.cpp @@ -26,7 +26,7 @@ VariadicSplitTransformation::VariadicSplitTransformation(const Params& params) : if (transformation_callback(op)) { return false; } - return transform(*context, m); + return transform(m); }; auto m = std::make_shared(matcher, matcher_name); diff --git a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp index 9bd43e8a73fe9b..64c6e15cd81356 100644 --- a/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp +++ b/src/common/low_precision_transformations/src/weightable_layer_transformation.cpp @@ -49,9 +49,10 @@ WeightableLayerTransformation::WeightableLayerTransformation(const Params& param canBeTransformedParams(canBeTransformedParams) { } -bool WeightableLayerTransformation::canConvolutionBeTransformed(const TransformationContext& context, std::shared_ptr layer, - const std::vector& defaultPrecisions) const { - if (!WeightableLayerTransformation::canBeTransformed(context, layer)) { +bool WeightableLayerTransformation::canConvolutionBeTransformed( + const std::shared_ptr& layer, + const ov::element::TypeVector& defaultPrecisions) const { + if (!WeightableLayerTransformation::canBeTransformed(layer)) { return false; } @@ -88,8 +89,8 @@ bool WeightableLayerTransformation::canConvolutionBeTransformed(const Transforma return true; } -bool WeightableLayerTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - if 
(!LayerTransformation::canBeTransformed(context, layer)) { +bool WeightableLayerTransformation::canBeTransformed(const std::shared_ptr& layer) const { + if (!LayerTransformation::canBeTransformed(layer)) { return false; } diff --git a/src/common/low_precision_transformations/tests/layer_transformation.hpp b/src/common/low_precision_transformations/tests/layer_transformation.hpp index 6ce93863c42a67..83a9faa70e16d3 100644 --- a/src/common/low_precision_transformations/tests/layer_transformation.hpp +++ b/src/common/low_precision_transformations/tests/layer_transformation.hpp @@ -8,7 +8,6 @@ #include "low_precision/rt_info/intervals_alignment_attribute.hpp" #include "low_precision/rt_info/precisions_attribute.hpp" #include "low_precision/layer_transformation.hpp" -#include "low_precision/transformation_context.hpp" #include "low_precision/network_helper.hpp" #include "ov_lpt_models/common/dequantization_operations.hpp" diff --git a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp index 9a80930160b298..ee1bb78ef4992a 100644 --- a/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp +++ b/src/common/low_precision_transformations/tests/simple_low_precision_transformer.cpp @@ -10,7 +10,6 @@ #include "low_precision/markup_bias.hpp" #include "low_precision/markup_can_be_quantized.hpp" #include "low_precision/markup_quantization_granularity.hpp" -#include "low_precision/transformation_context.hpp" // cleanup transformations #include "low_precision/convert.hpp" From e13f71005a714a1180770896eecd21ad29eed53b Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Mon, 20 Jan 2025 22:10:25 +0000 Subject: [PATCH 57/97] [CI] [GHA] Remove `pip cache info` from `setup_python` action (#28561) ### Details: - Port of #28557 --- .github/actions/setup_python/action.yml | 9 --------- .github/workflows/job_build_windows.yml | 1 - 
docs/dev/ci/github_actions/custom_actions.md | 2 -- 3 files changed, 12 deletions(-) diff --git a/.github/actions/setup_python/action.yml b/.github/actions/setup_python/action.yml index ce85be46ced17e..507c52d8a69efa 100644 --- a/.github/actions/setup_python/action.yml +++ b/.github/actions/setup_python/action.yml @@ -15,10 +15,6 @@ inputs: description: 'If the runner is self-hosted' required: false default: 'true' - show-cache-info: - description: 'If the action should show the share space occupied by cache' - required: false - default: 'false' runs: using: 'composite' steps: @@ -75,8 +71,3 @@ runs: $pipVersion = python3 -c "import pip; print(pip.__version__)" Write-Host "Using pip version: $pipVersion" "PIP_CACHE_DIR=${{ inputs.pip-cache-path }}/$pipVersion" >> $env:GITHUB_ENV - - - if: ${{ inputs.show-cache-info == 'true' }} - name: Get pip cache info - shell: bash - run: python3 -m pip cache info diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index f0c150c4ac4db4..988bec1de7f929 100644 --- a/.github/workflows/job_build_windows.yml +++ b/.github/workflows/job_build_windows.yml @@ -80,7 +80,6 @@ jobs: pip-cache-path: ${{ env.PIP_CACHE_PATH }} should-setup-pip-paths: 'true' self-hosted-runner: 'true' - show-cache-info: 'true' - name: Generate product manifest and set CI_BUILD_NUMBER & CI_BUILD_DEV_TAG id: create_manifest diff --git a/docs/dev/ci/github_actions/custom_actions.md b/docs/dev/ci/github_actions/custom_actions.md index d2c2ca149b20b9..e65650aea2b741 100644 --- a/docs/dev/ci/github_actions/custom_actions.md +++ b/docs/dev/ci/github_actions/custom_actions.md @@ -29,14 +29,12 @@ Since `actions/setup-python` does not work on the Linux ARM64 machines, pip-cache-path: ${{ env.PIP_CACHE_PATH }} should-setup-pip-paths: 'true' self-hosted-runner: 'true' - show-cache-info: 'true' ``` where: * `version` - the Python version to install in the `MAJOR.MINOR` format * `pip-cache-path` - the path to the `pip` cache on the 
mounted share. Read more in the [shares and caches](./caches.md) documentation * `should-setup-pip-paths` - indicates whether the action should set up the `PIP_CACHE_DIR` and `PIP_INSTALL_PATH` environment variables for later usage * `self-hosted-runner` - indicates whether the runner is self-hosted. Learn more about [available runners](./runners.md) -* `show-cache-info` - indicates whether the action should show the share space occupied by the `pip` cache ## System Info Print From 5ce87bf1fe1f8181e551340574cfacbea9a942c2 Mon Sep 17 00:00:00 2001 From: Andrei Kashchikhin Date: Mon, 20 Jan 2025 22:11:38 +0000 Subject: [PATCH 58/97] [CI] [GHA] Add more download errors to rerunner (#28515) ### Tickets: - *159938* - *156593* --- .github/scripts/workflow_rerun/errors_to_look_for.json | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/scripts/workflow_rerun/errors_to_look_for.json b/.github/scripts/workflow_rerun/errors_to_look_for.json index 8e45212a89a6f0..55b46f9df1b1bb 100644 --- a/.github/scripts/workflow_rerun/errors_to_look_for.json +++ b/.github/scripts/workflow_rerun/errors_to_look_for.json @@ -110,5 +110,13 @@ { "error_text": "download failed after attempts", "ticket": 159547 + }, + { + "error_text": "Failed to connect to github.com port 443: Connection refused", + "ticket": 156593 + }, + { + "error_text": "file DOWNLOAD cannot compute hash on failed download", + "ticket": 156593 } ] \ No newline at end of file From 73d6a3687d5cf13678938fc886e86d7861f257b5 Mon Sep 17 00:00:00 2001 From: Andrey Babushkin Date: Mon, 20 Jan 2025 22:22:06 +0000 Subject: [PATCH 59/97] Revert "[GHA] Use upload-artifact with tag in Build Doc" (#28359) Reverts openvinotoolkit/openvino#28354 Original PR is not needed anymore. 
The culprit was `cache-apt-pkgs-action`, it used `upload-artifact@v3` action which caused the original workflow failure, but now it's fixed https://github.com/awalsh128/cache-apt-pkgs-action/pull/140 and `upload-artifact@v3` will be working till January 30 (https://github.blog/changelog/2024-04-16-deprecation-notice-v3-of-the-artifact-actions/) --- .github/workflows/build_doc.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index df85b1ef3aa385..2ea17b79af7514 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -78,13 +78,13 @@ jobs: echo "PR_NUMBER=$PR_NUMBER" >> $GITHUB_ENV - name: 'Upload sphinx.log' - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sphinx_build_log_${{ env.PR_NUMBER }}.log path: build/docs/sphinx.log - name: 'Upload docs html' - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_docs_html_${{ env.PR_NUMBER }}.zip path: build/docs/openvino_docs_html.zip @@ -101,7 +101,7 @@ jobs: - name: 'Upload test results' if: failure() - uses: actions/upload-artifact@v4.6.0 + uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: openvino_docs_pytest path: build/docs/_artifacts/ From 6eb75bbeff18f3475500d835edb5bfb27a11157c Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Tue, 21 Jan 2025 00:20:17 +0100 Subject: [PATCH 60/97] [LPT] ConcatTransformation: support scalar equal DQ propagation through dynamic dimension (#28350) ### Details: Currently, `ConcatTransformation` doesn't support DQ propagation if `concat->get_out_partial_shape()[axis}.is_dynamic()`. However, it is theoretically possible to propagate the DQ if all dequantization constants are **scalar and equal**. This PR introduces this support. 
### Tickets: - *CVS-160325* --- .../src/concat.cpp | 118 +++++++++++++----- .../tests/concat_transformation.cpp | 104 +++++++++++++++ 2 files changed, 192 insertions(+), 30 deletions(-) diff --git a/src/common/low_precision_transformations/src/concat.cpp b/src/common/low_precision_transformations/src/concat.cpp index fe39ed8d4f65b2..db77179a229cd6 100644 --- a/src/common/low_precision_transformations/src/concat.cpp +++ b/src/common/low_precision_transformations/src/concat.cpp @@ -82,31 +82,42 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { allDequantizationShiftConvertAreNotZero = false; } - // constant shape must be broadcastable to the shape on data. - auto broadcastElementWiseConst = [](std::shared_ptr operation, const Shape targetShape) { - auto targetShapeConst = std::make_shared(element::i64, Shape{ targetShape.size() }, targetShape); - auto broadcast = fold(operation, targetShapeConst); - return broadcast; + const auto& concat_out_shape = concat->get_output_partial_shape(0); + const auto axis = ov::util::try_normalize_axis(concat->get_axis(), concat_out_shape.rank(), *concat); + const bool scalar_equal_constants_requested = concat_out_shape[axis].is_dynamic(); + + auto adaptConstForConcatenation = [scalar_equal_constants_requested]( + const std::shared_ptr& constant, + const Shape& targetShape) { + if (scalar_equal_constants_requested) { + OPENVINO_ASSERT(targetShape.empty(), "scalar_equal_constants_requested implies targetShape is empty"); + return std::make_shared(*constant, ov::Shape{}); + } else { + auto targetShapeConst = std::make_shared(element::i64, Shape{ targetShape.size() }, targetShape); + auto bcastedConst = ov::as_type_ptr(fold(constant, targetShapeConst)); + OPENVINO_ASSERT(bcastedConst, "adaptConstForConcatenation must return constant"); + return bcastedConst; + } }; - bool someDqInLowPrecision = std::any_of( + const bool someDqInLowPrecision = std::any_of( layerDequantizations.begin(), layerDequantizations.end(), 
[](const FakeQuantizeDequantization& value) { return value.isLowPrecision(); }); - bool someDqInFpPrecision = std::any_of( + const bool someDqInFpPrecision = std::any_of( layerDequantizations.begin(), layerDequantizations.end(), [](const FakeQuantizeDequantization& value) { return !value.isLowPrecision(); }); - bool DqWithDifferentPrecision = someDqInLowPrecision && someDqInFpPrecision; - const auto axis = - ov::util::try_normalize_axis(concat->get_axis(), concat->get_output_partial_shape(0).rank(), *concat); + const bool DqWithDifferentPrecision = someDqInLowPrecision && someDqInFpPrecision; OutputVector dataNodes; NodeVector convertNodes; - NodeVector subConstants; - NodeVector mulConstants; + + using ConstVector = std::vector>; + ConstVector subConstants; + ConstVector mulConstants; std::shared_ptr subtractConvert = nullptr; for (size_t i = 0; i < layerDequantizations.size(); ++i) { const auto& dequantization = layerDequantizations[i]; @@ -121,8 +132,13 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { convertNodes.push_back(dequantization.convert); } - Shape targetShape(concat->get_input_partial_shape(i).rank().get_length(), 1ul); - targetShape[axis] = concat->get_input_partial_shape(i)[axis].get_length(); + const auto targetShape = [&]() { + if (scalar_equal_constants_requested) + return ov::Shape{}; + Shape targetShape(concat->get_input_partial_shape(i).rank().get_length(), 1ul); + targetShape[axis] = concat->get_input_partial_shape(i)[axis].get_length(); + return targetShape; + }(); if (!allDequantizationShiftAreZero) { auto subtractInput = dequantization.subtract == nullptr ? 
@@ -132,13 +148,15 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { deqPrecision), targetShape, std::vector({ 0.f })) : - broadcastElementWiseConst(dequantization.subtractConstant, targetShape); + adaptConstForConcatenation(dequantization.subtractConstant, targetShape); if (allDequantizationShiftConvertAreNotZero) { if (subtractConvert == nullptr && dequantization.subtractConvert != nullptr) { subtractConvert = dequantization.subtractConvert; } } else if (dequantization.subtractConvert != nullptr) { - subtractInput = foldConvert(subtractInput, dequantization.subtractConvert->get_convert_element_type()); + const auto& dstType = dequantization.subtractConvert->get_convert_element_type(); + subtractInput = ov::as_type_ptr(foldConvert(subtractInput, dstType)); + OPENVINO_ASSERT(subtractInput, "foldConvert must finish successfully for the concatenated subtract constant"); NetworkHelper::copyInfo(dequantization.subtractConvert, subtractInput); } subConstants.push_back(subtractInput); @@ -147,7 +165,7 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { if (!allDequantizationMultiplyAreZero) { mulConstants.push_back(dequantization.multiply == nullptr ? 
std::make_shared(deqPrecision, targetShape, std::vector({ 1.0f })) : - broadcastElementWiseConst(dequantization.multiplyConstant, targetShape)); + adaptConstForConcatenation(dequantization.multiplyConstant, targetShape)); } } @@ -162,10 +180,31 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { lastDequantization = convert; } + auto concat_constants_if_needed = [&](const ConstVector& constants) -> std::shared_ptr { + OPENVINO_ASSERT(!constants.empty(), "concat_constants_if_needed expects non empty constants vec"); + if (constants.size() == 1ul) { + return constants[0]; + } + if (scalar_equal_constants_requested) { + if (ov::shape_size(constants[0]->get_shape()) == 1) { + const auto ref_value = constants[0]->cast_vector(); + if (std::all_of(constants.cbegin() + 1, constants.cend(), [&ref_value](const auto& constant) { + return constant->template cast_vector() == ref_value; + })) { + return constants[0]; + } + } + OPENVINO_THROW("in case of dynamic concatenation dim all constants must be scalar and equal"); + } + ov::OutputVector concatInputs; + std::transform(constants.begin(), constants.end(), std::back_inserter(concatInputs), [](const auto& constant) { + return constant->output(0); + }); + return fold(concatInputs, axis); + }; + if (!subConstants.empty()) { - std::shared_ptr subtractNode = subConstants.size() == 1ul ? - subConstants[0] : - ov::pass::low_precision::fold(subConstants, axis); + auto subtractNode = concat_constants_if_needed(subConstants); if (subtractConvert != nullptr) subtractNode = subtractConvert->clone_with_new_inputs({subtractNode}); const auto subtract = std::make_shared( @@ -181,9 +220,7 @@ bool ConcatTransformation::transform(ov::pass::pattern::Matcher &m) { const auto multiply = std::make_shared>( opset1::Multiply( lastDequantization, - NetworkHelper::toScalarIfPossible(mulConstants.size() == 1ul ? 
- mulConstants[0] : - ov::pass::low_precision::fold(mulConstants, axis))), + NetworkHelper::toScalarIfPossible(concat_constants_if_needed(mulConstants))), layerDequantizations[0].multiply->get_output_element_type(0)); NetworkHelper::copyInfo({ concat, multiply }, multiply); @@ -216,9 +253,32 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) return false; } + auto base_dq_check = [&](const FakeQuantizeDequantization& dequantization) { + return !dequantization.empty() && (!updatePrecisions || dequantization.isLowPrecision()); + }; + const size_t normalizedAxis = ov::util::try_normalize_axis(axis, outRank, *concat); if (outPShape[normalizedAxis].is_dynamic()) { - return false; + // in case of dynamic dimension we can propagate all dequantizations only if they are all scalar and equal, + // since DQ broadcast is impossible (requested shape is unknown), and only single scalar DQ after Concat can be set + const auto dequantization_ref = NetworkHelper::getDequantization(concat, defaultPrecisions, 0); + if (!base_dq_check(dequantization_ref) || !dequantization_ref.isPerTensor()) + return false; + + auto extract_values = [](const std::shared_ptr& constant) { + return constant ? 
constant->cast_vector() : std::vector(); + }; + const auto ref_shifts = extract_values(dequantization_ref.subtractConstant); + const auto ref_scales = extract_values(dequantization_ref.multiplyConstant); + + for (size_t i = 1ul; i < concat->get_input_size(); i++) { + const auto cur_dequantization = NetworkHelper::getDequantization(concat, defaultPrecisions, i); + if (!base_dq_check(dequantization_ref) || + ref_shifts != extract_values(cur_dequantization.subtractConstant) || + ref_scales != extract_values(cur_dequantization.multiplyConstant)) + return false; + } + return true; } auto checkConstShape = [&normalizedAxis, &outRank](const std::shared_ptr& constant) { @@ -235,7 +295,6 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) }; const auto check_const_precision = []( - const FakeQuantizeDequantization& dequantization, const std::shared_ptr& constant, ov::element::Type& const_precision) { if (constant == nullptr) { @@ -253,9 +312,8 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) for (size_t i = 0ul; i < concat->get_input_size(); i++) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(concat, defaultPrecisions, i); - if (dequantization.empty() || (updatePrecisions && !dequantization.isLowPrecision())) { + if (!base_dq_check(dequantization)) return false; - } if (((dequantization.subtract != nullptr) && (!checkConstShape(dequantization.subtractConstant))) || ((dequantization.multiply != nullptr) && (!checkConstShape(dequantization.multiplyConstant)))) { @@ -268,9 +326,9 @@ bool ConcatTransformation::canBeTransformed(const std::shared_ptr& layer) return false; } - if (!check_const_precision(dequantization, dequantization.subtractConvert, const_precision) || - ((dequantization.subtractConvert == nullptr) && !check_const_precision(dequantization, dequantization.subtractConstant, const_precision)) || - !check_const_precision(dequantization, dequantization.multiplyConstant, 
const_precision)) { + if (!check_const_precision(dequantization.subtractConvert, const_precision) || + ((dequantization.subtractConvert == nullptr) && !check_const_precision(dequantization.subtractConstant, const_precision)) || + !check_const_precision(dequantization.multiplyConstant, const_precision)) { return false; } } diff --git a/src/common/low_precision_transformations/tests/concat_transformation.cpp b/src/common/low_precision_transformations/tests/concat_transformation.cpp index 4d6973f3f440cf..d833e21ad81584 100644 --- a/src/common/low_precision_transformations/tests/concat_transformation.cpp +++ b/src/common/low_precision_transformations/tests/concat_transformation.cpp @@ -128,6 +128,110 @@ const std::vector testValues = { {ov::element::f32, {128.f}, {0.1f}} } }, + // dynamic concatenation axis, but the same per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {0.1f}} + } + }, + { + ov::element::u8, + {{}, {}}, + ov::element::u8, + {ov::element::f32, {128.f}, {0.1f}} + } + }, + // dynamic concatenation axis, but the same per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {}, {{0.1f}, ov::element::f32, {1, 1, 1}}}, + {ov::element::f32, {}, {{0.1f}, ov::element::f32, {1, 1, 1}}} + } + }, + { + ov::element::u8, + {{}, {}}, + ov::element::u8, + {ov::element::f32, {}, {0.1f}} + } + }, + // dynamic concatenation axis, dq don't match + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {}, {0.1f}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {}, {0.1f}} + }, + ov::element::f32, + {} + } + }, + // dynamic 
concatenation axis, different per-tensor values + { + {{1, -1, 4, 4}, {1, -1, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {10.f}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {128.f}, {10.f}} + }, + ov::element::f32, + {} + } + }, + // dynamic output concatenation axis, but one input dim is static + { + {{1, -1, 4, 4}, {1, 3, 4, 4}}, + std::int64_t{1}, + LayerTransformation::createParamsU8I8(), + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {{128.f, 64.f, 128.f}}, {{10.f, 1.f, 10.f}}} + } + }, + { + ov::element::u8, + { + {ov::element::f32, {128.f}, {0.1f}}, + {ov::element::f32, {{128.f, 64.f, 128.f}}, {{10.f, 1.f, 10.f}}} + }, + ov::element::f32, + {} + } + }, { {{1, 3, 4, 4}, {1, 3, 4, 4}}, std::int64_t{1}, From e8d01dc46fd8c74feb0ae79ea28809c9acee1290 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 21 Jan 2025 10:52:13 +0800 Subject: [PATCH 61/97] fix CID issue1590313, 1590213, 1588539, 1588497, 1588220 (#28541) ### Details: - *fix CID issue1590313, 1590213, 1588539, 1588497, 1588220* ### Tickets: - *ticket-id* --- .../openvino/runtime/threading/istreams_executor.hpp | 2 +- src/inference/src/dev/threading/cpu_streams_executor.cpp | 4 ++-- src/inference/src/dev/threading/istreams_executor.cpp | 2 +- src/plugins/intel_cpu/src/cpu_streams_calculation.cpp | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp index efb9d41a4dd5a6..18e7216cf22e0d 100644 --- a/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp +++ b/src/inference/dev_api/openvino/runtime/threading/istreams_executor.hpp @@ -150,7 +150,7 @@ class OPENVINO_RUNTIME_API IStreamsExecutor : virtual public 
ITaskExecutor { _cpu_pinning{cpu_pinning}, _cores_limit{cores_limit}, _streams_info_table{std::move(streams_info_table)}, - _rank{rank}, + _rank{std::move(rank)}, _add_lock(add_lock) { update_executor_config(_add_lock); } diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index a10709aa6db3df..0313c4f5aabc6b 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -36,7 +36,7 @@ struct CPUStreamsExecutor::Impl { : custom::task_scheduler_observer(arena), _mask{std::move(mask)}, _ncpus(ncpus), - _cpu_ids(cpu_ids) {} + _cpu_ids(std::move(cpu_ids)) {} void on_scheduler_entry(bool) override { pin_thread_to_vacant_core(tbb::this_task_arena::current_thread_index(), _threadBindingStep, @@ -167,7 +167,7 @@ struct CPUStreamsExecutor::Impl { _rank = _impl->_config.get_rank(); get_cur_stream_info(stream_id, _impl->_config.get_cpu_pinning(), - org_proc_type_table, + std::move(org_proc_type_table), _impl->_config.get_streams_info_table(), stream_type, concurrency, diff --git a/src/inference/src/dev/threading/istreams_executor.cpp b/src/inference/src/dev/threading/istreams_executor.cpp index 59201baadfd387..663c7d138b397f 100644 --- a/src/inference/src/dev/threading/istreams_executor.cpp +++ b/src/inference/src/dev/threading/istreams_executor.cpp @@ -234,7 +234,7 @@ void IStreamsExecutor::Config::update_executor_config() { if (_thread_preferred_core_type == ov::hint::SchedulingCoreType::ECORE_ONLY) { stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC; stream_info[NUMBER_OF_STREAMS] = _streams; - _streams_info_table.push_back(stream_info); + _streams_info_table.push_back(std::move(stream_info)); } else { int start = proc_type_table.size() > 1 ? 
1 : 0; std::vector core_types; diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 6b68afffa711e7..8b2c7c620923fe 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -514,7 +514,7 @@ std::vector> get_streams_info_table( ALL_PROC); } else if (stream_info[PROC_TYPE] == MAIN_CORE_PROC) { if (stream_info[THREADS_PER_STREAM] == proc_socket_table[0][MAIN_CORE_PROC]) { - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } else { stream_info[PROC_TYPE] = ALL_PROC; streams_info_table.push_back(stream_info); @@ -524,10 +524,10 @@ std::vector> get_streams_info_table( streams_info_table.push_back(stream_info); stream_info[PROC_TYPE] = HYPER_THREADING_PROC; stream_info[THREADS_PER_STREAM] = proc_socket_table[0][HYPER_THREADING_PROC]; - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } } else { - streams_info_table.push_back(stream_info); + streams_info_table.push_back(std::move(stream_info)); } } From 15914a9ca0f88206a5a46a6484c19ffa01586e08 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Tue, 21 Jan 2025 10:52:49 +0800 Subject: [PATCH 62/97] fix openvino panic when run CPU inference on XEON HBM platform (#28426) ### Details: - *skip HBM numa node on XEON HBM platform* ### Tickets: - *[issues-28335](https://github.com/openvinotoolkit/openvino/issues/28335)* --- src/inference/src/os/lin/lin_system_conf.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/inference/src/os/lin/lin_system_conf.cpp b/src/inference/src/os/lin/lin_system_conf.cpp index a235227a4b56f0..f809f15a362943 100644 --- a/src/inference/src/os/lin/lin_system_conf.cpp +++ b/src/inference/src/os/lin/lin_system_conf.cpp @@ -140,7 +140,9 @@ CPU::CPU() { } std::string cache_info; std::getline(cache_file, cache_info); - 
node_info_table.emplace_back(std::move(cache_info)); + if (cache_info.size() > 0) { + node_info_table.emplace_back(std::move(cache_info)); + } node_index++; } }; From 0aee40dc6187cca73edbb162f2e1754ce97e260c Mon Sep 17 00:00:00 2001 From: Jade Cho Date: Tue, 21 Jan 2025 13:16:48 +0900 Subject: [PATCH 63/97] [GPU] Fix a group conv unit test fail. (#28511) ### Tickets: - *160644* --- .../cl_kernels/convolution_gpu_imad.cl | 12 ++++++------ .../tests/unit/test_cases/convolution_gpu_test.cpp | 4 +--- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl index 0cf873f570cf8e..cf442ee80b9bce 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/convolution_gpu_imad.cl @@ -282,12 +282,6 @@ KERNEL (fused_convolution_eltwise_gpu_imad)( out[br * OUT_BLOCK_WIDTH + bc] = TO_ACCUMULATOR_TYPE(IMAD(out[br * OUT_BLOCK_WIDTH + bc], inputs, AS_FILTER_TYPE_4(w[wi]))); - #ifdef ASYMMETRIC_WEIGHTS_QUANTIZATION - ACCUMULATOR_TYPE dotProdAxWZP = 0; - dotProdAxWZP = TO_ACCUMULATOR_TYPE(IMAD(dotProdAxWZP, inputs, AS_FILTER_TYPE_4(weights_zp_val))); - out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAxWZP; - #endif - #if !defined COMPENSATION_TERM && defined ASYMMETRIC_DATA_QUANTIZATION out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAZPxW; #endif @@ -297,6 +291,12 @@ KERNEL (fused_convolution_eltwise_gpu_imad)( defined ASYMMETRIC_WEIGHTS_QUANTIZATION) out[br * OUT_BLOCK_WIDTH + bc] += dotProdAZPxWZP; #endif + + #ifdef ASYMMETRIC_WEIGHTS_QUANTIZATION + ACCUMULATOR_TYPE dotProdAxWZP = 0; + dotProdAxWZP = TO_ACCUMULATOR_TYPE(IMAD(dotProdAxWZP, inputs, AS_FILTER_TYPE_4(weights_zp_val))); + out[br * OUT_BLOCK_WIDTH + bc] -= dotProdAxWZP; + #endif } } wi++; diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp 
b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp index 30d12c490e3d15..bb952e860d0dfe 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/convolution_gpu_test.cpp @@ -7685,9 +7685,7 @@ INSTANTIATE_TEST_SUITE_P(convolution_grouped_fsv4_fsv16, TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, false, false, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, false, true, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, false, false, format::b_fs_yx_fsv4, ""), - - // TODO: It will be fix soon, test reference is wrong in new driver. - // TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, true, false, format::b_fs_yx_fsv4, ""), + TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, true, false, format::b_fs_yx_fsv4, ""), TestParamType_grouped_convolution_gpu(3, 1, 1, 80, 252, 3, 1, 1, 4, 1, 1, true, false, true, format::b_fs_yx_fsv4, ""), // Format: b_fs_yx_fsv16 From c3bdeaf422ec536d1ae5423c3d2d2bfee337cf98 Mon Sep 17 00:00:00 2001 From: "Min, Byungil" Date: Tue, 21 Jan 2025 13:41:06 +0900 Subject: [PATCH 64/97] [GPU] Fix error onednn grouped size dyn-quan with enabled asymmetric config (#28497) + Fixed runtime error of grouped size dyn-quan of onednn if DynamicQuantizeAsym is enabled + Disable dyn-quan if DynamicQuantizeAsym is enabled with grouped size ### Details: - Disable dyn-quan if DynamicQuantizeAsym is enabled with grouped size ### Tickets: - CVS-160327 Signed-off-by: Min, Byung il --- .../transformations/dynamic_quantize_fully_connected.cpp | 4 +++- .../intel_gpu/src/plugin/transformations_pipeline.cpp | 8 ++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp 
b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp index a8eb149ff28646..f5607e98ab0f6f 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/dynamic_quantize_fully_connected.cpp @@ -56,7 +56,9 @@ DynamicQuantizeFullyConnected::DynamicQuantizeFullyConnected(uint64_t group_size config.scale_dt = element::f16; config.group_sizes = shape_group_size; - GPU_DEBUG_IF(debug_config->dynamic_quantize_asym) { + // AZP does not support grouped size dyn-quan + // XXX: This is currently wrapped as GPU_DEBUG_IF as dynamic_quantize_asym is not exposed through public API. + GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && group_size == UINT64_MAX) { config.quantization_type = QuantizationType::Asymmetric; config.quantization_dt = element::u8; config.zp_dt = element::u8; // it supports u8 only now diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 9094354a03fbe8..a2bdac78fcb805 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -1077,6 +1077,13 @@ void TransformationsPipeline::apply(std::shared_ptr func) { return true; } + // AZP does not support grouped size dyn-quan + GPU_DEBUG_IF(debug_config->dynamic_quantize_asym && (dynamic_quantization_group_size != UINT64_MAX)) { + GPU_DEBUG_TRACE << root->get_friendly_name() << " dyn_quan is turned off: asym quantization does not support grouped quantization" << + " ('DynamicQuantizeAsym' is enabled with grouped size dyn-quan)" << std::endl; + return true; + } + bool has_wzp = root->get_input_size() > 4; if ((root->get_input_element_type(1) == ov::element::i8 || root->get_input_element_type(1) == ov::element::u8) && has_wzp @@ -1085,6 +1092,7 @@ void TransformationsPipeline::apply(std::shared_ptr func) { " asym 8bit 
weight does not support grouped quantization" << std::endl; return true; } + return false; }); manager.register_pass(dynamic_quantization_group_size); From fb1838fbe8d2d1054ff331c5958397b30df5df6d Mon Sep 17 00:00:00 2001 From: Srinjoy Dutta <114402816+srinjoydutta03@users.noreply.github.com> Date: Tue, 21 Jan 2025 10:41:15 +0530 Subject: [PATCH 65/97] [CPU][ARM64] Implement JIT Emitter for Eltwise Less Operation (#28494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Details: - Implemented and added `jit_less_emitter` derived class for element wise less operation - Added entry `Algorithm::EltwiseLess`, in executors/aarch64 as one of the supported algorithms - Added entry in the `get_supported_precisions` and `create_eltwise_emitters` in kernel/aarch64 ### Tests - Passed local tests using `./bin/aarch64/Release/ov_cpu_func_tests --gtest_filter='*smoke*ComparisonLayerTest*Less*'` Screenshot 2025-01-16 at 7 23 39 PM ### Tickets: - Closes #24415 --- .../plugin/aarch64/jit_eltwise_emitters.cpp | 59 +++++++++++++++++++ .../plugin/aarch64/jit_eltwise_emitters.hpp | 28 +++++++++ .../nodes/executors/aarch64/jit_eltwise.cpp | 1 + .../aarch64/jit_uni_eltwise_generic.cpp | 2 + 4 files changed, 90 insertions(+) diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp index a2041718a14875..b1e64cd25ba0b4 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.cpp @@ -1363,6 +1363,65 @@ void jit_is_nan_emitter::register_table_entries() { push_arg_entry_of("zero", 0x00000000, true); } +/// LESS /// +jit_less_emitter::jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& node) + : jit_emitter(host, host_isa, node, 
get_arithmetic_binary_exec_precision(node)) { + prepare_table(); +} + +jit_less_emitter::jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc) + : jit_emitter(host, host_isa, exec_prc) { + prepare_table(); +} + +size_t jit_less_emitter::get_inputs_count() const { + return 2; +} + +size_t jit_less_emitter::get_aux_vecs_count() const { + return 1; +} + +size_t jit_less_emitter::get_aux_gprs_count() const { + return 1; +} + +void jit_less_emitter::emit_impl(const std::vector& in_vec_idxs, + const std::vector& out_vec_idxs) const { + if (host_isa_ == dnnl::impl::cpu::aarch64::asimd) { + emit_isa(in_vec_idxs, out_vec_idxs); + } else { + OV_CPU_JIT_EMITTER_THROW("Can't create jit eltwise kernel"); + } +} + +template +void jit_less_emitter::emit_isa(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const { + OV_CPU_JIT_EMITTER_ASSERT(exec_prc_ == ov::element::f32, "unsupported precision: " + exec_prc_.to_string()); + + using TReg = typename dnnl::impl::cpu::aarch64::cpu_isa_traits::TReg; + const TReg src1 = TReg(in_vec_idxs[0]); + const TReg src2 = TReg(in_vec_idxs[1]); + const TReg dst = TReg(out_vec_idxs[0]); + const TReg aux = TReg(aux_vec_idxs[0]); + + h->fcmgt(dst.s, src2.s, src1.s); + h->ld1r(aux.s, table_val2("one")); + h->and_(dst.b16, dst.b16, aux.b16); +} + +void jit_less_emitter::register_table_entries() { + push_arg_entry_of("one", 0x3f800000, true); +} + +std::set> jit_less_emitter::get_supported_precisions(const std::shared_ptr& node) { + return {{element::f32, element::f32}}; +} + /// LESS_EQUAL /// jit_less_equal_emitter::jit_less_equal_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp index c4c70c6651522d..5d0e00e2da42b0 100644 --- 
a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_eltwise_emitters.hpp @@ -608,6 +608,34 @@ class jit_is_inf_emitter : public jit_emitter { bool detect_positive; }; +class jit_less_emitter : public jit_emitter { +public: + jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const ov::element::Type exec_prc = ov::element::f32); + + jit_less_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, + dnnl::impl::cpu::aarch64::cpu_isa_t host_isa, + const std::shared_ptr& n); + + size_t get_inputs_count() const override; + + size_t get_aux_vecs_count() const override; + + size_t get_aux_gprs_count() const override; + + static std::set> get_supported_precisions( + const std::shared_ptr& node = nullptr); + +private: + void emit_impl(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const override; + + template + void emit_isa(const std::vector& in_vec_idxs, const std::vector& out_vec_idxs) const; + + void register_table_entries() override; +}; + class jit_less_equal_emitter : public jit_emitter { public: jit_less_equal_emitter(dnnl::impl::cpu::aarch64::jit_generator* host, diff --git a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp index d5b893b67bf2b1..8d5e905f10e86a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/aarch64/jit_eltwise.cpp @@ -38,6 +38,7 @@ bool JitEltwiseExecutor::isSupported(const Algorithm& algorithm, Algorithm::EltwiseIsFinite, Algorithm::EltwiseIsInf, Algorithm::EltwiseIsNaN, + Algorithm::EltwiseLess, Algorithm::EltwiseLessEqual, Algorithm::EltwiseLogicalAnd, Algorithm::EltwiseLogicalOr, diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp 
b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp index 66db416ec7c732..5e69cfb36b5462 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/jit_uni_eltwise_generic.cpp @@ -670,6 +670,7 @@ std::shared_ptr jit_uni_eltwise_generic::create_eltwise_emitte OV_CASE(Algorithm::EltwiseHswish, ov::intel_cpu::aarch64::jit_hswish_emitter), OV_CASE(Algorithm::EltwiseIsFinite, ov::intel_cpu::aarch64::jit_is_finite_emitter), OV_CASE(Algorithm::EltwiseIsInf, ov::intel_cpu::aarch64::jit_is_inf_emitter), + OV_CASE(Algorithm::EltwiseLess, ov::intel_cpu::aarch64::jit_less_emitter), OV_CASE(Algorithm::EltwiseLessEqual, ov::intel_cpu::aarch64::jit_less_equal_emitter), OV_CASE(Algorithm::EltwiseLogicalAnd, ov::intel_cpu::aarch64::jit_logical_and_emitter), OV_CASE(Algorithm::EltwiseLogicalOr, ov::intel_cpu::aarch64::jit_logical_or_emitter), @@ -863,6 +864,7 @@ std::set> eltwise_precision_helper::get_supported_pre OV_CASE(Algorithm::EltwiseIsFinite, jit_is_finite_emitter), OV_CASE(Algorithm::EltwiseIsInf, jit_is_inf_emitter), OV_CASE(Algorithm::EltwiseIsNaN, jit_is_nan_emitter), + OV_CASE(Algorithm::EltwiseLess, jit_less_emitter), OV_CASE(Algorithm::EltwiseLessEqual, jit_less_equal_emitter), OV_CASE(Algorithm::EltwiseLogicalAnd, jit_logical_and_emitter), OV_CASE(Algorithm::EltwiseLogicalOr, jit_logical_or_emitter), From 8d5f583bc7e56152440192806b3acda619a997fe Mon Sep 17 00:00:00 2001 From: Gorokhov Dmitriy Date: Tue, 21 Jan 2025 09:49:43 +0400 Subject: [PATCH 66/97] [CPU] Fixed FC dynamic quantization accuracy issue (#28554) ### Details: - Cherry-picks: https://github.com/openvinotoolkit/openvino/pull/28553 --- .../src/x64/matmul_weights_decompression.cpp | 11 ++++++++--- src/plugins/intel_cpu/thirdparty/onednn | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git 
a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp index e2e04501368ac7..1ac681b3b6eff2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/matmul_weights_decompression.cpp @@ -211,6 +211,11 @@ const std::vector input_shapes_basic_dyn_quant = }; const std::vector weights_precisions_dyn_quant = {ov::element::u8, ov::element::u4}; +const std::vector fusing_params_dyn_quant{ + emptyFusingSpec, + fusingBias, // bias is hanlded in separate code-path with post-ops + fusingSwish // max amount of post-op regs (which reduces available accum regs) +}; std::vector filter_additional_config_dyn_quant() { std::vector additional_config = { @@ -232,7 +237,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_non_default_dyn_quant_gro ::testing::ValuesIn(decompression_subtract_type), ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_dyn_quant()), - ::testing::ValuesIn(fusing_params), + ::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -249,7 +254,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_sym_non_default_dyn_quant ::testing::Values(DecompressionType::empty), ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_dyn_quant()), - ::testing::ValuesIn(fusing_params), + ::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -265,7 +270,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_mxfp4, // todo: zero points converted to fp32 for reshape == true case ::testing::Values(false), ::testing::ValuesIn(filter_additional_config_basic()), - ::testing::ValuesIn(fusing_params), + 
::testing::ValuesIn(fusing_params_dyn_quant), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); diff --git a/src/plugins/intel_cpu/thirdparty/onednn b/src/plugins/intel_cpu/thirdparty/onednn index c7ecd8fc43610c..1789b1e0ae441d 160000 --- a/src/plugins/intel_cpu/thirdparty/onednn +++ b/src/plugins/intel_cpu/thirdparty/onednn @@ -1 +1 @@ -Subproject commit c7ecd8fc43610c82af317c178d28630bd948cb04 +Subproject commit 1789b1e0ae441de15d793123003a900a35d1dc71 From 6aab9ccf15ab41acae4583798bfd9f151f155c35 Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Tue, 21 Jan 2025 10:13:59 +0200 Subject: [PATCH 67/97] [NPU] Adding extra features for the state tensors (#28414) ### Details: Add the last features to the state tensors: - Update MutableCommandList instead of memcpy if memory was allocated in the same L0 context - set_shape is available for state tensors as well. ### Tickets: - *CVS-160364* --------- Signed-off-by: Bogdan Pereanu --- .../backend/include/zero_infer_request.hpp | 8 + .../backend/include/zero_variable_state.hpp | 77 ++++ .../src/backend/src/zero_infer_request.cpp | 276 ++++++++----- .../src/backend/src/zero_pipeline.cpp | 21 +- .../src/backend/src/zero_variable_state.cpp | 80 ++++ .../intel_npu/common/sync_infer_request.hpp | 2 + .../intel_npu/common/variable_state.hpp | 7 +- .../src/common/src/sync_infer_request.cpp | 7 +- .../intel_npu/utils/zero/zero_utils.hpp | 27 ++ .../functional/behavior/infer_request_run.cpp | 2 +- .../functional/behavior/infer_request_run.hpp | 99 +++++ .../remote_tensor_tests/remote_run.cpp | 2 +- .../remote_tensor_tests/remote_run.hpp | 374 ++++++++++++++++++ .../tests/functional/common/utils.cpp | 33 ++ .../tests/functional/common/utils.hpp | 11 +- 15 files changed, 891 insertions(+), 135 deletions(-) create mode 100644 src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp create mode 100644 src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp diff --git 
a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp index aaaa128518b34f..c40142c75608b8 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_infer_request.hpp @@ -67,11 +67,19 @@ class ZeroInferRequest final : public SyncInferRequest { const ov::Shape& shape, const ov::Allocator& allocator = {}) const override; + void add_state(const IODescriptor& descriptor, size_t tensorIndex) const override; + + void update_pipeline_if_memory_changed(); + void update_states_if_memory_changed(); + const std::shared_ptr _initStructs; const std::shared_ptr _graph; const Config _config; Logger _logger; + const std::vector& _graphInputDescriptors; + const std::vector& _graphOutputDescriptors; + // A copy of each tensor is needed to maintain the original L0 memory allocation in case the user provides another // memory area for the tensor. mutable std::vector>> _levelZeroInputTensors; diff --git a/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp b/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp new file mode 100644 index 00000000000000..c7c03bcfe4c8d8 --- /dev/null +++ b/src/plugins/intel_npu/src/backend/include/zero_variable_state.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "intel_npu/config/config.hpp" +#include "intel_npu/utils/logger/logger.hpp" +#include "intel_npu/utils/zero/zero_init.hpp" +#include "openvino/runtime/ivariable_state.hpp" + +namespace intel_npu { + +/** + * @brief Interface for zero variable state implementation + * @note In case the memory was allocated in the same level zero context use that memory, otherwise use memcpy at infer + * time. Also, get correct data if remote tensor is used. 
+ */ +class ZeroVariableState final : public ov::IVariableState { +public: + explicit ZeroVariableState(const std::shared_ptr& init_structs, + const std::string& name, + const ov::SoPtr& tensor, + size_t tensor_index, + size_t related_tensor_index, + const Config& config); + + void set_state(const ov::SoPtr& new_state) override; + + void reset() override; + + /** + * @brief Get input tensor index used internally for the state + */ + size_t get_tensor_index() const; + + /** + * @brief Get output tensor index used internally for the state + * @details The related tensors are defined by state input, state output pairs. + */ + size_t get_related_tensor_index() const; + + /** + * @brief Get acknowledge if the tensor was updated + */ + bool tensor_was_updated() const; + + /** + * @brief Reset tensor updated flag + */ + void reset_tensor_updated_flag(); + + /** + * @brief Get acknowledge if the zero tensor was updated + * @details In case the memory was allocated in the same level zero context update the zero tensor + */ + bool zero_tensor_should_be_updated() const; + + /** + * @brief Reset zero tensor updated flag + */ + void reset_zero_tensor_updated_flag(); + + ~ZeroVariableState() override = default; + +private: + std::shared_ptr _init_structs; + size_t _tensor_index; + size_t _related_tensor_index; + + bool _tensor_updated = false; + bool _zero_tensor_updated = false; + + Logger _logger; +}; + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp index b7049f62af6d31..034f69f63e4158 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp @@ -13,6 +13,7 @@ #include "openvino/op/util/op_types.hpp" #include "openvino/runtime/intel_npu/remote_properties.hpp" #include "zero_memory.hpp" +#include "zero_variable_state.hpp" using namespace intel_npu; @@ -63,33 +64,6 @@ void 
check_level_zero_attributes_match(const IODescriptor& ioDescriptor, const A } } -template -Type extract_object(const ov::AnyMap& params, const ov::Property& p) { - auto itrHandle = params.find(p.name()); - ov::Any res = nullptr; - if (itrHandle == params.end()) { - OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); - } - res = itrHandle->second; - return res.as(); -} - -bool memory_was_allocated_in_the_same_l0_context(ze_context_handle_t hContext, const void* ptr) { - ze_memory_allocation_properties_t desc = {}; - desc.stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES; - auto res = intel_npu::zeMemGetAllocProperties(hContext, ptr, &desc, nullptr); - if (res == ZE_RESULT_SUCCESS) { - if (desc.id) { - if ((desc.type & ZE_MEMORY_TYPE_HOST) || (desc.type & ZE_MEMORY_TYPE_DEVICE) || - (desc.type & ZE_MEMORY_TYPE_SHARED)) { - return true; - } - } - } - - return false; -} - } // namespace //------------------------------------------------------------------------------ @@ -101,13 +75,13 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& _graph(compiledModel->get_graph()), _config(config), _logger("ZeroInferRequest", config.get()), + _graphInputDescriptors(_graph->get_input_descriptors()), + _graphOutputDescriptors(_graph->get_output_descriptors()), _levelZeroInputTensors(_metadata.inputs.size(), std::vector>(1, nullptr)), _levelZeroOutputTensors(_metadata.outputs.size(), nullptr), _profilingPool(_initStructs, _graph, zeroProfiling::POOL_SIZE), _profilingQuery(_initStructs, 0) { _logger.debug("ZeroInferRequest::ZeroInferRequest - SyncInferRequest"); - const std::vector& executorInputDescriptors = _graph->get_input_descriptors(); - const std::vector& executorOutputDescriptors = _graph->get_output_descriptors(); auto proftype = config.get(); if (proftype == ov::intel_npu::ProfilingType::INFER) { @@ -127,7 +101,7 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& size_t ioIndex = 0; for (const IODescriptor& inputDescriptor : 
_metadata.inputs) { - check_level_zero_attributes_match(inputDescriptor, executorInputDescriptors.at(ioIndex)); + check_level_zero_attributes_match(inputDescriptor, _graphInputDescriptors.at(ioIndex)); if (!(inputDescriptor.isStateInput || inputDescriptor.isShapeTensor)) { ++ioIndex; @@ -142,7 +116,7 @@ ZeroInferRequest::ZeroInferRequest(const std::shared_ptr& ioIndex = 0; for (const IODescriptor& outputDescriptor : _metadata.outputs) { - check_level_zero_attributes_match(outputDescriptor, executorOutputDescriptors.at(ioIndex)); + check_level_zero_attributes_match(outputDescriptor, _graphOutputDescriptors.at(ioIndex)); if (!(outputDescriptor.isStateOutput || outputDescriptor.isShapeTensor)) { ++ioIndex; @@ -203,6 +177,29 @@ void ZeroInferRequest::create_pipeline() { auto groupOrdinal = zeroUtils::findGroupOrdinal(_initStructs->getDevice(), _properties); _logger.debug("ZeroInferRequest::create_pipeline - init completed"); + // Set new tensors and reset variable state flag if memory updated before creating the pipeline + _logger.debug("ZeroInferRequest::create_pipeline - set new tensors and reset variable state flag if memory updated " + "before creating the pipeline"); + for (const auto& variableState : _variableStates) { + auto zeroState = std::dynamic_pointer_cast(variableState._ptr); + + OPENVINO_ASSERT(zeroState != nullptr, "State is not compatible with NPU plugin"); + + if (zeroState->tensor_was_updated()) { + get_user_input(zeroState->get_tensor_index()) = zeroState->get_state(); + _userOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state(); + + zeroState->reset_tensor_updated_flag(); + + if (zeroState->zero_tensor_should_be_updated()) { + zeroState->reset_zero_tensor_updated_flag(); + + get_level_zero_input(zeroState->get_tensor_index()) = zeroState->get_state()._ptr; + _levelZeroOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state()._ptr; + } + } + } + _logger.debug("ZeroInferRequest::create_pipeline - 
constructing pipeline"); // Construct pipeline @@ -228,7 +225,7 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr& tenso bool updateCommandListArg = false; OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation"); - if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) { _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context"); levelZeroTensors = tensor; updateCommandListArg = true; @@ -268,7 +265,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr( - extract_object(tensor->get_context()->get_property(), ov::intel_npu::l0_context)); + zeroUtils::extract_object(tensor->get_context()->get_property(), ov::intel_npu::l0_context)); if (_initStructs->getContext() != l0_context) { OPENVINO_THROW("Using different context for creating the tensor is not supported"); } @@ -279,7 +276,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptrget_properties(), ov::intel_npu::mem_handle); + auto data = zeroUtils::extract_object(tensor->get_properties(), ov::intel_npu::mem_handle); OPENVINO_ASSERT(data, "Empty buffer"); OV_ITT_TASK_NEXT(ZERO_SET_REMOTE_TENSOR, "updateCommandList"); @@ -371,7 +368,8 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, bool tensorHasSameL0Context = false; OV_ITT_TASK_NEXT(SET_TENSORS, "check_data_allocation"); - if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensors[i]->data())) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), + tensors[i]->data())) { _logger.debug("ZeroInferRequest::set_tensors - tensor was created in the same L0 context"); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; @@ -390,7 +388,7 @@ void ZeroInferRequest::set_tensors(const ov::Output& port, } else { _logger.debug("ZeroInferRequest::set_tensors - 
remote tensor is used"); - data = extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; } @@ -453,6 +451,112 @@ ov::SoPtr ZeroInferRequest::get_tensor(const ov::Output(levelZeroTensor.at(SINGLE_TENSOR)); + + if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || + is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update input graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_input_descriptors().at(ioIndex).idx, + zeroTensor->data(), + zeroTensor->get_byte_size()); + closePipeline = true; + + if (!inputDescriptor.isStateInput) { + zeroTensor->reset_memory_flag(); + } + } + + ++ioIndex; + } + + ioIndex = 0; + + for (const auto& levelZeroTensor : _levelZeroOutputTensors) { + const auto outputDescriptor = _metadata.outputs.at(ioIndex); + auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor); + + if (outputDescriptor.isShapeTensor || is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) { + ++ioIndex; + continue; + } + + if (zeroTensor->memory_address_changed()) { + _logger.debug("Update output graph descriptor with the new tensor"); + OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); + + _pipeline->updateCommandList(_graph->get_output_descriptors().at(ioIndex).idx, + zeroTensor->data(), + zeroTensor->get_byte_size()); + closePipeline = true; + + zeroTensor->reset_memory_flag(); + } + + ++ioIndex; + } + + if (closePipeline) { + _pipeline->closeCommandList(); + } +} + +void ZeroInferRequest::update_states_if_memory_changed() { + bool closePipeline = false; + + for (const auto& variableState : _variableStates) { + auto zeroState = 
std::dynamic_pointer_cast(variableState._ptr); + + OPENVINO_ASSERT(zeroState != nullptr, "State is not compatible with NPU plugin"); + + if (zeroState->tensor_was_updated()) { + get_user_input(zeroState->get_tensor_index()) = zeroState->get_state(); + _userOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state(); + + zeroState->reset_tensor_updated_flag(); + + if (zeroState->zero_tensor_should_be_updated()) { + auto remoteTensor = std::dynamic_pointer_cast(zeroState->get_state()._ptr); + + void* userBuffer = !remoteTensor ? zeroState->get_state()->data() + : zeroUtils::extract_object(remoteTensor->get_properties(), + ov::intel_npu::mem_handle); + + _pipeline->updateCommandList(_graphInputDescriptors.at(zeroState->get_tensor_index()).idx, + userBuffer, + zeroState->get_state()->get_byte_size()); + + _pipeline->updateCommandList(_graphOutputDescriptors.at(zeroState->get_related_tensor_index()).idx, + userBuffer, + zeroState->get_state()->get_byte_size()); + + zeroState->reset_zero_tensor_updated_flag(); + + get_level_zero_input(zeroState->get_tensor_index()) = zeroState->get_state()._ptr; + _levelZeroOutputTensors.at(zeroState->get_related_tensor_index()) = zeroState->get_state()._ptr; + + closePipeline = true; + } + } + } + + if (closePipeline) { + _pipeline->closeCommandList(); + } +} + void ZeroInferRequest::infer() { if (_config.get()) { OPENVINO_THROW("Only start async is supported when RUN_INFERENCES_SEQUENTIALLY is enabled!"); @@ -476,64 +580,8 @@ void ZeroInferRequest::infer_async() { _pipelineIsCreated = true; } else { if (_initStructs->getMutableCommandListVersion()) { - bool closePipeline = false; - size_t ioIndex = 0; - - for (const auto& levelZeroTensor : _levelZeroInputTensors) { - const auto inputDescriptor = _metadata.inputs.at(ioIndex); - auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor.at(SINGLE_TENSOR)); - - if (is_batched_input(ioIndex) || inputDescriptor.isShapeTensor || inputDescriptor.isStateInput || - 
is_remote_tensor(levelZeroTensor.at(SINGLE_TENSOR)) || zeroTensor == nullptr) { - ++ioIndex; - continue; - } - - if (zeroTensor->memory_address_changed()) { - _logger.debug("Update input graph descriptor with the new tensor"); - OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); - - _pipeline->updateCommandList(_graph->get_input_descriptors().at(ioIndex).idx, - zeroTensor->data(), - zeroTensor->get_byte_size()); - closePipeline = true; - - zeroTensor->reset_memory_flag(); - } - - ++ioIndex; - } - - ioIndex = 0; - - for (const auto& levelZeroTensor : _levelZeroOutputTensors) { - const auto outputDescriptor = _metadata.outputs.at(ioIndex); - auto zeroTensor = std::dynamic_pointer_cast(levelZeroTensor); - - if (outputDescriptor.isShapeTensor || outputDescriptor.isStateOutput || - is_remote_tensor(levelZeroTensor) || zeroTensor == nullptr) { - ++ioIndex; - continue; - } - - if (zeroTensor->memory_address_changed()) { - _logger.debug("Update output graph descriptor with the new tensor"); - OPENVINO_ASSERT(zeroTensor->data(), "Empty buffer"); - - _pipeline->updateCommandList(_graph->get_output_descriptors().at(ioIndex).idx, - zeroTensor->data(), - zeroTensor->get_byte_size()); - closePipeline = true; - - zeroTensor->reset_memory_flag(); - } - - ++ioIndex; - } - - if (closePipeline) { - _pipeline->closeCommandList(); - } + update_pipeline_if_memory_changed(); + update_states_if_memory_changed(); } } } @@ -561,10 +609,10 @@ void ZeroInferRequest::infer_async() { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = - !userBatchRemoteTensor - ? userTensor.at(i)->data() - : extract_object(userBatchRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor + ? 
userTensor.at(i)->data() + : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); if (userBuffer != levelZeroBuffer) { if (userBuffer == nullptr || levelZeroBuffer == nullptr) { @@ -586,9 +634,10 @@ void ZeroInferRequest::infer_async() { for (size_t i = 0; i < userTensor.size(); i++) { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = !userBatchRemoteTensor ? userTensor.at(i)->data() - : extract_object(userBatchRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor + ? userTensor.at(i)->data() + : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); std::memcpy(static_cast(levelZeroBuffer) + (i * userTensor.at(i)->get_byte_size()), userBuffer, @@ -601,9 +650,9 @@ void ZeroInferRequest::infer_async() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor.at(SINGLE_TENSOR)._ptr); - void* userBuffer = !userRemoteTensor - ? userTensor.at(SINGLE_TENSOR)->data() - : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userRemoteTensor ? userTensor.at(SINGLE_TENSOR)->data() + : zeroUtils::extract_object(userRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); const auto& levelZeroTensor = get_level_zero_input(inputIndex); if (!is_remote_tensor(levelZeroTensor)) { @@ -652,9 +701,9 @@ void ZeroInferRequest::get_result() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor._ptr); - void* userBuffer = !userRemoteTensor - ? userTensor->data() - : extract_object(userRemoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !userRemoteTensor ? 
userTensor->data() + : zeroUtils::extract_object(userRemoteTensor->get_properties(), + ov::intel_npu::mem_handle); const std::shared_ptr& levelZeroTensor = _levelZeroOutputTensors.at(outputIndex); if (!is_remote_tensor(levelZeroTensor)) { @@ -751,6 +800,19 @@ std::shared_ptr ZeroInferRequest::create_tensor(ov::element::Type t return std::make_shared(_initStructs, type, shape, allocator); } +void ZeroInferRequest::add_state(const IODescriptor& descriptor, size_t tensorIndex) const { + OPENVINO_ASSERT(descriptor.relatedDescriptorIndex.has_value(), + "The link between state descriptors is missing, state name: ", + descriptor.nameFromCompiler); + + _variableStates.push_back(std::make_shared(_initStructs, + descriptor.nameFromCompiler, + get_user_input(tensorIndex), + tensorIndex, + *descriptor.relatedDescriptorIndex, + _config)); +} + std::vector ZeroInferRequest::get_raw_profiling_data() const { return _profilingQuery.getData(); } diff --git a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp index 7ada704c9969d8..a01238a899e0dc 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp @@ -15,21 +15,6 @@ #include "intel_npu/utils/zero/zero_types.hpp" #include "zero_remote_tensor.hpp" -namespace { - -template -Type extract_object(const ov::AnyMap& params, const ov::Property& p) { - auto itrHandle = params.find(p.name()); - ov::Any res = nullptr; - if (itrHandle == params.end()) { - OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); - } - res = itrHandle->second; - return res.as(); -} - -} // namespace - namespace intel_npu { Pipeline::Pipeline(const Config& config, @@ -80,7 +65,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(i)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = 
zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value(desc.idx, data); @@ -94,7 +79,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(0)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value( @@ -112,7 +97,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = output_tensors.at(io_index)->data(); } else { - data = extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); } graph->set_argument_value( diff --git a/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp new file mode 100644 index 00000000000000..19cabfb4246e5d --- /dev/null +++ b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "zero_variable_state.hpp" + +#include "intel_npu/config/common.hpp" +#include "intel_npu/utils/zero/zero_utils.hpp" +#include "zero_remote_tensor.hpp" + +namespace intel_npu { + +ZeroVariableState::ZeroVariableState(const std::shared_ptr& init_structs, + const std::string& name, + const ov::SoPtr& tensor, + size_t tensor_index, + size_t related_tensor_index, + const Config& config) + : ov::IVariableState(name), + _init_structs(init_structs), + _tensor_index(tensor_index), + _related_tensor_index(related_tensor_index), + _logger("ZeroVariableState", config.get()) { + m_state = tensor; +} + +void ZeroVariableState::set_state(const ov::SoPtr& new_state) { + m_state = new_state; + _tensor_updated = true; + + if 
(_init_structs->getMutableCommandListVersion()) { + if (!is_remote_tensor(new_state._ptr)) { + if (zeroUtils::memory_was_allocated_in_the_same_l0_context(_init_structs->getContext(), + new_state->data())) { + _logger.debug("ZeroVariableState::set_state - tensor was created in the same L0 context"); + _zero_tensor_updated = true; + } + + return; + } + + _zero_tensor_updated = true; + } +} + +void ZeroVariableState::reset() { + auto remoteTensor = std::dynamic_pointer_cast(m_state._ptr); + + void* userBuffer = !remoteTensor + ? m_state->data() + : zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + + std::memset(userBuffer, 0, m_state->get_byte_size()); +} + +size_t ZeroVariableState::get_tensor_index() const { + return _tensor_index; +} + +size_t ZeroVariableState::get_related_tensor_index() const { + return _related_tensor_index; +} + +bool ZeroVariableState::tensor_was_updated() const { + return _tensor_updated; +} + +void ZeroVariableState::reset_tensor_updated_flag() { + _tensor_updated = false; +} + +bool ZeroVariableState::zero_tensor_should_be_updated() const { + return _zero_tensor_updated; +} + +void ZeroVariableState::reset_zero_tensor_updated_flag() { + _zero_tensor_updated = false; +} + +} // namespace intel_npu diff --git a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp index 3c772168c0c93f..f7406413c9f197 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/sync_infer_request.hpp @@ -167,6 +167,8 @@ class SyncInferRequest : public ov::IInferRequest { const ov::Shape& shape, const ov::Allocator& allocator = {}) const; + virtual void add_state(const IODescriptor& descriptor, const size_t tensorIndex) const; + bool is_batched_input(size_t idx) const; ov::SoPtr& get_user_input(size_t index) const; diff 
--git a/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp b/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp index acb83d5b718033..0987f2b44bbb04 100644 --- a/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp +++ b/src/plugins/intel_npu/src/common/include/intel_npu/common/variable_state.hpp @@ -11,12 +11,11 @@ namespace intel_npu { class VariableState final : public ov::IVariableState { public: - explicit VariableState(const std::string& name, const std::shared_ptr& tensor) - : ov::IVariableState(name) { + explicit VariableState(const std::string& name, const ov::SoPtr& tensor) : ov::IVariableState(name) { m_state = tensor; } - void set_state(const ov::SoPtr& newState) override { + virtual void set_state(const ov::SoPtr& newState) override { if (newState->get_byte_size() != m_state->get_byte_size()) { OPENVINO_THROW("Byte size mismatch"); } @@ -24,7 +23,7 @@ class VariableState final : public ov::IVariableState { std::memcpy(m_state->data(), newState->data(), newState->get_byte_size()); } - void reset() override { + virtual void reset() override { std::memset(m_state->data(), 0, m_state->get_byte_size()); } diff --git a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp index 17dc6391761e5c..775113ef0d39bf 100644 --- a/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp +++ b/src/plugins/intel_npu/src/common/src/sync_infer_request.cpp @@ -326,7 +326,7 @@ std::shared_ptr SyncInferRequest::allocate_tensor(const IODescripto } if (descriptor.isStateInput) { - _variableStates.push_back(std::make_shared(descriptor.nameFromCompiler, tensor)); + add_state(descriptor, index); } } else if (_userOutputTensors.at(index) == nullptr) { _userOutputTensors.at(index) = tensor; @@ -341,6 +341,11 @@ std::shared_ptr SyncInferRequest::create_tensor(ov::element::Type t return ov::make_tensor(type, shape, allocator); } +void 
SyncInferRequest::add_state(const IODescriptor& descriptor, const size_t tensorIndex) const { + _variableStates.push_back( + std::make_shared(descriptor.nameFromCompiler, get_user_input(tensorIndex))); +} + bool SyncInferRequest::is_batched_input(size_t idx) const { return _userInputTensors.at(idx).size() > 1; } diff --git a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp index db9dc1c9f51d34..0c2367b680851e 100644 --- a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp +++ b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp @@ -277,5 +277,32 @@ static inline std::string getLatestBuildError(ze_graph_dditable_ext_curr_t& _gra } } +template +static inline Type extract_object(const ov::AnyMap& params, const ov::Property& p) { + auto itrHandle = params.find(p.name()); + ov::Any res = nullptr; + if (itrHandle == params.end()) { + OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); + } + res = itrHandle->second; + return res.as(); +} + +static inline bool memory_was_allocated_in_the_same_l0_context(ze_context_handle_t hContext, const void* ptr) { + ze_memory_allocation_properties_t desc = {}; + desc.stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES; + auto res = intel_npu::zeMemGetAllocProperties(hContext, ptr, &desc, nullptr); + if (res == ZE_RESULT_SUCCESS) { + if (desc.id) { + if ((desc.type & ZE_MEMORY_TYPE_HOST) || (desc.type & ZE_MEMORY_TYPE_DEVICE) || + (desc.type & ZE_MEMORY_TYPE_SHARED)) { + return true; + } + } + } + + return false; +} + } // namespace zeroUtils } // namespace intel_npu diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp index f45e30bb109849..f30fa2bb1416a3 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp +++ 
b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.cpp @@ -11,7 +11,7 @@ using namespace ov::test::behavior; -const std::vector configsInferRequestRunTests = {{ov::log::level(ov::log::Level::ERR)}}; +const std::vector configsInferRequestRunTests = {{}}; INSTANTIATE_TEST_SUITE_P(compatibility_smoke_BehaviorTest, InferRequestRunTests, diff --git a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp index 31b55704757b01..ab53a442c16cda 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/infer_request_run.hpp @@ -10,6 +10,7 @@ #include #include +#include #include #include "base/ov_behavior_test_utils.hpp" @@ -962,6 +963,104 @@ TEST_P(SetShapeInferRunTests, checkResultsAfterIOBlobReallocation) { } } +TEST_P(SetShapeInferRunTests, checkResultsAfterStateTensorsReallocation) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto dummy_shape = Shape{1, 50, 100, 100}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device); + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + for (auto&& state : inference_request.query_state()) { + state.reset(); + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + 
auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto states = inference_request.query_state(); + for (auto state : states) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + EXPECT_NEAR(0.0, last_state_data[i], 1e-5); + } + } + + // create dummy Tensors to force the driver to allocate memory for the initial tensor somewhere else + [[maybe_unused]] auto l0_host_dummy_tensor_0 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_1 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_2 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_3 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_4 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_5 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_6 = context.create_host_tensor(ov::element::f32, dummy_shape); + [[maybe_unused]] auto l0_host_dummy_tensor_7 = context.create_host_tensor(ov::element::f32, dummy_shape); + + for (auto item : inference_request.query_state()) { + auto tensor_state = item.get_state(); + auto original_shape = tensor_state.get_shape(); + OV_ASSERT_NO_THROW(tensor_state.set_shape({1, 50, 20, 20})); + OV_ASSERT_NO_THROW(tensor_state.set_shape(original_shape)); + } + + for (auto&& state : inference_request.query_state()) { + state.reset(); + } + + for (auto state : states) { + auto last_state = 
state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + last_state_data[i] = 1.0f; + } + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + for (auto state : states) { + auto last_state = state.get_state(); + auto last_state_size = last_state.get_size(); + auto last_state_data = static_cast(last_state.data()); + + ASSERT_TRUE(last_state_size != 0) << "State size should not be 0"; + + for (size_t i = 0; i < last_state_size; ++i) { + EXPECT_NEAR(input_data[i], last_state_data[i], 1e-5); + } + } +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp index 870f6596dca9ce..d3e537863227e4 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.cpp @@ -10,7 +10,7 @@ using namespace ov::test::behavior; -const std::vector remoteConfigs = {{ov::log::level(ov::log::Level::ERR)}}; +const std::vector remoteConfigs = {{}}; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTest, RemoteRunTests, diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp index fa58d4270889ad..c1992b3047996d 100644 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp +++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp @@ -434,6 +434,380 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor2) 0); } +TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensors) { + // Skip test according 
to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device); + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor0 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor1 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor0); + states[0].reset(); + states[1].set_state(l0_host_tensor1); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor0.get_size(); + auto state_data = static_cast(l0_host_tensor0.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor1.get_size(); + state_data = static_cast(l0_host_tensor1.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, 
state_data[i], 1e-5); + } + + tensor_state = states[0].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor2 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor3 = context.create_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor2); + states[1].set_state(l0_host_tensor3); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensorsWithRemoteTensors) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 2, 2, 2}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + 
OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor0 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor1 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor0); + states[0].reset(); + states[1].set_state(l0_host_tensor1); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor0.get_size(); + auto state_data = static_cast(l0_host_tensor0.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor1.get_size(); + state_data = static_cast(l0_host_tensor1.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_state = states[0].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor2 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor3 = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + states[0].set_state(l0_host_tensor2); + states[1].set_state(l0_host_tensor3); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.get()); + for (size_t i = 0; i < 
tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor2.get_size(); + state_data = static_cast(l0_host_tensor2.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor3.get_size(); + state_data = static_cast(l0_host_tensor3.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors0) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto byte_size = tensor_state.get_byte_size(); + 
float* data = new float[byte_size / sizeof(float)]; + ov::Tensor random_tensor{ov::element::f32, tensor_state_shape, data}; + + states[0].set_state(l0_host_tensor); + states[0].reset(); + states[1].set_state(random_tensor); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + auto tensor_size = l0_host_tensor.get_size(); + auto state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } +} + +TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTensors1) { + // Skip test according to plugin specific disabledTestPatterns() (if any) + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + testing::internal::Random random(1); + ov::Tensor input_tensor; + + auto original_shape = Shape{1, 10, 10, 10}; + auto 
shape_size = ov::shape_size(original_shape); + auto model = createModelWithStates(element::f32, original_shape); + + auto context = core->get_default_context(target_device).as(); + ; + + compiled_model = core->compile_model(model, target_device, configuration); + ov::InferRequest inference_request; + inference_request = compiled_model.create_infer_request(); + + auto input = compiled_model.input(); + OV_ASSERT_NO_THROW(input_tensor = inference_request.get_tensor(input)); + auto* input_data = input_tensor.data(); + for (size_t i = 0; i < shape_size; ++i) { + input_data[i] = static_cast(random.Generate(10)); + } + + auto states = inference_request.query_state(); + + auto tensor_state = states[0].get_state(); + auto tensor_state_shape = tensor_state.get_shape(); + auto l0_host_tensor = context.create_l0_host_tensor(ov::element::f32, tensor_state_shape); + + tensor_state = states[1].get_state(); + tensor_state_shape = tensor_state.get_shape(); + auto byte_size = tensor_state.get_byte_size(); + float* data = new float[byte_size / sizeof(float)]; + ov::Tensor random_tensor{ov::element::f32, tensor_state_shape, data}; + + auto tensor_size = l0_host_tensor.get_size(); + auto state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + state_data[i] = 1.0f; + } + + states[0].set_state(l0_host_tensor); + states[1].set_state(random_tensor); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(input_data[i], state_data[i], 1e-5); + } + + 
states[0].reset(); + states[1].reset(); + + OV_ASSERT_NO_THROW(inference_request.infer()); + + auto output_tensor = inference_request.get_tensor("sigmod_state"); + auto output_data = output_tensor.data(); + for (size_t i = 0; i < output_tensor.get_size(); i++) { + EXPECT_NEAR(0.5f, output_data[i], 1e-5); + } + + tensor_size = l0_host_tensor.get_size(); + state_data = static_cast(l0_host_tensor.get()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } + + tensor_size = random_tensor.get_size(); + state_data = static_cast(random_tensor.data()); + for (size_t i = 0; i < tensor_size; ++i) { + EXPECT_NEAR(0.0, state_data[i], 1e-5); + } +} + } // namespace behavior } // namespace test } // namespace ov diff --git a/src/plugins/intel_npu/tests/functional/common/utils.cpp b/src/plugins/intel_npu/tests/functional/common/utils.cpp index 91f78487934e38..b041e694b19ad0 100644 --- a/src/plugins/intel_npu/tests/functional/common/utils.cpp +++ b/src/plugins/intel_npu/tests/functional/common/utils.cpp @@ -7,6 +7,10 @@ #include #include "intel_npu/npu_private_properties.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/op.hpp" +#include "openvino/op/sigmoid.hpp" std::string getBackendName(const ov::Core& core) { return core.get_property("NPU", ov::intel_npu::backend_name.name()).as(); @@ -99,3 +103,32 @@ std::vector getRWMandatoryPropertiesValues(std::vector p } return props; } + +std::shared_ptr createModelWithStates(ov::element::Type type, const ov::Shape& shape) { + auto input = std::make_shared(type, shape); + auto mem_i1 = std::make_shared(type, shape, 0); + auto mem_r1 = std::make_shared(mem_i1, "r_1-3"); + auto mul1 = std::make_shared(mem_r1, input); + + auto mem_i2 = std::make_shared(type, shape, 0); + auto mem_r2 = std::make_shared(mem_i2, "c_1-3"); + auto mul2 = std::make_shared(mem_r2, mul1); + auto mem_w2 = std::make_shared(mul2, "c_1-3"); + + auto mem_w1 = std::make_shared(mul2, 
"r_1-3"); + auto sigm = std::make_shared(mul2); + sigm->set_friendly_name("sigmod_state"); + sigm->get_output_tensor(0).set_names({"sigmod_state"}); + mem_r1->set_friendly_name("Memory_1"); + mem_r1->get_output_tensor(0).set_names({"Memory_1"}); + mem_w1->add_control_dependency(mem_r1); + sigm->add_control_dependency(mem_w1); + + mem_r2->set_friendly_name("Memory_2"); + mem_r2->get_output_tensor(0).set_names({"Memory_2"}); + mem_w2->add_control_dependency(mem_r2); + sigm->add_control_dependency(mem_w2); + + auto function = std::make_shared(ov::NodeVector{sigm}, ov::ParameterVector{input}, "add_output"); + return function; +} diff --git a/src/plugins/intel_npu/tests/functional/common/utils.hpp b/src/plugins/intel_npu/tests/functional/common/utils.hpp index 4ad54cc016302c..40ac987bd25487 100644 --- a/src/plugins/intel_npu/tests/functional/common/utils.hpp +++ b/src/plugins/intel_npu/tests/functional/common/utils.hpp @@ -6,6 +6,7 @@ #include #include + #include "common_test_utils/unicode_utils.hpp" std::string getBackendName(const ov::Core& core); @@ -18,6 +19,8 @@ std::string removeDeviceNameOnlyID(const std::string& device_name_id); std::vector getRWMandatoryPropertiesValues(std::vector props); +std::shared_ptr createModelWithStates(ov::element::Type type, const ov::Shape& shape); + template ::value || std::is_same::value)>::type> void removeDirFilesRecursive(const std::basic_string& path) { @@ -72,6 +75,8 @@ struct GenericTestCaseNameClass { }; template -constexpr bool GenericTestCaseNameClass::hasGetTestCaseName< - T, std::void_t().getTestCaseName( - std::declval>()))>> = true; +constexpr bool + GenericTestCaseNameClass::hasGetTestCaseName().getTestCaseName( + std::declval>()))>> = + true; From 81ff40997bcc3fccba98fc1609c2a5d7035f3d12 Mon Sep 17 00:00:00 2001 From: Egor Duplenskii Date: Tue, 21 Jan 2025 09:58:42 +0100 Subject: [PATCH 68/97] [CPU][TESTS] Remove sse4 instances from oneDNN related tests (#28527) Seems to not worth to verify them considering how slow 
they are --- .../classes/convolution.cpp | 18 -- .../single_layer_tests/classes/pooling.cpp | 3 +- .../single_layer_tests/group_convolution.cpp | 300 +----------------- .../instances/common/convolution.cpp | 3 +- .../instances/x64/convolution.cpp | 6 - .../instances/x64/pooling.cpp | 13 +- .../instances/x64/softmax.cpp | 3 +- .../subgraph_tests/src/common/conv_concat.cpp | 4 - .../subgraph_tests/src/x64/conv_concat.cpp | 11 +- .../src/x64/memory_sharing_test.cpp | 2 - .../functional/utils/convolution_params.hpp | 30 -- 11 files changed, 14 insertions(+), 379 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp index b3c958a2c88a68..ed7fdcff0479d8 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp @@ -190,16 +190,6 @@ void ConvolutionLayerCPUTest::SetUp() { } TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) { - // Skip tests for sse41 convolution where ic or oc cannot be exactly divided by the block size, - // since tails processing for sse41 nspc layout is not supported yet (see 52736). - if (!inFmts.empty() && (inFmts.front() == nwc || inFmts.front() == nhwc || inFmts.front() == ndhwc) && selectedType.find("jit_sse") != std::string::npos) { - auto inpChannels = function->get_parameters().front()->get_partial_shape()[1].get_length(); - auto outChannels = function->get_output_partial_shape(0)[1].get_length(); - if ((inpChannels % 8) || (outChannels % 8)) { - GTEST_SKIP() << "Disabled test due to the sse41 convolution kernel does not support tails for nspc layout." 
<< std::endl; - } - } - if (!priority.empty()) { // Skip tests for brgconv convolution where kernel size = 1x1 if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { @@ -340,10 +330,7 @@ const std::vector& inShapesGemm2D_cache() { const std::vector& CPUParams_2D() { static const std::vector CPUParams_2D = { - conv_sse42_2D, - conv_avx2_2D, conv_avx512_2D, - conv_sse42_2D_nspc, conv_avx2_2D_nspc, conv_avx2_2D_nspc_brgconv, conv_avx512_2D_nspc, @@ -354,7 +341,6 @@ const std::vector& CPUParams_2D() { const std::vector& CPUParams_3D() { static const std::vector CPUParams_3D = { - //conv_sse42_3D, // not supported jit_sse42 for 3d conv_avx2_3D, conv_avx512_3D, conv_avx2_3D_nspc, @@ -479,10 +465,8 @@ const std::vector& inputShapes2d_dynBatch() { const std::vector& CPUParams_1x1_1D() { static const std::vector CPUParams_1x1_1D = { - conv_sse42_1D_1x1, conv_avx2_1D_1x1, conv_avx512_1D_1x1, - conv_sse42_1D_1x1_nspc, conv_avx2_1D_1x1_nspc, conv_avx2_1D_1x1_nspc_brgconv, conv_avx512_1D_1x1_nspc, @@ -567,10 +551,8 @@ const std::vector& CPUParams_GEMM_3D() { const std::vector& CPUParams_1x1_2D() { static const std::vector CPUParams_1x1_2D = { - conv_sse42_2D_1x1, conv_avx2_2D_1x1, conv_avx512_2D_1x1, - conv_sse42_2D_1x1_nspc, conv_avx2_2D_1x1_nspc, conv_avx2_2D_1x1_nspc_brgconv, conv_avx512_2D_1x1_nspc, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp index 7c4854dd334bcf..62352c851435b2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/pooling.cpp @@ -787,12 +787,11 @@ const CPUSpecificParams& expectedCpuConfigAnyLayout() { } const std::vector& vecCpuConfigsFusing_4D() { - const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; const auto avx2_nhwc = 
CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx512_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx512"}, "jit_avx512"}; const auto acl_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"acl"}, "acl"}; - static const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()}; + static const std::vector vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc, acl_nhwc, expectedCpuConfigAnyLayout()}; return vecCpuConfigsFusing_4D; } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp index 64dcf20542c09d..7d9173e472e089 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/group_convolution.cpp @@ -1043,7 +1043,7 @@ const auto groupConvParams_ExplicitPadding_1D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_1D = - {conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx512_1D_nspc}; + {conv_avx2_1D, conv_avx512_1D, conv_avx2_1D_nspc, conv_avx512_1D_nspc}; std::vector inputShapes1d = {{{}, {{2, 64, 7}}}, {// dynamic shapes @@ -1108,7 +1108,7 @@ const auto groupConvParams_ExplicitPadding_2D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_2D = - {conv_sse42_2D, conv_avx2_2D, conv_avx512_2D, conv_sse42_2D_nspc, conv_avx2_2D_nspc, conv_avx512_2D_nspc}; + {conv_avx2_2D, conv_avx512_2D, conv_avx2_2D_nspc, conv_avx512_2D_nspc}; std::vector inputShapes2d = {{{}, {{1, 64, 7, 7}}}, {// dynamic shapes @@ -1197,7 +1197,6 @@ const auto groupConvParams_ExplicitPadding_3D = ::testing::Combine(::testing::Va ::testing::Values(ov::op::PadType::EXPLICIT)); const std::vector CPUParams_3D = { - // conv_sse42_3D, 
// not supported jit_sse42 for 3d conv_avx2_3D, conv_avx512_3D, conv_avx2_3D_nspc, @@ -1247,10 +1246,8 @@ const auto groupConvParams_ExplicitPadding_DW_1D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_1D = {conv_sse42_dw_1D, - conv_avx2_dw_1D, +const std::vector CPUParams_DW_1D = {conv_avx2_dw_1D, conv_avx512_dw_1D, - conv_sse42_dw_1D_nspc, conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc}; @@ -1272,8 +1269,7 @@ INSTANTIATE_TEST_SUITE_P( ::testing::ValuesIn(inputShapes1dDW), ::testing::Values(ov::test::utils::DEVICE_CPU)), ::testing::ValuesIn(filterCPUInfoForDevice( - {conv_sse42_dw_1D, conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_sse42_dw_1D_nspc, - // conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? + {conv_avx2_dw_1D, conv_avx512_dw_1D})), // todo: [AV] what about conv_avx2_dw_1D_nspc, conv_avx512_dw_1D_nspc? ::testing::ValuesIn(fusingParamsSet), ::testing::Values(empty_plugin_config)), GroupConvolutionLayerCPUTest::getTestCaseName); @@ -1302,10 +1298,8 @@ const auto groupConvParams_ExplicitPadding_DW_2D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_2D = {conv_sse42_dw_2D, - conv_avx2_dw_2D, +const std::vector CPUParams_DW_2D = {conv_avx2_dw_2D, conv_avx512_dw_2D, - conv_sse42_dw_2D_nspc, conv_avx2_dw_2D_nspc, conv_avx512_dw_2D_nspc}; @@ -1411,10 +1405,8 @@ const auto groupConvParams_ExplicitPadding_DW_3D = ::testing::Combine(::testing: ::testing::ValuesIn(numGroups_DW), ::testing::Values(ov::op::PadType::EXPLICIT)); -const std::vector CPUParams_DW_3D = {conv_sse42_dw_3D, - conv_avx2_dw_3D, +const std::vector CPUParams_DW_3D = {conv_avx2_dw_3D, conv_avx512_dw_3D, - conv_sse42_dw_3D_nspc, conv_avx2_dw_3D_nspc, conv_avx512_dw_3D_nspc}; @@ -1673,171 +1665,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GEMM_GroupConv, 
::testing::ValuesIn(filterParamsSetForDevice(gemmGroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 GroupConvolution ============= */ -const std::vector sse42_GroupConv = {conv_sse42_2D, conv_sse42_2D_nspc}; -const std::vector JIT_SSE42_GroupConvTestCases = generateSingleGroupConvCPUTestCases( - // 1. jcp.ur_w (=3,<3) - // 2. jcp.ur_w_tail (=0,>0) - // 3. jcp.kw (>7,<=7) - // 4. jcp.nb_oc = jcp.oc / jcp.oc_block; - // 5. jcp.nb_ic = jcp.ic / jcp.ic_block; - // 6. ocb_work - - // jcp.ur_w == 3, jcp.ur_w_tail == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.ur_w < 3 (jcp.ur_w == jcp.ow) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 4}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.ur_w == 3, jcp.ur_w_tail == 0 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 11}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.kw > 7 - makeSingleGroupConvCPUTestCases({3, 8}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_oc == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 8, - 16, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_ic == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 16, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32), - // ocb_work > 1 (ocb_work == 2) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 8, - 40, - 
sse42_GroupConv, - vecPrcConnectParamsFP32), - // jcp.nb_ic == 2, ocb_work == 2 - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 2, - 1, - {5, 5}, - 16, - 40, - sse42_GroupConv, - vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, - {2, 2}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 3, - 2, - {129, 129}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 4}, - {1, 2}, - {3, 2}, - {2, 1}, - {1, 0}, - ov::op::PadType::EXPLICIT, - 2, - 1, - {10, 10}, - 8, - 8, - sse42_GroupConv, - vecPrcConnectParamsFP32Default) - - // not supported jit_sse42 for 3d - // makeSingleGroupConvCPUTestCases({3, 3, 3}, {2, 2, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, - // ov::op::PadType::EXPLICIT, - // 3, 2, {33, 33, 33}, 8, 8, cpuParams_sse42_3D), - // makeSingleGroupConvCPUTestCases({2, 3, 4}, {1, 2, 2}, {3, 1, 2}, {2, 2, 1}, {1, 1, 0}, - // ov::op::PadType::EXPLICIT, - // 2, 1, {10, 10, 10}, 8, 8, cpuParams_sse42_3D), -); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_GroupConv, - GroupConvolutionLayerCPUTest, - ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); - /* ============= JIT AVX2 GroupConvolution ============= */ const std::vector avx2_GroupConv_2D = {conv_avx2_2D, conv_avx2_2D_nspc}; const std::vector avx2_GroupConv_3D = {conv_avx2_3D, conv_avx2_3D_nspc}; @@ -2130,120 +1957,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_GroupConv, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_GroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 DW GroupConvolution ============= */ -const std::vector sse42_DW_2D = {conv_sse42_dw_2D, conv_sse42_dw_2D_nspc}; -const std::vector sse42_DW_3D = {conv_sse42_dw_3D, conv_sse42_dw_3D_nspc}; -const std::vector JIT_SSE42_DW_GroupConvTestCases = 
generateSingleGroupConvCPUTestCases( - // 1. jcp.ngroups % simd_w (=0,!=0) - // 2. jcp.nb_ch - // 3. jcp.nb_ch_blocking (=2,<2) - // 4. jcp.ur_w == 3 - - // jcp.ngroups % simd_w == 0, jcp.nb_ch == 1, jcp.nb_ch_blocking == 1 (jcp.ngroups == 8) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 8, - 1, - {5, 5}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - // jcp.ngroups % simd_w == 0, jcp.nb_ch == 2, jcp.nb_ch_blocking == 2 (jcp.ngroups == 16) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 16, - 1, - {5, 5}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - // jcp.ngroups % simd_w != 0, jcp.nb_ch == 3, jcp.nb_ch_blocking == 2 (jcp.ngroups == 17) TODO: pad channels not - // supported for SSE42 makeSingleGroupConvCPUTestCases({3, 3}, {1, 1}, {1, 1}, {0, 0}, {0, 0}, - // ov::op::PadType::VALID, 17, 1, {5, 5}, 1, 1, conv_sse42_DW_2D, vecPrcConnectParamsFP32only), jcp.ow > jcp.ur_w - // (jcp.ow == 7) - makeSingleGroupConvCPUTestCases({3, 3}, - {1, 1}, - {1, 1}, - {0, 0}, - {0, 0}, - ov::op::PadType::VALID, - 8, - 1, - {5, 9}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - - // "hard" cases - makeSingleGroupConvCPUTestCases({3, 3}, - {2, 2}, - {1, 1}, - {1, 1}, - {1, 1}, - ov::op::PadType::EXPLICIT, - 8, - 2, - {129, 129}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32), - makeSingleGroupConvCPUTestCases({2, 4}, - {1, 2}, - {3, 2}, - {2, 1}, - {1, 0}, - ov::op::PadType::EXPLICIT, - 8, - 1, - {10, 10}, - 1, - 1, - sse42_DW_2D, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({3, 3, 3}, - {2, 2, 2}, - {1, 1, 1}, - {1, 1, 1}, - {1, 1, 1}, - ov::op::PadType::EXPLICIT, - 8, - 2, - {33, 33, 33}, - 1, - 1, - sse42_DW_3D, - vecPrcConnectParamsFP32Default), - makeSingleGroupConvCPUTestCases({2, 3, 4}, - {1, 2, 2}, - {3, 1, 2}, - {2, 2, 1}, - {1, 1, 0}, - ov::op::PadType::EXPLICIT, - 8, - 1, - {10, 10, 10}, - 1, - 
1, - sse42_DW_3D, - vecPrcConnectParamsFP32)); - -INSTANTIATE_TEST_SUITE_P(smoke_JIT_SSE42_DW_GroupConv, - GroupConvolutionLayerCPUTest, - ::testing::ValuesIn(filterParamsSetForDevice(JIT_SSE42_DW_GroupConvTestCases)), - GroupConvolutionLayerCPUTest::getTestCaseName); - /* ============= JIT AVX2 DW GroupConvolution ============= */ const std::vector avx2_DW_2D = {conv_avx2_dw_2D, conv_avx2_dw_2D_nspc}; const std::vector avx2_DW_3D = {conv_avx2_dw_3D, conv_avx2_dw_3D_nspc}; @@ -2494,7 +2207,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_JIT_AVX512_DW_GroupConv, ::testing::ValuesIn(filterParamsSetForDevice(JIT_AVX512_DW_GroupConvTestCases)), GroupConvolutionLayerCPUTest::getTestCaseName); -/* ============= JIT SSE42 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX2 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX512 1x1 Convolution (not supported with groups) ============= */ /* ============= JIT AVX2 PLANAR Convolution (not supported with groups) ============= */ diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp index 09f8dc14660392..94683387d1eac0 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution.cpp @@ -106,7 +106,6 @@ INSTANTIATE_TEST_SUITE_P(Conv_2D_FP32_dilated_empty_fusing, ConvolutionLayerCPUT ConvolutionLayerCPUTest::getTestCaseName); const std::vector CPUParams_2D_plain_to_blocked = { - conv_sse42_plain_to_blocked_2D, conv_avx2_plain_to_blocked_2D, conv_avx512_plain_to_blocked_2D, }; @@ -397,4 +396,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_Large_Filter, ConvolutionLayerCPUTest, } // namespace } // namespace Convolution } // namespace test -} // namespace ov \ No newline at end of 
file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp index 741e12031c680c..030f7eb3bc40b8 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution.cpp @@ -344,10 +344,8 @@ const auto convParams_ExplicitPadding_1D = ::testing::Combine( ); const std::vector CPUParams_1D_f32 = { - conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, - conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx2_1D_nspc_brgconv, conv_avx512_1D_nspc, @@ -356,10 +354,8 @@ const std::vector CPUParams_1D_f32 = { //Current avx2 I8 fall back on JIT avx2 implement when having src zero point.Not enabling conv_avx2_1D_nspc_brgconv for I8 precision. const std::vector CPUParams_1D_I8 = { - conv_sse42_1D, conv_avx2_1D, conv_avx512_1D, - conv_sse42_1D_nspc, conv_avx2_1D_nspc, conv_avx512_1D_nspc, conv_avx512_1D_nspc_brgconv @@ -424,7 +420,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_1D_I8, ConvolutionLayerCPUTest, ConvolutionLayerCPUTest::getTestCaseName); const std::vector CPUParams_1D_plain_to_blocked = { - conv_sse42_plain_to_blocked_1D, conv_avx2_plain_to_blocked_1D, conv_avx512_plain_to_blocked_1D, }; @@ -630,7 +625,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Conv_2D_1x1_FP16, ConvolutionLayerCPUTest, /* ============= Jit Planar ============= */ /* ============= Convolution planar params (2D) ============= */ const std::vector CPUParams_Jit_Planar_2D = { - // sse42 is not supported conv_avx2_planar_2D, conv_avx512_planar_2D, }; diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp index cfe29692f8414c..6b9e9d3718f556 100644 --- 
a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/pooling.cpp @@ -17,9 +17,8 @@ namespace { const auto ref = CPUSpecificParams{{}, {}, {"ref_any"}, "ref_any"}; const auto avx512 = CPUSpecificParams{{}, {}, {"jit_avx512"}, "jit_avx512"}; const auto avx = CPUSpecificParams{{}, {}, {"jit_avx"}, "jit_avx"}; -const auto sse42 = CPUSpecificParams{{}, {}, {"jit_sse42"}, "jit_sse42"}; -const std::vector vecCpuConfigs = {sse42, avx, avx512}; +const std::vector vecCpuConfigs = {avx, avx512}; const std::vector paramsMaxV84D_ref = { maxPoolV8SpecificParams{ {2, 2}, {2, 2}, {2, 2}, {0, 0}, {0, 0}, @@ -50,13 +49,9 @@ const auto avx2_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx2_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2"}, "jit_avx2"}; const auto avx2_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_avx2"}, "jit_avx2"}; -const auto sse42_nwc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"}; -const auto sse42_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; -const auto sse42_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; - -const std::vector vecCpuConfigsFusing_3D = {sse42_nwc, avx2_nwc, avx512_nwc}; -const std::vector vecCpuConfigsFusing_4D = {sse42_nhwc, avx2_nhwc, avx512_nhwc}; -const std::vector vecCpuConfigsFusing_5D = {sse42_ndhwc, avx2_ndhwc, avx512_ndhwc}; +const std::vector vecCpuConfigsFusing_3D = {avx2_nwc, avx512_nwc}; +const std::vector vecCpuConfigsFusing_4D = {avx2_nhwc, avx512_nhwc}; +const std::vector vecCpuConfigsFusing_5D = {avx2_ndhwc, avx512_ndhwc}; std::vector fusingParamsSet { emptyFusingSpec, diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp index 2a22f629c29661..9f7938310d788f 
100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/softmax.cpp @@ -17,8 +17,7 @@ namespace { const auto optimizedCPUSpec = []()-> std::vector{ const auto avx512 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx512"}; const auto avx2 = CPUSpecificParams{{}, {}, {"jit"}, "jit_avx2"}; - const auto sse42 = CPUSpecificParams{{}, {}, {"jit"}, "jit_sse42"}; - const std::vector vecCpuConfigs = {avx512, avx2, sse42}; + const std::vector vecCpuConfigs = {avx512, avx2}; auto supportConfigure = CPUTestUtils::filterCPUInfoForDevice(vecCpuConfigs); // only the MAX ISA of vecCpuConfigs will be tested if (supportConfigure.size() > 0) { diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp index 22b94d369c5d8f..2825a3528baf6b 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/conv_concat.cpp @@ -50,7 +50,6 @@ namespace ConvolutionConact { /* ============= Convolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D_nspc, conv_gemm_2D }; @@ -66,7 +65,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D, ConvConcatSubgraphTest, params2D, /* ============= Convolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D_nspc, conv_gemm_3D }; @@ -86,7 +84,6 @@ namespace GroupConvolutionConcat { /* ============= GroupConvolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D_nspc, conv_gemm_2D }; @@ -102,7 +99,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D, ConvConcatSubgraphTest, param /* ============= GroupConvolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D_nspc, conv_gemm_3D }; diff --git 
a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp index 1fdbd5016099e1..3d7ec525c0a105 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/conv_concat.cpp @@ -20,7 +20,6 @@ namespace Kernel_1x1 { /* ============= Kernel_1x1 (2D) ============= */ const std::vector CPUParams2DConv = { - conv_sse42_2D_1x1, conv_avx2_2D_1x1, conv_avx512_2D_1x1 }; @@ -84,7 +83,6 @@ commonConvParams dwConvParams2D = commonConvParams{kernelSize2D(), strides2D(), numOutChannels(), paddingType(), numOutChannels()}; const ov::Shape inputShapesDW2D{1, 32, 16, 16}; const std::vector CPUParams2D = { - conv_sse42_dw_2D, conv_avx2_dw_2D, conv_avx512_dw_2D }; @@ -104,7 +102,6 @@ commonConvParams dwConvParams3D = commonConvParams{kernelSize3D(), strides3D(), numOutChannels(), paddingType(), numOutChannels()}; const ov::Shape inputShapesDW3D{1, 32, 8, 16, 16}; const std::vector CPUParams3D = { - conv_sse42_dw_3D, conv_avx2_dw_3D, conv_avx512_dw_3D }; @@ -158,8 +155,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConvolutionBackpropData3D, ConvConcatSubgraphTest namespace ConvolutionConcat { /* ============= Convolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D, - conv_sse42_2D, conv_avx2_2D, conv_avx512_2D }; @@ -176,7 +171,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution2D, ConvConcatSubgraphTest, params2D, /* ============= Convolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D, conv_avx2_3D, conv_avx512_3D }; @@ -195,8 +189,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Convolution3D, ConvConcatSubgraphTest, params3D, namespace GroupConvolutionConcat { /* ============= GroupConvolution (2D) ============= */ const std::vector CPUParams2D = { - conv_ref_2D, - conv_sse42_2D, conv_avx2_2D, conv_avx512_2D }; @@ -213,7 +205,6 @@ 
INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolution2D, ConvConcatSubgraphTest, param /* ============= GroupConvolution (3D) ============= */ const std::vector CPUParams3D = { - conv_ref_3D, conv_avx2_3D, conv_avx512_3D }; @@ -255,4 +246,4 @@ INSTANTIATE_TEST_SUITE_P(smoke_GroupConvolutionBackpropData3D, ConvConcatSubgrap } // namespace GroupConvolutionBackpropDataConcat } // namespace test -} // namespace ov \ No newline at end of file +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp index 2b9214c4c22cd0..36f9b0f1e50a65 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/memory_sharing_test.cpp @@ -33,8 +33,6 @@ TEST_F(EdgeWithSameNameInTwoModels, smoke_CompareWithRef) { std::tie(inFmts, outFmts, priority, selectedType) = conv_avx512_2D; } else if (ov::with_cpu_x86_avx2()) { std::tie(inFmts, outFmts, priority, selectedType) = conv_avx2_2D; - } else if (ov::with_cpu_x86_sse42()) { - std::tie(inFmts, outFmts, priority, selectedType) = conv_sse42_2D; } // first model diff --git a/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp b/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp index 6fc3a8ab9382d4..941a4274598de7 100644 --- a/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp +++ b/src/plugins/intel_cpu/tests/functional/utils/convolution_params.hpp @@ -7,14 +7,6 @@ #include "cpu_test_utils.hpp" namespace CPUTestUtils { - const auto conv_ref_1D = CPUSpecificParams{{ncw}, {ncw}, {"ref_any"}, "ref_any"}; - const auto conv_ref_2D = CPUSpecificParams{{nchw}, {nchw}, {"ref_any"}, "ref_any"}; - const auto conv_ref_3D = CPUSpecificParams{{ncdhw}, {ncdhw}, {"ref_any"}, "ref_any"}; - - const auto conv_ref_1D_nspc = 
CPUSpecificParams{{nwc}, {nwc}, {"ref_any"}, "ref_any"}; - const auto conv_ref_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"ref_any"}, "ref_any"}; - const auto conv_ref_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"ref_any"}, "ref_any"}; - const auto conv_gemm_1D = CPUSpecificParams{{ncw}, {ncw}, {"jit_gemm"}, "jit_gemm"}; const auto conv_gemm_2D = CPUSpecificParams{{nchw}, {nchw}, {"jit_gemm"}, "jit_gemm"}; const auto conv_gemm_3D = CPUSpecificParams{{ncdhw}, {ncdhw}, {"jit_gemm"}, "jit_gemm"}; @@ -31,24 +23,6 @@ namespace CPUTestUtils { const auto conv_gemm_acl_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"gemm_acl"}, "gemm_acl"}; const auto conv_gemm_acl_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"gemm_acl"}, "gemm_acl"}; - const auto conv_sse42_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_dw_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - - const auto conv_sse42_plain_to_blocked_1D = CPUSpecificParams{{ncw}, {nCw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_plain_to_blocked_2D = CPUSpecificParams{{nchw}, {nChw8c}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_plain_to_blocked_3D = CPUSpecificParams{{ncdhw}, {nCdhw8c}, {"jit_sse42"}, "jit_sse42"}; - - const auto conv_sse42_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42"}, "jit_sse42"}; - const auto conv_sse42_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42"}, "jit_sse42"}; - const auto 
conv_sse42_dw_1D_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_2D_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_sse42_dw_3D_nspc = CPUSpecificParams{{ndhwc}, {ndhwc}, {"jit_sse42_dw"}, "jit_sse42_dw"}; - const auto conv_avx2_1D = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2"}, "jit_avx2"}; const auto conv_avx2_2D = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2"}, "jit_avx2"}; const auto conv_avx2_3D = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {"jit_avx2"}, "jit_avx2"}; @@ -107,22 +81,18 @@ namespace CPUTestUtils { const auto conv_avx512_2D_nspc_brgconv_amx = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"}; const auto conv_avx512_3D_nspc_brgconv_amx = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx512_amx"}, "brgconv_avx512_amx"}; - const auto conv_sse42_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_1D_1x1 = CPUSpecificParams{{nCw8c}, {nCw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx512_1D_1x1 = CPUSpecificParams{{nCw16c}, {nCw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; - const auto conv_sse42_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx2_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; const auto conv_avx512_1D_1x1_nspc = CPUSpecificParams{{nwc}, {nwc}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; const auto conv_avx512_1D_1x1_nspc_brgconv = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_1x1"}, "brgconv_avx512_1x1"}; const auto conv_avx512_1D_1x1_nspc_brgconv_amx = CPUSpecificParams{{nwc}, {nwc}, {"brgconv_avx512_amx_1x1"}, "brgconv_avx512_amx_1x1"}; - const auto conv_sse42_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; 
const auto conv_avx2_2D_1x1 = CPUSpecificParams{{nChw8c}, {nChw8c}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx512_2D_1x1 = CPUSpecificParams{{nChw16c}, {nChw16c}, {"jit_avx512_1x1"}, "jit_avx512_1x1"}; - const auto conv_sse42_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_sse42_1x1"}, "jit_sse42_1x1"}; const auto conv_avx2_2D_1x1_nspc = CPUSpecificParams{{nhwc}, {nhwc}, {"jit_avx2_1x1"}, "jit_avx2_1x1"}; const auto conv_avx2_2D_1x1_nspc_brgconv = CPUSpecificParams{{nhwc}, {nhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; const auto conv_avx2_3D_1x1_nspc_brgconv = CPUSpecificParams{{ndhwc}, {ndhwc}, {"brgconv_avx2_1x1"}, "brgconv_avx2_1x1"}; From d27f0c41d4086b7ac5bcc65a2c9add2a4910b63b Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Tue, 21 Jan 2025 14:20:28 +0400 Subject: [PATCH 69/97] [GPU] Remove unused PagedAttention inputs causing set_arg error in case of zero buffer (#28577) ### Details: - This change removes unused PagedAttention inputs that were accidentally added during the rebase of the original PR, which caused a set_arg error in the case of a zero buffer - Added related test with a dummy activation function to simulate this behavior --- .../src/graph/impls/ocl/paged_attention.cpp | 22 ------------------- .../kernel_selector/cl_kernels/pa_sdpa_opt.cl | 6 ----- .../test_cases/paged_attention_gpu_test.cpp | 12 +++++++++- 3 files changed, 11 insertions(+), 29 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp index ab7d1a4f2ee1b4..1bcd4b0bb10fe2 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/paged_attention.cpp @@ -214,12 +214,6 @@ struct paged_attention_impl : multi_stage_primitive { if (desc->has_alibi) { args.inputs.push_back(instance.alibi_memory_ptr()); } - - if (desc->has_rotated_blocks) { - 
args.inputs.push_back(instance.rotated_block_indices_memory_ptr()); - args.inputs.push_back(instance.rotation_deltas_memory_ptr()); - args.inputs.push_back(instance.rotation_trig_lut_memory_ptr()); - } } else if (kernel_idx == 2 || kernel_idx == 3) { // Finalization kernel or mixed stage finalization kernel args.inputs = { instance.past_lens_memory_ptr() }; @@ -687,10 +681,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) inputs_number++; - const auto has_rotation = impl_param.input_layouts.size() == 16; - if (has_rotation) - inputs_number += 3; - auto input_idx = 0; params.inputs.resize(inputs_number); params.inputs[input_idx++] = query_tensor; @@ -709,12 +699,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) params.inputs[input_idx++] = alibi_tensor; - if (has_rotation) { - params.inputs[input_idx++] = input_tensors[13]; - params.inputs[input_idx++] = input_tensors[14]; - params.inputs[input_idx++] = input_tensors[15]; - } - if (has_scores_output) { params.outputs.resize(2); params.outputs[1] = convert_data_tensor(impl_param.get_output_layout(1)); @@ -752,12 +736,6 @@ struct paged_attention_impl : multi_stage_primitive { if (has_alibi) in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(11)}); - if (has_rotation) { - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(13)}); - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(14)}); - in_tensor_to_offset_map.insert({input_idx++, in_offsets_map.at(15)}); - } - if (has_scores_output) out_tensor_to_offset_map.insert({1, out_offsets_map.at(1)}); diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl index ae0f7a666c4309..7a300aaee1a16a 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/pa_sdpa_opt.cl @@ -43,12 +43,6 @@ KERNEL(pa_sdpa_opt)( #if HAS_ALIBI 
const __global ALIBI_INPUT_TYPE* alibi_slopes, #endif - -#if HAS_ROTATED_BLOCKS - const __global INPUT7_TYPE* rotated_block_indices, - const __global INPUT8_TYPE* rotation_deltas, - const __global INPUT9_TYPE* rotation_trig_lut, -#endif __global OUTPUT_TYPE* output, #if PAGED_ATTENTION_SCORES_OUTPUT __global SOFTMAX_ACCUMULATOR_TYPE* softmax_results, diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp index 7076b863c450d7..cdb927a57ca2bb 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/paged_attention_gpu_test.cpp @@ -5,6 +5,7 @@ #include "test_utils.h" #include "random_generator.hpp" +#include #include #include #include @@ -306,6 +307,12 @@ struct PagedAttentionManager { auto layout = mem->get_layout(); layout.set_partial_shape(ov::PartialShape{ max_context_len[0], head_size }); + if (rotated_block_indices.empty()) { + auto empty_layout = mem->get_layout(); + empty_layout.set_partial_shape(ov::PartialShape{ 0, head_size }); + return test_engine.reinterpret_buffer(*mem, empty_layout); + } + return test_engine.reinterpret_buffer(*mem, layout); } @@ -741,7 +748,7 @@ struct PagedAttentionTest : public ::testing::TestWithParam { if (p.rotation_config.apply_rotation) { pa_inputs.push_back(input_info("rotated_block_indices")); pa_inputs.push_back(input_info("rotation_deltas")); - pa_inputs.push_back(input_info("rotation_trig_lut")); + pa_inputs.push_back(input_info("rotation_trig_lut_modified")); } auto pa_prim = paged_attention("paged_attention", pa_inputs); @@ -782,6 +789,9 @@ struct PagedAttentionTest : public ::testing::TestWithParam { topology.add(input_layout("rotated_block_indices", rotated_block_indices_layout)); topology.add(input_layout("rotation_deltas", rotation_deltas_layout)); topology.add(input_layout("rotation_trig_lut", rotation_trig_lut_layout)); + + // add 
dummy activation operation to simulate an empty PA `rotation_trig_lut` buffer for shapes like [0, head_size] + topology.add(activation("rotation_trig_lut_modified", input_info("rotation_trig_lut"), activation_func::none)); } ExecutionConfig config = get_test_default_config(get_test_engine()); From c99a862597ee03e0c3302fe5a3edf9a21a17413f Mon Sep 17 00:00:00 2001 From: Arseniy Obolenskiy Date: Tue, 21 Jan 2025 11:47:06 +0100 Subject: [PATCH 70/97] [CPU] Fix uninitialized array filed coverity issue with CID 1590270 (#28580) ### Details: - Fix uninitialized array class member field in PlainTensor class ### Tickets: - 160817 --- src/plugins/intel_cpu/src/utils/plain_tensor.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index a27f29c0ab0e1b..497688f831bb90 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -91,8 +91,8 @@ struct precision_of { #define PLAINTENSOR_RANK_MAX 8 struct PlainTensor { - size_t m_strides[PLAINTENSOR_RANK_MAX]; - size_t m_dims[PLAINTENSOR_RANK_MAX]; + size_t m_strides[PLAINTENSOR_RANK_MAX] = {}; + size_t m_dims[PLAINTENSOR_RANK_MAX] = {}; size_t m_rank = 0; std::shared_ptr m_ptr; size_t m_capacity = 0; From bc6e3347e09e30219bb83f2ba08c25da04f4c59f Mon Sep 17 00:00:00 2001 From: Georgy Krivoruchko Date: Tue, 21 Jan 2025 15:03:57 +0400 Subject: [PATCH 71/97] [ONNX] Added support of optional zero-point for DQ-21 (#28562) ### Details: - Added handling of an optional zero-point for DequantizeLinear-21 ### Tickets: - 160914 --- .../frontend/src/op/dequantize_linear.cpp | 2 + ...equantize_linear_21_no_zero_point.prototxt | 55 +++++++++++++++++++ .../onnx/tests/onnx_import_quant.in.cpp | 11 ++++ 3 files changed, 68 insertions(+) create mode 100644 src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt diff --git 
a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp index a80e6a77f430e2..4705504699158b 100644 --- a/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp +++ b/src/frontends/onnx/frontend/src/op/dequantize_linear.cpp @@ -255,6 +255,8 @@ ov::OutputVector dequantize_linear(const ov::frontend::onnx::Node& node) { if (zp.get_node_shared_ptr()) { broadcastable_x = std::make_shared(x, zp); + } else { + broadcastable_x = x; } const auto& scaled_x = std::make_shared(broadcastable_x, scale); diff --git a/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt b/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt new file mode 100644 index 00000000000000..ab276f3b4d7c78 --- /dev/null +++ b/src/frontends/onnx/tests/models/dequantize_linear_21_no_zero_point.prototxt @@ -0,0 +1,55 @@ +ir_version: 3 +producer_name: "OpenVINO ONNX Frontend" +graph { + name: "test_dequantize_21" + initializer { + dims: 6 + dims: 3 + data_type: 21 + name: "data" + raw_data: "\x21\x43\x65\x87\xA9\xCB\xED\xFF\x00" + } + initializer { + dims: 2 + dims: 3 + data_type: 1 + name: "scale" + raw_data: "\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f\x00\x00\x80\x3f" + } + node { + input: "data" + input: "scale" + output: "output" + name: "DequantizeNode" + op_type: "DequantizeLinear" + attribute { + name: "axis" + i: 0 + type: INT + } + attribute { + name: "block_size" + i: 3 + type: INT + } + } + output { + name: "output" + type { + tensor_type { + elem_type: 1 + shape { + dim { + dim_value: 6 + } + dim { + dim_value: 3 + } + } + } + } + } +} +opset_import { + version: 21 +} diff --git a/src/frontends/onnx/tests/onnx_import_quant.in.cpp b/src/frontends/onnx/tests/onnx_import_quant.in.cpp index 793eb73772880a..166898988a59e4 100644 --- a/src/frontends/onnx/tests/onnx_import_quant.in.cpp +++ b/src/frontends/onnx/tests/onnx_import_quant.in.cpp @@ -191,6 +191,17 
@@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_no_zero_point) { test_case.run(); } +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_21_no_zero_point) { + auto model = convert_model("dequantize_linear_21_no_zero_point.onnx"); + + auto test_case = ov::test::TestCase(model, s_device); + + test_case.add_expected_output( + {6, 3}, + std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 0, 0}); + test_case.run(); +} + OPENVINO_TEST(${BACKEND_NAME}, onnx_model_dequantize_linear_scalar_zero_scale_uint8) { auto model = convert_model("dequantize_linear_0.onnx"); From cc05aadbd442eec471654dbb482c2265a5aba103 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:05:39 +0400 Subject: [PATCH 72/97] Bump actions/cache from 4.1.2 to 4.2.0 (#28574) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/cache](https://github.com/actions/cache) from 4.1.2 to 4.2.0.
    Release notes

    Sourced from actions/cache's releases.

    v4.2.0

    ⚠️ Important Changes

    The cache backend service has been rewritten from the ground up for improved performance and reliability. actions/cache now integrates with the new cache service (v2) APIs.

    The new service will gradually roll out as of February 1st, 2025. The legacy service will also be sunset on the same date. Changes in this release are fully backward compatible.

    We are deprecating some versions of this action. We recommend upgrading to version v4 or v3 as soon as possible before February 1st, 2025. (Upgrade instructions below).

    If you are using pinned SHAs, please use the SHAs of versions v4.2.0 or v3.4.0

    If you do not upgrade, all workflow runs using any of the deprecated actions/cache will fail.

    Upgrading to the recommended versions will not break your workflows.

    Read more about the change & access the migration guide: reference to the announcement.

    Minor changes

    Minor and patch version updates for these dependencies:

    • @​actions/core: 1.11.1
    • @​actions/io: 1.1.3
    • @​vercel/ncc: 0.38.3

    Full Changelog: https://github.com/actions/cache/compare/v4...v4.2.0

    Changelog

    Sourced from actions/cache's changelog.

    Releases

    4.2.0

    TLDR; The cache backend service has been rewritten from the ground up for improved performance and reliability. actions/cache now integrates with the new cache service (v2) APIs.

    The new service will gradually roll out as of February 1st, 2025. The legacy service will also be sunset on the same date. Changes in this release are fully backward compatible.

    We are deprecating some versions of this action. We recommend upgrading to version v4 or v3 as soon as possible before February 1st, 2025. (Upgrade instructions below).

    If you are using pinned SHAs, please use the SHAs of versions v4.2.0 or v3.4.0

    If you do not upgrade, all workflow runs using any of the deprecated actions/cache will fail.

    Upgrading to the recommended versions will not break your workflows.

    4.1.2

    • Add GitHub Enterprise Cloud instances hostname filters to inform API endpoint choices - #1474
    • Security fix: Bump braces from 3.0.2 to 3.0.3 - #1475

    4.1.1

    • Restore original behavior of cache-hit output - #1467

    4.1.0

    • Ensure cache-hit output is set when a cache is missed - #1404
    • Deprecate save-always input - #1452

    4.0.2

    • Fixed restore fail-on-cache-miss not working.

    4.0.1

    • Updated isGhes check

    4.0.0

    • Updated minimum runner version support from node 12 -> node 20

    3.4.0

    • Integrated with the new cache service (v2) APIs

    3.3.3

    • Updates @​actions/cache to v3.2.3 to fix accidental mutated path arguments to getCacheVersion actions/toolkit#1378
    • Additional audit fixes of npm package(s)

    ... (truncated)

    Commits

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/cache&package-manager=github_actions&previous-version=4.1.2&new-version=4.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build_doc.yml | 2 +- .github/workflows/job_cpu_functional_tests.yml | 4 ++-- .github/workflows/ovc.yml | 2 +- .github/workflows/windows_conditional_compilation.yml | 2 +- .github/workflows/windows_vs2019_release.yml | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build_doc.yml b/.github/workflows/build_doc.yml index 2ea17b79af7514..b0739432f29066 100644 --- a/.github/workflows/build_doc.yml +++ b/.github/workflows/build_doc.yml @@ -64,7 +64,7 @@ jobs: - name: Cache documentation id: cache_sphinx_docs - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: build/docs/_build/.doctrees key: sphinx-docs-cache diff --git a/.github/workflows/job_cpu_functional_tests.yml b/.github/workflows/job_cpu_functional_tests.yml index 568c33d39e307b..74e54d389a8ec5 100644 --- a/.github/workflows/job_cpu_functional_tests.yml +++ b/.github/workflows/job_cpu_functional_tests.yml @@ -90,7 +90,7 @@ jobs: run: python3 -m pip install -r ${INSTALL_TEST_DIR}/functional_test_utils/layer_tests_summary/requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-${{ runner.arch }}-tests-functional-cpu-stamp-${{ github.sha }} @@ -110,7 +110,7 @@ jobs: timeout-minutes: 25 - name: Save tests execution time - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 if: github.ref_name == 'master' with: path: ${{ env.PARALLEL_TEST_CACHE }} diff --git a/.github/workflows/ovc.yml b/.github/workflows/ovc.yml index 
3e7dedf50ad51b..a554ef4fadc6d3 100644 --- a/.github/workflows/ovc.yml +++ b/.github/workflows/ovc.yml @@ -28,7 +28,7 @@ jobs: python-version: '3.10' - name: Cache pip - uses: actions/cache@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('src/bindings/python/requirements*.txt') }} diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 0f965eabd3c1ad..6ce104ad07fe9f 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -393,7 +393,7 @@ jobs: run: python3 -m pip install -r ${{ env.INSTALL_TEST_DIR }}/layer_tests_summary/requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index 92d826de1d8394..d909c18633795e 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -581,7 +581,7 @@ jobs: run: python3 -m pip install -r ${{ github.workspace }}\install\tests\functional_test_utils\layer_tests_summary\requirements.txt - name: Restore tests execution time - uses: actions/cache/restore@6849a6489940f00c2f30c0fb92c6274307ccb58a # v4.1.2 + uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 with: path: ${{ env.PARALLEL_TEST_CACHE }} key: ${{ runner.os }}-tests-functional-cpu-stamp-${{ github.sha }} @@ -595,7 +595,7 @@ jobs: timeout-minutes: 60 - name: Save tests execution time - uses: actions/cache/save@6849a6489940f00c2f30c0fb92c6274307ccb58a # 
v4.1.2 + uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 if: github.ref_name == 'master' with: path: ${{ env.PARALLEL_TEST_CACHE }} From 60a3f0cc2a09f08b534ea431df89b26c565c17bf Mon Sep 17 00:00:00 2001 From: Sun Xiaoxia Date: Tue, 21 Jan 2025 20:45:31 +0800 Subject: [PATCH 73/97] fix CID issue 1590212 (#28543) ### Details: - *fix CID issue 1590212* ### Tickets: - *ticket-id* --- src/inference/src/dev/threading/cpu_streams_executor.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/inference/src/dev/threading/cpu_streams_executor.cpp b/src/inference/src/dev/threading/cpu_streams_executor.cpp index 0313c4f5aabc6b..bb47b813dce05f 100644 --- a/src/inference/src/dev/threading/cpu_streams_executor.cpp +++ b/src/inference/src/dev/threading/cpu_streams_executor.cpp @@ -504,7 +504,12 @@ void CPUStreamsExecutor::cpu_reset() { CPUStreamsExecutor::CPUStreamsExecutor(const IStreamsExecutor::Config& config) : _impl{new Impl{config}} {} CPUStreamsExecutor::~CPUStreamsExecutor() { - cpu_reset(); + try { + cpu_reset(); + } catch (const ov::Exception&) { + // Destructor should not throw - catch needed for static analysis. 
+ OPENVINO_THROW("Reset CPU state error."); + } { std::lock_guard lock(_impl->_mutex); _impl->_isStopped = true; From bad9b106c8e1c4b3c18747b0b55ded4fe78560bd Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Tue, 21 Jan 2025 16:14:25 +0200 Subject: [PATCH 74/97] Removing unused files (#28586) ### Details: - * These files are used only for local testing purposes and were added by mistake* Signed-off-by: Bogdan Pereanu --- .../remote_tensor_tests/d3dx12_core.h | 1389 ----------------- .../remote_tensor_tests/d3dx12_default.h | 15 - 2 files changed, 1404 deletions(-) delete mode 100644 src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h delete mode 100644 src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h deleted file mode 100644 index e20327ccbe3158..00000000000000 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_core.h +++ /dev/null @@ -1,1389 +0,0 @@ -//********************************************************* -// -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License (MIT). 
-// -//********************************************************* - -#pragma once -#ifdef _WIN32 - -# ifndef __cplusplus -# error D3DX12 requires C++ -# endif - -# include - -# include "d3d12.h" -# include "d3dx12_default.h" - - -//------------------------------------------------------------------------------------------------ -# ifndef D3DX12_ASSERT -# ifdef assert -# define D3DX12_ASSERT(x) assert(x) -# else -# define D3DX12_ASSERT(x) -# endif -# endif - -//------------------------------------------------------------------------------------------------ -template -inline ID3D12CommandList* const* CommandListCast(t_CommandListType* const* pp) noexcept { - // This cast is useful for passing strongly typed command list pointers into - // ExecuteCommandLists. - // This cast is valid as long as the const-ness is respected. D3D12 APIs do - // respect the const-ness of their arguments. - return reinterpret_cast(pp); -} - -//------------------------------------------------------------------------------------------------ -inline bool operator==(const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r) noexcept { - return l.TopLeftX == r.TopLeftX && l.TopLeftY == r.TopLeftY && l.Width == r.Width && l.Height == r.Height && - l.MinDepth == r.MinDepth && l.MaxDepth == r.MaxDepth; -} - -//------------------------------------------------------------------------------------------------ -inline bool operator!=(const D3D12_VIEWPORT& l, const D3D12_VIEWPORT& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RECT : public D3D12_RECT { - CD3DX12_RECT() = default; - explicit CD3DX12_RECT(const D3D12_RECT& o) noexcept : D3D12_RECT(o) {} - explicit CD3DX12_RECT(LONG Left, LONG Top, LONG Right, LONG Bottom) noexcept { - left = Left; - top = Top; - right = Right; - bottom = Bottom; - } -}; - -//------------------------------------------------------------------------------------------------ 
-struct CD3DX12_VIEWPORT : public D3D12_VIEWPORT { - CD3DX12_VIEWPORT() = default; - explicit CD3DX12_VIEWPORT(const D3D12_VIEWPORT& o) noexcept : D3D12_VIEWPORT(o) {} - explicit CD3DX12_VIEWPORT(FLOAT topLeftX, - FLOAT topLeftY, - FLOAT width, - FLOAT height, - FLOAT minDepth = D3D12_MIN_DEPTH, - FLOAT maxDepth = D3D12_MAX_DEPTH) noexcept { - TopLeftX = topLeftX; - TopLeftY = topLeftY; - Width = width; - Height = height; - MinDepth = minDepth; - MaxDepth = maxDepth; - } - explicit CD3DX12_VIEWPORT(_In_ ID3D12Resource* pResource, - UINT mipSlice = 0, - FLOAT topLeftX = 0.0f, - FLOAT topLeftY = 0.0f, - FLOAT minDepth = D3D12_MIN_DEPTH, - FLOAT maxDepth = D3D12_MAX_DEPTH) noexcept { -# if defined(_MSC_VER) || !defined(_WIN32) - const auto Desc = pResource->GetDesc(); -# else - D3D12_RESOURCE_DESC tmpDesc; - const auto& Desc = *pResource->GetDesc(&tmpDesc); -# endif - const UINT64 SubresourceWidth = Desc.Width >> mipSlice; - const UINT64 SubresourceHeight = Desc.Height >> mipSlice; - switch (Desc.Dimension) { - case D3D12_RESOURCE_DIMENSION_BUFFER: - TopLeftX = topLeftX; - TopLeftY = 0.0f; - Width = float(Desc.Width) - topLeftX; - Height = 1.0f; - break; - case D3D12_RESOURCE_DIMENSION_TEXTURE1D: - TopLeftX = topLeftX; - TopLeftY = 0.0f; - Width = (SubresourceWidth ? float(SubresourceWidth) : 1.0f) - topLeftX; - Height = 1.0f; - break; - case D3D12_RESOURCE_DIMENSION_TEXTURE2D: - case D3D12_RESOURCE_DIMENSION_TEXTURE3D: - TopLeftX = topLeftX; - TopLeftY = topLeftY; - Width = (SubresourceWidth ? float(SubresourceWidth) : 1.0f) - topLeftX; - Height = (SubresourceHeight ? 
float(SubresourceHeight) : 1.0f) - topLeftY; - break; - default: - break; - } - - MinDepth = minDepth; - MaxDepth = maxDepth; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_BOX : public D3D12_BOX { - CD3DX12_BOX() = default; - explicit CD3DX12_BOX(const D3D12_BOX& o) noexcept : D3D12_BOX(o) {} - explicit CD3DX12_BOX(LONG Left, LONG Right) noexcept { - left = static_cast(Left); - top = 0; - front = 0; - right = static_cast(Right); - bottom = 1; - back = 1; - } - explicit CD3DX12_BOX(LONG Left, LONG Top, LONG Right, LONG Bottom) noexcept { - left = static_cast(Left); - top = static_cast(Top); - front = 0; - right = static_cast(Right); - bottom = static_cast(Bottom); - back = 1; - } - explicit CD3DX12_BOX(LONG Left, LONG Top, LONG Front, LONG Right, LONG Bottom, LONG Back) noexcept { - left = static_cast(Left); - top = static_cast(Top); - front = static_cast(Front); - right = static_cast(Right); - bottom = static_cast(Bottom); - back = static_cast(Back); - } -}; -inline bool operator==(const D3D12_BOX& l, const D3D12_BOX& r) noexcept { - return l.left == r.left && l.top == r.top && l.front == r.front && l.right == r.right && l.bottom == r.bottom && - l.back == r.back; -} -inline bool operator!=(const D3D12_BOX& l, const D3D12_BOX& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_DEPTH_STENCIL_DESC : public D3D12_DEPTH_STENCIL_DESC { - CD3DX12_DEPTH_STENCIL_DESC() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC(const D3D12_DEPTH_STENCIL_DESC& o) noexcept : D3D12_DEPTH_STENCIL_DESC(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK; - StencilWriteMask = 
D3D12_DEFAULT_STENCIL_WRITE_MASK; - const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - } - explicit CD3DX12_DEPTH_STENCIL_DESC(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - UINT8 stencilReadMask, - UINT8 stencilWriteMask, - D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - StencilReadMask = stencilReadMask; - StencilWriteMask = stencilWriteMask; - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_DEPTH_STENCIL_DESC1 : public D3D12_DEPTH_STENCIL_DESC1 { - CD3DX12_DEPTH_STENCIL_DESC1() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC1(const D3D12_DEPTH_STENCIL_DESC1& o) noexcept : D3D12_DEPTH_STENCIL_DESC1(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC1(const D3D12_DEPTH_STENCIL_DESC& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - StencilReadMask = o.StencilReadMask; - 
StencilWriteMask = o.StencilWriteMask; - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC1(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - StencilReadMask = D3D12_DEFAULT_STENCIL_READ_MASK; - StencilWriteMask = D3D12_DEFAULT_STENCIL_WRITE_MASK; - const D3D12_DEPTH_STENCILOP_DESC defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC1(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - UINT8 stencilReadMask, - UINT8 stencilWriteMask, - D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc, - BOOL depthBoundsTestEnable) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - StencilReadMask = stencilReadMask; - StencilWriteMask = stencilWriteMask; - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = 
frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - DepthBoundsTestEnable = depthBoundsTestEnable; - } - operator D3D12_DEPTH_STENCIL_DESC() const noexcept { - D3D12_DEPTH_STENCIL_DESC D; - D.DepthEnable = DepthEnable; - D.DepthWriteMask = DepthWriteMask; - D.DepthFunc = DepthFunc; - D.StencilEnable = StencilEnable; - D.StencilReadMask = StencilReadMask; - D.StencilWriteMask = StencilWriteMask; - D.FrontFace.StencilFailOp = FrontFace.StencilFailOp; - D.FrontFace.StencilDepthFailOp = FrontFace.StencilDepthFailOp; - D.FrontFace.StencilPassOp = FrontFace.StencilPassOp; - D.FrontFace.StencilFunc = FrontFace.StencilFunc; - D.BackFace.StencilFailOp = BackFace.StencilFailOp; - D.BackFace.StencilDepthFailOp = BackFace.StencilDepthFailOp; - D.BackFace.StencilPassOp = BackFace.StencilPassOp; - D.BackFace.StencilFunc = BackFace.StencilFunc; - return D; - } -}; - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 606) -struct CD3DX12_DEPTH_STENCIL_DESC2 : public D3D12_DEPTH_STENCIL_DESC2 { - CD3DX12_DEPTH_STENCIL_DESC2() = default; - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC2& o) noexcept : D3D12_DEPTH_STENCIL_DESC2(o) {} - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC1& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - FrontFace.StencilReadMask = o.StencilReadMask; - FrontFace.StencilWriteMask = 
o.StencilWriteMask; - - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - BackFace.StencilReadMask = o.StencilReadMask; - BackFace.StencilWriteMask = o.StencilWriteMask; - DepthBoundsTestEnable = o.DepthBoundsTestEnable; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(const D3D12_DEPTH_STENCIL_DESC& o) noexcept { - DepthEnable = o.DepthEnable; - DepthWriteMask = o.DepthWriteMask; - DepthFunc = o.DepthFunc; - StencilEnable = o.StencilEnable; - - FrontFace.StencilFailOp = o.FrontFace.StencilFailOp; - FrontFace.StencilDepthFailOp = o.FrontFace.StencilDepthFailOp; - FrontFace.StencilPassOp = o.FrontFace.StencilPassOp; - FrontFace.StencilFunc = o.FrontFace.StencilFunc; - FrontFace.StencilReadMask = o.StencilReadMask; - FrontFace.StencilWriteMask = o.StencilWriteMask; - - BackFace.StencilFailOp = o.BackFace.StencilFailOp; - BackFace.StencilDepthFailOp = o.BackFace.StencilDepthFailOp; - BackFace.StencilPassOp = o.BackFace.StencilPassOp; - BackFace.StencilFunc = o.BackFace.StencilFunc; - BackFace.StencilReadMask = o.StencilReadMask; - BackFace.StencilWriteMask = o.StencilWriteMask; - - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(CD3DX12_DEFAULT) noexcept { - DepthEnable = TRUE; - DepthWriteMask = D3D12_DEPTH_WRITE_MASK_ALL; - DepthFunc = D3D12_COMPARISON_FUNC_LESS; - StencilEnable = FALSE; - const D3D12_DEPTH_STENCILOP_DESC1 defaultStencilOp = {D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_STENCIL_OP_KEEP, - D3D12_COMPARISON_FUNC_ALWAYS, - D3D12_DEFAULT_STENCIL_READ_MASK, - D3D12_DEFAULT_STENCIL_WRITE_MASK}; - FrontFace = defaultStencilOp; - BackFace = defaultStencilOp; - DepthBoundsTestEnable = FALSE; - } - explicit CD3DX12_DEPTH_STENCIL_DESC2(BOOL depthEnable, - D3D12_DEPTH_WRITE_MASK depthWriteMask, - D3D12_COMPARISON_FUNC depthFunc, - BOOL stencilEnable, - 
D3D12_STENCIL_OP frontStencilFailOp, - D3D12_STENCIL_OP frontStencilDepthFailOp, - D3D12_STENCIL_OP frontStencilPassOp, - D3D12_COMPARISON_FUNC frontStencilFunc, - UINT8 frontStencilReadMask, - UINT8 frontStencilWriteMask, - D3D12_STENCIL_OP backStencilFailOp, - D3D12_STENCIL_OP backStencilDepthFailOp, - D3D12_STENCIL_OP backStencilPassOp, - D3D12_COMPARISON_FUNC backStencilFunc, - UINT8 backStencilReadMask, - UINT8 backStencilWriteMask, - BOOL depthBoundsTestEnable) noexcept { - DepthEnable = depthEnable; - DepthWriteMask = depthWriteMask; - DepthFunc = depthFunc; - StencilEnable = stencilEnable; - - FrontFace.StencilFailOp = frontStencilFailOp; - FrontFace.StencilDepthFailOp = frontStencilDepthFailOp; - FrontFace.StencilPassOp = frontStencilPassOp; - FrontFace.StencilFunc = frontStencilFunc; - FrontFace.StencilReadMask = frontStencilReadMask; - FrontFace.StencilWriteMask = frontStencilWriteMask; - - BackFace.StencilFailOp = backStencilFailOp; - BackFace.StencilDepthFailOp = backStencilDepthFailOp; - BackFace.StencilPassOp = backStencilPassOp; - BackFace.StencilFunc = backStencilFunc; - BackFace.StencilReadMask = backStencilReadMask; - BackFace.StencilWriteMask = backStencilWriteMask; - - DepthBoundsTestEnable = depthBoundsTestEnable; - } - - operator D3D12_DEPTH_STENCIL_DESC() const noexcept { - D3D12_DEPTH_STENCIL_DESC D; - D.DepthEnable = DepthEnable; - D.DepthWriteMask = DepthWriteMask; - D.DepthFunc = DepthFunc; - D.StencilEnable = StencilEnable; - D.StencilReadMask = FrontFace.StencilReadMask; - D.StencilWriteMask = FrontFace.StencilWriteMask; - D.FrontFace.StencilFailOp = FrontFace.StencilFailOp; - D.FrontFace.StencilDepthFailOp = FrontFace.StencilDepthFailOp; - D.FrontFace.StencilPassOp = FrontFace.StencilPassOp; - D.FrontFace.StencilFunc = FrontFace.StencilFunc; - D.BackFace.StencilFailOp = BackFace.StencilFailOp; - D.BackFace.StencilDepthFailOp = BackFace.StencilDepthFailOp; - D.BackFace.StencilPassOp = BackFace.StencilPassOp; - D.BackFace.StencilFunc = 
BackFace.StencilFunc; - return D; - } -}; -# endif // D3D12_SDK_VERSION >= 606 - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_BLEND_DESC : public D3D12_BLEND_DESC { - CD3DX12_BLEND_DESC() = default; - explicit CD3DX12_BLEND_DESC(const D3D12_BLEND_DESC& o) noexcept : D3D12_BLEND_DESC(o) {} - explicit CD3DX12_BLEND_DESC(CD3DX12_DEFAULT) noexcept { - AlphaToCoverageEnable = FALSE; - IndependentBlendEnable = FALSE; - const D3D12_RENDER_TARGET_BLEND_DESC defaultRenderTargetBlendDesc = { - FALSE, - FALSE, - D3D12_BLEND_ONE, - D3D12_BLEND_ZERO, - D3D12_BLEND_OP_ADD, - D3D12_BLEND_ONE, - D3D12_BLEND_ZERO, - D3D12_BLEND_OP_ADD, - D3D12_LOGIC_OP_NOOP, - D3D12_COLOR_WRITE_ENABLE_ALL, - }; - for (UINT i = 0; i < D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT; ++i) - RenderTarget[i] = defaultRenderTargetBlendDesc; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RASTERIZER_DESC : public D3D12_RASTERIZER_DESC { - CD3DX12_RASTERIZER_DESC() = default; - explicit CD3DX12_RASTERIZER_DESC(const D3D12_RASTERIZER_DESC& o) noexcept : D3D12_RASTERIZER_DESC(o) {} - explicit CD3DX12_RASTERIZER_DESC(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - MultisampleEnable = FALSE; - AntialiasedLineEnable = FALSE; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE cullMode, - BOOL frontCounterClockwise, - INT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - BOOL multisampleEnable, - BOOL antialiasedLineEnable, - UINT forcedSampleCount, - 
D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - MultisampleEnable = multisampleEnable; - AntialiasedLineEnable = antialiasedLineEnable; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } -}; - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 608) -struct CD3DX12_RASTERIZER_DESC1 : public D3D12_RASTERIZER_DESC1 { - CD3DX12_RASTERIZER_DESC1() = default; - explicit CD3DX12_RASTERIZER_DESC1(const D3D12_RASTERIZER_DESC1& o) noexcept - : D3D12_RASTERIZER_DESC1(o) - - {} - explicit CD3DX12_RASTERIZER_DESC1(const D3D12_RASTERIZER_DESC& o) noexcept { - FillMode = o.FillMode; - CullMode = o.CullMode; - FrontCounterClockwise = o.FrontCounterClockwise; - DepthBias = static_cast(o.DepthBias); - DepthBiasClamp = o.DepthBiasClamp; - SlopeScaledDepthBias = o.SlopeScaledDepthBias; - DepthClipEnable = o.DepthClipEnable; - MultisampleEnable = o.MultisampleEnable; - AntialiasedLineEnable = o.AntialiasedLineEnable; - ForcedSampleCount = o.ForcedSampleCount; - ConservativeRaster = o.ConservativeRaster; - } - explicit CD3DX12_RASTERIZER_DESC1(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - MultisampleEnable = FALSE; - AntialiasedLineEnable = FALSE; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC1(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE 
cullMode, - BOOL frontCounterClockwise, - FLOAT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - BOOL multisampleEnable, - BOOL antialiasedLineEnable, - UINT forcedSampleCount, - D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - MultisampleEnable = multisampleEnable; - AntialiasedLineEnable = antialiasedLineEnable; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } - - operator D3D12_RASTERIZER_DESC() const noexcept { - D3D12_RASTERIZER_DESC o; - - o.FillMode = FillMode; - o.CullMode = CullMode; - o.FrontCounterClockwise = FrontCounterClockwise; - o.DepthBias = static_cast(DepthBias); - o.DepthBiasClamp = DepthBiasClamp; - o.SlopeScaledDepthBias = SlopeScaledDepthBias; - o.DepthClipEnable = DepthClipEnable; - o.MultisampleEnable = MultisampleEnable; - o.AntialiasedLineEnable = AntialiasedLineEnable; - o.ForcedSampleCount = ForcedSampleCount; - o.ConservativeRaster = ConservativeRaster; - - return o; - } -}; -# endif // D3D12_SDK_VERSION >= 608 - -//------------------------------------------------------------------------------------------------ -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 610) -struct CD3DX12_RASTERIZER_DESC2 : public D3D12_RASTERIZER_DESC2 { - CD3DX12_RASTERIZER_DESC2() = default; - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC2& o) noexcept - : D3D12_RASTERIZER_DESC2(o) - - {} - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC1& o) noexcept { - FillMode = o.FillMode; - CullMode = o.CullMode; - FrontCounterClockwise = o.FrontCounterClockwise; - DepthBias = o.DepthBias; - DepthBiasClamp = o.DepthBiasClamp; - SlopeScaledDepthBias = o.SlopeScaledDepthBias; - DepthClipEnable = 
o.DepthClipEnable; - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALIASED; - if (o.MultisampleEnable) { - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_QUADRILATERAL_WIDE; - } else if (o.AntialiasedLineEnable) { - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALPHA_ANTIALIASED; - } - ForcedSampleCount = o.ForcedSampleCount; - ConservativeRaster = o.ConservativeRaster; - } - explicit CD3DX12_RASTERIZER_DESC2(const D3D12_RASTERIZER_DESC& o) noexcept - : CD3DX12_RASTERIZER_DESC2(CD3DX12_RASTERIZER_DESC1(o)) {} - explicit CD3DX12_RASTERIZER_DESC2(CD3DX12_DEFAULT) noexcept { - FillMode = D3D12_FILL_MODE_SOLID; - CullMode = D3D12_CULL_MODE_BACK; - FrontCounterClockwise = FALSE; - DepthBias = D3D12_DEFAULT_DEPTH_BIAS; - DepthBiasClamp = D3D12_DEFAULT_DEPTH_BIAS_CLAMP; - SlopeScaledDepthBias = D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS; - DepthClipEnable = TRUE; - LineRasterizationMode = D3D12_LINE_RASTERIZATION_MODE_ALIASED; - ForcedSampleCount = 0; - ConservativeRaster = D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF; - } - explicit CD3DX12_RASTERIZER_DESC2(D3D12_FILL_MODE fillMode, - D3D12_CULL_MODE cullMode, - BOOL frontCounterClockwise, - FLOAT depthBias, - FLOAT depthBiasClamp, - FLOAT slopeScaledDepthBias, - BOOL depthClipEnable, - D3D12_LINE_RASTERIZATION_MODE lineRasterizationMode, - UINT forcedSampleCount, - D3D12_CONSERVATIVE_RASTERIZATION_MODE conservativeRaster) noexcept { - FillMode = fillMode; - CullMode = cullMode; - FrontCounterClockwise = frontCounterClockwise; - DepthBias = depthBias; - DepthBiasClamp = depthBiasClamp; - SlopeScaledDepthBias = slopeScaledDepthBias; - DepthClipEnable = depthClipEnable; - LineRasterizationMode = lineRasterizationMode; - ForcedSampleCount = forcedSampleCount; - ConservativeRaster = conservativeRaster; - } - - operator D3D12_RASTERIZER_DESC1() const noexcept { - D3D12_RASTERIZER_DESC1 o; - - o.FillMode = FillMode; - o.CullMode = CullMode; - o.FrontCounterClockwise = FrontCounterClockwise; - o.DepthBias = 
DepthBias; - o.DepthBiasClamp = DepthBiasClamp; - o.SlopeScaledDepthBias = SlopeScaledDepthBias; - o.DepthClipEnable = DepthClipEnable; - o.MultisampleEnable = FALSE; - o.AntialiasedLineEnable = FALSE; - if (LineRasterizationMode == D3D12_LINE_RASTERIZATION_MODE_ALPHA_ANTIALIASED) { - o.AntialiasedLineEnable = TRUE; - } else if (LineRasterizationMode != D3D12_LINE_RASTERIZATION_MODE_ALIASED) { - o.MultisampleEnable = TRUE; - } - o.ForcedSampleCount = ForcedSampleCount; - o.ConservativeRaster = ConservativeRaster; - - return o; - } - operator D3D12_RASTERIZER_DESC() const noexcept { - return static_cast(CD3DX12_RASTERIZER_DESC1(static_cast(*this))); - } -}; -# endif // D3D12_SDK_VERSION >= 610 - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_ALLOCATION_INFO : public D3D12_RESOURCE_ALLOCATION_INFO { - CD3DX12_RESOURCE_ALLOCATION_INFO() = default; - explicit CD3DX12_RESOURCE_ALLOCATION_INFO(const D3D12_RESOURCE_ALLOCATION_INFO& o) noexcept - : D3D12_RESOURCE_ALLOCATION_INFO(o) {} - CD3DX12_RESOURCE_ALLOCATION_INFO(UINT64 size, UINT64 alignment) noexcept { - SizeInBytes = size; - Alignment = alignment; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_HEAP_PROPERTIES : public D3D12_HEAP_PROPERTIES { - CD3DX12_HEAP_PROPERTIES() = default; - explicit CD3DX12_HEAP_PROPERTIES(const D3D12_HEAP_PROPERTIES& o) noexcept : D3D12_HEAP_PROPERTIES(o) {} - CD3DX12_HEAP_PROPERTIES(D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - UINT creationNodeMask = 1, - UINT nodeMask = 1) - noexcept { - Type = D3D12_HEAP_TYPE_CUSTOM; - CPUPageProperty = cpuPageProperty; - MemoryPoolPreference = memoryPoolPreference; - CreationNodeMask = creationNodeMask; - VisibleNodeMask = nodeMask; - } - explicit CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE type, UINT creationNodeMask = 1, UINT nodeMask = 1) noexcept { - Type = 
type; - CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN; - MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN; - CreationNodeMask = creationNodeMask; - VisibleNodeMask = nodeMask; - } - bool IsCPUAccessible() const noexcept { - return Type == D3D12_HEAP_TYPE_UPLOAD || Type == D3D12_HEAP_TYPE_READBACK -# if defined(D3D12_SDK_VERSION) && (D3D12_SDK_VERSION >= 609) - || Type == D3D12_HEAP_TYPE_GPU_UPLOAD -# endif - || (Type == D3D12_HEAP_TYPE_CUSTOM && (CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE || - CPUPageProperty == D3D12_CPU_PAGE_PROPERTY_WRITE_BACK)); - } -}; -inline bool operator==(const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r) noexcept { - return l.Type == r.Type && l.CPUPageProperty == r.CPUPageProperty && - l.MemoryPoolPreference == r.MemoryPoolPreference && l.CreationNodeMask == r.CreationNodeMask && - l.VisibleNodeMask == r.VisibleNodeMask; -} -inline bool operator!=(const D3D12_HEAP_PROPERTIES& l, const D3D12_HEAP_PROPERTIES& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_HEAP_DESC : public D3D12_HEAP_DESC { - CD3DX12_HEAP_DESC() = default; - explicit CD3DX12_HEAP_DESC(const D3D12_HEAP_DESC& o) noexcept : D3D12_HEAP_DESC(o) {} - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_HEAP_PROPERTIES properties, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = size; - Properties = properties; - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_HEAP_TYPE type, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = size; - Properties = CD3DX12_HEAP_PROPERTIES(type); - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(UINT64 size, - D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - UINT64 alignment = 0, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - 
noexcept { - SizeInBytes = size; - Properties = CD3DX12_HEAP_PROPERTIES(cpuPageProperty, memoryPoolPreference); - Alignment = alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_HEAP_PROPERTIES properties, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = properties; - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_HEAP_TYPE type, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = CD3DX12_HEAP_PROPERTIES(type); - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - CD3DX12_HEAP_DESC(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_CPU_PAGE_PROPERTY cpuPageProperty, - D3D12_MEMORY_POOL memoryPoolPreference, - D3D12_HEAP_FLAGS flags = D3D12_HEAP_FLAG_NONE) - noexcept { - SizeInBytes = resAllocInfo.SizeInBytes; - Properties = CD3DX12_HEAP_PROPERTIES(cpuPageProperty, memoryPoolPreference); - Alignment = resAllocInfo.Alignment; - Flags = flags; - } - bool IsCPUAccessible() const noexcept { - return static_cast(&Properties)->IsCPUAccessible(); - } -}; -inline bool operator==(const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r) noexcept { - return l.SizeInBytes == r.SizeInBytes && l.Properties == r.Properties && l.Alignment == r.Alignment && - l.Flags == r.Flags; -} -inline bool operator!=(const D3D12_HEAP_DESC& l, const D3D12_HEAP_DESC& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_CLEAR_VALUE : public D3D12_CLEAR_VALUE { - CD3DX12_CLEAR_VALUE() = default; - explicit CD3DX12_CLEAR_VALUE(const D3D12_CLEAR_VALUE& o) noexcept : D3D12_CLEAR_VALUE(o) {} - CD3DX12_CLEAR_VALUE(DXGI_FORMAT format, const FLOAT color[4]) noexcept { - Format = format; - memcpy(Color, color, 
sizeof(Color)); - } - CD3DX12_CLEAR_VALUE(DXGI_FORMAT format, FLOAT depth, UINT8 stencil) noexcept { - Format = format; - memset(&Color, 0, sizeof(Color)); - /* Use memcpy to preserve NAN values */ - memcpy(&DepthStencil.Depth, &depth, sizeof(depth)); - DepthStencil.Stencil = stencil; - } -}; - -//------------------------------------------------------------------------------------------------ -inline bool operator==(const D3D12_CLEAR_VALUE& a, const D3D12_CLEAR_VALUE& b) noexcept { - if (a.Format != b.Format) - return false; - if (a.Format == DXGI_FORMAT_D24_UNORM_S8_UINT || a.Format == DXGI_FORMAT_D16_UNORM || - a.Format == DXGI_FORMAT_D32_FLOAT || a.Format == DXGI_FORMAT_D32_FLOAT_S8X24_UINT) { - return (a.DepthStencil.Depth == b.DepthStencil.Depth) && (a.DepthStencil.Stencil == b.DepthStencil.Stencil); - } else { - return (a.Color[0] == b.Color[0]) && (a.Color[1] == b.Color[1]) && (a.Color[2] == b.Color[2]) && - (a.Color[3] == b.Color[3]); - } -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RANGE : public D3D12_RANGE { - CD3DX12_RANGE() = default; - explicit CD3DX12_RANGE(const D3D12_RANGE& o) noexcept : D3D12_RANGE(o) {} - CD3DX12_RANGE(SIZE_T begin, SIZE_T end) noexcept { - Begin = begin; - End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RANGE_UINT64 : public D3D12_RANGE_UINT64 { - CD3DX12_RANGE_UINT64() = default; - explicit CD3DX12_RANGE_UINT64(const D3D12_RANGE_UINT64& o) noexcept : D3D12_RANGE_UINT64(o) {} - CD3DX12_RANGE_UINT64(UINT64 begin, UINT64 end) noexcept { - Begin = begin; - End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_RANGE_UINT64 : public D3D12_SUBRESOURCE_RANGE_UINT64 { - CD3DX12_SUBRESOURCE_RANGE_UINT64() = default; - explicit CD3DX12_SUBRESOURCE_RANGE_UINT64(const 
D3D12_SUBRESOURCE_RANGE_UINT64& o) noexcept - : D3D12_SUBRESOURCE_RANGE_UINT64(o) {} - CD3DX12_SUBRESOURCE_RANGE_UINT64(UINT subresource, const D3D12_RANGE_UINT64& range) noexcept { - Subresource = subresource; - Range = range; - } - CD3DX12_SUBRESOURCE_RANGE_UINT64(UINT subresource, UINT64 begin, UINT64 end) noexcept { - Subresource = subresource; - Range.Begin = begin; - Range.End = end; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SHADER_BYTECODE : public D3D12_SHADER_BYTECODE { - CD3DX12_SHADER_BYTECODE() = default; - explicit CD3DX12_SHADER_BYTECODE(const D3D12_SHADER_BYTECODE& o) noexcept : D3D12_SHADER_BYTECODE(o) {} - CD3DX12_SHADER_BYTECODE(_In_ ID3DBlob* pShaderBlob) noexcept { - pShaderBytecode = pShaderBlob->GetBufferPointer(); - BytecodeLength = pShaderBlob->GetBufferSize(); - } - CD3DX12_SHADER_BYTECODE(const void* _pShaderBytecode, SIZE_T bytecodeLength) noexcept { - pShaderBytecode = _pShaderBytecode; - BytecodeLength = bytecodeLength; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILED_RESOURCE_COORDINATE : public D3D12_TILED_RESOURCE_COORDINATE { - CD3DX12_TILED_RESOURCE_COORDINATE() = default; - explicit CD3DX12_TILED_RESOURCE_COORDINATE(const D3D12_TILED_RESOURCE_COORDINATE& o) noexcept - : D3D12_TILED_RESOURCE_COORDINATE(o) {} - CD3DX12_TILED_RESOURCE_COORDINATE(UINT x, UINT y, UINT z, UINT subresource) noexcept { - X = x; - Y = y; - Z = z; - Subresource = subresource; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILE_REGION_SIZE : public D3D12_TILE_REGION_SIZE { - CD3DX12_TILE_REGION_SIZE() = default; - explicit CD3DX12_TILE_REGION_SIZE(const D3D12_TILE_REGION_SIZE& o) noexcept : D3D12_TILE_REGION_SIZE(o) {} - CD3DX12_TILE_REGION_SIZE(UINT numTiles, BOOL useBox, UINT width, UINT16 height, UINT16 depth) 
noexcept { - NumTiles = numTiles; - UseBox = useBox; - Width = width; - Height = height; - Depth = depth; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_TILING : public D3D12_SUBRESOURCE_TILING { - CD3DX12_SUBRESOURCE_TILING() = default; - explicit CD3DX12_SUBRESOURCE_TILING(const D3D12_SUBRESOURCE_TILING& o) noexcept : D3D12_SUBRESOURCE_TILING(o) {} - CD3DX12_SUBRESOURCE_TILING(UINT widthInTiles, - UINT16 heightInTiles, - UINT16 depthInTiles, - UINT startTileIndexInOverallResource) - noexcept { - WidthInTiles = widthInTiles; - HeightInTiles = heightInTiles; - DepthInTiles = depthInTiles; - StartTileIndexInOverallResource = startTileIndexInOverallResource; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TILE_SHAPE : public D3D12_TILE_SHAPE { - CD3DX12_TILE_SHAPE() = default; - explicit CD3DX12_TILE_SHAPE(const D3D12_TILE_SHAPE& o) noexcept : D3D12_TILE_SHAPE(o) {} - CD3DX12_TILE_SHAPE(UINT widthInTexels, UINT heightInTexels, UINT depthInTexels) noexcept { - WidthInTexels = widthInTexels; - HeightInTexels = heightInTexels; - DepthInTexels = depthInTexels; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_PACKED_MIP_INFO : public D3D12_PACKED_MIP_INFO { - CD3DX12_PACKED_MIP_INFO() = default; - explicit CD3DX12_PACKED_MIP_INFO(const D3D12_PACKED_MIP_INFO& o) noexcept : D3D12_PACKED_MIP_INFO(o) {} - CD3DX12_PACKED_MIP_INFO(UINT8 numStandardMips, - UINT8 numPackedMips, - UINT numTilesForPackedMips, - UINT startTileIndexInOverallResource) - noexcept { - NumStandardMips = numStandardMips; - NumPackedMips = numPackedMips; - NumTilesForPackedMips = numTilesForPackedMips; - StartTileIndexInOverallResource = startTileIndexInOverallResource; - } -}; - 
-//------------------------------------------------------------------------------------------------ -struct CD3DX12_SUBRESOURCE_FOOTPRINT : public D3D12_SUBRESOURCE_FOOTPRINT { - CD3DX12_SUBRESOURCE_FOOTPRINT() = default; - explicit CD3DX12_SUBRESOURCE_FOOTPRINT(const D3D12_SUBRESOURCE_FOOTPRINT& o) noexcept - : D3D12_SUBRESOURCE_FOOTPRINT(o) {} - CD3DX12_SUBRESOURCE_FOOTPRINT(DXGI_FORMAT format, UINT width, UINT height, UINT depth, UINT rowPitch) noexcept { - Format = format; - Width = width; - Height = height; - Depth = depth; - RowPitch = rowPitch; - } - explicit CD3DX12_SUBRESOURCE_FOOTPRINT(const D3D12_RESOURCE_DESC& resDesc, UINT rowPitch) noexcept { - Format = resDesc.Format; - Width = UINT(resDesc.Width); - Height = resDesc.Height; - Depth = (resDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? resDesc.DepthOrArraySize : 1u); - RowPitch = rowPitch; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_TEXTURE_COPY_LOCATION : public D3D12_TEXTURE_COPY_LOCATION { - CD3DX12_TEXTURE_COPY_LOCATION() = default; - explicit CD3DX12_TEXTURE_COPY_LOCATION(const D3D12_TEXTURE_COPY_LOCATION& o) noexcept - : D3D12_TEXTURE_COPY_LOCATION(o) {} - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes) noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; - PlacedFootprint = {}; - } - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes, D3D12_PLACED_SUBRESOURCE_FOOTPRINT const& Footprint) - noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT; - PlacedFootprint = Footprint; - } - CD3DX12_TEXTURE_COPY_LOCATION(_In_ ID3D12Resource* pRes, UINT Sub) noexcept { - pResource = pRes; - Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX; - PlacedFootprint = {}; - SubresourceIndex = Sub; - } -}; - -//------------------------------------------------------------------------------------------------ -constexpr UINT D3D12CalcSubresource(UINT MipSlice, - 
UINT ArraySlice, - UINT PlaneSlice, - UINT MipLevels, - UINT ArraySize) noexcept { - return MipSlice + ArraySlice * MipLevels + PlaneSlice * MipLevels * ArraySize; -} - -//------------------------------------------------------------------------------------------------ -inline UINT8 D3D12GetFormatPlaneCount(_In_ ID3D12Device* pDevice, DXGI_FORMAT Format) noexcept { - D3D12_FEATURE_DATA_FORMAT_INFO formatInfo = {Format, 0}; - if (FAILED(pDevice->CheckFeatureSupport(D3D12_FEATURE_FORMAT_INFO, &formatInfo, sizeof(formatInfo)))) { - return 0; - } - return formatInfo.PlaneCount; -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_DESC : public D3D12_RESOURCE_DESC { - CD3DX12_RESOURCE_DESC() = default; - explicit CD3DX12_RESOURCE_DESC(const D3D12_RESOURCE_DESC& o) noexcept : D3D12_RESOURCE_DESC(o) {} - CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION dimension, - UINT64 alignment, - UINT64 width, - UINT height, - UINT16 depthOrArraySize, - UINT16 mipLevels, - DXGI_FORMAT format, - UINT sampleCount, - UINT sampleQuality, - D3D12_TEXTURE_LAYOUT layout, - D3D12_RESOURCE_FLAGS flags) - noexcept { - Dimension = dimension; - Alignment = alignment; - Width = width; - Height = height; - DepthOrArraySize = depthOrArraySize; - MipLevels = mipLevels; - Format = format; - SampleDesc.Count = sampleCount; - SampleDesc.Quality = sampleQuality; - Layout = layout; - Flags = flags; - } - static inline CD3DX12_RESOURCE_DESC Buffer(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_BUFFER, - resAllocInfo.Alignment, - resAllocInfo.SizeInBytes, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags); - } - static inline CD3DX12_RESOURCE_DESC Buffer(UINT64 width, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - UINT64 alignment = 0) noexcept { - return 
CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_BUFFER, - alignment, - width, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex1D(DXGI_FORMAT format, - UINT64 width, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE1D, - alignment, - width, - 1, - arraySize, - mipLevels, - format, - 1, - 0, - layout, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex2D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - UINT sampleCount = 1, - UINT sampleQuality = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE2D, - alignment, - width, - height, - arraySize, - mipLevels, - format, - sampleCount, - sampleQuality, - layout, - flags); - } - static inline CD3DX12_RESOURCE_DESC Tex3D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 depth, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC(D3D12_RESOURCE_DIMENSION_TEXTURE3D, - alignment, - width, - height, - depth, - mipLevels, - format, - 1, - 0, - layout, - flags); - } - inline UINT16 Depth() const noexcept { - return (Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1u); - } - inline UINT16 ArraySize() const noexcept { - return (Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE3D ? 
DepthOrArraySize : 1u); - } - inline UINT8 PlaneCount(_In_ ID3D12Device* pDevice) const noexcept { - return D3D12GetFormatPlaneCount(pDevice, Format); - } - inline UINT Subresources(_In_ ID3D12Device* pDevice) const noexcept { - return static_cast(MipLevels) * ArraySize() * PlaneCount(pDevice); - } - inline UINT CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT PlaneSlice) noexcept { - return D3D12CalcSubresource(MipSlice, ArraySlice, PlaneSlice, MipLevels, ArraySize()); - } -}; -inline bool operator==(const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r) noexcept { - return l.Dimension == r.Dimension && l.Alignment == r.Alignment && l.Width == r.Width && l.Height == r.Height && - l.DepthOrArraySize == r.DepthOrArraySize && l.MipLevels == r.MipLevels && l.Format == r.Format && - l.SampleDesc.Count == r.SampleDesc.Count && l.SampleDesc.Quality == r.SampleDesc.Quality && - l.Layout == r.Layout && l.Flags == r.Flags; -} -inline bool operator!=(const D3D12_RESOURCE_DESC& l, const D3D12_RESOURCE_DESC& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RESOURCE_DESC1 : public D3D12_RESOURCE_DESC1 { - CD3DX12_RESOURCE_DESC1() = default; - explicit CD3DX12_RESOURCE_DESC1(const D3D12_RESOURCE_DESC1& o) noexcept : D3D12_RESOURCE_DESC1(o) {} - explicit CD3DX12_RESOURCE_DESC1(const D3D12_RESOURCE_DESC& o) noexcept { - Dimension = o.Dimension; - Alignment = o.Alignment; - Width = o.Width; - Height = o.Height; - DepthOrArraySize = o.DepthOrArraySize; - MipLevels = o.MipLevels; - Format = o.Format; - SampleDesc = o.SampleDesc; - Layout = o.Layout; - Flags = o.Flags; - SamplerFeedbackMipRegion = {}; - } - CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION dimension, - UINT64 alignment, - UINT64 width, - UINT height, - UINT16 depthOrArraySize, - UINT16 mipLevels, - DXGI_FORMAT format, - UINT sampleCount, - UINT sampleQuality, - D3D12_TEXTURE_LAYOUT layout, - 
D3D12_RESOURCE_FLAGS flags, - UINT samplerFeedbackMipRegionWidth = 0, - UINT samplerFeedbackMipRegionHeight = 0, - UINT samplerFeedbackMipRegionDepth = 0) - noexcept { - Dimension = dimension; - Alignment = alignment; - Width = width; - Height = height; - DepthOrArraySize = depthOrArraySize; - MipLevels = mipLevels; - Format = format; - SampleDesc.Count = sampleCount; - SampleDesc.Quality = sampleQuality; - Layout = layout; - Flags = flags; - SamplerFeedbackMipRegion.Width = samplerFeedbackMipRegionWidth; - SamplerFeedbackMipRegion.Height = samplerFeedbackMipRegionHeight; - SamplerFeedbackMipRegion.Depth = samplerFeedbackMipRegionDepth; - } - - static inline CD3DX12_RESOURCE_DESC1 Buffer(const D3D12_RESOURCE_ALLOCATION_INFO& resAllocInfo, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_BUFFER, - resAllocInfo.Alignment, - resAllocInfo.SizeInBytes, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Buffer(UINT64 width, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_BUFFER, - alignment, - width, - 1, - 1, - 1, - DXGI_FORMAT_UNKNOWN, - 1, - 0, - D3D12_TEXTURE_LAYOUT_ROW_MAJOR, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Tex1D(DXGI_FORMAT format, - UINT64 width, - UINT16 arraySize = 1, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE1D, - alignment, - width, - 1, - arraySize, - mipLevels, - format, - 1, - 0, - layout, - flags, - 0, - 0, - 0); - } - static inline CD3DX12_RESOURCE_DESC1 Tex2D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 arraySize = 1, - UINT16 mipLevels 
= 0, - UINT sampleCount = 1, - UINT sampleQuality = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0, - UINT samplerFeedbackMipRegionWidth = 0, - UINT samplerFeedbackMipRegionHeight = 0, - UINT samplerFeedbackMipRegionDepth = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE2D, - alignment, - width, - height, - arraySize, - mipLevels, - format, - sampleCount, - sampleQuality, - layout, - flags, - samplerFeedbackMipRegionWidth, - samplerFeedbackMipRegionHeight, - samplerFeedbackMipRegionDepth); - } - static inline CD3DX12_RESOURCE_DESC1 Tex3D(DXGI_FORMAT format, - UINT64 width, - UINT height, - UINT16 depth, - UINT16 mipLevels = 0, - D3D12_RESOURCE_FLAGS flags = D3D12_RESOURCE_FLAG_NONE, - D3D12_TEXTURE_LAYOUT layout = D3D12_TEXTURE_LAYOUT_UNKNOWN, - UINT64 alignment = 0) noexcept { - return CD3DX12_RESOURCE_DESC1(D3D12_RESOURCE_DIMENSION_TEXTURE3D, - alignment, - width, - height, - depth, - mipLevels, - format, - 1, - 0, - layout, - flags, - 0, - 0, - 0); - } - inline UINT16 Depth() const noexcept { - return (Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? DepthOrArraySize : 1u); - } - inline UINT16 ArraySize() const noexcept { - return (Dimension != D3D12_RESOURCE_DIMENSION_TEXTURE3D ? 
DepthOrArraySize : 1u); - } - inline UINT8 PlaneCount(_In_ ID3D12Device* pDevice) const noexcept { - return D3D12GetFormatPlaneCount(pDevice, Format); - } - inline UINT Subresources(_In_ ID3D12Device* pDevice) const noexcept { - return static_cast(MipLevels) * ArraySize() * PlaneCount(pDevice); - } - inline UINT CalcSubresource(UINT MipSlice, UINT ArraySlice, UINT PlaneSlice) noexcept { - return D3D12CalcSubresource(MipSlice, ArraySlice, PlaneSlice, MipLevels, ArraySize()); - } -}; -inline bool operator==(const D3D12_RESOURCE_DESC1& l, const D3D12_RESOURCE_DESC1& r) noexcept { - return l.Dimension == r.Dimension && l.Alignment == r.Alignment && l.Width == r.Width && l.Height == r.Height && - l.DepthOrArraySize == r.DepthOrArraySize && l.MipLevels == r.MipLevels && l.Format == r.Format && - l.SampleDesc.Count == r.SampleDesc.Count && l.SampleDesc.Quality == r.SampleDesc.Quality && - l.Layout == r.Layout && l.Flags == r.Flags && - l.SamplerFeedbackMipRegion.Width == r.SamplerFeedbackMipRegion.Width && - l.SamplerFeedbackMipRegion.Height == r.SamplerFeedbackMipRegion.Height && - l.SamplerFeedbackMipRegion.Depth == r.SamplerFeedbackMipRegion.Depth; -} -inline bool operator!=(const D3D12_RESOURCE_DESC1& l, const D3D12_RESOURCE_DESC1& r) noexcept { - return !(l == r); -} - -//------------------------------------------------------------------------------------------------ -// Fills in the mipmap and alignment values of pDesc when either members are zero -// Used to replace an implicit field to an explicit (0 mip map = max mip map level) -// If expansion has occured, returns LclDesc, else returns the original pDesc -inline const CD3DX12_RESOURCE_DESC1* D3DX12ConditionallyExpandAPIDesc(CD3DX12_RESOURCE_DESC1& LclDesc, - const CD3DX12_RESOURCE_DESC1* pDesc) { - // Expand mip levels: - if (pDesc->MipLevels == 0 || pDesc->Alignment == 0) { - LclDesc = *pDesc; - if (pDesc->MipLevels == 0) { - auto MaxMipLevels = [](UINT64 uiMaxDimension) -> UINT16 { - UINT16 uiRet = 0; - while 
(uiMaxDimension > 0) { - uiRet++; - uiMaxDimension >>= 1; - } - return uiRet; - }; - auto Max = [](UINT64 const& a, UINT64 const& b) { - return (a < b) ? b : a; - }; - - LclDesc.MipLevels = - MaxMipLevels(Max(LclDesc.Dimension == D3D12_RESOURCE_DIMENSION_TEXTURE3D ? LclDesc.DepthOrArraySize : 1, - Max(LclDesc.Width, LclDesc.Height))); - } - if (pDesc->Alignment == 0) { - if (pDesc->Layout == D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE || - pDesc->Layout == D3D12_TEXTURE_LAYOUT_64KB_STANDARD_SWIZZLE) { - LclDesc.Alignment = D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT; - } else { - LclDesc.Alignment = (pDesc->SampleDesc.Count > 1 ? D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT - : D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT); - } - } - return &LclDesc; - } else { - return pDesc; - } -} - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_VIEW_INSTANCING_DESC : public D3D12_VIEW_INSTANCING_DESC { - CD3DX12_VIEW_INSTANCING_DESC() = default; - explicit CD3DX12_VIEW_INSTANCING_DESC(const D3D12_VIEW_INSTANCING_DESC& o) noexcept - : D3D12_VIEW_INSTANCING_DESC(o) {} - explicit CD3DX12_VIEW_INSTANCING_DESC(CD3DX12_DEFAULT) noexcept { - ViewInstanceCount = 0; - pViewInstanceLocations = nullptr; - Flags = D3D12_VIEW_INSTANCING_FLAG_NONE; - } - explicit CD3DX12_VIEW_INSTANCING_DESC(UINT InViewInstanceCount, - const D3D12_VIEW_INSTANCE_LOCATION* InViewInstanceLocations, - D3D12_VIEW_INSTANCING_FLAGS InFlags) noexcept { - ViewInstanceCount = InViewInstanceCount; - pViewInstanceLocations = InViewInstanceLocations; - Flags = InFlags; - } -}; - -//------------------------------------------------------------------------------------------------ -struct CD3DX12_RT_FORMAT_ARRAY : public D3D12_RT_FORMAT_ARRAY { - CD3DX12_RT_FORMAT_ARRAY() = default; - explicit CD3DX12_RT_FORMAT_ARRAY(const D3D12_RT_FORMAT_ARRAY& o) noexcept : D3D12_RT_FORMAT_ARRAY(o) {} - explicit CD3DX12_RT_FORMAT_ARRAY(_In_reads_(NumFormats) const 
DXGI_FORMAT* pFormats, UINT NumFormats) noexcept { - NumRenderTargets = NumFormats; - memcpy(RTFormats, pFormats, sizeof(RTFormats)); - // assumes ARRAY_SIZE(pFormats) == ARRAY_SIZE(RTFormats) - } -}; - -#endif diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h deleted file mode 100644 index 2ae8f5c4bee1f6..00000000000000 --- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/d3dx12_default.h +++ /dev/null @@ -1,15 +0,0 @@ -//********************************************************* -// -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License (MIT). -// -//********************************************************* - -#pragma once - -#ifdef _WIN32 - -struct CD3DX12_DEFAULT {}; -extern const DECLSPEC_SELECTANY CD3DX12_DEFAULT D3D12_DEFAULT; - -#endif From ca935238a902f72611ad796e65cf0ae9c2eb21c6 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Tue, 21 Jan 2025 16:56:29 +0100 Subject: [PATCH 75/97] [DOCS] adjustments preparing 2025.0 pass 2 (#28454) --- cspell.json | 412 ------------------ .../about-openvino/release-notes-openvino.rst | 98 ++--- .../configurations/genai-dependencies.rst | 4 +- .../get-started/install-openvino.rst | 6 +- .../openvino-workflow-generative.rst | 16 +- .../remote-tensor-api-gpu-plugin.rst | 2 +- .../query-device-properties.rst | 2 +- docs/dev/ov_dependencies.txt | 2 +- docs/sphinx_setup/index.rst | 2 +- .../cpp/benchmark/sync_benchmark/README.md | 6 +- .../benchmark/throughput_benchmark/README.md | 6 +- samples/cpp/hello_reshape_ssd/README.md | 2 +- samples/js/node/notebooks/hello-detection.nnb | 2 +- .../js/node/notebooks/hello-segmentation.nnb | 2 +- samples/js/node/notebooks/hello-world.nnb | 2 +- .../python/benchmark/bert_benchmark/README.md | 2 +- .../python/benchmark/sync_benchmark/README.md | 18 +- .../benchmark/throughput_benchmark/README.md | 18 +- 18 files 
changed, 77 insertions(+), 525 deletions(-) delete mode 100644 cspell.json diff --git a/cspell.json b/cspell.json deleted file mode 100644 index f59d00a6a052f6..00000000000000 --- a/cspell.json +++ /dev/null @@ -1,412 +0,0 @@ -{ - "version": "0.2", - "ignorePaths": [], - "dictionaryDefinitions": [], - "dictionaries": [], - "words": [ - "aarch64", - "acdadcfa", - "acea", - "abmrd", - "acfb", - "acosh", - "Acosh", - "adfcd", - "addcmul", - "addif", - "addmm", - "aeaa", - "agem", - "agew", - "armeabi", - "armhf", - "artefacts", - "ARTEFACTS", - "Asinh", - "asynch", - "Atanh", - "autodoc", - "Autograd", - "autoplugin", - "AUTOPLUGIN", - "autoremove", - "autosummary", - "bace", - "Backprop", - "bblayers", - "Beautif", - "Bilat", - "bindir", - "bitbake", - "BFYX", - "BFXY", - "bkgr", - "brctl", - "Bucketize", - "BUILDDIR", - "buildtools", - "buildsystems", - "BYXF", - "bvalue", - "bvlc", - "caffe", - "caffemodel", - "camvid", - "cbba", - "cbcd", - "cdad", - "cdrom", - "chrpath", - "classov", - "cldnn", - "clumber", - "codepath", - "codepaths", - "coeffs", - "concat", - "Concat", - "Conts", - "constexpr", - "consts", - "Consts", - "conv", - "Convolutional", - "CPPLINT", - "cpplint", - "crbegin", - "crend", - "ctest", - "ctput", - "CVAT", - "cython", - "dadb", - "DANDROID", - "DARM", - "Datumaro", - "datumaro", - "DBUILD", - "DCMAKE", - "ddepth", - "Depthwise", - "dearmor", - "devicesupport", - "dequantization", - "Dequantization", - "deeplabv", - "deeced", - "DENABLE", - "delif", - "denormal", - "DENORMAL", - "denormalized", - "Detectron", - "Dequantize", - "devel", - "devtoolset", - "dgpu", - "diffstat", - "dldt", - "dlstreamer", - "dkms", - "Dockerfiles", - "DOPENVINO", - "downscript", - "doxid", - "doxygen", - "Doxygen", - "doxygensnippet", - "DTHREADING", - "dpkg", - "DPYTHON", - "DSELECTIVE", - "dylib", - "DWORD", - "efficientdet", - "Efficientdet", - "Einsum", - "Elems", - "Elementwise", - "elementwise", - "Eltwise", - "endsphinxdirective", - "enumov", - "emcmake", 
- "emmake", - "emod", - "emom", - "emow", - "Emscripten", - "emscripten", - "emsdk", - "epel", - "ERRORLEVEL", - "evolutionally", - "executionpolicy", - "fafe", - "fdupes", - "flatbuffers", - "FLATBUFFERS", - "frontends", - "Frontends", - "FYXB", - "gaddb", - "GAPI", - "gapi", - "Gaussed", - "gcompoundkernel", - "gcomputation", - "GCPU", - "gcpukernel", - "Gelu", - "GELU", - "Geti", - "getitem", - "gimg", - "gitee", - "gflags", - "globbing", - "gmmlib", - "GNAs", - "gmock", - "gnueabihf", - "googlenet", - "gpgcheck", - "gpgkey", - "graphviz", - "Graphviz", - "groupov", - "gtest", - "hardtanh", - "hashfile", - "HDDL", - "HKLM", - "HOSTTOOLS", - "Hotspots", - "hotspots", - "hostnet", - "hwloc", - "hwquote", - "idbf", - "IDFT", - "iigd", - "ifdef", - "ifdown", - "ifup", - "imgproc", - "imshow", - "inet", - "INTEGRITYCHECK", - "ILSVRC", - "inferenced", - "Informations", - "insmod", - "intelocl", - "INTERPROCEDURAL", - "INSTALLDIR", - "IRDFT", - "jemalloc", - "kaldi", - "Keras", - "keypress", - "keyrings", - "Khronos", - "KROIs", - "Landm", - "landm", - "Latency", - "Lcov", - "ldconfig", - "libc", - "libopencl", - "libopencv", - "libpython", - "libtbb", - "libtbbbind", - "libtpm", - "libvirtd", - "linmac", - "Liskov", - "lowlatency", - "LTSC", - "LSTM", - "makefiles", - "malloc", - "memleaks", - "manylinux", - "maxdepth", - "miktext", - "Mish", - "mklink", - "mmap", - "mobilenet", - "Mobilenet", - "monodepth", - "mozallowfullscreen", - "msallowfullscreen", - "MSVC", - "msvc", - "Multiclass", - "muxed", - "mxnet", - "namespaceov", - "NCHW", - "ncpu", - "netdev", - "netplan", - "ngraph", - "nireq", - "NNCF", - "nncf", - "nocache", - "noglob", - "nohup", - "nlohmann", - "norestart", - "noqueue", - "nproc", - "NUMA", - "numpy", - "Numpy", - "oallowfullscreen", - "ocloc", - "OCSP", - "oneapi", - "onetbb", - "onnx", - "opencl", - "openembedded", - "openvino", - "Opset", - "opset", - "opsets", - "OVMS", - "ovms", - "ovsa", - "OVSA", - "ovsatool", - "OVTF", - "PACKAGECONFIG", - 
"paddlepaddle", - "parameterizable", - "partitioner", - "patchelf", - "passpattern", - "Pexels", - "pdmodel", - "PDPD", - "pkgdata", - "pkgs", - "pkill", - "polylines", - "postproc", - "postprocess", - "preprocess", - "Preprocess", - "protobuf", - "Protobuf", - "PROTOBUF", - "prototxt", - "PSROI", - "Pugi", - "pugixml", - "PUGIXML", - "pypi", - "PYTHONPATH", - "pzstd", - "qcow", - "qlen", - "QSPECTRE", - "Qspectre", - "quantizer", - "Rects", - "Relu", - "relu", - "rcnn", - "RCNN", - "RDFT", - "Redistributable", - "remotesigned", - "repolist", - "reproject", - "reshapable", - "Requantize", - "retval", - "RHODS", - "rmmod", - "runtool", - "scons", - "SCONS", - "segm", - "Selu", - "servercore", - "setuptools", - "setupvars", - "SETX", - "SIMD", - "Softmax", - "skylake", - "sphinxdirective", - "Strided", - "squeezenet", - "SWTPM", - "swtpm", - "TBBBIND", - "TBBROOT", - "Tensro", - "texlive", - "textrm", - "tflite", - "thirdparty", - "Thresholded", - "toctree", - "toolset", - "Torchvision", - "tpmrm", - "tpmstate", - "tput", - "Tunables", - "unet", - "Uninstallation", - "unixio", - "unsharp", - "Unsharp", - "Unsh", - "Unsqueeze", - "Usecase", - "usecases", - "USERPROFILE", - "userspace", - "VAAPI", - "valgrind", - "vcpkg", - "vcvars", - "venv", - "virbr", - "virsh", - "virt", - "virtio", - "VMHWM", - "VMRSS", - "VNNI", - "vtune", - "vtunesummary", - "vtunebottonup", - "WHOLEARCHIVE", - "WDDM", - "WORKDIR", - "WORKSIZE", - "xbyak", - "Xbyak", - "xdot", - "xvfz", - "yocto", - "yolo", - "YOLO", - "yolov", - "Yolov", - "YXFB", - "zstd" - ], - "ignoreWords": [], - "import": [] -} diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index 0134ed15215541..739c411dcbe7e5 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -16,7 +16,7 @@ OpenVINO Release Notes -2024.6 - 18 December 2024 +2025.0 - 05 February 2025 
############################# :doc:`System Requirements <./release-notes-openvino/system-requirements>` | :doc:`Release policy <./release-notes-openvino/release-policy>` | :doc:`Installation Guides <./../get-started/install-openvino>` @@ -26,10 +26,9 @@ OpenVINO Release Notes What's new +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -* OpenVINO 2024.6 release includes updates for enhanced stability and improved LLM performance. -* Introduced support for Intel® Arc™ B-Series Graphics (formerly known as Battlemage). -* Implemented optimizations to improve the inference time and LLM performance on NPUs. -* Improved LLM performance with GenAI API optimizations and bug fixes. +* . +* . + @@ -39,26 +38,19 @@ OpenVINO™ Runtime CPU Device Plugin ----------------------------------------------------------------------------------------------- -* KV cache now uses asymmetric 8-bit unsigned integer (U8) as the default precision, reducing - memory stress for LLMs and increasing their performance. This option can be controlled by - model meta data. -* Quality and accuracy has been improved for selected models with several bug fixes. +* . +* . GPU Device Plugin ----------------------------------------------------------------------------------------------- -* Device memory copy optimizations have been introduced for inference with **Intel® Arc™ B-Series - Graphics** (formerly known as Battlemage). Since it does not utilize L2 cache for copying memory - between the device and host, a dedicated `copy` operation is used, if inputs or results are - not expected in the device memory. -* ChatGLM4 inference on GPU has been optimized. +* . +* . NPU Device Plugin ----------------------------------------------------------------------------------------------- -* LLM performance and inference time has been improved with memory optimizations. - - +* . @@ -98,14 +90,10 @@ Previous 2025 releases .. 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -.. dropdown:: 2024.5 - 20 November 2024 +.. dropdown:: 2024.6 - 18 December 2024 :animate: fade-in-slide-down :color: secondary - **What's new** - - * More GenAI coverage and framework integrations to minimize code changes. - @@ -126,74 +114,44 @@ page. -Discontinued in 2024 +Discontinued in 2025 ----------------------------- * Runtime components: - * Intel® Gaussian & Neural Accelerator (Intel® GNA). Consider using the Neural Processing - Unit (NPU) for low-powered systems like Intel® Core™ Ultra or 14th generation and beyond. - * OpenVINO C++/C/Python 1.0 APIs (see - `2023.3 API transition guide `__ - for reference). - * All ONNX Frontend legacy API (known as ONNX_IMPORTER_API). - * ``PerfomanceMode.UNDEFINED`` property as part of the OpenVINO Python API. + * OpenVINO property Affinity API is no longer available. It has been replaced with CPU + binding configurations (``ov::hint::enable_cpu_pinning``). * Tools: - * Deployment Manager. See :doc:`installation <../get-started/install-openvino>` and - :doc:`deployment <../get-started/install-openvino>` guides for current distribution - options. - * `Accuracy Checker `__. - * `Post-Training Optimization Tool `__ - (POT). Neural Network Compression Framework (NNCF) should be used instead. - * A `Git patch `__ - for NNCF integration with `huggingface/transformers `__. - The recommended approach is to use `huggingface/optimum-intel `__ - for applying NNCF optimization on top of models from Hugging Face. - * Support for Apache MXNet, Caffe, and Kaldi model formats. Conversion to ONNX may be used - as a solution. - * The macOS x86_64 debug bins are no longer provided with the OpenVINO toolkit, starting - with OpenVINO 2024.5. - * Python 3.8 is no longer supported, starting with OpenVINO 2024.5. 
- - * As MxNet doesn't support Python version higher than 3.8, according to the - `MxNet PyPI project `__, - it is no longer supported by OpenVINO, either. - - * Discrete Keem Bay support is no longer supported, starting with OpenVINO 2024.5. - * Support for discrete devices (formerly codenamed Raptor Lake) is no longer available for - NPU. + * Intel® Streaming SIMD Extensions (Intel® SSE) are currently not enabled in the binary + package by default. They are still supported in the source code form. + * The OpenVINO™ Development Tools package (pip install openvino-dev) is no longer available + for OpenVINO releases in 2025. + * Model Optimizer is no longer available. Consider using the + :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` + instead. For more details, see the + `model conversion transition guide `__. Deprecated and to be removed in the future -------------------------------------------- -* Intel® Streaming SIMD Extensions (Intel® SSE) will be supported in source code form, but not - enabled in the binary package by default, starting with OpenVINO 2025.0. * Ubuntu 20.04 support will be deprecated in future OpenVINO releases due to the end of standard support. * The openvino-nightly PyPI module will soon be discontinued. End-users should proceed with the Simple PyPI nightly repo instead. More information in `Release Policy `__. -* The OpenVINO™ Development Tools package (pip install openvino-dev) will be removed from - installation options and distribution channels beginning with OpenVINO 2025.0. -* Model Optimizer will be discontinued with OpenVINO 2025.0. Consider using the - :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` - instead. For more details, see the - `model conversion transition guide `__. -* OpenVINO property Affinity API will be discontinued with OpenVINO 2025.0. - It will be replaced with CPU binding configurations (``ov::hint::enable_cpu_pinning``). 
- - - +* “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the + future. OpenVINO's dynamic shape models are recommended instead. +* MacOS x86 is no longer recommended for use due to the discontinuation of validation. + Full support will be removed later in 2025. +* The `openvino` namespace of the OpenVINO Python API has been redesigned, removing the nested + `openvino.runtime` module. The old namespace is now considered deprecated and will be + discontinued in 2026.0. - * “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the - future. OpenVINO's dynamic shape models are recommended instead. -* Starting with 2025.0 MacOS x86 is no longer recommended for use due to the discontinuation - of validation. Full support will be removed later in 2025. diff --git a/docs/articles_en/get-started/configurations/genai-dependencies.rst b/docs/articles_en/get-started/configurations/genai-dependencies.rst index 6eec18a74f0f05..13e28107f69d63 100644 --- a/docs/articles_en/get-started/configurations/genai-dependencies.rst +++ b/docs/articles_en/get-started/configurations/genai-dependencies.rst @@ -4,8 +4,8 @@ OpenVINO™ GenAI Dependencies OpenVINO™ GenAI depends on both `OpenVINO `__ and `OpenVINO Tokenizers `__. During OpenVINO™ GenAI installation from PyPi, the same versions of OpenVINO and OpenVINO Tokenizers -are used (e.g. ``openvino==2024.6.0`` and ``openvino-tokenizers==2024.6.0.0`` are installed for -``openvino-genai==2024.6.0``). +are used (e.g. ``openvino==2025.0.0`` and ``openvino-tokenizers==2025.0.0.0`` are installed for +``openvino-genai==2025.0.0``). 
Trying to update any of the dependency packages might result in a version incompatibility due to different Application Binary Interfaces (ABIs), which will result in errors while running diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 401aa79213e6d7..387a0bf2ab37e3 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -1,4 +1,4 @@ -Install OpenVINO™ 2024.6 +Install OpenVINO™ 2025.0 ========================== @@ -23,10 +23,10 @@ Install OpenVINO™ 2024.6 -OpenVINO 2024.6, described here, is not a Long-Term-Support version! +OpenVINO 2025.0, described here, is not a Long-Term-Support version! All currently supported versions are: -* 2024.6 (development) +* 2025.0 (development) * 2023.3 (LTS) diff --git a/docs/articles_en/openvino-workflow-generative.rst b/docs/articles_en/openvino-workflow-generative.rst index a4fa53335988ae..14521f118f6dfc 100644 --- a/docs/articles_en/openvino-workflow-generative.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -40,7 +40,7 @@ options: `Check out the OpenVINO GenAI Quick-start Guide [PDF] `__ - .. tab-item:: Hugging Face integration + .. tab-item:: Optimum Intel (Hugging Face integration) | - Suggested for prototyping and, if the use case is not covered by OpenVINO GenAI, production. | - Bigger footprint and more dependencies. @@ -55,10 +55,16 @@ options: as well as conversion on the fly. For integration with the final product it may offer lower performance, though. -Note that the base version of OpenVINO may also be used to run generative AI. Although it may -offer a simpler environment, with fewer dependencies, it has significant limitations and a more -demanding implementation process. For reference, see -`the article on generative AI usage of OpenVINO 2024.6 `__. + .. 
tab-item:: Base OpenVINO (not recommended) + + Note that the base version of OpenVINO may also be used to run generative AI. Although it may + offer a simpler environment, with fewer dependencies, it has significant limitations and a more + demanding implementation process. + + To learn more, refer to the article for the 2024.6 OpenVINO version: + `Generative AI with Base OpenVINO `__ + + The advantages of using OpenVINO for generative model deployment: diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst index f1018b82cf40ee..ce243dbd87f9ae 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst @@ -621,7 +621,7 @@ Two types of map entries are possible: descriptor and container. Descriptor sets the expected structure and possible parameter values of the map. For possible low-level properties and their description, refer to the header file: -`remote_properties.hpp `__. +`remote_properties.hpp `__. 
Examples ########################################################### diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst index 913d0090b92a52..a704833b374f19 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst @@ -88,7 +88,7 @@ The ``ov::CompiledModel`` class is also extended to support the properties: * ``ov::CompiledModel::set_property`` For documentation about OpenVINO common device-independent properties, refer to -`properties.hpp (GitHub) `__. +`properties.hpp (GitHub) `__. Device-specific configuration keys can be found in a corresponding device folders, for example, ``openvino/runtime/intel_gpu/properties.hpp``. diff --git a/docs/dev/ov_dependencies.txt b/docs/dev/ov_dependencies.txt index cb64e4d5a6534c..71c9c906f9640d 100644 --- a/docs/dev/ov_dependencies.txt +++ b/docs/dev/ov_dependencies.txt @@ -1,6 +1,6 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -#This file provides a comprehensive list of all dependencies of OpenVINO 2024.6 +#This file provides a comprehensive list of all dependencies of OpenVINO 2025.0 #The file is part of the automation pipeline for posting OpenVINO IR models on the HuggingFace Hub, including OneBOM dependency checks. diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index d0da8fa4244dd6..b4e1039248f3a0 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -1,5 +1,5 @@ ============================ -OpenVINO 2024.6 +OpenVINO 2025.0 ============================ .. 
meta:: diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/samples/cpp/benchmark/sync_benchmark/README.md index b1eb079216064d..7cbc0f26624fa6 100644 --- a/samples/cpp/benchmark/sync_benchmark/README.md +++ b/samples/cpp/benchmark/sync_benchmark/README.md @@ -1,6 +1,6 @@ # Sync Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) @@ -8,8 +8,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | -------------------------------| -------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/samples/cpp/benchmark/throughput_benchmark/README.md index 43633498321c1e..bf8e7e6c8b6291 100644 --- a/samples/cpp/benchmark/throughput_benchmark/README.md +++ b/samples/cpp/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. 
Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. @@ -10,8 +10,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| -------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/hello_reshape_ssd/README.md b/samples/cpp/hello_reshape_ssd/README.md index bc346e850cf5ba..1359b07fdf27b5 100644 --- a/samples/cpp/hello_reshape_ssd/README.md +++ b/samples/cpp/hello_reshape_ssd/README.md @@ -9,7 +9,7 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| 
-----------------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [person-detection-retail-0013](https://docs.openvino.ai/2024/omz_models_model_person_detection_retail_0013.html) | +| Validated Models | [person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | | Other language realization | [Python](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/hello-reshape-ssd.html) | diff --git a/samples/js/node/notebooks/hello-detection.nnb b/samples/js/node/notebooks/hello-detection.nnb index 60640b3bd042ea..e5c6f43f92a550 100644 --- a/samples/js/node/notebooks/hello-detection.nnb +++ b/samples/js/node/notebooks/hello-detection.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://docs.openvino.ai/2023.0/omz_models_model_horizontal_text_detection_0001.html) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." 
+ "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/horizontal-text-detection-0001) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-segmentation.nnb b/samples/js/node/notebooks/hello-segmentation.nnb index a7da34a2799edf..31873f1e1528df 100644 --- a/samples/js/node/notebooks/hello-segmentation.nnb +++ b/samples/js/node/notebooks/hello-segmentation.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://docs.openvino.ai/2023.0/omz_models_model_road_segmentation_adas_0001.html) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. The model recognizes four classes: background, road, curb and mark.\n" + "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/road-segmentation-adas-0001) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. 
The model recognizes four classes: background, road, curb and mark.\n" ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-world.nnb b/samples/js/node/notebooks/hello-world.nnb index 83d4ca8bec29f5..4da8eb3b4b334c 100644 --- a/samples/js/node/notebooks/hello-world.nnb +++ b/samples/js/node/notebooks/hello-world.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://docs.openvino.ai/2023.0/omz_models_model_mobilenet_v3_small_1_0_224_tf.html) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " + "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v3-small-1.0-224-tf) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " ], "outputs": [] }, diff --git a/samples/python/benchmark/bert_benchmark/README.md b/samples/python/benchmark/bert_benchmark/README.md index 84ddcba1e598a4..2894c5f33d633b 100644 --- a/samples/python/benchmark/bert_benchmark/README.md +++ b/samples/python/benchmark/bert_benchmark/README.md @@ -1,6 +1,6 @@ # Bert Benchmark Python Sample -This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. 
Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/bert-benchmark.html) diff --git a/samples/python/benchmark/sync_benchmark/README.md b/samples/python/benchmark/sync_benchmark/README.md index 4ce1329277b5b8..c7604386625572 100644 --- a/samples/python/benchmark/sync_benchmark/README.md +++ b/samples/python/benchmark/sync_benchmark/README.md @@ -1,19 +1,19 @@ # Sync Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) ## Requirements -| Options | Values | -| ----------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| ----------------------------| ----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: diff --git a/samples/python/benchmark/throughput_benchmark/README.md b/samples/python/benchmark/throughput_benchmark/README.md index 1ff02319ade062..5214c1190bb5e9 100644 --- a/samples/python/benchmark/throughput_benchmark/README.md +++ 
b/samples/python/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses default model precision which is usually float32. 
@@ -8,14 +8,14 @@ For more detailed information on how this sample works, check the dedicated [art ## Requirements -| Options | Values | -| -------------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html) | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| -------------------------------| -----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf) | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: From 7f56fcd4658c6a427111ac835e809ddd87f0cad2 Mon Sep 17 00:00:00 2001 From: Bharat Jain <152432505+itsbharatj@users.noreply.github.com> Date: Wed, 22 Jan 2025 01:30:07 +0530 Subject: [PATCH 76/97] Fixed copyright line on the codebase (#28593) ### Details: Inconsistencies in the `ending year` of the copyright line of the 
codebase. Identified and made all of them to `2025` Fixed the copyright information for all the files in codebase. --- .github/github_org_control/check_org.py | 2 +- .github/github_org_control/check_pr.py | 2 +- .github/github_org_control/configs.py | 2 +- .github/github_org_control/github_api.py | 2 +- .github/github_org_control/ldap_api.py | 2 +- docs/articles_en/assets/snippets/ov_dynamic_shapes.c | 2 +- docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp | 2 +- docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp | 2 +- docs/openvino_sphinx_theme/setup.py | 2 +- docs/scripts/articles_helper.py | 2 +- docs/scripts/create_mapping.py | 2 +- docs/snippets/example_itask_executor.cpp | 2 +- src/bindings/c/include/openvino/c/ov_remote_context.h | 2 +- src/bindings/c/src/ov_remote_context.cpp | 2 +- src/bindings/c/tests/ov_remote_context_test.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/op_extension.cpp | 2 +- src/bindings/python/src/pyopenvino/graph/op_extension.hpp | 2 +- .../include/low_precision/gather.hpp | 2 +- src/common/low_precision_transformations/src/gather.cpp | 2 +- .../tests/gather_transformation.cpp | 2 +- src/common/snippets/include/snippets/op/reg_spill.hpp | 2 +- src/common/snippets/src/generator.cpp | 2 +- src/common/snippets/src/op/reg_spill.cpp | 2 +- .../transformations/op_conversions/convert_convertlike.hpp | 2 +- .../op_conversions/convert_scatter_nd_update15_downgrade.hpp | 2 +- .../mark_decompression_convert_constant_folding.cpp | 2 +- .../src/transformations/op_conversions/convert_convertlike.cpp | 2 +- .../op_conversions/convert_scatter_nd_update15_downgrade.cpp | 2 +- .../tests/common_optimizations/convert_convertlike.cpp | 2 +- src/common/util/include/openvino/util/cpp_version.hpp | 2 +- src/common/util/include/openvino/util/file_path.hpp | 2 +- src/core/include/openvino/core/preprocess/padding_mode.hpp | 2 +- src/core/src/op/fake_convert.cpp | 2 +- src/core/tests/pass/serialization/from_model.cpp | 2 +- 
src/core/tests/type_prop/col2im.cpp | 2 +- src/core/tests/type_prop/region_yolo.cpp | 2 +- src/core/tests/type_prop/rms_norm.cpp | 2 +- src/core/tests/type_prop/slice_scatter.cpp | 2 +- src/core/tests/type_prop/stft.cpp | 2 +- src/core/tests/type_prop/string_tensor_pack.cpp | 2 +- src/core/tests/type_prop/string_tensor_unpack.cpp | 2 +- src/frontends/ir/tests/meta_data_tests.cpp | 2 +- src/frontends/ir/tests/threading_tests.cpp | 2 +- .../frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp | 2 +- src/frontends/onnx/frontend/src/op/unique.cpp | 2 +- src/frontends/onnx/frontend/src/utils/norm.hpp | 2 +- src/frontends/onnx/frontend/src/utils/split.hpp | 2 +- src/frontends/onnx/tests/conversion.cpp | 2 +- .../paddle/include/openvino/frontend/paddle/extension/op.hpp | 2 +- .../paddle/src/internal/pass/transform_fakequantize.cpp | 2 +- .../paddle/src/internal/pass/transform_fakequantize.hpp | 2 +- src/frontends/paddle/src/op/dequantize_linear.cpp | 2 +- src/frontends/paddle/src/op/generate_proposals_v2.cpp | 2 +- src/frontends/paddle/src/op/quantize_linear.cpp | 2 +- src/frontends/paddle/src/op/round.cpp | 2 +- src/frontends/paddle/src/op/top_k_v2.cpp | 2 +- src/frontends/paddle/tests/conversion.cpp | 2 +- src/frontends/paddle/tests/op_extension.cpp | 2 +- .../paddle/tests/test_models/gen_scripts/generate_roll.py | 2 +- .../paddle/tests/test_models/gen_scripts/generate_round.py | 2 +- src/frontends/pytorch/src/op/any.cpp | 2 +- src/frontends/pytorch/src/op/index_copy_.cpp | 2 +- src/frontends/pytorch/src/op/index_fill_.cpp | 2 +- src/frontends/tensorflow/tests/conversion.cpp | 2 +- src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp | 2 +- src/frontends/tensorflow_lite/tests/conversion.cpp | 2 +- .../tests/frontend/shared/include/conversion_extension.hpp | 2 +- src/frontends/tests/frontend/shared/src/conversion.cpp | 2 +- src/inference/include/openvino/runtime/intel_npu/properties.hpp | 2 +- src/plugins/auto/tests/functional/behavior/auto_func_test.hpp | 
2 +- src/plugins/auto/tests/functional/behavior/io_tensor.hpp | 2 +- src/plugins/auto/tests/unit/include/auto_unit_test.hpp | 2 +- .../auto/tests/unit/infer_request_schedule_policy_test.cpp | 2 +- src/plugins/auto/tests/unit/meta_device_check_test.cpp | 2 +- src/plugins/hetero/tests/functional/hetero_tests.hpp | 2 +- .../behavior/ov_compiled_model/import_export.cpp | 2 +- .../behavior/ov_compiled_model/properties.cpp | 2 +- .../behavior/ov_infer_request/infer_request_dynamic.cpp | 2 +- .../behavior/ov_infer_request/inference_chaining.cpp | 2 +- .../behavior/ov_infer_request/io_tensor.cpp | 2 +- .../behavior/ov_plugin/properties_tests.cpp | 2 +- src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h | 2 +- src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp | 2 +- src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h | 2 +- src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp | 2 +- src/plugins/intel_cpu/src/nodes/common/reorder_prim.h | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/convert.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/convert.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/deconv.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp | 2 +- .../src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/executor.hpp | 2 +- 
.../intel_cpu/src/nodes/executors/executor_implementation.hpp | 2 +- .../src/nodes/executors/fullyconnected_implementations.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/pooling.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/transpose.cpp | 2 +- src/plugins/intel_cpu/src/nodes/executors/transpose.hpp | 2 +- .../cpu_opset/common/op/causal_mask_preprocess.cpp | 2 +- .../cpu_opset/common/op/causal_mask_preprocess.hpp | 2 +- .../cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp | 2 +- .../cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp | 2 +- .../transformations/cpu_opset/common/pass/fc_bias_fusion.cpp | 2 +- .../transformations/cpu_opset/common/pass/fc_bias_fusion.hpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_copy_b.cpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_copy_b.hpp | 2 +- .../src/transformations/snippets/x64/op/brgemm_cpu.hpp | 2 +- .../transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp | 2 +- .../transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/factory.cpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/factory.hpp | 2 +- .../intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp | 2 +- 
src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp | 2 +- src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp | 2 +- .../src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp | 2 +- .../src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp | 2 +- .../tpp/x64/pass/lowered/set_tpp_leading_dim.cpp | 2 +- .../tpp/x64/pass/lowered/set_tpp_leading_dim.hpp | 2 +- .../src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp | 2 +- .../src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp | 2 +- src/plugins/intel_cpu/src/utils/enum_class_hash.hpp | 2 +- .../single_layer_tests/classes/convolution_backprop_data.cpp | 2 +- .../single_layer_tests/classes/convolution_backprop_data.hpp | 2 +- .../instances/arm/convolution_backprop_data.cpp | 2 +- .../instances/common/convolution_backprop_data.cpp | 2 +- .../instances/x64/convolution_backprop_data.cpp | 2 +- .../custom/subgraph_tests/src/classes/eltwise_chain.cpp | 2 +- .../custom/subgraph_tests/src/classes/eltwise_chain.hpp | 2 +- .../src/common/index_add_scatter_elements_update.cpp | 2 +- .../custom/subgraph_tests/src/common/inplace_resolve_io.cpp | 2 +- .../subgraph_tests/src/common/merge_transpose_reorder.cpp | 2 +- .../subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp | 2 +- .../behavior/compiled_model/cpu_reservation_test.cpp | 2 +- .../behavior/ov_infer_request/inference.cpp | 2 +- .../low_precision_transformations/x64/gather_transformation.cpp | 2 +- src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp | 2 +- .../tests/unit/streams_info/update_proc_table_test.cpp | 2 +- .../intel_gpu/include/intel_gpu/graph/serialization/utils.hpp | 2 +- src/plugins/intel_gpu/src/graph/paged_attention.cpp 
| 2 +- .../kernels/eltwise/eltwise_kernel_blocked_opt.cpp | 2 +- .../kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp | 2 +- .../kernels/permute/permute_kernel_bfzyx_to_bfyxz.h | 2 +- .../tests/functional/concurrency/gpu_reservation_test.cpp | 2 +- .../fuse_dequantize_to_fq_transformation.cpp | 2 +- .../low_precision_transformations/gather_transformation.cpp | 2 +- .../tests/functional/single_layer_tests/dynamic/broadcast.cpp | 2 +- src/plugins/proxy/tests/proxy_tests.hpp | 2 +- src/plugins/template/tests/functional/op_reference/loop.cpp | 2 +- .../template/tests/functional/op_reference/tensor_iterator.cpp | 2 +- .../behavior/ov_infer_request/batched_tensors.cpp | 2 +- .../behavior/ov_infer_request/inference.cpp | 2 +- .../include/behavior/ov_infer_request/batched_tensors.hpp | 2 +- .../shared/include/behavior/ov_infer_request/inference.hpp | 2 +- .../low_precision_transformations/gather_transformation.hpp | 2 +- .../shared/src/behavior/ov_infer_request/batched_tensors.cpp | 2 +- .../plugin/shared/src/behavior/ov_infer_request/inference.cpp | 2 +- .../fuse_dequantize_to_fake_quantize_transformation.cpp | 2 +- .../src/low_precision_transformations/gather_transformation.cpp | 2 +- .../shared_test_classes/base/utils/calculate_thresholds.hpp | 2 +- .../include/shared_test_classes/base/utils/compare_results.hpp | 2 +- .../include/shared_test_classes/base/utils/generate_inputs.hpp | 2 +- .../include/shared_test_classes/base/utils/ranges.hpp | 2 +- .../shared_test_classes/src/base/utils/generate_inputs.cpp | 2 +- .../ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp | 2 +- src/tests/ov_helpers/ov_lpt_models/src/gather.cpp | 2 +- .../include/common_test_utils/node_builders/broadcast.hpp | 2 +- .../include/common_test_utils/node_builders/reshape.hpp | 2 +- .../test_utils/common_test_utils/src/node_builders/reshape.cpp | 2 +- tests/layer_tests/pytorch_tests/test_constant_pad_nd.py | 2 +- tests/layer_tests/pytorch_tests/test_hardtanh.py | 2 +- 
tests/layer_tests/pytorch_tests/test_index_copy_.py | 2 +- tests/layer_tests/pytorch_tests/test_index_fill_.py | 2 +- tests/layer_tests/pytorch_tests/test_isfinite.py | 2 +- tests/layer_tests/pytorch_tests/test_isinf.py | 2 +- tests/layer_tests/pytorch_tests/test_isnan.py | 2 +- tests/layer_tests/pytorch_tests/test_select_scatter.py | 2 +- tests/layer_tests/pytorch_tests/test_slice_scatter.py | 2 +- tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py | 2 +- tests/time_tests/src/timetests/timetest_infer_api_2.cpp | 2 +- 202 files changed, 202 insertions(+), 202 deletions(-) diff --git a/.github/github_org_control/check_org.py b/.github/github_org_control/check_org.py index ab59d05066a630..7dd256e4c1fef8 100644 --- a/.github/github_org_control/check_org.py +++ b/.github/github_org_control/check_org.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/check_pr.py b/.github/github_org_control/check_pr.py index 08cf6f4b4dbfff..79db5940e24a58 100644 --- a/.github/github_org_control/check_pr.py +++ b/.github/github_org_control/check_pr.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/configs.py b/.github/github_org_control/configs.py index 3df12803c77de0..be93540cd4aa03 100644 --- a/.github/github_org_control/configs.py +++ b/.github/github_org_control/configs.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/github_api.py b/.github/github_org_control/github_api.py index 6f9d14c5376742..581921f3943a1e 100644 --- a/.github/github_org_control/github_api.py +++ b/.github/github_org_control/github_api.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel 
Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/.github/github_org_control/ldap_api.py b/.github/github_org_control/ldap_api.py index c0f7e2c18117ff..3c68242c40cf75 100644 --- a/.github/github_org_control/ldap_api.py +++ b/.github/github_org_control/ldap_api.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2021 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 """ diff --git a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c index fa1f3158365ddf..68cbef8ab0159e 100644 --- a/docs/articles_en/assets/snippets/ov_dynamic_shapes.c +++ b/docs/articles_en/assets/snippets/ov_dynamic_shapes.c @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp b/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp index c4e9002a9a61db..63d68516aa1c36 100644 --- a/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp +++ b/docs/articles_en/assets/snippets/ov_dynamic_shapes.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp b/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp index a5271d148190d0..67afd8ea13029c 100644 --- a/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp +++ b/docs/articles_en/assets/snippets/ov_stateful_models_intro.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/docs/openvino_sphinx_theme/setup.py b/docs/openvino_sphinx_theme/setup.py index 28af421d8d4e4b..0776711a7765f2 100644 --- 
a/docs/openvino_sphinx_theme/setup.py +++ b/docs/openvino_sphinx_theme/setup.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 from setuptools import setup diff --git a/docs/scripts/articles_helper.py b/docs/scripts/articles_helper.py index 6b01325fa24a95..1065e8b30f85a4 100644 --- a/docs/scripts/articles_helper.py +++ b/docs/scripts/articles_helper.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse diff --git a/docs/scripts/create_mapping.py b/docs/scripts/create_mapping.py index e36bfb53184fbc..b1094dd936f021 100644 --- a/docs/scripts/create_mapping.py +++ b/docs/scripts/create_mapping.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import argparse diff --git a/docs/snippets/example_itask_executor.cpp b/docs/snippets/example_itask_executor.cpp index e951917249f059..0890518e2f86f9 100644 --- a/docs/snippets/example_itask_executor.cpp +++ b/docs/snippets/example_itask_executor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2020 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/c/include/openvino/c/ov_remote_context.h b/src/bindings/c/include/openvino/c/ov_remote_context.h index 07ce1cfbe1fd73..b3dbb57f62a886 100644 --- a/src/bindings/c/include/openvino/c/ov_remote_context.h +++ b/src/bindings/c/include/openvino/c/ov_remote_context.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/c/src/ov_remote_context.cpp b/src/bindings/c/src/ov_remote_context.cpp index f1b9d7cbd6aacf..069802346d9cf9 100644 --- a/src/bindings/c/src/ov_remote_context.cpp +++ 
b/src/bindings/c/src/ov_remote_context.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "openvino/c/ov_remote_context.h" diff --git a/src/bindings/c/tests/ov_remote_context_test.cpp b/src/bindings/c/tests/ov_remote_context_test.cpp index d0d278acef94aa..4f13fe3fcebb6c 100644 --- a/src/bindings/c/tests/ov_remote_context_test.cpp +++ b/src/bindings/c/tests/ov_remote_context_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/op_extension.cpp b/src/bindings/python/src/pyopenvino/graph/op_extension.cpp index 9922493efdf28d..70834f313264db 100644 --- a/src/bindings/python/src/pyopenvino/graph/op_extension.cpp +++ b/src/bindings/python/src/pyopenvino/graph/op_extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/bindings/python/src/pyopenvino/graph/op_extension.hpp b/src/bindings/python/src/pyopenvino/graph/op_extension.hpp index 1f5f0e42d0c702..5fd0117218bb6c 100644 --- a/src/bindings/python/src/pyopenvino/graph/op_extension.hpp +++ b/src/bindings/python/src/pyopenvino/graph/op_extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/include/low_precision/gather.hpp b/src/common/low_precision_transformations/include/low_precision/gather.hpp index 980ec8f1e9b992..73be6a880a80ae 100644 --- a/src/common/low_precision_transformations/include/low_precision/gather.hpp +++ b/src/common/low_precision_transformations/include/low_precision/gather.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright 
(C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/src/gather.cpp b/src/common/low_precision_transformations/src/gather.cpp index 4c5959d5c373e0..437fae10ec0d1d 100644 --- a/src/common/low_precision_transformations/src/gather.cpp +++ b/src/common/low_precision_transformations/src/gather.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/low_precision_transformations/tests/gather_transformation.cpp b/src/common/low_precision_transformations/tests/gather_transformation.cpp index d710709ca69229..79a581e50d589c 100644 --- a/src/common/low_precision_transformations/tests/gather_transformation.cpp +++ b/src/common/low_precision_transformations/tests/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/include/snippets/op/reg_spill.hpp b/src/common/snippets/include/snippets/op/reg_spill.hpp index 84fe0b4da609c1..93ff1738830964 100644 --- a/src/common/snippets/include/snippets/op/reg_spill.hpp +++ b/src/common/snippets/include/snippets/op/reg_spill.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/src/generator.cpp b/src/common/snippets/src/generator.cpp index 144fab766e739b..bb6bd636a791ac 100644 --- a/src/common/snippets/src/generator.cpp +++ b/src/common/snippets/src/generator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/snippets/src/op/reg_spill.cpp b/src/common/snippets/src/op/reg_spill.cpp index 0eef459a47ac62..f09b2a419cf3fd 100644 --- 
a/src/common/snippets/src/op/reg_spill.cpp +++ b/src/common/snippets/src/op/reg_spill.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp index 5952fc114b76fd..94352953df1e82 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_convertlike.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp index 4af9172e6351cb..ea36cfddef4eed 100644 --- a/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp +++ b/src/common/transformations/include/transformations/op_conversions/convert_scatter_nd_update15_downgrade.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp b/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp index 7724692be61662..8393e0ac1e97f8 100644 --- a/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp +++ b/src/common/transformations/src/transformations/fp16_compression/mark_decompression_convert_constant_folding.cpp @@ 
-1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp index aa80d1e35af1e4..c04260917ca55d 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_convertlike.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp b/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp index 02bb4cbad5a94b..d72721be467a63 100644 --- a/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp +++ b/src/common/transformations/src/transformations/op_conversions/convert_scatter_nd_update15_downgrade.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp index 785559e4fef9e6..3ddafc7be0df0c 100644 --- a/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp +++ b/src/common/transformations/tests/common_optimizations/convert_convertlike.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/util/include/openvino/util/cpp_version.hpp 
b/src/common/util/include/openvino/util/cpp_version.hpp index c0998588027c2a..b250df6a38b2a2 100644 --- a/src/common/util/include/openvino/util/cpp_version.hpp +++ b/src/common/util/include/openvino/util/cpp_version.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/common/util/include/openvino/util/file_path.hpp b/src/common/util/include/openvino/util/file_path.hpp index 9080ea5289a51e..34c326e67ec391 100644 --- a/src/common/util/include/openvino/util/file_path.hpp +++ b/src/common/util/include/openvino/util/file_path.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/include/openvino/core/preprocess/padding_mode.hpp b/src/core/include/openvino/core/preprocess/padding_mode.hpp index c1391628e8f50b..5d20859397e837 100644 --- a/src/core/include/openvino/core/preprocess/padding_mode.hpp +++ b/src/core/include/openvino/core/preprocess/padding_mode.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/src/op/fake_convert.cpp b/src/core/src/op/fake_convert.cpp index 517674402ef872..71f7aed3f65e8b 100644 --- a/src/core/src/op/fake_convert.cpp +++ b/src/core/src/op/fake_convert.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/pass/serialization/from_model.cpp b/src/core/tests/pass/serialization/from_model.cpp index b1c3f0bad6212c..9999426d6c6431 100644 --- a/src/core/tests/pass/serialization/from_model.cpp +++ b/src/core/tests/pass/serialization/from_model.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // 
SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/col2im.cpp b/src/core/tests/type_prop/col2im.cpp index c376cdcf39d264..5532d210f760fc 100644 --- a/src/core/tests/type_prop/col2im.cpp +++ b/src/core/tests/type_prop/col2im.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/region_yolo.cpp b/src/core/tests/type_prop/region_yolo.cpp index 96dc868a0354f3..5eb2b317d35e7c 100644 --- a/src/core/tests/type_prop/region_yolo.cpp +++ b/src/core/tests/type_prop/region_yolo.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/rms_norm.cpp b/src/core/tests/type_prop/rms_norm.cpp index ca7155722241b7..97367d9a6a4959 100644 --- a/src/core/tests/type_prop/rms_norm.cpp +++ b/src/core/tests/type_prop/rms_norm.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/slice_scatter.cpp b/src/core/tests/type_prop/slice_scatter.cpp index fad6dd70349606..2be2d73d6e23fd 100644 --- a/src/core/tests/type_prop/slice_scatter.cpp +++ b/src/core/tests/type_prop/slice_scatter.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/stft.cpp b/src/core/tests/type_prop/stft.cpp index 2969af4e5a43bd..4ee5098797d3c8 100644 --- a/src/core/tests/type_prop/stft.cpp +++ b/src/core/tests/type_prop/stft.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/string_tensor_pack.cpp 
b/src/core/tests/type_prop/string_tensor_pack.cpp index a81aa8eeb1ffd4..4d40f9a3782c15 100644 --- a/src/core/tests/type_prop/string_tensor_pack.cpp +++ b/src/core/tests/type_prop/string_tensor_pack.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/core/tests/type_prop/string_tensor_unpack.cpp b/src/core/tests/type_prop/string_tensor_unpack.cpp index afdd44706635f5..37efe08b81120a 100644 --- a/src/core/tests/type_prop/string_tensor_unpack.cpp +++ b/src/core/tests/type_prop/string_tensor_unpack.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/ir/tests/meta_data_tests.cpp b/src/frontends/ir/tests/meta_data_tests.cpp index eba4f38af67913..2af1e0114222b9 100644 --- a/src/frontends/ir/tests/meta_data_tests.cpp +++ b/src/frontends/ir/tests/meta_data_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/ir/tests/threading_tests.cpp b/src/frontends/ir/tests/threading_tests.cpp index 7dc1ca193ddb97..a83d53b5151305 100644 --- a/src/frontends/ir/tests/threading_tests.cpp +++ b/src/frontends/ir/tests/threading_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp b/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp index 147e45301316a3..85700bddcfc01c 100644 --- a/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp +++ b/src/frontends/onnx/frontend/src/op/com.microsoft/dynamic_quantize_matmul.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation 
+// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/op/unique.cpp b/src/frontends/onnx/frontend/src/op/unique.cpp index bc842624474ccd..c1d0886181af4a 100644 --- a/src/frontends/onnx/frontend/src/op/unique.cpp +++ b/src/frontends/onnx/frontend/src/op/unique.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/onnx/frontend/src/utils/norm.hpp b/src/frontends/onnx/frontend/src/utils/norm.hpp index 964becc2f0db04..656dcd8ed1cd1a 100644 --- a/src/frontends/onnx/frontend/src/utils/norm.hpp +++ b/src/frontends/onnx/frontend/src/utils/norm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/frontends/onnx/frontend/src/utils/split.hpp b/src/frontends/onnx/frontend/src/utils/split.hpp index 5cdbaf287eb90b..809d2aec8d2d28 100644 --- a/src/frontends/onnx/frontend/src/utils/split.hpp +++ b/src/frontends/onnx/frontend/src/utils/split.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/frontends/onnx/tests/conversion.cpp b/src/frontends/onnx/tests/conversion.cpp index 237712e60b2725..94d735761b30a6 100644 --- a/src/frontends/onnx/tests/conversion.cpp +++ b/src/frontends/onnx/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp index 68cea85c19cc44..5dc1499a39080e 100644 --- a/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp 
+++ b/src/frontends/paddle/include/openvino/frontend/paddle/extension/op.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp index 4ab7557c4be2cb..d4d933721ee200 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp index 19abfcbf260d73..23f73f53597a43 100644 --- a/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp +++ b/src/frontends/paddle/src/internal/pass/transform_fakequantize.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/dequantize_linear.cpp b/src/frontends/paddle/src/op/dequantize_linear.cpp index 271b938c17ab43..f30055bf889bca 100644 --- a/src/frontends/paddle/src/op/dequantize_linear.cpp +++ b/src/frontends/paddle/src/op/dequantize_linear.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/generate_proposals_v2.cpp b/src/frontends/paddle/src/op/generate_proposals_v2.cpp index 2df436357bbb22..47547fd46a778b 100644 --- a/src/frontends/paddle/src/op/generate_proposals_v2.cpp +++ b/src/frontends/paddle/src/op/generate_proposals_v2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 
Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/quantize_linear.cpp b/src/frontends/paddle/src/op/quantize_linear.cpp index 99e12cd4d0efb4..43fcabd3747819 100644 --- a/src/frontends/paddle/src/op/quantize_linear.cpp +++ b/src/frontends/paddle/src/op/quantize_linear.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/round.cpp b/src/frontends/paddle/src/op/round.cpp index f981fa1e841843..5ce02ffe89bde9 100644 --- a/src/frontends/paddle/src/op/round.cpp +++ b/src/frontends/paddle/src/op/round.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/src/op/top_k_v2.cpp b/src/frontends/paddle/src/op/top_k_v2.cpp index 8f51920f05d1a2..cfb113c7a55102 100644 --- a/src/frontends/paddle/src/op/top_k_v2.cpp +++ b/src/frontends/paddle/src/op/top_k_v2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "default_opset.hpp" diff --git a/src/frontends/paddle/tests/conversion.cpp b/src/frontends/paddle/tests/conversion.cpp index 9bcbf9b855765c..c2ad29a42a3303 100644 --- a/src/frontends/paddle/tests/conversion.cpp +++ b/src/frontends/paddle/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/tests/op_extension.cpp b/src/frontends/paddle/tests/op_extension.cpp index e8843c10c475bc..cbd05bb1f1d212 100644 --- a/src/frontends/paddle/tests/op_extension.cpp +++ b/src/frontends/paddle/tests/op_extension.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py index 6c53d8091169fc..356f2809a10237 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_roll.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py index c19492373a3280..1b95b7c7406d99 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_round.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # diff --git a/src/frontends/pytorch/src/op/any.cpp b/src/frontends/pytorch/src/op/any.cpp index a17b8777e5f916..09941914065bdd 100644 --- a/src/frontends/pytorch/src/op/any.cpp +++ b/src/frontends/pytorch/src/op/any.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/pytorch/src/op/index_copy_.cpp b/src/frontends/pytorch/src/op/index_copy_.cpp index c13b53858a9c00..f8acb4db1749f8 100644 --- a/src/frontends/pytorch/src/op/index_copy_.cpp +++ b/src/frontends/pytorch/src/op/index_copy_.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/pytorch/src/op/index_fill_.cpp b/src/frontends/pytorch/src/op/index_fill_.cpp index a24f3fa2f5b1c7..ee0ac618079c3f 100644 --- 
a/src/frontends/pytorch/src/op/index_fill_.cpp +++ b/src/frontends/pytorch/src/op/index_fill_.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow/tests/conversion.cpp b/src/frontends/tensorflow/tests/conversion.cpp index d705a26a147839..db95a045351779 100644 --- a/src/frontends/tensorflow/tests/conversion.cpp +++ b/src/frontends/tensorflow/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp b/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp index 382f6f1914e334..42a6834cc0007d 100644 --- a/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp +++ b/src/frontends/tensorflow_common/src/op/tensor_scatter_add.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tensorflow_lite/tests/conversion.cpp b/src/frontends/tensorflow_lite/tests/conversion.cpp index cccae7494f85cb..56484a0b9dcf47 100644 --- a/src/frontends/tensorflow_lite/tests/conversion.cpp +++ b/src/frontends/tensorflow_lite/tests/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tests/frontend/shared/include/conversion_extension.hpp b/src/frontends/tests/frontend/shared/include/conversion_extension.hpp index f9932b7ca5352f..6df8f185b1b83d 100644 --- a/src/frontends/tests/frontend/shared/include/conversion_extension.hpp +++ b/src/frontends/tests/frontend/shared/include/conversion_extension.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/frontends/tests/frontend/shared/src/conversion.cpp b/src/frontends/tests/frontend/shared/src/conversion.cpp index 1a545b92708d76..95200314fcd645 100644 --- a/src/frontends/tests/frontend/shared/src/conversion.cpp +++ b/src/frontends/tests/frontend/shared/src/conversion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/inference/include/openvino/runtime/intel_npu/properties.hpp b/src/inference/include/openvino/runtime/intel_npu/properties.hpp index 723a8b26f555d4..4d7d14a7ebf389 100644 --- a/src/inference/include/openvino/runtime/intel_npu/properties.hpp +++ b/src/inference/include/openvino/runtime/intel_npu/properties.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp index 69ab036f22e0af..b49fd6f43e243d 100644 --- a/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp +++ b/src/plugins/auto/tests/functional/behavior/auto_func_test.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp index c4e000395f3eac..63942f86272d4c 100644 --- a/src/plugins/auto/tests/functional/behavior/io_tensor.hpp +++ b/src/plugins/auto/tests/functional/behavior/io_tensor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp 
b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp index 0b39b8e57dc8d2..af6aa58c163f4e 100644 --- a/src/plugins/auto/tests/unit/include/auto_unit_test.hpp +++ b/src/plugins/auto/tests/unit/include/auto_unit_test.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp index cf1ccda20491d3..f8946664579bdf 100644 --- a/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp +++ b/src/plugins/auto/tests/unit/infer_request_schedule_policy_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/src/plugins/auto/tests/unit/meta_device_check_test.cpp b/src/plugins/auto/tests/unit/meta_device_check_test.cpp index 7881899d925f8c..36f5f57dd31229 100644 --- a/src/plugins/auto/tests/unit/meta_device_check_test.cpp +++ b/src/plugins/auto/tests/unit/meta_device_check_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/hetero_tests.hpp b/src/plugins/hetero/tests/functional/hetero_tests.hpp index 98c2d487761b73..b3bb85ba78a842 100644 --- a/src/plugins/hetero/tests/functional/hetero_tests.hpp +++ b/src/plugins/hetero/tests/functional/hetero_tests.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp index 
7b6730c4169109..9624edcf80b4d2 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/import_export.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "behavior/compiled_model/import_export.hpp" diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp index 11bd48ab42e8c0..4907c9af2f0420 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_compiled_model/properties.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp index 711471b9855277..1a2d20d2f61052 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/infer_request_dynamic.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp index 
ed7d1fe42bdf8a..117b095fe2df87 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference_chaining.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp index daffaaae81f873..3f4e45f265f16d 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_infer_request/io_tensor.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp index 5b691e8ec83328..9f0f1e72cff6ce 100644 --- a/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp +++ b/src/plugins/hetero/tests/functional/shared_tests_instances/behavior/ov_plugin/properties_tests.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h index c26cc6aa33a251..dac07c252e902a 100644 --- a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h +++ b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 
Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp index 93f5278b06a4a8..afc057bbe29e0b 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h index b35de8e25fcae9..da0aa4284382dd 100644 --- a/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h +++ b/src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp index 2ee4c0a23bbdab..5223f6d90c0279 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h index 33e12b4045abf9..27774eb4557602 100644 --- a/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h +++ b/src/plugins/intel_cpu/src/nodes/common/reorder_prim.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp 
b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp index 7a8e431b606227..b104ca7d44aa24 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp index 1aae396f25a0fe..dfb09c3a896a04 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp index 6c3799da70bfda..e6b8242210c57d 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_utils.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp index cf11633e662e07..3dae01f201e2dd 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convert.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp index 85cd64e26c643c..56c5ba02d4f147 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convert.hpp +++ 
b/src/plugins/intel_cpu/src/nodes/executors/convert.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp index 44731d0648d039..5b28c47e2e11fb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp index 45e71acd476bb6..e725a3b244e02a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/deconv_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp index c342f5106c221d..55c66c6fa1cbbb 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp index 9afcfac56b14e9..849d7122d45726 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp +++ 
b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp index 38ceb9922eff70..25e3dcf6ae6421 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp index 31ffd979662f8c..649e0d0f058bc0 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0mvn // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp index 95ff85bb8bf851..02a044d89b6959 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp index 5b9479bdf502b6..bfc54e11d42934 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// 
Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp index f970d79c3ed1b2..3d976fa94d8c1e 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/eltwise_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp index 1d0e4c877ff8e5..cf0e8fed14be2b 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp index 375016038f2b68..6189fac38c06d5 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/executor_implementation.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index 792aacf54a118a..3b702844850744 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: 
Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp index 2f1ca6600bbd14..f89e35409008a4 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/graph_emitter.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp index 21ae249757bf9c..43a567b874a4ac 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/interpolate_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp index 42be857ba9dead..3e43bda24119ee 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_gemm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp index 2c66b9ce56af14..d64e1faf5fa5c8 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp index 2b08dc2a320b5d..0ff25d1f3dd59f 
100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp index 99a55d79f58177..b96a2e3c655de7 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp index 59d0447965a803..1cff58995addce 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mvn_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp index 325ae17f161c93..5af8ad8b48d32e 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp index 1cf34912e2293f..a9e2ca13c1621c 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/pooling_list.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel 
Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp index ddf4cf20034d92..278e6e7b16d668 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp index 6e569e62b65a19..2f1101a7a1ec54 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/transpose.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp index a0ea60b9a20a63..9cc83310cd8059 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "causal_mask_preprocess.hpp" diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp index 7628aea386e4e7..19636d0529c681 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/op/causal_mask_preprocess.hpp @@ -1,4 +1,4 
@@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp index e2bcac397af164..7e562104f99d08 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp index 4a46a042722a12..16fcc4dd03c24e 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/causal_mask_preprocess_fusion.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp index d92d2d3627b65b..4d913c4ced102c 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp index 5fadd183dfd694..b3ccb3f36d85cf 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/fc_bias_fusion.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp index df05ce5d539f46..7267e4355de1d6 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp index bf327784503352..e789c59e21dc4d 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_copy_b.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp index ddc21e8ddb59d3..b70e8fe122aea7 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_cpu.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 
Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp index 48456b8220300a..e0a87ca288bac1 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp index 245f83c13c3466..177c6a466765e4 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/brgemm_to_brgemm_cpu.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp index 4c5f2925ef0735..ba10db13dbdc98 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp index 4a147f79b2a37e..cda7f58afebea8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp +++ 
b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/brgemm.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp index 44aaf251bc201f..b9491d556c8292 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp index e0e890a347a026..173c12173d7835 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp index 9cfcc2f6226205..c7292770d3eb63 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/factory.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp index d9ecc3629f2430..558bc0216879c8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/modifiers.hpp 
@@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp index 11fc73b949a55c..b928620706c8b0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp index f66e913f85b6e7..07ed321abc7ff5 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/reduce.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp index 98a107380aa7d4..5855481efd1d60 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp index c619d7b6ab1937..9807dbfafa31d0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/op/scalar.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 
Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp index 571e292104d132..c042373f054fa2 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp index 6e1d9f110c6aec..2f00abb213dfb5 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/brgemm_to_brgemm_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp index 63dd44ca133fa0..bd2c96f7db696b 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp index 0b68074c657c15..faaa20c46f1ad3 100644 --- 
a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/eltwise_to_eltwise_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp index 42c30bb112263c..c1b981275face0 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp index 6be200c30b7c1c..e49b48ccbfb47e 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/lowered/set_tpp_leading_dim.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp index 06ca575f314b4b..f5188df53aeb28 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 
// diff --git a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp index 2a7e712ab1baea..4e8defeca762c8 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp +++ b/src/plugins/intel_cpu/src/transformations/tpp/x64/pass/scalar_to_scalar_tpp.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp b/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp index 7e17af42d05a97..28796c752decfc 100644 --- a/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp +++ b/src/plugins/intel_cpu/src/utils/enum_class_hash.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp index 415515ef7f40a2..651e75024987ab 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp index 615bb99225b952..3e797759d21ee0 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution_backprop_data.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp index 6229f9a30d3c45..f13ec1e98faa46 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/arm/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp index 538d9f48f88114..3af974dc80d35a 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/common/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp index 3263396b52521d..4576f283d43534 100755 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp +++ 
b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/instances/x64/convolution_backprop_data.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp index 760f9ccd6214cf..9ff85a02db4495 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp index 17954b438abd73..885435ba8c2dc3 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/classes/eltwise_chain.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp index 0b8086b48a1110..388d7050851e05 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/index_add_scatter_elements_update.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel 
Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp index 2d7442f945630f..26e9e9d59dbec2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/inplace_resolve_io.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp index 81686bd5cd4888..15d4dbc3786ccd 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/common/merge_transpose_reorder.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp index 2da18851bd0031..9166f70660ee3c 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/subgraph_tests/src/x64/convert_fq_rnn_to_quantized_rnn.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git 
a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp index 78ee401d169cbb..87a19ede785eba 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/compiled_model/cpu_reservation_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp index 50f6d09b4271d2..558896557dd58d 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp index 1cbd7152fbccfb..ba70b281461308 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/low_precision_transformations/x64/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff 
--git a/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp b/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp index a41cb4c4300d42..f520b0b53feae8 100644 --- a/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp +++ b/src/plugins/intel_cpu/tests/unit/graph/inplace_resolve_io.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp index 08c0cf2c9089e9..8fc607229b79d0 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/update_proc_table_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp b/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp index ae912fa9c7519c..72b4b870e74846 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/graph/serialization/utils.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/graph/paged_attention.cpp b/src/plugins/intel_gpu/src/graph/paged_attention.cpp index 48ae46d83de34a..c656cb1f284ae0 100644 --- a/src/plugins/intel_gpu/src/graph/paged_attention.cpp +++ b/src/plugins/intel_gpu/src/graph/paged_attention.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2023 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include "paged_attention_inst.h" diff --git 
a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp index d95b008171db24..7ecce23a56777f 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/eltwise/eltwise_kernel_blocked_opt.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp index e2723ed6841746..b969c986aa81fd 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h index 36acd93d225a45..4479e45d1a4a3b 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/permute/permute_kernel_bfzyx_to_bfyxz.h @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp index 07d4879257185c..fb9711e7605859 100644 --- 
a/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp +++ b/src/plugins/intel_gpu/tests/functional/concurrency/gpu_reservation_test.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2024 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp index 69c8ffe19e56ba..50e3f68f190594 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/fuse_dequantize_to_fq_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp index dbd5e3476a7a58..973a899ef01829 100644 --- a/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp +++ b/src/plugins/intel_gpu/tests/functional/shared_tests_instances/low_precision_transformations/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp index a3d9a1a9d3465d..1f9cb18db521c7 100644 --- 
a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp +++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/broadcast.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/proxy/tests/proxy_tests.hpp b/src/plugins/proxy/tests/proxy_tests.hpp index 014c9ba51aa6b1..075c12c0d7dfa1 100644 --- a/src/plugins/proxy/tests/proxy_tests.hpp +++ b/src/plugins/proxy/tests/proxy_tests.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once diff --git a/src/plugins/template/tests/functional/op_reference/loop.cpp b/src/plugins/template/tests/functional/op_reference/loop.cpp index ffdbc0b8dc6ee2..430b9ee1c76560 100644 --- a/src/plugins/template/tests/functional/op_reference/loop.cpp +++ b/src/plugins/template/tests/functional/op_reference/loop.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp index a2b3d199adf1bc..e6dcdc8900353b 100644 --- a/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp +++ b/src/plugins/template/tests/functional/op_reference/tensor_iterator.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp index d8e9b3a6284d52..a97fcc4d719a00 100644 --- 
a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/batched_tensors.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp index 2490bcb1a33cc2..e85c54f853b3cd 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp +++ b/src/plugins/template/tests/functional/shared_tests_instances/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp index 91baf94a800241..ec6ecba28d1f23 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/batched_tensors.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp index 492a0a528298fc..79f73403e27252 100644 --- a/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp +++ b/src/tests/functional/plugin/shared/include/behavior/ov_infer_request/inference.hpp @@ -1,4 +1,4 @@ -// 
Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp index 69cc1c804257f0..2a461cfddb24db 100644 --- a/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp +++ b/src/tests/functional/plugin/shared/include/low_precision_transformations/gather_transformation.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp index b0b926967d1e1a..21fb8dd6b6a9c3 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/batched_tensors.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp index 8ba8d4ee933781..d84519f897986b 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/inference.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp 
b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp index fac36d8f56b863..6f5dc0648fcb69 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/fuse_dequantize_to_fake_quantize_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp index be0e5144163f19..839b9f05f97429 100644 --- a/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp +++ b/src/tests/functional/plugin/shared/src/low_precision_transformations/gather_transformation.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp index afce941a948a81..f57d8f4caf89ac 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/calculate_thresholds.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp 
b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp index 1b04cf83b01b3c..5acab8dfa6e815 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/compare_results.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp index d2930be59d5eac..1ac793ca97faa5 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/generate_inputs.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp index 362258598a1344..3805fde5ce9bfb 100644 --- a/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp +++ b/src/tests/functional/shared_test_classes/include/shared_test_classes/base/utils/ranges.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp index ae963375fc7f5d..b8f41c30b55993 100644 --- 
a/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp +++ b/src/tests/functional/shared_test_classes/src/base/utils/generate_inputs.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp index cfc92209501e6f..a8b5fcd15100f4 100644 --- a/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp +++ b/src/tests/ov_helpers/ov_lpt_models/include/ov_lpt_models/gather.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp index 9d6e8b175b018d..f7f5b8a5716ed7 100644 --- a/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp +++ b/src/tests/ov_helpers/ov_lpt_models/src/gather.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp index c3e9cb4ae2cd07..4576caf18b89fd 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/broadcast.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp 
b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp index 3c13af77d110ca..44e5ed2303db4c 100644 --- a/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp +++ b/src/tests/test_utils/common_test_utils/include/common_test_utils/node_builders/reshape.hpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp index 7ea8196f39eaf0..8876076d1bed0c 100644 --- a/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp +++ b/src/tests/test_utils/common_test_utils/src/node_builders/reshape.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2022 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // diff --git a/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py b/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py index 7a92983bb1819d..56c2417e7dfea1 100644 --- a/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py +++ b/tests/layer_tests/pytorch_tests/test_constant_pad_nd.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/pytorch_tests/test_hardtanh.py b/tests/layer_tests/pytorch_tests/test_hardtanh.py index d0c4c1aac1a38d..728a0cf1d6db42 100644 --- a/tests/layer_tests/pytorch_tests/test_hardtanh.py +++ b/tests/layer_tests/pytorch_tests/test_hardtanh.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import platform diff --git a/tests/layer_tests/pytorch_tests/test_index_copy_.py b/tests/layer_tests/pytorch_tests/test_index_copy_.py index 
725c95936664cf..bd9f26814e1082 100644 --- a/tests/layer_tests/pytorch_tests/test_index_copy_.py +++ b/tests/layer_tests/pytorch_tests/test_index_copy_.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/layer_tests/pytorch_tests/test_index_fill_.py b/tests/layer_tests/pytorch_tests/test_index_fill_.py index 878dda7ab3bd7e..18c08669df1695 100644 --- a/tests/layer_tests/pytorch_tests/test_index_fill_.py +++ b/tests/layer_tests/pytorch_tests/test_index_fill_.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 diff --git a/tests/layer_tests/pytorch_tests/test_isfinite.py b/tests/layer_tests/pytorch_tests/test_isfinite.py index a72125799c8a49..00419cb89ceca8 100644 --- a/tests/layer_tests/pytorch_tests/test_isfinite.py +++ b/tests/layer_tests/pytorch_tests/test_isfinite.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/layer_tests/pytorch_tests/test_isinf.py b/tests/layer_tests/pytorch_tests/test_isinf.py index 72e6ae1f198ea2..cd33fa6acf8473 100644 --- a/tests/layer_tests/pytorch_tests/test_isinf.py +++ b/tests/layer_tests/pytorch_tests/test_isinf.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/layer_tests/pytorch_tests/test_isnan.py b/tests/layer_tests/pytorch_tests/test_isnan.py index 6645546c00707d..150c92ba92bdf6 100644 --- a/tests/layer_tests/pytorch_tests/test_isnan.py +++ b/tests/layer_tests/pytorch_tests/test_isnan.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git 
a/tests/layer_tests/pytorch_tests/test_select_scatter.py b/tests/layer_tests/pytorch_tests/test_select_scatter.py index 112675264c74a5..c2a881ece0e358 100644 --- a/tests/layer_tests/pytorch_tests/test_select_scatter.py +++ b/tests/layer_tests/pytorch_tests/test_select_scatter.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/pytorch_tests/test_slice_scatter.py b/tests/layer_tests/pytorch_tests/test_slice_scatter.py index 0d291f6bb4d3aa..1357a06c645ef7 100644 --- a/tests/layer_tests/pytorch_tests/test_slice_scatter.py +++ b/tests/layer_tests/pytorch_tests/test_slice_scatter.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2023 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest diff --git a/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py b/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py index 392469646b2803..8e6a64c141fa2c 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_TensorScatterAdd.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2024 Intel Corporation +# Copyright (C) 2018-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import numpy as np diff --git a/tests/time_tests/src/timetests/timetest_infer_api_2.cpp b/tests/time_tests/src/timetests/timetest_infer_api_2.cpp index 67943bf27a68f9..08bf0edb6279e8 100644 --- a/tests/time_tests/src/timetests/timetest_infer_api_2.cpp +++ b/tests/time_tests/src/timetests/timetest_infer_api_2.cpp @@ -1,4 +1,4 @@ -// Copyright (C) 2018-2021 Intel Corporation +// Copyright (C) 2018-2025 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #include From a6ab17ecd8e2568e0b6cd642d541e4da08a4424a Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 22 Jan 2025 08:59:10 +0400 Subject: [PATCH 77/97] Fixed C++ RTTI for Core 
base classes (#28555) ### Details: Android support: {ABD03686-78FD-4F33-A2E8-C3BE1C030D5C} in header files treated as an inline definition, which typically results in a "weak" symbol; "weak" symbols are not exported from dll/so libs. To make these symbols "strong", we moved the definitions to ".cpp" files. --- .../include/low_precision/cleanup_transformation.hpp | 1 - .../low_precision/fuse_elementwise_to_fake_quantize.hpp | 1 - .../include/low_precision/layer_transformation.hpp | 1 - .../low_precision/rt_info/shared_value_attribute.hpp | 2 -- src/core/include/openvino/core/any.hpp | 4 +--- src/core/include/openvino/core/attribute_adapter.hpp | 2 +- src/core/include/openvino/core/attribute_visitor.hpp | 2 +- src/core/include/openvino/core/model.hpp | 2 +- src/core/include/openvino/core/runtime_attribute.hpp | 2 +- .../include/openvino/op/util/multi_subgraph_base.hpp | 8 ++++---- src/core/include/openvino/op/util/variable_extension.hpp | 2 +- src/core/include/openvino/pass/pass.hpp | 2 +- src/core/include/openvino/pass/pattern/matcher.hpp | 5 +++-- src/core/include/openvino/runtime/allocator.hpp | 4 ++-- .../openvino/reference/utils/philox_converter.hpp | 2 +- .../openvino/reference/utils/philox_generator.hpp | 2 +- src/core/src/any.cpp | 2 ++ src/core/src/attribute_visitor.cpp | 4 ++++ src/core/src/model.cpp | 2 ++ src/core/src/op/util/multi_subgraph_base.cpp | 4 ++++ src/core/src/op/util/variable_extension.cpp | 9 +++++++++ src/core/src/pass/pass.cpp | 2 ++ src/core/src/pattern/matcher.cpp | 2 ++ src/core/src/runtime/allocator.cpp | 2 ++ src/core/src/runtime_attribute.cpp | 2 ++ .../common/include/openvino/frontend/decoder.hpp | 7 ++----- .../common/include/openvino/frontend/graph_iterator.hpp | 3 --- .../common/include/openvino/frontend/input_model.hpp | 2 +- .../common/include/openvino/frontend/node_context.hpp | 2 +- src/frontends/common/include/openvino/frontend/place.hpp | 2 +- src/frontends/common/src/decoder.cpp | 2 +-
src/frontends/common/src/input_model.cpp | 2 ++ src/frontends/common/src/node_context.cpp | 9 +++++++++ src/frontends/common/src/variable.cpp | 2 +- 34 files changed, 65 insertions(+), 37 deletions(-) create mode 100644 src/core/src/op/util/variable_extension.cpp create mode 100644 src/frontends/common/src/node_context.cpp diff --git a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp index 52de352c0bb5d9..dd321c3a922557 100644 --- a/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/cleanup_transformation.hpp @@ -17,7 +17,6 @@ namespace low_precision { class LP_TRANSFORMATIONS_API CleanupTransformation : public LayerTransformation { public: CleanupTransformation(const Params& params); - virtual ~CleanupTransformation() = default; bool canBeTransformed(const std::shared_ptr& layer) const override; static bool canBeTransformedStatic( diff --git a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp index 13b73a1112f4c5..a9fed138b41b0d 100644 --- a/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp +++ b/src/common/low_precision_transformations/include/low_precision/fuse_elementwise_to_fake_quantize.hpp @@ -19,7 +19,6 @@ namespace low_precision { class LP_TRANSFORMATIONS_API FuseElementwiseToFakeQuantizeTransformation : public CleanupTransformation { public: FuseElementwiseToFakeQuantizeTransformation(const Params& params); - virtual ~FuseElementwiseToFakeQuantizeTransformation() = default; bool canBeTransformed(const std::shared_ptr& layer) const override; }; diff --git 
a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp index b3c7aaa16ea33a..8b046de904f1e6 100644 --- a/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp +++ b/src/common/low_precision_transformations/include/low_precision/layer_transformation.hpp @@ -288,7 +288,6 @@ class LP_TRANSFORMATIONS_API LayerTransformation : public ov::pass::MatcherPass }; LayerTransformation(const Params& params); - virtual ~LayerTransformation() = default; virtual bool transform(ov::pass::pattern::Matcher &m) = 0; virtual bool canBeTransformed(const std::shared_ptr& layer) const; diff --git a/src/common/low_precision_transformations/include/low_precision/rt_info/shared_value_attribute.hpp b/src/common/low_precision_transformations/include/low_precision/rt_info/shared_value_attribute.hpp index 04eade094279a9..bf8f51b3077fae 100644 --- a/src/common/low_precision_transformations/include/low_precision/rt_info/shared_value_attribute.hpp +++ b/src/common/low_precision_transformations/include/low_precision/rt_info/shared_value_attribute.hpp @@ -15,8 +15,6 @@ template class LP_TRANSFORMATIONS_API SharedAttribute : public ov::RuntimeAttribute { public: - virtual ~SharedAttribute() = default; - /** * @ingroup ov_transformation_common_api * @brief SharedValueAttribute type for shared value attributes. diff --git a/src/core/include/openvino/core/any.hpp b/src/core/include/openvino/core/any.hpp index 6f65efb5233ab1..3e41ed51ee9c54 100644 --- a/src/core/include/openvino/core/any.hpp +++ b/src/core/include/openvino/core/any.hpp @@ -536,7 +536,7 @@ class OPENVINO_API Any { template U convert_impl() const; - virtual ~Base() = default; + virtual ~Base(); }; template @@ -611,8 +611,6 @@ class OPENVINO_API Any { template Impl(Args&&... args) : value(std::forward(args)...) 
{} - virtual ~Impl(){}; - const std::type_info& type_info() const override { return typeid(T); } diff --git a/src/core/include/openvino/core/attribute_adapter.hpp b/src/core/include/openvino/core/attribute_adapter.hpp index 706d3e9e363450..8f00cc6cfd601f 100644 --- a/src/core/include/openvino/core/attribute_adapter.hpp +++ b/src/core/include/openvino/core/attribute_adapter.hpp @@ -34,7 +34,7 @@ class OPENVINO_API ValueAccessor { /// \brief type info enables identification of the value accessor, as well as is_type and /// as_type. virtual const DiscreteTypeInfo& get_type_info() const = 0; - virtual ~ValueAccessor() = default; + virtual ~ValueAccessor(); virtual void set_as_any(const ov::Any& x) { OPENVINO_NOT_IMPLEMENTED; } diff --git a/src/core/include/openvino/core/attribute_visitor.hpp b/src/core/include/openvino/core/attribute_visitor.hpp index f1790cba959c2f..35fb5fb0ccd7e4 100644 --- a/src/core/include/openvino/core/attribute_visitor.hpp +++ b/src/core/include/openvino/core/attribute_visitor.hpp @@ -56,7 +56,7 @@ class VisitorAdapter; /// deserialization. class OPENVINO_API AttributeVisitor { public: - virtual ~AttributeVisitor() = default; + virtual ~AttributeVisitor(); // Must implement these methods /// \brief handles all specialized on_adapter methods implemented by the visitor. /// diff --git a/src/core/include/openvino/core/model.hpp b/src/core/include/openvino/core/model.hpp index 5733209f492377..6ad235062eda36 100644 --- a/src/core/include/openvino/core/model.hpp +++ b/src/core/include/openvino/core/model.hpp @@ -107,7 +107,7 @@ class OPENVINO_API Model : public std::enable_shared_from_this { /// based on traversing the graph from the results and the sinks. Model(const ov::OutputVector& results, const ov::SinkVector& sinks, const std::string& name = ""); - virtual ~Model() = default; + virtual ~Model(); /// Return the number of outputs for this Model. 
size_t get_output_size() const; diff --git a/src/core/include/openvino/core/runtime_attribute.hpp b/src/core/include/openvino/core/runtime_attribute.hpp index 82b860cc99d7ec..86171a81562e90 100644 --- a/src/core/include/openvino/core/runtime_attribute.hpp +++ b/src/core/include/openvino/core/runtime_attribute.hpp @@ -28,7 +28,7 @@ class OPENVINO_API RuntimeAttribute { } using Ptr = std::shared_ptr; using Base = std::tuple<::ov::RuntimeAttribute>; - virtual ~RuntimeAttribute() = default; + virtual ~RuntimeAttribute(); virtual bool is_copyable() const; virtual bool is_copyable(const std::shared_ptr& to) const; virtual Any init(const std::shared_ptr& node) const; diff --git a/src/core/include/openvino/op/util/multi_subgraph_base.hpp b/src/core/include/openvino/op/util/multi_subgraph_base.hpp index 5f6053acfcd281..71ba0ebe4f6b35 100644 --- a/src/core/include/openvino/op/util/multi_subgraph_base.hpp +++ b/src/core/include/openvino/op/util/multi_subgraph_base.hpp @@ -20,7 +20,7 @@ class OPENVINO_API MultiSubGraphOp : public ov::op::Sink { OPENVINO_OP("MultiSubGraphOp", "util", ov::op::Sink); /// \brief Abstract class describes a connection between a MultiSubGraphOp input and /// the body. - class InputDescription { + class OPENVINO_API InputDescription { protected: /// /// \brief Constructs a new instance. @@ -34,7 +34,7 @@ class OPENVINO_API MultiSubGraphOp : public ov::op::Sink { public: using Ptr = std::shared_ptr; using type_info_t = DiscreteTypeInfo; - virtual ~InputDescription() = default; + virtual ~InputDescription(); virtual std::shared_ptr copy() const = 0; virtual const type_info_t& get_type_info() const = 0; @@ -45,7 +45,7 @@ class OPENVINO_API MultiSubGraphOp : public ov::op::Sink { /// \brief Abstract class describes how a MultiSubGraphOp output is produced from /// the body. - class OutputDescription { + class OPENVINO_API OutputDescription { protected: /// /// \brief Constructs a new instance. 
@@ -59,7 +59,7 @@ class OPENVINO_API MultiSubGraphOp : public ov::op::Sink { public: using Ptr = std::shared_ptr; using type_info_t = DiscreteTypeInfo; - virtual ~OutputDescription() = default; + virtual ~OutputDescription(); virtual std::shared_ptr copy() const = 0; virtual const type_info_t& get_type_info() const = 0; diff --git a/src/core/include/openvino/op/util/variable_extension.hpp b/src/core/include/openvino/op/util/variable_extension.hpp index 924bd97c17f9a2..e4f811afe52a8f 100644 --- a/src/core/include/openvino/op/util/variable_extension.hpp +++ b/src/core/include/openvino/op/util/variable_extension.hpp @@ -39,7 +39,7 @@ class OPENVINO_API VariableExtension { virtual std::string get_variable_id() const = 0; protected: - virtual ~VariableExtension(){}; + virtual ~VariableExtension(); protected: std::shared_ptr m_variable; diff --git a/src/core/include/openvino/pass/pass.hpp b/src/core/include/openvino/pass/pass.hpp index fc019859128f25..6440114ab9a8ec 100644 --- a/src/core/include/openvino/pass/pass.hpp +++ b/src/core/include/openvino/pass/pass.hpp @@ -44,7 +44,7 @@ class OPENVINO_API PassBase { public: PassBase(); - virtual ~PassBase() = default; + virtual ~PassBase(); /// Check if this pass has all the pass properties. 
bool get_property(const PassPropertyMask& prop_mask) const; diff --git a/src/core/include/openvino/pass/pattern/matcher.hpp b/src/core/include/openvino/pass/pattern/matcher.hpp index f39b284702969e..acf7c85841fc09 100644 --- a/src/core/include/openvino/pass/pattern/matcher.hpp +++ b/src/core/include/openvino/pass/pattern/matcher.hpp @@ -108,7 +108,8 @@ class OPENVINO_API Matcher { Matcher(std::shared_ptr pattern_node, const std::string& name); Matcher(std::shared_ptr pattern_node, const std::string& name, bool strict_mode); - virtual ~Matcher() = default; + virtual ~Matcher(); + /// \brief Matches a pattern to \p graph_node /// /// \param graph_value is an input graph to be matched against @@ -176,7 +177,7 @@ class OPENVINO_API Matcher { size_t add_node(Output node); - bool virtual match_value(const ov::Output& pattern_value, const ov::Output& graph_value); + virtual bool match_value(const ov::Output& pattern_value, const ov::Output& graph_value); bool is_strict_mode() { return m_strict_mode; diff --git a/src/core/include/openvino/runtime/allocator.hpp b/src/core/include/openvino/runtime/allocator.hpp index 4180d90b0ce4bf..ae62fb7b290802 100644 --- a/src/core/include/openvino/runtime/allocator.hpp +++ b/src/core/include/openvino/runtime/allocator.hpp @@ -37,7 +37,7 @@ class OPENVINO_API Allocator { friend class ov::Tensor; - struct Base : public std::enable_shared_from_this { + struct OPENVINO_API Base : public std::enable_shared_from_this { virtual void* addressof() = 0; const void* addressof() const { return const_cast(this)->addressof(); @@ -48,7 +48,7 @@ class OPENVINO_API Allocator { virtual bool is_equal(const Base& other) const = 0; protected: - virtual ~Base() = default; + virtual ~Base(); }; template diff --git a/src/core/reference/include/openvino/reference/utils/philox_converter.hpp b/src/core/reference/include/openvino/reference/utils/philox_converter.hpp index 45fbb7d160a247..bf6baaff8af1d4 100644 --- 
a/src/core/reference/include/openvino/reference/utils/philox_converter.hpp +++ b/src/core/reference/include/openvino/reference/utils/philox_converter.hpp @@ -19,7 +19,7 @@ class PhiloxConverter { public: PhiloxConverter() = delete; - virtual ~PhiloxConverter(){}; + virtual ~PhiloxConverter() = default; /// \brief Returns the number of generated elements per execution /// based on the requested data type. diff --git a/src/core/reference/include/openvino/reference/utils/philox_generator.hpp b/src/core/reference/include/openvino/reference/utils/philox_generator.hpp index f38a947b9ded27..81ea89acf0c55c 100644 --- a/src/core/reference/include/openvino/reference/utils/philox_generator.hpp +++ b/src/core/reference/include/openvino/reference/utils/philox_generator.hpp @@ -30,7 +30,7 @@ class PhiloxGenerator { public: PhiloxGenerator() = delete; - virtual ~PhiloxGenerator(){}; + virtual ~PhiloxGenerator() = default; /// @brief Get a set of 4 random 32-bit unsigned integers based on the seed(s). /// @return A vector with a random set of 4 32-bit unsigned integers. 
diff --git a/src/core/src/any.cpp b/src/core/src/any.cpp index 1637a139680394..4dd9ce40af5703 100644 --- a/src/core/src/any.cpp +++ b/src/core/src/any.cpp @@ -30,6 +30,8 @@ bool util::equal(std::type_index lhs, std::type_index rhs) { return result; } +Any::Base::~Base() = default; + bool Any::Base::is(const std::type_info& other) const { return util::equal(type_info(), other); } diff --git a/src/core/src/attribute_visitor.cpp b/src/core/src/attribute_visitor.cpp index 63a59591e37839..991e549fa83cf0 100644 --- a/src/core/src/attribute_visitor.cpp +++ b/src/core/src/attribute_visitor.cpp @@ -10,6 +10,10 @@ using namespace std; +ov::ValueAccessor::~ValueAccessor() = default; + +ov::AttributeVisitor::~AttributeVisitor() = default; + void ov::AttributeVisitor::start_structure(const string& name) { m_context.push_back(name); } diff --git a/src/core/src/model.cpp b/src/core/src/model.cpp index c9a25676b9cf2e..44ac9c9177d955 100644 --- a/src/core/src/model.cpp +++ b/src/core/src/model.cpp @@ -221,6 +221,8 @@ ov::Model::Model(const ov::OutputVector& results, const ov::SinkVector& sinks, c ov::Model::Model(const OutputVector& results, const string& name) : Model(results, ov::SinkVector{}, name) {} +ov::Model::~Model() = default; + void ov::Model::prerequirements(bool detect_variables, bool detect_parameters) { OV_ITT_SCOPED_TASK(ov::itt::domains::core, "Model::prerequirements"); diff --git a/src/core/src/op/util/multi_subgraph_base.cpp b/src/core/src/op/util/multi_subgraph_base.cpp index 6b7f78afcfff86..5e5f84d8ff7353 100644 --- a/src/core/src/op/util/multi_subgraph_base.cpp +++ b/src/core/src/op/util/multi_subgraph_base.cpp @@ -8,10 +8,14 @@ ov::op::util::MultiSubGraphOp::InputDescription::InputDescription(uint64_t input : m_input_index(input_index), m_body_parameter_index(body_parameter_index) {} +ov::op::util::MultiSubGraphOp::InputDescription::~InputDescription() = default; + ov::op::util::MultiSubGraphOp::OutputDescription::OutputDescription(uint64_t body_value_index, 
uint64_t output_index) : m_body_value_index(body_value_index), m_output_index(output_index) {} +ov::op::util::MultiSubGraphOp::OutputDescription::~OutputDescription() = default; + ov::op::util::MultiSubGraphOp::SliceInputDescription::SliceInputDescription(uint64_t input_index, uint64_t body_parameter_index, int64_t start, diff --git a/src/core/src/op/util/variable_extension.cpp b/src/core/src/op/util/variable_extension.cpp new file mode 100644 index 00000000000000..b49055dbccd8f4 --- /dev/null +++ b/src/core/src/op/util/variable_extension.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/variable_extension.hpp" + +using namespace ov::op::util; + +VariableExtension::~VariableExtension() = default; diff --git a/src/core/src/pass/pass.cpp b/src/core/src/pass/pass.cpp index 73a3f7261e5d56..420b8d2729828a 100644 --- a/src/core/src/pass/pass.cpp +++ b/src/core/src/pass/pass.cpp @@ -16,6 +16,8 @@ using namespace std; ov::pass::PassBase::PassBase() : m_property(), m_name(), m_pass_config(std::make_shared()) {} +ov::pass::PassBase::~PassBase() = default; + bool ov::pass::PassBase::get_property(const PassPropertyMask& prop) const { return m_property.is_set(prop); } diff --git a/src/core/src/pattern/matcher.cpp b/src/core/src/pattern/matcher.cpp index 54e153ebbaa962..3681b933cebe14 100644 --- a/src/core/src/pattern/matcher.cpp +++ b/src/core/src/pattern/matcher.cpp @@ -37,6 +37,8 @@ Matcher::Matcher(std::shared_ptr pattern_node, const std::string& name) Matcher::Matcher(std::shared_ptr pattern_node, const std::string& name, bool strict_mode) : Matcher(make_node_output(pattern_node), name, strict_mode) {} +Matcher::~Matcher() = default; + MatcherState::~MatcherState() { if (m_restore) { if (!m_matcher->m_matched_list.empty()) { diff --git a/src/core/src/runtime/allocator.cpp b/src/core/src/runtime/allocator.cpp index 2a1d8a79ad3ddf..729a61d9b0d8dc 100644 --- 
a/src/core/src/runtime/allocator.cpp +++ b/src/core/src/runtime/allocator.cpp @@ -45,6 +45,8 @@ struct DefaultAllocator { } }; +Allocator::Base::~Base() = default; + Allocator::Allocator() : Allocator{DefaultAllocator{}} {} Allocator::~Allocator() { diff --git a/src/core/src/runtime_attribute.cpp b/src/core/src/runtime_attribute.cpp index e936afd6b29a76..cf67b09cb2b37d 100644 --- a/src/core/src/runtime_attribute.cpp +++ b/src/core/src/runtime_attribute.cpp @@ -9,6 +9,8 @@ namespace ov { +RuntimeAttribute::~RuntimeAttribute() = default; + std::string RuntimeAttribute::to_string() const { return {}; } diff --git a/src/frontends/common/include/openvino/frontend/decoder.hpp b/src/frontends/common/include/openvino/frontend/decoder.hpp index 46afa8aee1cfe9..ea13a714cb1016 100644 --- a/src/frontends/common/include/openvino/frontend/decoder.hpp +++ b/src/frontends/common/include/openvino/frontend/decoder.hpp @@ -49,9 +49,9 @@ struct Union; } // namespace type /// Plays a role of node, block and module decoder -class IDecoder { +class FRONTEND_API IDecoder { public: - virtual ~IDecoder() = default; + virtual ~IDecoder(); }; class FRONTEND_API DecoderBase : public IDecoder { @@ -82,9 +82,6 @@ class FRONTEND_API DecoderBase : public IDecoder { /// \brief Get node name virtual const std::string& get_op_name() const = 0; - - /// \brief Destructor - virtual ~DecoderBase(); }; } // namespace frontend diff --git a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp index 054173f1f41bdb..e199ece6d06694 100644 --- a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp +++ b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp @@ -34,9 +34,6 @@ class FRONTEND_API GraphIterator : ::ov::RuntimeAttribute { /// \brief Return a pointer to a decoder of the current node virtual std::shared_ptr get_decoder() const = 0; - /// \brief Destructor - virtual ~GraphIterator() = default; - /// 
\brief Checks if the main model graph contains a function of the requested name in the library /// Returns GraphIterator to this function and nullptr, if it does not exist virtual std::shared_ptr get_body_graph_iterator(const std::string& func_name) const = 0; diff --git a/src/frontends/common/include/openvino/frontend/input_model.hpp b/src/frontends/common/include/openvino/frontend/input_model.hpp index f679a28d41aca5..5ea01dba5ea97a 100644 --- a/src/frontends/common/include/openvino/frontend/input_model.hpp +++ b/src/frontends/common/include/openvino/frontend/input_model.hpp @@ -51,7 +51,7 @@ class FRONTEND_API InputModel { InputModel& operator=(const InputModel&) = delete; InputModel& operator=(InputModel&&) = delete; - virtual ~InputModel() = default; + virtual ~InputModel(); ///// Searching for places ///// diff --git a/src/frontends/common/include/openvino/frontend/node_context.hpp b/src/frontends/common/include/openvino/frontend/node_context.hpp index 2fb607f3644b16..9d6bb78789298b 100644 --- a/src/frontends/common/include/openvino/frontend/node_context.hpp +++ b/src/frontends/common/include/openvino/frontend/node_context.hpp @@ -18,7 +18,7 @@ class FRONTEND_API NodeContext { public: // TODO: Why this ctor is explicit when get_op_type is virtual so m_op_type looks to be a custom implementation explicit NodeContext(const std::string& op_type) : m_op_type(op_type) {} - virtual ~NodeContext() = default; + virtual ~NodeContext(); /// \brief Returns a number of inputs virtual size_t get_input_size() const { diff --git a/src/frontends/common/include/openvino/frontend/place.hpp b/src/frontends/common/include/openvino/frontend/place.hpp index ecaed4cd93b2da..be661149dc09e5 100644 --- a/src/frontends/common/include/openvino/frontend/place.hpp +++ b/src/frontends/common/include/openvino/frontend/place.hpp @@ -61,7 +61,7 @@ class FRONTEND_API Place { public: typedef std::shared_ptr Ptr; - virtual ~Place() = 0; + virtual ~Place(); /// \brief All associated names 
(synonyms) that identify this place in the graph in a /// framework specific way diff --git a/src/frontends/common/src/decoder.cpp b/src/frontends/common/src/decoder.cpp index b9f4b27da2ce99..f140b9e64302be 100644 --- a/src/frontends/common/src/decoder.cpp +++ b/src/frontends/common/src/decoder.cpp @@ -6,4 +6,4 @@ using namespace ov::frontend; -DecoderBase::~DecoderBase() = default; \ No newline at end of file +IDecoder::~IDecoder() = default; diff --git a/src/frontends/common/src/input_model.cpp b/src/frontends/common/src/input_model.cpp index f2c52f2eb696d1..11d474aaec4bcb 100644 --- a/src/frontends/common/src/input_model.cpp +++ b/src/frontends/common/src/input_model.cpp @@ -12,6 +12,8 @@ using namespace ov; using namespace ov::frontend; +InputModel::~InputModel() = default; + std::vector InputModel::get_inputs() const { if (!m_actual) { return {}; diff --git a/src/frontends/common/src/node_context.cpp b/src/frontends/common/src/node_context.cpp new file mode 100644 index 00000000000000..91c8c8c911ca9e --- /dev/null +++ b/src/frontends/common/src/node_context.cpp @@ -0,0 +1,9 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/node_context.hpp" + +using namespace ov::frontend; + +NodeContext::~NodeContext() = default; diff --git a/src/frontends/common/src/variable.cpp b/src/frontends/common/src/variable.cpp index 9c82520d568bd2..b0fb30280080f3 100644 --- a/src/frontends/common/src/variable.cpp +++ b/src/frontends/common/src/variable.cpp @@ -6,4 +6,4 @@ using namespace ov::frontend; -Variable::~Variable(){}; +Variable::~Variable() = default; From b8b243582dbe7f1c8f673affeee87a7ed4a4e712 Mon Sep 17 00:00:00 2001 From: Sergey Shlyapnikov Date: Wed, 22 Jan 2025 09:34:02 +0400 Subject: [PATCH 78/97] [GPU] Fix compressed KV-cache shape infer for QKV order {1,2,0,3} (#28592) ### Details: - This change restores the original logic of axis calculation for scales/zp and fixes the shape inference of 
KV-Cache operation. Previously, it needed to be adjusted with concat_axis, but after the introduction of independent macros for key and value scale/zp offsets calculation in micro_sdpa kernel, this adjustment is no longer needed and causes incorrect indexing. Therefore, this change reverts to the original fixed scale/zp axis equal to 2. --- .../graph/graph_optimizer/prepare_buffer_fusing.cpp | 2 +- .../intel_gpu/src/graph/include/kv_cache_inst.h | 4 ++-- src/plugins/intel_gpu/src/graph/primitive_inst.cpp | 12 ++++++------ .../src/plugin/transformations/op/kv_cache.cpp | 3 +-- .../intel_gpu/tests/common/subgraphs_builders.hpp | 8 ++++---- .../subgraph_tests/dynamic/kv_cache_sdpa.cpp | 4 ++++ 6 files changed, 18 insertions(+), 15 deletions(-) diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp index 7db9c2c0d59419..03e4af4d16359b 100644 --- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp +++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_buffer_fusing.cpp @@ -943,7 +943,7 @@ void prepare_buffer_fusing::run(program& p) { auto update_scale_zp = [&](size_t kv_cache_output_idx, size_t read_value_output_idx) { auto scales_out_layout = node.get_output_layout(false, kv_cache_output_idx); - const auto scales_zp_concat_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + const auto scales_zp_concat_axis = kv_cache_inst::get_scale_zp_sequence_axis(); padding::DynamicDimsMask info_dynamic_pad_scales; info_dynamic_pad_scales[scales_zp_concat_axis] = 1; scales_out_layout.data_padding._dynamic_dims_mask = info_dynamic_pad_scales; diff --git a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h index e95e2e94ff4ab0..945894af30170c 100644 --- a/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h +++ 
b/src/plugins/intel_gpu/src/graph/include/kv_cache_inst.h @@ -62,8 +62,8 @@ class typed_primitive_inst : public typed_primitive_inst_base= 0 ? sequence_axis : past_layout_rank + sequence_axis; } - static int64_t get_scale_zp_sequence_axis(int64_t sequence_axis, const kv_cache::QuantizationAttributes& quantization_attrs) { - const auto scale_zp_concat_axis = quantization_attrs.scales_zp_output_order[sequence_axis]; + static int64_t get_scale_zp_sequence_axis() { + const auto scale_zp_concat_axis = 2; return scale_zp_concat_axis; } diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp index abfeabe2b6a149..e574684e6b4f10 100644 --- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp +++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp @@ -851,7 +851,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { auto prealloc_shape = updated_layouts[i].get_shape(); const auto shape_rank = prealloc_shape.size(); const auto seq_axis = i == 0 ? kv_cache_inst::get_sequence_axis(desc->concat_axis, shape_rank) - : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + : kv_cache_inst::get_scale_zp_sequence_axis(); prealloc_shape[seq_axis] += tmp_prealloc_count; required_buffer_size = std::accumulate(prealloc_shape.begin(), prealloc_shape.end(), size_t(1), std::multiplies()); @@ -883,7 +883,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { const auto& desc = _node->as().get_primitive(); const auto shape_rank = updated_layouts[i].get_shape().size(); const auto seq_axis = i == 0 ? 
kv_cache_inst::get_sequence_axis(desc->concat_axis, shape_rank) - : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + : kv_cache_inst::get_scale_zp_sequence_axis(); prealloc_info = sp.predict_preallocation_shape(id(), updated_layouts[i], false, i, tmp_prealloc_count, seq_axis); } else { @@ -907,7 +907,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { auto& present_layout = _impl_params->output_layouts[i]; const auto present_layout_rank = present_layout.get_partial_shape().size(); const auto sequence_axis = i == 0 ? kv_cache_inst::get_sequence_axis(desc->concat_axis, present_layout_rank) - : kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + : kv_cache_inst::get_scale_zp_sequence_axis(); auto max_pad = kv_cache_inst::get_max_pad(present_layout, _max_output_layout_count[i], @@ -978,7 +978,7 @@ void primitive_inst::realloc_if_needed(bool prev_execution_skipped) { if (max_pad > 0) { if (auto compressed_cache_variable = dynamic_cast(&variable)) { auto present_scales_layout = _impl_params->output_layouts[2]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(); // In case of compressed KV-cache, calling update_impl for each iteration // because of scales layout [batch, num_heads, seq_len, head_size], which requires proper @@ -1374,7 +1374,7 @@ void primitive_inst::do_runtime_in_place_kv_cache() { if (desc->compressed) { auto compressed_cache_variable = dynamic_cast(&variable); auto& present_scales_layout = _impl_params->output_layouts[2]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(); kv_cache_inst::update_pad(present_scales_layout, max_pad - new_seq_len, sequence_axis); 
GPU_DEBUG_TRACE_DETAIL << "[do runtime_in_place_kv_cache] " << id() << " Updated present_scale_layout's pad : " << present_scales_layout.to_string() << std::endl; @@ -1398,7 +1398,7 @@ void primitive_inst::do_runtime_in_place_kv_cache() { if (desc->compressed) { auto& past_scale_layout = _impl_params->input_layouts[3]; - const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(desc->concat_axis, desc->quantization_attributes); + const auto sequence_axis = kv_cache_inst::get_scale_zp_sequence_axis(); kv_cache_inst::update_pad(past_scale_layout, max_pad, sequence_axis); if (desc->get_compression_zp_inputs_num() > 0) { diff --git a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp index 908732dc357222..6721d0f9ebd608 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/op/kv_cache.cpp @@ -191,8 +191,7 @@ std::vector shape_infer(const KVCacheCompressed* op, auto quantized_data_shapes = ov::op::internal::DynamicQuantize::shape_infer(&dq_op, { input_shapes[1] }); - const auto concat_axis = ov::util::normalize(op->get_concat_axis(), input_shapes[0].size()); - const auto scales_concat_axis = op->get_quantization_attrs().scales_zp_output_order[concat_axis]; + const auto scales_concat_axis = 2; ov::PartialShape compression_scale_shape = input_shapes[3]; compression_scale_shape[scales_concat_axis] += quantized_data_shapes[1][scales_concat_axis]; out_shapes[2] = compression_scale_shape; diff --git a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp index c774049fe0690f..65221107967bcc 100644 --- a/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp +++ b/src/plugins/intel_gpu/tests/common/subgraphs_builders.hpp @@ -120,8 +120,8 @@ inline std::shared_ptr make_qkv_transpose(ov::Output qkv, st return std::make_shared(qkv, 
transpose_const); } -inline std::shared_ptr make_kv_rearrange(ov::Output kv_past, ov::Output beam_idx) { - auto axis = std::make_shared(ov::element::i32, ov::Shape{}, 0); +inline std::shared_ptr make_kv_rearrange(ov::Output kv_past, ov::Output beam_idx, int axis_val = 0) { + auto axis = std::make_shared(ov::element::i32, ov::Shape{}, axis_val); return std::make_shared(kv_past, beam_idx, axis, 0); } @@ -242,8 +242,8 @@ inline std::shared_ptr make_llm_kv_cache_sdpa_pattern(ov::Dimension b in_beam_idx->set_friendly_name("beam_idx"); params.push_back(in_beam_idx); - concat_k_input = make_kv_rearrange(past_k, in_beam_idx); - concat_v_input = make_kv_rearrange(past_v, in_beam_idx); + concat_k_input = make_kv_rearrange(past_k, in_beam_idx, qkv_order[0]); + concat_v_input = make_kv_rearrange(past_v, in_beam_idx, qkv_order[0]); } auto concat_k = std::make_shared(ov::OutputVector{concat_k_input, in_k_token}, concat_axis); diff --git a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp index 89612039fb788f..7bb4a7385bcdc4 100644 --- a/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp +++ b/src/plugins/intel_gpu/tests/functional/subgraph_tests/dynamic/kv_cache_sdpa.cpp @@ -342,6 +342,7 @@ std::vector get_test_params() { p.push_back({with_rearrange, with_mask, !with_scale, !causal, !compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 1, 2, 3}}); p.push_back({with_rearrange, with_mask, !with_scale, !causal, !compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 2, 1, 3}}); p.push_back({!with_rearrange, with_mask, !with_scale, !causal, !compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 2, 1, 3}}); + p.push_back({!with_rearrange, with_mask, !with_scale, !causal, !compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {1, 2, 0, 3}}); // Beam search p.push_back({with_rearrange, !with_mask, !with_scale, !causal, !compressed, 2, 
ov::element::Type_t::f16, 10, 4, 1, {0, 1, 2, 3}}); @@ -351,6 +352,7 @@ std::vector get_test_params() { p.push_back({with_rearrange, with_mask, !with_scale, !causal, compressed, 1, ov::element::Type_t::f16, 10, 1, 1, {0, 1, 2, 3}}); p.push_back({with_rearrange, with_mask, !with_scale, !causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 2, 1, 3}}); p.push_back({with_rearrange, with_mask, !with_scale, !causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 1, 2, 3}}); + p.push_back({with_rearrange, with_mask, !with_scale, !causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {1, 2, 0, 3}}); /* -- causal mask -- */ @@ -367,6 +369,8 @@ std::vector get_test_params() { p.push_back({with_rearrange, with_mask, !with_scale, causal, compressed, 1, ov::element::Type_t::f16, 10, 1, 1, {0, 1, 2, 3}}); p.push_back({with_rearrange, with_mask, !with_scale, causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 2, 1, 3}}); p.push_back({with_rearrange, with_mask, !with_scale, causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {0, 1, 2, 3}}); + p.push_back({with_rearrange, with_mask, !with_scale, causal, compressed, 1, ov::element::Type_t::f16, 10, 4, 1, {1, 2, 0, 3}}); + return p; } From 4a63b931db93e71a53c1a39e660c02a36dd49e14 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 07:12:00 +0000 Subject: [PATCH 79/97] Bump actions/checkout from 4.2.0 to 4.2.2 (#28600) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.0 to 4.2.2.
    Release notes

    Sourced from actions/checkout's releases.

    v4.2.2

    What's Changed

    Full Changelog: https://github.com/actions/checkout/compare/v4.2.1...v4.2.2

    v4.2.1

    What's Changed

    New Contributors

    Full Changelog: https://github.com/actions/checkout/compare/v4.2.0...v4.2.1

    Changelog

    Sourced from actions/checkout's changelog.

    Changelog

    v4.2.2

    v4.2.1

    v4.2.0

    v4.1.7

    v4.1.6

    v4.1.5

    v4.1.4

    v4.1.3

    v4.1.2

    v4.1.1

    v4.1.0

    ... (truncated)

    Commits

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=4.2.0&new-version=4.2.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/job_python_api_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/job_python_api_tests.yml b/.github/workflows/job_python_api_tests.yml index e12001cd67afba..75ce8cb2c808ea 100644 --- a/.github/workflows/job_python_api_tests.yml +++ b/.github/workflows/job_python_api_tests.yml @@ -66,7 +66,7 @@ jobs: working-directory: ${{ env.INSTALL_TEST_DIR }} - name: Fetch setup_python and install wheels actions - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 timeout-minutes: 15 with: sparse-checkout: | @@ -122,7 +122,7 @@ jobs: - name: Clone API snippets if: ${{ runner.os != 'macOS' && fromJSON(inputs.affected-components).docs_snippets.test }} - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 timeout-minutes: 15 with: sparse-checkout: docs/articles_en/assets/snippets From 1616c9d1221373c810b4d9c851853f48a4386e43 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Wed, 22 Jan 2025 08:25:09 +0000 Subject: [PATCH 80/97] NPUW: Fix long tensors names (#28591) CVS-159901 CVS-160529 --------- Co-authored-by: Dmitry Matveev --- .../src/plugin/npuw/compiled_model.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp index f0d9950c2e3520..24e70aa7125e52 100644 --- a/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp +++ b/src/plugins/intel_npu/src/plugin/npuw/compiled_model.cpp @@ -863,12 +863,13 @@ std::string ov::npuw::CompiledModel::funcall_mem_device(const std::size_t idx) c void ov::npuw::CompiledModel::remove_long_output_names(const 
std::shared_ptr& model) { NPUW_ASSERT(model.get() != nullptr); - for (auto& output : model->outputs()) { - const auto& tensor_names = output.get_tensor().get_names(); - if (tensor_names.size() > 32) { // maximum supported - output.get_tensor().set_names({}); - LOG_INFO("Removed output tensor names for " << model->get_friendly_name()); - LOG_BLOCK(); + for (auto node : model->get_ordered_ops()) { + for (auto &&output : node->outputs()) { + const auto& tensor_names = output.get_tensor().get_names(); + if (tensor_names.size() > 32) { + LOG_VERB(model->get_friendly_name() << " output " << output << " exceeds the name limit, removing..."); + output.get_tensor().set_names({}); + } } } } @@ -883,8 +884,7 @@ void ov::npuw::CompiledModel::fill_empty_tensor_names(const std::shared_ptrget_friendly_name()); - LOG_BLOCK(); + LOG_VERB("Added input tensor name for " << model->get_friendly_name()); } in_tensor_idx++; } @@ -892,8 +892,7 @@ void ov::npuw::CompiledModel::fill_empty_tensor_names(const std::shared_ptrget_friendly_name()); - LOG_BLOCK(); + LOG_VERB("Added output tensor name for " << model->get_friendly_name()); } out_tensor_idx++; } From cfbc99805bc845f57f12f06de44034e9e8a9fd20 Mon Sep 17 00:00:00 2001 From: Mikhail Ryzhov Date: Wed, 22 Jan 2025 10:05:27 +0100 Subject: [PATCH 81/97] [GHA] VS 2022 (#28520) ### Details: - Switch Windows pipelines to VS 2022 ### Tickets: - *ticket-id* --- .github/actions/common/constants.py | 4 ++- .github/workflows/job_build_windows.yml | 8 ++--- .github/workflows/job_samples_tests.yml | 4 +-- .../windows_conditional_compilation.yml | 2 +- .github/workflows/windows_vs2019_debug.yml | 6 ++-- .github/workflows/windows_vs2019_release.yml | 4 +-- .github/workflows/workflows_to_track.txt | 8 ++--- src/bindings/python/wheel/CMakeLists.txt | 36 ++++++++++--------- src/bindings/python/wheel/fdupes_check.cmake | 8 ++++- src/bindings/python/wheel/setup.py | 4 +-- 10 files changed, 48 insertions(+), 36 deletions(-) diff --git 
a/.github/actions/common/constants.py b/.github/actions/common/constants.py index 6a1d165fc7df13..a9ed34ce7a2275 100644 --- a/.github/actions/common/constants.py +++ b/.github/actions/common/constants.py @@ -16,6 +16,8 @@ class EventType(Enum): 'public_linux_ubuntu_24_04_x86_64_release', 'public_windows_vs2019_Release', 'public_windows_vs2019_Debug', + 'public_windows_vs2022_Release', + 'public_windows_vs2022_Debug', 'public_manylinux2014_x86_64_release', ) ProductType = Enum('ProductType', {t.upper(): t for t in productTypes}) @@ -41,5 +43,5 @@ class EventType(Enum): PlatformKey.UBUNTU20_ARM64: ProductType.PUBLIC_LINUX_UBUNTU_20_04_ARM64_RELEASE, PlatformKey.UBUNTU22_X86_64: ProductType.PUBLIC_LINUX_UBUNTU_22_04_X86_64_RELEASE, PlatformKey.UBUNTU24_X86_64: ProductType.PUBLIC_LINUX_UBUNTU_24_04_X86_64_RELEASE, - PlatformKey.WINDOWS_X86_64: ProductType.PUBLIC_WINDOWS_VS2019_RELEASE, + PlatformKey.WINDOWS_X86_64: ProductType.PUBLIC_WINDOWS_VS2022_RELEASE, } diff --git a/.github/workflows/job_build_windows.yml b/.github/workflows/job_build_windows.yml index 988bec1de7f929..e725f15c7081c1 100644 --- a/.github/workflows/job_build_windows.yml +++ b/.github/workflows/job_build_windows.yml @@ -56,7 +56,7 @@ jobs: BUILD_DIR: "${{ github.workspace }}\\openvino_build" ARTIFACTS_SHARE: "C:\\mount\\build-artifacts" MANIFEST_PATH: "${{ github.workspace }}\\manifest.yml" - PRODUCT_TYPE: 'public_windows_vs2019_${{ inputs.build-type }}' + PRODUCT_TYPE: 'public_windows_vs2022_${{ inputs.build-type }}' steps: - name: Clone OpenVINO uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 @@ -154,10 +154,10 @@ jobs: restore-keys: | ${{ runner.os }}-${{ runner.arch }}-ccache - - name: Configure Developer Command Prompt for Microsoft Visual C++ (2019) + - name: Configure Developer Command Prompt for Microsoft Visual C++ uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1.13.0 with: - toolset: 14.29 + toolset: 14.40 # v2022 - name: Set SSL_CERT_FILE 
for model downloading for unit tests run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV @@ -229,7 +229,7 @@ jobs: & $pythonExecutablePath -m pip install -r ${{ env.OPENVINO_REPO }}/src/bindings/python/wheel/requirements-dev.txt cmake -DPython3_EXECUTABLE="$pythonExecutablePath" -DOpenVINODeveloperPackage_DIR=${{ env.BUILD_DIR }} -S ${{ env.OPENVINO_REPO }}/src/bindings/python -B "$pyBuildDir" && - cmake --build "$pyBuildDir" --parallel --config ${{ env.CMAKE_BUILD_TYPE }} && + cmake --build "$pyBuildDir" --parallel $ENV:NUMBER_OF_PROCESSORS --target ie_wheel --config ${{ env.CMAKE_BUILD_TYPE }} && cmake --install "$pyBuildDir" --config ${{ env.CMAKE_BUILD_TYPE }} --prefix ${{ env.INSTALL_WHEELS_DIR }} --component python_wheels if ($LASTEXITCODE -ne 0) { Write-Host "Failed to build Python wheels for Python $pyVersion" diff --git a/.github/workflows/job_samples_tests.yml b/.github/workflows/job_samples_tests.yml index 07fc17b797592e..abcbc62c182342 100644 --- a/.github/workflows/job_samples_tests.yml +++ b/.github/workflows/job_samples_tests.yml @@ -84,11 +84,11 @@ jobs: version: '3.11' should-setup-pip-paths: 'false' - - name: Configure Developer Command Prompt for Microsoft Visual C++ (2022) + - name: Configure Developer Command Prompt for Microsoft Visual C++ if: runner.os == 'Windows' uses: ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1.13.0 with: - toolset: 14.40 + toolset: 14.40 # v2022 - name: Build cpp samples run: $INSTALL_DIR/samples/cpp/build_samples.sh -i $INSTALL_DIR -b $BUILD_DIR/cpp_samples diff --git a/.github/workflows/windows_conditional_compilation.yml b/.github/workflows/windows_conditional_compilation.yml index 6ce104ad07fe9f..ba9908558201e6 100644 --- a/.github/workflows/windows_conditional_compilation.yml +++ b/.github/workflows/windows_conditional_compilation.yml @@ -157,7 +157,7 @@ jobs: - name: Configure Developer Command Prompt for Microsoft Visual C++ uses: 
ilammy/msvc-dev-cmd@0b201ec74fa43914dc39ae48a89fd1d8cb592756 # v1.13.0 with: - toolset: 14.40 + toolset: 14.40 # v2022 - name: Set SSL_CERT_FILE for model downloading for unit tests run: echo SSL_CERT_FILE=$(python3 -m certifi) >> $env:GITHUB_ENV diff --git a/.github/workflows/windows_vs2019_debug.yml b/.github/workflows/windows_vs2019_debug.yml index 4fcdc6b58b79d1..c69ea87d1bcbf8 100644 --- a/.github/workflows/windows_vs2019_debug.yml +++ b/.github/workflows/windows_vs2019_debug.yml @@ -1,4 +1,4 @@ -name: Windows (VS 2019, Python 3.11, Debug) +name: Windows (VS 2022, Python 3.11, Debug) on: workflow_dispatch: merge_group: @@ -9,7 +9,7 @@ on: - 'releases/**' concurrency: # github.ref is not unique in post-commit - group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-windows-vs2019-debug + group: ${{ github.event_name == 'push' && github.run_id || github.ref }}-windows-vs2022-debug cancel-in-progress: true permissions: read-all @@ -77,7 +77,7 @@ jobs: with: runner: 'aks-win-4-cores-8gb' affected-components: ${{ needs.smart_ci.outputs.affected_components }} - os: 'windows_2019' + os: 'windows_2022' build-type: 'Debug' timeout-minutes: 60 diff --git a/.github/workflows/windows_vs2019_release.yml b/.github/workflows/windows_vs2019_release.yml index d909c18633795e..0288bef986bbee 100644 --- a/.github/workflows/windows_vs2019_release.yml +++ b/.github/workflows/windows_vs2019_release.yml @@ -1,4 +1,4 @@ -name: Windows (VS 2019, Python 3.11, Release) +name: Windows (VS 2022, Python 3.11, Release) on: workflow_dispatch: pull_request: @@ -520,7 +520,7 @@ jobs: with: runner: 'aks-win-4-cores-8gb' affected-components: ${{ needs.smart_ci.outputs.affected_components }} - os: 'windows_2019' + os: 'windows_2022' build-type: 'Release' timeout-minutes: 50 diff --git a/.github/workflows/workflows_to_track.txt b/.github/workflows/workflows_to_track.txt index ef3bb633ed7737..262465ead6b6e5 100644 --- a/.github/workflows/workflows_to_track.txt +++ 
b/.github/workflows/workflows_to_track.txt @@ -20,8 +20,8 @@ name: Linux Static CC (Ubuntu 22.04, Python 3.11, Clang) name: GitHub Actions Workflows Scans ==> ./check_pr_commits.yml <== name: PR Commits -==> ./windows_vs2019_debug.yml <== -name: Windows (VS 2019, Python 3.11, Debug) +==> ./windows_vs2022_debug.yml <== +name: Windows (VS 2022, Python 3.11, Debug) ==> ./files_size.yml <== name: Files Size ==> ./cleanup_caches.yml <== @@ -69,7 +69,7 @@ name: Webassembly name: Linux (Ubuntu 24.04, Python 3.12) ==> ./assign_issue.yml <== name: Take Issue -==> ./windows_vs2019_release.yml <== -name: Windows (VS 2019, Python 3.11, Release) +==> ./windows_vs2022_release.yml <== +name: Windows (VS 2022, Python 3.11, Release) ==> ./coverity.yml <== name: Coverity (Ubuntu 20.04, Python 3.11) diff --git a/src/bindings/python/wheel/CMakeLists.txt b/src/bindings/python/wheel/CMakeLists.txt index 3f42940627d730..e598487753d0d8 100644 --- a/src/bindings/python/wheel/CMakeLists.txt +++ b/src/bindings/python/wheel/CMakeLists.txt @@ -132,23 +132,27 @@ add_custom_command(OUTPUT ${openvino_wheel_path} WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" COMMENT "Building Python wheel ${openvino_wheel_name}" VERBATIM) +set(ie_wheel_deps ${openvino_wheel_path}) + +if(NOT CMAKE_HOST_WIN32) + set(fdupes_report ${CMAKE_CURRENT_BINARY_DIR}/fdupes_report.txt) + add_custom_command(OUTPUT "${fdupes_report}" + COMMAND ${CMAKE_COMMAND} + -D Python3_EXECUTABLE=${Python3_EXECUTABLE} + -D WORKING_DIRECTORY=${CMAKE_CURRENT_BINARY_DIR} + -D WHEEL_VERSION=${WHEEL_VERSION} + -D PACKAGE_FILE=${openvino_wheel_path} + -D REPORT_FILE=${fdupes_report} + -D CMAKE_SHARED_LIBRARY_SUFFIX=${CMAKE_SHARED_LIBRARY_SUFFIX} + -P "${CMAKE_CURRENT_SOURCE_DIR}/fdupes_check.cmake" + DEPENDS "${openvino_wheel_path}" + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" + COMMENT "Run 'fdupes' checks for wheel ${openvino_wheel_name}" + VERBATIM) + list(APPEND ie_wheel_deps ${fdupes_report}) +endif() -set(fdupes_report 
${CMAKE_CURRENT_BINARY_DIR}/fdupes_report.txt) -add_custom_command(OUTPUT "${fdupes_report}" - COMMAND ${CMAKE_COMMAND} - -D Python3_EXECUTABLE=${Python3_EXECUTABLE} - -D WORKING_DIRECTORY=${CMAKE_CURRENT_BINARY_DIR} - -D WHEEL_VERSION=${WHEEL_VERSION} - -D PACKAGE_FILE=${openvino_wheel_path} - -D REPORT_FILE=${fdupes_report} - -D CMAKE_SHARED_LIBRARY_SUFFIX=${CMAKE_SHARED_LIBRARY_SUFFIX} - -P "${CMAKE_CURRENT_SOURCE_DIR}/fdupes_check.cmake" - DEPENDS "${openvino_wheel_path}" - WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" - COMMENT "Run 'fdupes' checks for wheel ${openvino_wheel_name}" - VERBATIM) - -add_custom_target(ie_wheel ALL DEPENDS ${openvino_wheel_path} ${fdupes_report}) +add_custom_target(ie_wheel ALL DEPENDS ${ie_wheel_deps}) add_custom_command( TARGET ie_wheel diff --git a/src/bindings/python/wheel/fdupes_check.cmake b/src/bindings/python/wheel/fdupes_check.cmake index 9f2a7860b3b769..d7cbec3343d8b4 100644 --- a/src/bindings/python/wheel/fdupes_check.cmake +++ b/src/bindings/python/wheel/fdupes_check.cmake @@ -12,7 +12,13 @@ endforeach() find_program(fdupes_PROGRAM NAMES fdupes DOC "Path to fdupes") if(NOT fdupes_PROGRAM) - message(WARNING "Failed to find 'fdupes' tool, use 'sudo apt-get install fdupes' to install it") + set(fdupes_install_msg "refer to your platform's package manager or install it manually.") + if(CMAKE_HOST_LINUX) + set(fdupes_install_msg "sudo apt-get install fdupes") + elseif(CMAKE_HOST_APPLE) + set(fdupes_install_msg "brew install fdupes") + endif() + message(WARNING "Failed to find 'fdupes' tool. 
Install it using: ${fdupes_install_msg}") return() endif() diff --git a/src/bindings/python/wheel/setup.py b/src/bindings/python/wheel/setup.py index 50f0066313d4cd..620ce30f33dbca 100644 --- a/src/bindings/python/wheel/setup.py +++ b/src/bindings/python/wheel/setup.py @@ -66,9 +66,9 @@ "rpath": LIBS_RPATH, "binary_dir": OPENVINO_BINARY_DIR, }, - "ie_libs_с": { + "ie_libs_c": { "name": "core_c", - "prefix": f"{BUILD_BASE}/libs.core_с", + "prefix": f"{BUILD_BASE}/libs.core_c", "install_dir": OV_RUNTIME_LIBS_DIR, "rpath": LIBS_RPATH, "binary_dir": OPENVINO_BINARY_DIR, From a7ef4f89d62bdaadcedff6725c9195038fdcbb80 Mon Sep 17 00:00:00 2001 From: Vladislav Golubev Date: Wed, 22 Jan 2025 10:54:48 +0100 Subject: [PATCH 82/97] [LPT] Reenable CPU tests (#28464) ### Tickets: - *CVS-134472* --- .../skip_tests_config.cpp | 39 ++----------------- .../shared_test_classes/src/single_op/dft.cpp | 2 - 2 files changed, 4 insertions(+), 37 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 44bf87a9a37b82..4eb4fa819e3224 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -189,44 +189,13 @@ std::vector disabledTestPatterns() { R"(smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[.*,3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ .*18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[6,1,1,1\]_\{ .*1.52806e.*39, .*0.2, .*0.3, .*0.3, .*0.2, .*0.1 \}_\{ 1.52806e.*39, 0.2, 0.3, 0.3, 0.2, 0.1 \}\})", // TODO: 141068 R"(smoke_Snippets_FQDecomposition.*netPRC=f16_D=CPU.*)", - // Issue: 133173 - R"(.*smoke_ScaledAttn_CPU/ScaledAttnLayerCPUTest.CompareWithRefs/netPRC=bf16.*has_scale=0.*)", - 
R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f32_\[1,8,16,16\]_CPU_f32_\[16,16\]_level=256_shape=\[.*\]_input_low=\{ 0 \}_input_high=\{ 25.5 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=__255_\[.*\]_\{ -12.7 \}_\{ 12.7 \}_\{\}.*)", - R"(.*smoke_LPT_4D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f32_\[1,8,16,16\]_CPU_f32_\[16,16\]_level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -12.7 \}_output_high\{ 12.8 \}_precision=.*)", - R"(.*smoke_LPT_3D/ConvolutionBackpropDataTransformation.CompareWithRefImpl/f32_\[1,8,16\]_CPU_f32_\[16\]_.*_input_high=\{ 25.5 \}_.*_precision=__255_\[1,1,1\]_\{ -12.7 \}_\{ 12.7 \}_\{\}.*)", + // Issue: 160737 R"(.*smoke_LPT/ConvolutionQDqTransformation.CompareWithRefImpl/f32_\[(1,3,4,4|4,3,4,4)\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32__.*_f32_\[\]_1_1_undefined__\{, 15\}_f32_\[\]__255_\[1,1,1,1\]_\{ -128 \}_\{ 127 \}__i8___f32__\{ -128 \}_.*_1_1_i8_.*)", - R"(.*smoke_LPT/ConvolutionQDqTransformation.CompareWithRefImpl/f32_\[(1,3,4,4|4,3,4,4)\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32_\{\}__\{ 0.1 \}_f32_\[\]_1_1_undefined__\{, 15\}_f32_\[\]__255_\[1,1,1,1\]_\{ -128 \}_\{ 127 \}__i8_.*)", - R"(.*smoke_LPT/MultiplyTransformation.CompareWithRefImpl/f32_\[1,3,16,16\]_CPU_f32_undefined__on_branch1_0_2.55_0_2.55_on_branch2_-1.28_1.27_-1.28_1.27_1.*)", - R"(.*smoke_LPT/MultiplyTransformation.CompareWithRefImpl/f32_\[1,3,16,16\]_CPU_f32_broadcast1_undefined__on_branch1_-1.28_1.27_-1.28_1.27_on_branch2_0_2.55_0_2.55_0.*)", - R"(.*smoke_LPT/MultiplyTransformation.CompareWithRefImpl/f32_\[1,3,16,16\]_CPU_f32_broadcast2_undefined__on_branch1_0_2.55_0_2.55_on_branch2_-1.27_1.28_-1.27_1.28_0.*)", - 
R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[1\]_\{ -18.7 \}_\{ 18.7 \}\}.*)", - R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[6,1,1,1\].*)", - R"(.*smoke_LPT/RecurrentCellTransformation.CompareWithRefImpl/f32_\[1,2,16\]_CPU_f32FQ_X_level=256_.*_FQ_W_level=255.*)", - R"(.*smoke_LPT/SubtractTransformation.CompareWithRefImpl/f16_\[1,3,16,16\]_CPU_f32.*)", - R"(.*smoke_LPT/FakeQuantizeTransformation.CompareWithRefImpl/f32_\[1,32,72,48\]_CPU_f32_0_level=65536_shape=\[\]_input_low=\{ 0 \}_input_high=\{ 65.535 \}_output_low=\{ 0 \}_output_high=\{ 65.535 \}_precision=.*)", - R"(.*smoke_LPT/FakeQuantizeTransformation.CompareWithRefImpl/f32_\[1,32,72,48\]_CPU_f32_0_level=65536_shape=\[\]_input_low=\{ -32.768 \}_input_high=\{ 32.767 \}_output_low=\{ -32.768 \}_output_high=\{ 32.767 \}_precision=.*)", - R"(.*smoke_LPT/MoveFakeQuantizeTransformation.CompareWithRefImpl/f32_\[(1|4),1,16,16\]_CPU_f32SPLIT:0_OP:_FQ:level=256_shape=\[\]_input_low=\{ (0|-1.28) \}_input_high=\{ (2.55|1.27) \}_output_low=\{ (0|-1.28) \}_output_high=\{ (2.55|255|1.27) \}_precision=_DQ:.*)", - R"(.*smoke_LPT/MoveFakeQuantizeTransformation.CompareWithRefImpl/f32_\[(1|4),1,16,16\]_CPU_f32SPLIT:0_OP:relu_FQ:level=256_shape=\[\]_input_low=\{ 0 \}_input_high=\{ 2.55 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=_DQ:__f32_\{\}__\{ 0.01 \}_undefined_\[\]_0_1_undefined.*)", - R"(.*smoke_LPT/MoveFakeQuantizeTransformation.CompareWithRefImpl/f32_\[(1|4),1,16,16\]_CPU_f32SPLIT:0_OP:relu_FQ:level=256_shape=\[1,6,1,1\]_input_low=\{ 0, 0, 0, 0, 0, 0 \}_input_high=\{ 2.55, 1.275, 0.85, 0.6375, 0.51, 
0.425 \}_output_low=\{ -128, -128, -128, -128, -128, -128 \}_output_high=\{ 127, 127, 127, 127, 127, 127 \}_precision=_DQ:\{\}.*)", - R"(.*smoke_LPT/MoveFakeQuantizeTransformation.CompareWithRefImpl/f32_\[(1|4),1,16,16\]_CPU_f32SPLIT:(0|1)_OP:_FQ:level=256_shape=\[1,6,1,1\]_input_low=\{ 0, 0, 0, 0, 0, 0 \}_input_high=\{ 2.55, 1.275, 0.85, 0.6375, 0.51, 0.425 \}_output_low=\{ 0, 0, 0, 0, 0, 0 \}_output_high=\{ 255, 127.5, 85, 63.75, 51, 42.5 \}_precision=_DQ:__f32_.*)", - R"(.*smoke_LPT/EliminateFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_level=256_shape=\[\]_input_low=\{ 0 \}_input_high=\{ 127.5 \}_output_low=\{ 0 \}_output_high\{ 127.5 \}_precision=f32_level=256_shape=\[\]_input_low=\{ 0 \}_input_high=\{ (127.5|121.429) \}_output_low=\{ 0 \}_output_high\{ (127.5|121.429) \}_precision=f32.*)", - R"(.*smoke_LPT/MatMulWithOptimizedConstantFq.CompareWithRefImpl/f32_\[1,16\]_\[(10|16),(10|16)\]_CPU_level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 25.5 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=_level=255_shape=\[1\]_input_low=\{ -12.7 \}_input_high=\{ 12.7 \}_output_low=\{ -12.7 \}_output_high\{ 12.7 \}_precision=.*)", - R"(.*smoke_LPT/FuseDequantizeToFakeQuantizeTransformation.CompareWithRefImpl/CPU_f32_0_undefined_\[\]_f32__\{\}_\{\}__\{ (0.01|0.01, 0.1, 1) \}_.*)", - R"(.*smoke_LPT/GroupConvolutionTransformation.CompareWithRefImpl/f32_\[1,6,24,24\]_CPU_f32_4D_\[1,6,24,24\]_\[1,24,18,18\]_3_-1_level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 25.5 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=_wo_reshape__255_\[3,8,1,1,1\]_\{ -127 \}_\{ 127 \}.*)", - R"(.*smoke_LPT/GroupConvolutionTransformation.CompareWithRefImpl/f32_\[1,6,24(,24)*\]_CPU_f32_(3D|4D)_\[1,6,24(,24)*\]_\[1,24,18(,18)*\]_3_-1_level=256_shape=\[1,1,1.*\]_input_low=\{ 0 \}_input_high=\{ 25.5 \}_output_low=\{ 0 \}_output_high\{ 25.5 \}_precision=_wo_reshape__255_\[3,8,1,1(,1)*\]_\{ -127, -12.7, -1.27,.*)", - 
R"(.*smoke_LPT/GroupConvolutionTransformation.CompareWithRefImpl/f32_\[1,6,1,24,24\]_CPU_f32_5D_\[1,6,1,24,24\]_\[1,24,1,18,18\]_3_-1_level=256_shape=\[1,1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ -12.8 \}_output_high\{ 12.7 \}_precision=_reshape_on_weights__255_\[1,1,1,1,1\]_\{ -127 \}_\{ 127 \}.*)", - R"(.*smoke_LPT/GroupConvolutionTransformation.CompareWithRefImpl/f32_\[1,24,8,12,12\]_CPU_f32_5D_\[1,24,8,12,12\]_\[1,24,1,1,1\]_3_-1_level=256_shape=\[1,1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ -12.8 \}_output_high\{ 12.7 \}_precision=_reshape_on_weights__255_\[1,1,1,1,1\]_\{ -127 \}_\{ 127 \}.*)", R"(.*smoke_LPT/GroupConvolutionQDqTransformation.CompareWithRefImpl/f32_\[1,6,24,24\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ -12.8 \}_input_high=\{ 12.7 \}_output_low=\{ 0 \}_output_high=\{ 255 \}_precision=f32__u8___f32_.*_undefinedoutput_original_f32_multiplyAfter=(false|true).*)", - R"(.*smoke_LPT/GroupConvolutionQDqTransformation.CompareWithRefImpl/f32_\[1,6,24,24\]_CPU_f32_level=256_.*_precision=f32__u8___f32_\{\}__\{ 0.1 \}.*_f32_\[6,2,5,5\]__255_\[1,1,1,1\]_\{ -128 \}_\{ 127 \}__i8.*undefinedoutput_original_u8_multiplyAfter=(false|true).*)", - R"(.*smoke_LPT/MatMulWithConstantTransformation.CompareWithRefImpl/\[(2,3,4|1,1,3,4)\]_f32_CPU_.*_shape=\[1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0, 0, 0 \}_output_high=\{ 255, 25.5, 255 \}_precision=_level=256_shape=\[1\]_input_low=\{ -128 \}_.*)", - R"(.*smoke_LPT/ReduceSumTransformation.CompareWithRefImpl/f32_\[1,3,10,10\]_CPU_f32_level=256_shape=\[1,1,1,1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ 0 \}_output_high\{ 127 \}_precision=_keepDims__reduce_axis_2_3_.*)", - R"(.*smoke_TestsDFT_3d/DFTLayerTest.Inference/.*TS=.*10.4.20.32.2.*_Precision=bf16.*)", - R"(.*smoke_TestsDFT_3d/DFTLayerTest.Inference/.*TS=.*2.5.7.8.2.*_Precision=bf16.*)", - 
R"(.*smoke_TestsDFT_3d/DFTLayerTest.Inference/.*TS=.*1.120.128.1.2.*_Precision=bf16.*_signal_size=\(\).*)", - R"(.*smoke_TestsDFT_4d/DFTLayerTest.Inference/.*2.5.7.8.2.*Precision=bf16.*signal_size=\(\).*)", + // Issue: 160734 + R"(.*smoke_LPT/ConvolutionTransformation.CompareWithRefImpl/f32_\[(1|4),3,16,16\]_CPU_f32_rank=4D_fq_on_data=\{level=256_shape=\[1\]_input_low=\{ 0 \}_input_high=\{ 255 \}_output_low=\{ -18.7 \}_output_high\{ 18.8 \}_precision=\}_fq_on_weights=\{_255_\[1\]_\{ -18.7 \}_\{ 18.7 \}\}.*)", + // Issue: 160735 R"(.*smoke_TestsDFT_4d/DFTLayerTest.Inference/.*TS=\{\((10.4.20.32.2|1.120.128.1.2)\)\}.*Precision=f32.*signal_size=\(\).*)", - R"(.*smoke_TestsDFT_4d/DFTLayerTest.Inference/.*1.120.128.1.2.*Precision=bf16.*signal_size=\(\).*)", - R"(.*smoke_TestsDFT_2d/DFTLayerTest.Inference/.*TS.*1.120.128.1.2.*Precision=bf16_Axes=\(2.1\)_signal_size=\(\).*)", // by calc abs_threshold with expected value R"(.*smoke_.*_4D.*/GatherLayerTestCPU.CompareWithRefs.*INFERENCE_PRECISION_HINT=bf16.*)", R"(.*smoke.*Mvn6LayerTest.Inference/.*TS.*1.10.5.7.8.*_ModelType=f32.*_Ax=\((2.3.4|-3.-2.-1)\).*)", diff --git a/src/tests/functional/shared_test_classes/src/single_op/dft.cpp b/src/tests/functional/shared_test_classes/src/single_op/dft.cpp index 322d73a08c3172..af7fbd954c7916 100644 --- a/src/tests/functional/shared_test_classes/src/single_op/dft.cpp +++ b/src/tests/functional/shared_test_classes/src/single_op/dft.cpp @@ -102,8 +102,6 @@ void DFTLayerTest::SetUp() { if (model_type == ov::element::f32) { abs_threshold = 8e-5; - } else if (model_type == ov::element::bf16) { - abs_threshold = 5e-7; } } } // namespace test From 52033510a43fd29c6e6ad8422b39efc973b28ca4 Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Wed, 22 Jan 2025 12:55:40 +0200 Subject: [PATCH 83/97] [NPU] Fix High CPU overhead in ZeroHostTensor::data (#28603) ### Details: - *High CPU overhead in ZeroHostTensor::data* ### Tickets: - *CVS-160977* Signed-off-by: Bogdan Pereanu --- 
.../src/backend/include/zero_host_tensor.hpp | 2 +- .../backend/include/zero_remote_tensor.hpp | 2 ++ .../src/backend/src/zero_host_tensor.cpp | 31 ++++++++----------- .../src/backend/src/zero_remote_tensor.cpp | 4 +++ 4 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/plugins/intel_npu/src/backend/include/zero_host_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_host_tensor.hpp index 7150a428dd270a..65b438284dfd9b 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_host_tensor.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_host_tensor.hpp @@ -33,7 +33,7 @@ class ZeroHostTensor : public ov::ITensor { std::shared_ptr get_impl() const; private: - std::shared_ptr m_impl; + std::shared_ptr _impl; }; } // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp index 6217e52c15dae0..60578f3de64ef0 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp @@ -25,6 +25,8 @@ class ZeroRemoteTensor final : public RemoteTensor { ov::intel_npu::MemType mem_type = ov::intel_npu::MemType::L0_INTERNAL_BUF, void* mem = nullptr); + void* get_original_memory() const; + ~ZeroRemoteTensor() override; private: diff --git a/src/plugins/intel_npu/src/backend/src/zero_host_tensor.cpp b/src/plugins/intel_npu/src/backend/src/zero_host_tensor.cpp index 94115bf296b049..3a92a239408a1f 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_host_tensor.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_host_tensor.cpp @@ -13,41 +13,36 @@ ZeroHostTensor::ZeroHostTensor(const std::shared_ptr& contex const ov::element::Type element_type, const ov::Shape& shape, const Config& config) - : m_impl(std::make_shared(context, - init_structs, - element_type, - shape, - config, - ov::intel_npu::TensorType::BINDED, - ov::intel_npu::MemType::L0_INTERNAL_BUF)) {} + 
: _impl(std::make_shared(context, + init_structs, + element_type, + shape, + config, + ov::intel_npu::TensorType::BINDED, + ov::intel_npu::MemType::L0_INTERNAL_BUF)) {} void* ZeroHostTensor::data(const ov::element::Type&) const { - auto itrHandle = m_impl->get_properties().find(ov::intel_npu::mem_handle.name()); - if (itrHandle == m_impl->get_properties().end()) { - OPENVINO_THROW("No parameter ", ov::intel_npu::mem_handle.name(), " found in parameters map"); - } - - return ov::Any(itrHandle->second).as(); + return _impl->get_original_memory(); } const ov::element::Type& ZeroHostTensor::get_element_type() const { - return m_impl->get_element_type(); + return _impl->get_element_type(); } const ov::Shape& ZeroHostTensor::get_shape() const { - return m_impl->get_shape(); + return _impl->get_shape(); } const ov::Strides& ZeroHostTensor::get_strides() const { - return m_impl->get_strides(); + return _impl->get_strides(); } void ZeroHostTensor::set_shape(ov::Shape new_shape) { - m_impl->set_shape(new_shape); + _impl->set_shape(new_shape); } std::shared_ptr ZeroHostTensor::get_impl() const { - return m_impl; + return _impl; } } // namespace intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp b/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp index 6fa12b72567792..c218aa14dd10a1 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp @@ -168,4 +168,8 @@ void ZeroRemoteTensor::update_properties() { } } +void* ZeroRemoteTensor::get_original_memory() const { + return _data; +} + } // namespace intel_npu From 50fa19a390038358a344d3daa1a24a993b9ecd7a Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Wed, 22 Jan 2025 16:41:25 +0400 Subject: [PATCH 84/97] [PT FE] Support aten::bernoulli for SpeechT5 TTS model (#28585) **Details:** Support aten::bernoulli for SpeechT5 TTS model **Ticket:** TBD --------- Signed-off-by: Kazantsev, Roman --- 
.../openvino/frontend/pytorch/ts_decoder.py | 3 + src/frontends/pytorch/src/op/bernoulli.cpp | 82 +++++++++++++++ src/frontends/pytorch/src/op_table.cpp | 2 + .../pytorch_tests/test_bernoulli.py | 99 +++++++++++++++++++ 4 files changed, 186 insertions(+) create mode 100644 src/frontends/pytorch/src/op/bernoulli.cpp create mode 100644 tests/layer_tests/pytorch_tests/test_bernoulli.py diff --git a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py index 13b79b1cb5afc0..a9aec8f8aa98c9 100644 --- a/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py +++ b/src/bindings/python/src/openvino/frontend/pytorch/ts_decoder.py @@ -449,6 +449,9 @@ def as_constant(self): return ivalue_to_constant(pt_value.toIValue(), shared_memory=self._shared_memory) if isinstance(pt_type, torch.ListType): return self._as_constant_list(pt_value) + if isinstance(pt_type, torch._C.Type) and pt_type.annotation_str == "Generator": + gen = pt_value.toIValue() + return ivalue_to_constant(gen.initial_seed(), shared_memory=self._shared_memory) const = ivalue_to_constant( pt_value.toIValue(), shared_memory=self._shared_memory) if len(const) > 0: diff --git a/src/frontends/pytorch/src/op/bernoulli.cpp b/src/frontends/pytorch/src/op/bernoulli.cpp new file mode 100644 index 00000000000000..7a408fce610ec1 --- /dev/null +++ b/src/frontends/pytorch/src/op/bernoulli.cpp @@ -0,0 +1,82 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/pytorch/node_context.hpp" +#include "openvino/op/constant.hpp" +#include "openvino/op/convert_like.hpp" +#include "openvino/op/less.hpp" +#include "openvino/op/random_uniform.hpp" +#include "openvino/op/shape_of.hpp" +#include "utils.hpp" + +namespace ov { +namespace frontend { +namespace pytorch { +namespace op { + +using namespace ov; +using namespace ov::op; + +OutputVector translate_bernoulli(const NodeContext& 
context) { + // supported signatures: + // 1. aten::bernoulli(input, *, Generator? generator=None) -> Tensor + // 2. aten::bernoulli(input, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + // 3. aten::bernoulli(input, float p, *, Generator? generator=None) -> Tensor + num_inputs_check(context, 1, 3); + uint64_t global_seed = 0; + auto input = context.get_input(0); + auto input_type = input.get_element_type().is_static() ? input.get_element_type() : element::f64; + auto input_shape = context.mark_node(std::make_shared(input, element::i32)); + auto probs_threshold = input; + + bool with_p = false; + size_t gen_ind = 1; + if (!context.input_is_none(1)) { + auto p = context.get_input(1); + with_p = p.get_element_type().is_real() ? true : false; + if (with_p) { + // need to override probs thresholds and samples type + input_type = p.get_element_type().is_static() ? p.get_element_type() : element::f64; + gen_ind = 2; + probs_threshold = p; + } + } + + if (!context.input_is_none(gen_ind)) { + // retrieve seed set to Generator + auto gen_const = as_type_ptr(context.get_input(static_cast(gen_ind)).get_node_shared_ptr()); + PYTORCH_OP_CONVERSION_CHECK(gen_const, "aten::bernoulli expects a constant representing a generator seed"); + auto seed = gen_const->cast_vector(); + global_seed = seed.size() > 0 ? 
seed[0] : global_seed; + } + + // generate tensor of input shape with elements sampled from u ~ RandomUniform(0, 1) distribution + // I[u < input] will represent samples of bernoulli distribution with parameter `input` + auto const_zero = context.mark_node(std::make_shared(input_type, Shape{}, 0.0f)); + auto const_one = context.mark_node(std::make_shared(input_type, Shape{}, 1.0f)); + auto ru_samples = context.mark_node(std::make_shared(input_shape, + const_zero, + const_one, + input_type, + global_seed, + 0, + PhiloxAlignment::PYTORCH)); + if (!input.get_element_type().is_static()) { + ru_samples = context.mark_node(std::make_shared(ru_samples, probs_threshold)); + } + auto bernoulli_samples = context.mark_node(std::make_shared(ru_samples, probs_threshold)); + + if (!with_p && !context.input_is_none(2)) { + auto out = context.get_input(2); + bernoulli_samples = context.mark_node(std::make_shared(bernoulli_samples, out)); + context.mutate_input(2, bernoulli_samples); + } else { + bernoulli_samples = context.mark_node(std::make_shared(bernoulli_samples, input)); + } + return {bernoulli_samples}; +}; +} // namespace op +} // namespace pytorch +} // namespace frontend +} // namespace ov diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp index 00e3a55b0bc327..458ad679d8b444 100644 --- a/src/frontends/pytorch/src/op_table.cpp +++ b/src/frontends/pytorch/src/op_table.cpp @@ -48,6 +48,7 @@ OP_CONVERTER(translate_avg_pool2d); OP_CONVERTER(translate_avg_pool3d); OP_CONVERTER(translate_bool); OP_CONVERTER(translate_batch_norm); +OP_CONVERTER(translate_bernoulli); OP_CONVERTER(translate_bitwise_and); OP_CONVERTER(translate_bitwise_not); OP_CONVERTER(translate_bitwise_or); @@ -408,6 +409,7 @@ const std::unordered_map get_supported_ops_ts() { {"aten::avg_pool3d", op::quantizable_op}, {"aten::baddbmm", op::translate_addmm}, {"aten::batch_norm", op::translate_batch_norm}, + {"aten::bernoulli", op::translate_bernoulli}, 
{"aten::bitwise_and", op::translate_bitwise_and}, {"aten::bitwise_not", op::translate_bitwise_not}, {"aten::bitwise_or", op::translate_bitwise_or}, diff --git a/tests/layer_tests/pytorch_tests/test_bernoulli.py b/tests/layer_tests/pytorch_tests/test_bernoulli.py new file mode 100644 index 00000000000000..691ec7ed4da6df --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_bernoulli.py @@ -0,0 +1,99 @@ +# Copyright (C) 2018-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + + +class TestBernoulli(PytorchLayerTest): + def _prepare_input(self, input, input_type, out): + model_inputs = [input.astype(input_type)] + if out: + model_inputs.append(np.zeros_like(input).astype(np.int64)) + return model_inputs + + def create_model(self, out, seed): + class aten_bernoulli(torch.nn.Module): + def __init__(self, out, seed) -> None: + super().__init__() + gen = torch.Generator() + gen.manual_seed(seed) + self.gen = gen + if not out: + self.forward = self.bernoulli + else: + self.forward = self.bernoulli_out + + def bernoulli(self, input): + bernoulli_res = torch.bernoulli(input, generator=self.gen) + return bernoulli_res + + def bernoulli_out(self, input, out): + bernoulli_res = torch.bernoulli(input, generator=self.gen, out=out) + return bernoulli_res + + ref_net = None + + return aten_bernoulli(out, seed), ref_net, "aten::bernoulli" + + @pytest.mark.parametrize("input", [ + np.array([[0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0]]), + np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]), + ]) + @pytest.mark.parametrize("input_type", [np.float32, np.float64]) + @pytest.mark.parametrize("out", [True, False]) + @pytest.mark.parametrize("seed", [1, 50, 1234]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_bernoulli(self, input, input_type, out, seed, ie_device, precision, ir_version): + if input_type == np.float64: + pytest.skip("156027: 
Incorrect specification or reference for RandomUniform for fp64 output type") + self._test(*self.create_model(out, seed), + ie_device, precision, ir_version, + kwargs_to_prepare_input={"input": input, + "input_type": input_type, + "out": out}) + + +class TestBernoulliWithP(PytorchLayerTest): + def _prepare_input(self, input, input_type): + model_inputs = [input.astype(input_type)] + return model_inputs + + def create_model(self, p, seed): + class aten_bernoulli(torch.nn.Module): + def __init__(self, p, seed) -> None: + super().__init__() + gen = torch.Generator() + gen.manual_seed(seed) + self.gen = gen + self.p = p + self.forward = self.bernoulli_with_p + + def bernoulli_with_p(self, input): + bernoulli_res = torch.bernoulli(input, self.p, generator=self.gen) + return bernoulli_res + + ref_net = None + + return aten_bernoulli(p, seed), ref_net, "aten::bernoulli" + + @pytest.mark.parametrize("input", [ + np.array([[0, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0]]), + np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]), + ]) + @pytest.mark.parametrize("input_type", [np.float32, np.int32, np.float64]) + @pytest.mark.parametrize("p", [0.0, 0.4, 1.0]) + @pytest.mark.parametrize("seed", [12]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_bernoulli(self, input, input_type, p, seed, ie_device, precision, ir_version): + if p not in [0.0, 1.0]: + pytest.skip("156027: Incorrect specification or reference for RandomUniform for fp64 output type") + self._test(*self.create_model(p, seed), + ie_device, precision, ir_version, + kwargs_to_prepare_input={"input": input, + "input_type": input_type}) From db41516dd0873e9db387a042412fb8b16fcc691b Mon Sep 17 00:00:00 2001 From: Anastasiia Pnevskaia Date: Wed, 22 Jan 2025 16:00:27 +0100 Subject: [PATCH 85/97] Added telemetry for keras3 (#28613) ### Details: - Added telemetry for keras3 ### Tickets: - CVS-160667 --- tools/ovc/openvino/tools/ovc/__init__.py | 7 ++++++- tools/ovc/openvino/tools/ovc/telemetry_utils.py | 7 
+++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tools/ovc/openvino/tools/ovc/__init__.py b/tools/ovc/openvino/tools/ovc/__init__.py index 25d8b28298f5c9..db0c2e84309598 100644 --- a/tools/ovc/openvino/tools/ovc/__init__.py +++ b/tools/ovc/openvino/tools/ovc/__init__.py @@ -3,7 +3,7 @@ import sys from openvino.tools.ovc.convert import convert_model -from openvino.tools.ovc.telemetry_utils import is_optimum, init_ovc_telemetry, is_torch_compile +from openvino.tools.ovc.telemetry_utils import is_optimum, init_ovc_telemetry, is_torch_compile, is_keras3 import importlib.metadata as importlib_metadata @@ -24,3 +24,8 @@ torch_version = importlib_metadata.version("torch") telemetry = init_ovc_telemetry("torch.compile", torch_version) telemetry.send_event("torch.compile", "import", "Import from torch.compile(), ov_version: {}".format(get_rt_version())) + +if is_keras3() and 'keras' in sys.modules: + keras_version = importlib_metadata.version("keras") + telemetry = init_ovc_telemetry("keras3", keras_version) + telemetry.send_event("keras3", "import", "Import from keras3, ov_version: {}".format(get_rt_version())) diff --git a/tools/ovc/openvino/tools/ovc/telemetry_utils.py b/tools/ovc/openvino/tools/ovc/telemetry_utils.py index a2cc8dab2cf49b..aaa2080cca9c89 100644 --- a/tools/ovc/openvino/tools/ovc/telemetry_utils.py +++ b/tools/ovc/openvino/tools/ovc/telemetry_utils.py @@ -31,6 +31,13 @@ def is_torch_compile(): return True return False +def is_keras3(): + import traceback + for line in traceback.format_stack(): + if os.path.join("keras", "src", "backend", "openvino") in line: + return True + return False + def init_ovc_telemetry(app_name='OVC', app_version=None): app_version = app_version if app_version is not None else get_rt_version() return init_telemetry_class(tid=get_tid(), From 8a7c9742c2758d3806f9d23e019eaa88ef5136b1 Mon Sep 17 00:00:00 2001 From: Evgenya Nugmanova Date: Wed, 22 Jan 2025 19:48:41 +0400 Subject: [PATCH 86/97] 
[GPU][Transformations] Fixed up predicates (#28607) ### Details: - *Fixed up predicates* Signed-off-by: Evgeniia Nugmanova --- .../convert_stridedslices_to_variadicsplit.cpp | 2 +- .../unsqueeze_broadcast_reshape_matmul_fusion.cpp | 10 +++------- .../unsqueeze_broadcast_reshape_sdpa_fusion.cpp | 10 +++------- 3 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp index 500a156be56fcb..07ec61b155ff3f 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/convert_stridedslices_to_variadicsplit.cpp @@ -31,7 +31,7 @@ ConvertStridedSlicesToVariadicSplit::ConvertStridedSlicesToVariadicSplit() { return false; user_count++; } - return (user_count == num_users_to_fuse) && consumers_count(num_users_to_fuse); + return (user_count == num_users_to_fuse) && consumers_count(num_users_to_fuse)(output); }; auto data_m = any_input(); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.cpp index 6a2a9ba627573b..f71ecc23e7fe04 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_matmul_fusion.cpp @@ -26,20 +26,16 @@ UnsqueezeBroadcastReshapeMatmulFusion::UnsqueezeBroadcastReshapeMatmulFusion() { return ov::as_type_ptr(output.get_node_shared_ptr()) == nullptr; }; - auto unsqueeze_predicate = [](const ov::Output& output) -> bool { - return rank_equals(5)(output) && consumers_count(1); - }; + auto unsqueeze_predicate = rank_equals(5) && consumers_count(1); auto broadcast_predicate = [](const ov::Output& output) 
-> bool { const auto broadcast = ov::as_type_ptr(output.get_node_shared_ptr()); if (!broadcast || broadcast->get_broadcast_spec().m_type != ov::op::BroadcastType::BIDIRECTIONAL) return false; - return rank_equals(5)(output) && consumers_count(1); + return rank_equals(5)(output) && consumers_count(1)(output); }; - auto reshape_predicate = [](const ov::Output& output) -> bool { - return rank_equals(4)(output) && consumers_count(1); - }; + auto reshape_predicate = rank_equals(4) && consumers_count(1); auto input_a_m = any_input(not_reshape); auto input_b_m = wrap_type({any_input(), any_input()}); diff --git a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.cpp b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.cpp index 9d5ea8db863556..b99a8e02fef85d 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations/unsqueeze_broadcast_reshape_sdpa_fusion.cpp @@ -23,20 +23,16 @@ using ov::pass::pattern::op::Or; UnsqueezeBroadcastReshapeSDPAFusion::UnsqueezeBroadcastReshapeSDPAFusion() { using namespace ov::pass::pattern; - auto unsqueeze_predicate = [](const ov::Output& output) -> bool { - return rank_equals(5)(output) && consumers_count(1); - }; + auto unsqueeze_predicate = rank_equals(5) && consumers_count(1); auto broadcast_predicate = [](const ov::Output& output) -> bool { const auto broadcast = ov::as_type_ptr(output.get_node_shared_ptr()); if (!broadcast || broadcast->get_broadcast_spec().m_type != ov::op::BroadcastType::BIDIRECTIONAL) return false; - return rank_equals(5)(output) && consumers_count(1); + return rank_equals(5)(output) && consumers_count(1)(output); }; - auto reshape_predicate = [](const ov::Output& output) -> bool { - return rank_equals(4)(output) && consumers_count(1); - }; + auto reshape_predicate = rank_equals(4) && consumers_count(1); auto input_a_m = any_input(); auto 
input_attn_mask = any_input(); From 6eda8cef1ae32cd08bc5926cb35836e0d7aa85f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 11:18:09 +0400 Subject: [PATCH 87/97] Bump codecov/codecov-action from 4.6.0 to 5.2.0 (#28625) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 4.6.0 to 5.2.0.
    Release notes

    Sourced from codecov/codecov-action's releases.

    v5.2.0

    What's Changed

    New Contributors

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.2...v5.2.0

    v5.1.2

    What's Changed

    New Contributors

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.1...v5.1.2

    v5.1.1

    What's Changed

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.0...v5.1.1

    v5.1.0

    What's Changed

    ... (truncated)

    Changelog

    Sourced from codecov/codecov-action's changelog.

    v5.2.0

    What's Changed

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.2..v5.2.0

    v5.1.2

    What's Changed

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.1..v5.1.2

    v5.1.1

    What's Changed

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.1.0..v5.1.1

    v5.1.0

    What's Changed

    Full Changelog: https://github.com/codecov/codecov-action/compare/v5.0.7..v5.1.0

    v5.0.7

    What's Changed

    ... (truncated)

    Commits

    [![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=codecov/codecov-action&package-manager=github_actions&previous-version=4.6.0&new-version=5.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
    Dependabot commands and options
    You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
    Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index fd6a029abfaa67..50ff8d0153bd22 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -130,6 +130,6 @@ jobs: lcov --capture --directory ${{ github.workspace }}/. --output-file coverage.info genhtml coverage.info --output-directory coverage-report - name: Collect coverage - uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4.6.0 + uses: codecov/codecov-action@5a605bd92782ce0810fa3b8acc235c921b497052 # v5.2.0 with: verbose: true From 8dfb9cc73e28c8ad3f3f30f5b0e6210c15cf2af9 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 23 Jan 2025 13:14:41 +0400 Subject: [PATCH 88/97] Fixed C++ RTTI for header only classes (#28624) ### Details: - Regression after https://github.com/openvinotoolkit/openvino/pull/28555 --- .../common/include/openvino/frontend/decoder.hpp | 3 +++ .../include/openvino/frontend/graph_iterator.hpp | 8 ++------ src/frontends/common/src/decoder.cpp | 2 ++ src/frontends/common/src/graph_iterator.cpp | 15 +++++++++++++++ src/frontends/common/src/hash_table.cpp | 2 +- src/frontends/pytorch/src/frontend.cpp | 2 +- 6 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 src/frontends/common/src/graph_iterator.cpp diff --git a/src/frontends/common/include/openvino/frontend/decoder.hpp b/src/frontends/common/include/openvino/frontend/decoder.hpp index ea13a714cb1016..7794cc23b94b04 100644 --- a/src/frontends/common/include/openvino/frontend/decoder.hpp +++ b/src/frontends/common/include/openvino/frontend/decoder.hpp @@ -82,6 +82,9 @@ class FRONTEND_API DecoderBase : public IDecoder { /// \brief Get node name virtual const std::string& get_op_name() const = 0; + + /// \brief Destructor + virtual 
~DecoderBase(); }; } // namespace frontend diff --git a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp index e199ece6d06694..9b05849c78e8a1 100644 --- a/src/frontends/common/include/openvino/frontend/graph_iterator.hpp +++ b/src/frontends/common/include/openvino/frontend/graph_iterator.hpp @@ -45,14 +45,10 @@ class FRONTEND_API GraphIterator : ::ov::RuntimeAttribute { virtual std::vector get_output_names() const = 0; /// \brief Returns a map from internal tensor name to (user-defined) external name for inputs - virtual std::map get_input_names_map() const { - return {}; - } + virtual std::map get_input_names_map() const; /// \brief Returns a map from internal tensor name to (user-defined) external name for outputs - virtual std::map get_output_names_map() const { - return {}; - } + virtual std::map get_output_names_map() const; }; } // namespace tensorflow diff --git a/src/frontends/common/src/decoder.cpp b/src/frontends/common/src/decoder.cpp index f140b9e64302be..f793a3fd2f47e7 100644 --- a/src/frontends/common/src/decoder.cpp +++ b/src/frontends/common/src/decoder.cpp @@ -7,3 +7,5 @@ using namespace ov::frontend; IDecoder::~IDecoder() = default; + +DecoderBase::~DecoderBase() = default; diff --git a/src/frontends/common/src/graph_iterator.cpp b/src/frontends/common/src/graph_iterator.cpp new file mode 100644 index 00000000000000..1a97e35448cf06 --- /dev/null +++ b/src/frontends/common/src/graph_iterator.cpp @@ -0,0 +1,15 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/frontend/graph_iterator.hpp" + +using namespace ov::frontend::tensorflow; + +std::map GraphIterator::get_input_names_map() const { + return {}; +} + +std::map GraphIterator::get_output_names_map() const { + return {}; +} diff --git a/src/frontends/common/src/hash_table.cpp b/src/frontends/common/src/hash_table.cpp index 
f01abd3c9d5bbe..4c8a465e4152a8 100644 --- a/src/frontends/common/src/hash_table.cpp +++ b/src/frontends/common/src/hash_table.cpp @@ -6,4 +6,4 @@ using namespace ov::frontend; -HashTable::~HashTable(){}; +HashTable::~HashTable() = default; diff --git a/src/frontends/pytorch/src/frontend.cpp b/src/frontends/pytorch/src/frontend.cpp index bb69e8fa313130..6debdb8c33311e 100644 --- a/src/frontends/pytorch/src/frontend.cpp +++ b/src/frontends/pytorch/src/frontend.cpp @@ -397,7 +397,7 @@ ov::frontend::InputModel::Ptr FrontEnd::load_impl(const std::vector& va size_t extra_variants_num = variants.size() > 0 && variants[variants.size() - 1].is() ? 1 : 0; FRONT_END_GENERAL_CHECK(variants.size() == 1 + extra_variants_num, "PyTorch Frontend supports exactly one parameter in model representation, got ", - std::to_string(variants.size()), + variants.size(), " instead."); FRONT_END_GENERAL_CHECK(variants[0].is>(), "PyTorch Frontend doesn't support provided model type. Please provide supported model " From 8dde87c3149d7d31fda1f423beadcafcf2941a37 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 23 Jan 2025 15:50:28 +0400 Subject: [PATCH 89/97] Pin triton version (#28627) ### Details: - *item1* - *...* ### Tickets: - *ticket-id* --- tests/requirements_pytorch | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/requirements_pytorch b/tests/requirements_pytorch index 261e8c79c587c7..e186c2ab63ab67 100644 --- a/tests/requirements_pytorch +++ b/tests/requirements_pytorch @@ -20,6 +20,8 @@ pytest-xdist[psutil]==3.6.1 defusedxml==0.7.1 autoawq==0.2.7; platform_system == "Linux" and platform_machine == "x86_64" +# triton is a dependency of autoawq, newer versions lead to TorchFX test failures +triton==3.1.0; platform_system == "Linux" and platform_machine == "x86_64" auto-gptq==0.7.1; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.12" av==13.0.0 basicsr==1.4.2; python_version < "3.12" From b65a3249e0b290f29cff9125d978cb00619e9b22 Mon Sep 
17 00:00:00 2001 From: Ilya Lavrenov Date: Thu, 23 Jan 2025 16:08:32 +0400 Subject: [PATCH 90/97] GPU: fixed debug build on non-Windows (#28629) ### Details: - oneDNN headers incorrectly use `_MSVC_LANG` w/o any checks that platform is Windows ### Tickets: - CVS-161069 --- src/plugins/intel_gpu/thirdparty/CMakeLists.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt index c3ae531e247e5d..475e9213ecf9f0 100644 --- a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt +++ b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt @@ -46,7 +46,8 @@ if(ENABLE_ONEDNN_FOR_GPU) endif() foreach(cmake_var IN ITEMS CMAKE_SYSTEM_NAME CMAKE_SYSTEM_VERSION - CMAKE_SYSTEM_PROCESSOR CMAKE_TOOLCHAIN_FILE) + CMAKE_SYSTEM_PROCESSOR CMAKE_TOOLCHAIN_FILE + CMAKE_VERBOSE_MAKEFILE) if(${cmake_var}) list(APPEND cmake_extra_args "-D${cmake_var}=${${cmake_var}}") endif() @@ -141,8 +142,7 @@ if(ENABLE_ONEDNN_FOR_GPU) EXCLUDE_FROM_ALL ON ) - list(APPEND LIB_INCLUDE_DIRS ${ONEDNN_INSTALL_DIR}/include) - list(APPEND LIB_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/onednn_gpu/src) + set(LIB_INCLUDE_DIRS "${ONEDNN_INSTALL_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/onednn_gpu/src") set(LIB_DEFINITIONS ENABLE_ONEDNN_FOR_GPU DNNL_DLL DNNL_DLL_EXPORTS @@ -158,6 +158,7 @@ if(ENABLE_ONEDNN_FOR_GPU) set_target_properties(onednn_gpu_tgt PROPERTIES INTERFACE_LINK_LIBRARIES $ INTERFACE_INCLUDE_DIRECTORIES "$" + INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${LIB_INCLUDE_DIRS}" INTERFACE_COMPILE_DEFINITIONS "${LIB_DEFINITIONS}" ) add_dependencies(onednn_gpu_tgt onednn_gpu_build) From dc1d9675cbfded2a3b3287081a225992bef6f23e Mon Sep 17 00:00:00 2001 From: Bogdan Pereanu Date: Thu, 23 Jan 2025 13:57:24 +0200 Subject: [PATCH 91/97] [NPU] Get remote tensors info from methods and not through properties to avoid CPU overhead (#28614) ### Details: - *Get remote tensors info from methods and not through properties to 
avoid CPU overhead* ### Tickets: - *CVS-160977* Signed-off-by: Bogdan Pereanu --- .../backend/include/zero_remote_tensor.hpp | 1 + .../src/backend/src/zero_infer_request.cpp | 32 +++++++------------ .../src/backend/src/zero_pipeline.cpp | 6 ++-- .../src/backend/src/zero_remote_tensor.cpp | 4 +++ .../src/backend/src/zero_variable_state.cpp | 4 +-- .../intel_npu/utils/zero/zero_utils.hpp | 11 ------- 6 files changed, 20 insertions(+), 38 deletions(-) diff --git a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp index 60578f3de64ef0..0e8ed4529a94d3 100644 --- a/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp +++ b/src/plugins/intel_npu/src/backend/include/zero_remote_tensor.hpp @@ -26,6 +26,7 @@ class ZeroRemoteTensor final : public RemoteTensor { void* mem = nullptr); void* get_original_memory() const; + ze_context_handle_t get_zero_context_handle() const; ~ZeroRemoteTensor() override; diff --git a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp index 034f69f63e4158..aee73a2b73fa31 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp @@ -264,8 +264,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr( - zeroUtils::extract_object(tensor->get_context()->get_property(), ov::intel_npu::l0_context)); + auto l0_context = tensor->get_zero_context_handle(); if (_initStructs->getContext() != l0_context) { OPENVINO_THROW("Using different context for creating the tensor is not supported"); } @@ -276,7 +275,7 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptrget_properties(), ov::intel_npu::mem_handle); + auto data = tensor->get_original_memory(); OPENVINO_ASSERT(data, "Empty buffer"); OV_ITT_TASK_NEXT(ZERO_SET_REMOTE_TENSOR, "updateCommandList"); @@ -388,7 +387,7 @@ void 
ZeroInferRequest::set_tensors(const ov::Output& port, } else { _logger.debug("ZeroInferRequest::set_tensors - remote tensor is used"); - data = zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + data = remoteTensor->get_original_memory(); get_level_zero_input(foundPort.idx, i) = tensors.at(i)._ptr; } @@ -530,9 +529,7 @@ void ZeroInferRequest::update_states_if_memory_changed() { if (zeroState->zero_tensor_should_be_updated()) { auto remoteTensor = std::dynamic_pointer_cast(zeroState->get_state()._ptr); - void* userBuffer = !remoteTensor ? zeroState->get_state()->data() - : zeroUtils::extract_object(remoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !remoteTensor ? zeroState->get_state()->data() : remoteTensor->get_original_memory(); _pipeline->updateCommandList(_graphInputDescriptors.at(zeroState->get_tensor_index()).idx, userBuffer, @@ -609,10 +606,8 @@ void ZeroInferRequest::infer_async() { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = !userBatchRemoteTensor - ? userTensor.at(i)->data() - : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor ? userTensor.at(i)->data() + : userBatchRemoteTensor->get_original_memory(); if (userBuffer != levelZeroBuffer) { if (userBuffer == nullptr || levelZeroBuffer == nullptr) { @@ -634,10 +629,8 @@ void ZeroInferRequest::infer_async() { for (size_t i = 0; i < userTensor.size(); i++) { auto userBatchRemoteTensor = std::dynamic_pointer_cast(userTensor.at(i)._ptr); - void* userBuffer = !userBatchRemoteTensor - ? userTensor.at(i)->data() - : zeroUtils::extract_object(userBatchRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !userBatchRemoteTensor ? 
userTensor.at(i)->data() + : userBatchRemoteTensor->get_original_memory(); std::memcpy(static_cast(levelZeroBuffer) + (i * userTensor.at(i)->get_byte_size()), userBuffer, @@ -650,9 +643,8 @@ void ZeroInferRequest::infer_async() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor.at(SINGLE_TENSOR)._ptr); - void* userBuffer = !userRemoteTensor ? userTensor.at(SINGLE_TENSOR)->data() - : zeroUtils::extract_object(userRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = + !userRemoteTensor ? userTensor.at(SINGLE_TENSOR)->data() : userRemoteTensor->get_original_memory(); const auto& levelZeroTensor = get_level_zero_input(inputIndex); if (!is_remote_tensor(levelZeroTensor)) { @@ -701,9 +693,7 @@ void ZeroInferRequest::get_result() { } auto userRemoteTensor = std::dynamic_pointer_cast(userTensor._ptr); - void* userBuffer = !userRemoteTensor ? userTensor->data() - : zeroUtils::extract_object(userRemoteTensor->get_properties(), - ov::intel_npu::mem_handle); + void* userBuffer = !userRemoteTensor ? 
userTensor->data() : userRemoteTensor->get_original_memory(); const std::shared_ptr& levelZeroTensor = _levelZeroOutputTensors.at(outputIndex); if (!is_remote_tensor(levelZeroTensor)) { diff --git a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp index a01238a899e0dc..9f55897193aeeb 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_pipeline.cpp @@ -65,7 +65,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(i)->data(); } else { - data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = remote_tensor->get_original_memory(); } graph->set_argument_value(desc.idx, data); @@ -79,7 +79,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = input_tensors.at(io_index).at(0)->data(); } else { - data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = remote_tensor->get_original_memory(); } graph->set_argument_value( @@ -97,7 +97,7 @@ Pipeline::Pipeline(const Config& config, if (remote_tensor == nullptr) { data = output_tensors.at(io_index)->data(); } else { - data = zeroUtils::extract_object(remote_tensor->get_properties(), ov::intel_npu::mem_handle); + data = remote_tensor->get_original_memory(); } graph->set_argument_value( diff --git a/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp b/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp index c218aa14dd10a1..999cfe8114086d 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_remote_tensor.cpp @@ -172,4 +172,8 @@ void* ZeroRemoteTensor::get_original_memory() const { return _data; } +ze_context_handle_t ZeroRemoteTensor::get_zero_context_handle() const { + return _init_structs->getContext(); +} + } // namespace 
intel_npu diff --git a/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp index 19cabfb4246e5d..442ae3fe9b2f03 100644 --- a/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp +++ b/src/plugins/intel_npu/src/backend/src/zero_variable_state.cpp @@ -46,9 +46,7 @@ void ZeroVariableState::set_state(const ov::SoPtr& new_state) { void ZeroVariableState::reset() { auto remoteTensor = std::dynamic_pointer_cast(m_state._ptr); - void* userBuffer = !remoteTensor - ? m_state->data() - : zeroUtils::extract_object(remoteTensor->get_properties(), ov::intel_npu::mem_handle); + void* userBuffer = !remoteTensor ? m_state->data() : remoteTensor->get_original_memory(); std::memset(userBuffer, 0, m_state->get_byte_size()); } diff --git a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp index 0c2367b680851e..e68eb0200a09ce 100644 --- a/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp +++ b/src/plugins/intel_npu/src/utils/include/intel_npu/utils/zero/zero_utils.hpp @@ -277,17 +277,6 @@ static inline std::string getLatestBuildError(ze_graph_dditable_ext_curr_t& _gra } } -template -static inline Type extract_object(const ov::AnyMap& params, const ov::Property& p) { - auto itrHandle = params.find(p.name()); - ov::Any res = nullptr; - if (itrHandle == params.end()) { - OPENVINO_THROW("No parameter ", p.name(), " found in parameters map"); - } - res = itrHandle->second; - return res.as(); -} - static inline bool memory_was_allocated_in_the_same_l0_context(ze_context_handle_t hContext, const void* ptr) { ze_memory_allocation_properties_t desc = {}; desc.stype = ZE_STRUCTURE_TYPE_MEMORY_ALLOCATION_PROPERTIES; From d2ecd45bf5998e413cd44769ed4d8ae6fce1c8a4 Mon Sep 17 00:00:00 2001 From: Eddy Kim Date: Thu, 23 Jan 2025 21:10:21 +0900 Subject: [PATCH 92/97] [GPU] disabling activations 
scaling for LLMs on all platforms (#28632) ### Details: - to resolve accuracy issues, disabled activations scaling for LLMs on all platforms --- src/plugins/intel_gpu/src/plugin/plugin.cpp | 4 +++- src/plugins/intel_gpu/src/runtime/execution_config.cpp | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/plugins/intel_gpu/src/plugin/plugin.cpp b/src/plugins/intel_gpu/src/plugin/plugin.cpp index f98ffd0128bf6a..0d365ef689608f 100644 --- a/src/plugins/intel_gpu/src/plugin/plugin.cpp +++ b/src/plugins/intel_gpu/src/plugin/plugin.cpp @@ -28,6 +28,7 @@ #include "openvino/core/deprecated.hpp" #include "openvino/op/gather.hpp" #include "openvino/op/concat.hpp" +#include "openvino/op/paged_attention.hpp" #include "openvino/pass/manager.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" #include "openvino/pass/pattern/op/or.hpp" @@ -84,7 +85,8 @@ const auto is_llm = [](const std::shared_ptr& model) -> bool { auto kvcache_matcher = std::make_shared(present, "KVCacheMatcher"); for (auto& op : model->get_ordered_ops()) { - if (kvcache_matcher->match(op)) { + if (kvcache_matcher->match(op) || + ov::is_type(op)) { return true; } } diff --git a/src/plugins/intel_gpu/src/runtime/execution_config.cpp b/src/plugins/intel_gpu/src/runtime/execution_config.cpp index 89edba4a69eee1..fde86c92778ab3 100644 --- a/src/plugins/intel_gpu/src/runtime/execution_config.cpp +++ b/src/plugins/intel_gpu/src/runtime/execution_config.cpp @@ -287,7 +287,7 @@ void ExecutionConfig::apply_rt_info(const cldnn::device_info& info, const ov::RT if (!info.supports_immad) { apply_rt_info_property(ov::hint::kv_cache_precision, rt_info); } - if (!info.supports_immad || !is_llm) + if (!is_llm) apply_rt_info_property(ov::hint::activations_scale_factor, rt_info); apply_rt_info_property(ov::hint::dynamic_quantization_group_size, rt_info); } From e0521453128a88e8a662c05ce21edda1a4df3d38 Mon Sep 17 00:00:00 2001 From: Karol Blaszczak Date: Thu, 23 Jan 2025 13:29:34 +0100 Subject: [PATCH 93/97] 
[DOCS] preparing 2025.0 pass 3 (#28566) Co-authored-by: sgolebiewski-intel --- .../about-openvino/key-features.rst | 2 +- .../about-openvino/performance-benchmarks.rst | 6 +- .../generative-ai-performance.rst | 2 +- .../about-openvino/release-notes-openvino.rst | 35 ++++----- .../documentation/openvino-ecosystem.rst | 4 +- .../get-started/configurations.rst | 5 +- .../get-started/install-openvino.rst | 14 ++-- .../install-openvino-genai.rst | 28 +++---- .../openvino-workflow-generative.rst | 4 +- .../inference-with-genai.rst | 4 +- .../inference-with-genai-on-npu.rst | 9 ++- .../npu-device.rst | 10 +-- .../optimize-inference/optimizing-latency.rst | 70 ++++++++++++------ .../model-caching-overview.rst | 52 +++++++------ ...lm-agent-functioncall-qwen-with-output.rst | 2 +- .../llm-agent-react-langchain-with-output.rst | 2 +- ...multilora-image-generation-with-output.rst | 2 +- .../speculative-sampling-with-output.rst | 2 +- .../text-to-image-genai-with-output.rst | 2 +- .../download/GenAI_Quick_Start_Guide.pdf | Bin 3425537 -> 3418766 bytes .../benchmarking_OV_performance-data.xlsx} | Bin .../benchmarking_OV_platform_list.pdf} | Bin ...benchmarking_OV_system_info_detailed.xlsx} | Bin .../benchmarking_genai_platform_list.pdf} | Bin .../openvino-node/interfaces/Tensor.rst | 17 +++++ 25 files changed, 162 insertions(+), 110 deletions(-) rename docs/articles_en/openvino-workflow-generative/{ => inference-with-genai}/inference-with-genai-on-npu.rst (97%) rename docs/sphinx_setup/_static/{benchmarks_files/OV-2024.6-Performance-Data.xlsx => download/benchmarking_OV_performance-data.xlsx} (100%) rename docs/sphinx_setup/_static/{benchmarks_files/OV-2024.6-platform_list.pdf => download/benchmarking_OV_platform_list.pdf} (100%) rename docs/sphinx_setup/_static/{benchmarks_files/OV-2024.6-system-info-detailed.xlsx => download/benchmarking_OV_system_info_detailed.xlsx} (100%) rename docs/sphinx_setup/_static/{benchmarks_files/llm_models_platform_list_.pdf => 
download/benchmarking_genai_platform_list.pdf} (100%) diff --git a/docs/articles_en/about-openvino/key-features.rst b/docs/articles_en/about-openvino/key-features.rst index c751a5bc65d3cf..7e4ffab3cbb2ec 100644 --- a/docs/articles_en/about-openvino/key-features.rst +++ b/docs/articles_en/about-openvino/key-features.rst @@ -14,7 +14,7 @@ Easy Integration OpenVINO optimizations to your PyTorch models directly with a single line of code. | :doc:`GenAI Out Of The Box <../openvino-workflow-generative/inference-with-genai>` -| With the genAI flavor of OpenVINO, you can run generative AI with just a couple lines of code. +| With the OpenVINO GenAI, you can run generative models with just a few lines of code. Check out the GenAI guide for instructions on how to do it. | `Python / C++ / C / NodeJS APIs `__ diff --git a/docs/articles_en/about-openvino/performance-benchmarks.rst b/docs/articles_en/about-openvino/performance-benchmarks.rst index 4262ec6b2b3732..723bc1a96f7e9d 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks.rst @@ -132,21 +132,21 @@ For a listing of all platforms and configurations used for testing, refer to the .. grid-item:: - .. button-link:: ../_static/benchmarks_files/OV-2024.6-platform_list.pdf + .. button-link:: ../_static/downloads/benchmarking_OV_platform_list.pdf :color: primary :outline: :expand: :material-regular:`download;1.5em` Click for Hardware Platforms [PDF] - .. button-link:: ../_static/benchmarks_files/OV-2024.6-system-info-detailed.xlsx + .. button-link:: ../_static/downloads/benchmarking_OV_system_info_detailed.xlsx :color: primary :outline: :expand: :material-regular:`download;1.5em` Click for Configuration Details [XLSX] - .. button-link:: ../_static/benchmarks_files/OV-2024.6-Performance-Data.xlsx + .. 
button-link:: ../_static/downloads/benchmarking_OV_performance-data.xlsx :color: primary :outline: :expand: diff --git a/docs/articles_en/about-openvino/performance-benchmarks/generative-ai-performance.rst b/docs/articles_en/about-openvino/performance-benchmarks/generative-ai-performance.rst index 83581d465df92e..1f111563a4f29a 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/generative-ai-performance.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/generative-ai-performance.rst @@ -56,7 +56,7 @@ The tables below list the key performance indicators for inference on built-in G .. grid-item:: - .. button-link:: https://docs.openvino.ai/2024/_static/benchmarks_files/llm_models_platform_list_.pdf + .. button-link:: https://docs.openvino.ai/2024/_static/download/benchmarking_genai_platform_list.pdf :color: primary :outline: :expand: diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index 739c411dcbe7e5..f898ddaf42ba03 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -27,7 +27,7 @@ What's new +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ * . -* . + @@ -44,9 +44,9 @@ CPU Device Plugin GPU Device Plugin ----------------------------------------------------------------------------------------------- -* . * . + NPU Device Plugin ----------------------------------------------------------------------------------------------- @@ -68,10 +68,6 @@ Other Changes and Known Issues Jupyter Notebooks ----------------------------- -* `Visual-language assistant with GLM-Edge-V and OpenVINO `__ -* `Local AI and OpenVINO `__ -* `Multimodal understanding and generation with Janus and OpenVINO `__ - @@ -119,19 +115,19 @@ Discontinued in 2025 * Runtime components: - * OpenVINO property Affinity API will is no longer available. 
It has been replaced with CPU + * The OpenVINO property of Affinity API will is no longer available. It has been replaced with CPU binding configurations (``ov::hint::enable_cpu_pinning``). * Tools: - * Intel® Streaming SIMD Extensions (Intel® SSE) are currently not enabled in the binary - package by default. They are still supported in the source code form. * The OpenVINO™ Development Tools package (pip install openvino-dev) is no longer available for OpenVINO releases in 2025. - * Model Optimizer is no longer avilable. Consider using the + * Model Optimizer is no longer available. Consider using the :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` instead. For more details, see the `model conversion transition guide `__. + * Intel® Streaming SIMD Extensions (Intel® SSE) are currently not enabled in the binary + package by default. They are still supported in the source code form. Deprecated and to be removed in the future @@ -141,7 +137,7 @@ Deprecated and to be removed in the future standard support. * The openvino-nightly PyPI module will soon be discontinued. End-users should proceed with the Simple PyPI nightly repo instead. More information in - `Release Policy `__. + `Release Policy `__. * “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the future. OpenVINO's dynamic shape models are recommended instead. * MacOS x86 is no longer recommended for use due to the discontinuation of validation. @@ -161,17 +157,13 @@ Legal Information +++++++++++++++++++++++++++++++++++++++++++++ You may not use or facilitate the use of this document in connection with any infringement -or other legal analysis concerning Intel products described herein. - -You agree to grant Intel a non-exclusive, royalty-free license to any patent claim -thereafter drafted which includes subject matter disclosed herein. +or other legal analysis concerning Intel products described herein. 
All information provided +here is subject to change without notice. Contact your Intel representative to obtain the +latest Intel product specifications and roadmaps. No license (express or implied, by estoppel or otherwise) to any intellectual property rights is granted by this document. -All information provided here is subject to change without notice. Contact your Intel -representative to obtain the latest Intel product specifications and roadmaps. - The products described may contain design defects or errors known as errata which may cause the product to deviate from published specifications. Current characterized errata are available on request. @@ -183,10 +175,9 @@ or from the OEM or retailer. No computer system can be absolutely secure. -Intel, Atom, Core, Xeon, OpenVINO, and the Intel logo are trademarks -of Intel Corporation in the U.S. and/or other countries. - -Other names and brands may be claimed as the property of others. +Intel, Atom, Core, Xeon, OpenVINO, and the Intel logo are trademarks of Intel Corporation in +the U.S. and/or other countries. Other names and brands may be claimed as the property of +others. Copyright © 2025, Intel Corporation. All rights reserved. diff --git a/docs/articles_en/documentation/openvino-ecosystem.rst b/docs/articles_en/documentation/openvino-ecosystem.rst index cb62672c032412..fbd4b6e53240a3 100644 --- a/docs/articles_en/documentation/openvino-ecosystem.rst +++ b/docs/articles_en/documentation/openvino-ecosystem.rst @@ -24,7 +24,7 @@ you an overview of a whole ecosystem of tools and solutions under the OpenVINO u | **GenAI** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO™ GenAI Library aims to simplify running inference of generative AI models. Check the LLM-powered Chatbot Jupyter notebook to see how GenAI works. 
@@ -113,7 +113,7 @@ generative AI and vision models directly on your computer or edge device using O | **Tokenizers** | :bdg-link-dark:`Github ` - :bdg-link-success:`User Guide ` + :bdg-link-success:`User Guide ` OpenVINO Tokenizers add text processing operations to OpenVINO. diff --git a/docs/articles_en/get-started/configurations.rst b/docs/articles_en/get-started/configurations.rst index 3e471c33445292..c0e885dd956c78 100644 --- a/docs/articles_en/get-started/configurations.rst +++ b/docs/articles_en/get-started/configurations.rst @@ -32,8 +32,9 @@ potential of OpenVINO™. Check the following list for components used in your w for details. | **OpenVINO GenAI Dependencies** -| OpenVINO GenAI is a flavor of OpenVINO, aiming to simplify running generative - AI models. For information on the dependencies required to use OpenVINO GenAI, see the +| OpenVINO GenAI is a tool based on the OpenVNO Runtime but simplifying the process of + running generative AI models. For information on the dependencies required to use + OpenVINO GenAI, see the :doc:`guide on OpenVINO GenAI Dependencies `. | **Open Computer Vision Library** diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 387a0bf2ab37e3..7616a87d6f3384 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -11,11 +11,11 @@ Install OpenVINO™ 2025.0 :maxdepth: 3 :hidden: + OpenVINO GenAI OpenVINO Runtime on Linux OpenVINO Runtime on Windows OpenVINO Runtime on macOS Create an OpenVINO Yocto Image - OpenVINO GenAI Flavor .. raw:: html @@ -30,13 +30,13 @@ All currently supported versions are: * 2023.3 (LTS) -.. dropdown:: Effortless GenAI integration with OpenVINO GenAI Flavor +.. 
dropdown:: Effortless GenAI integration with OpenVINO GenAI - A new OpenVINO GenAI Flavor streamlines application development by providing - LLM-specific interfaces for easy integration of language models, handling tokenization and - text generation. For installation and usage instructions, proceed to - :doc:`Install OpenVINO GenAI Flavor <../openvino-workflow-generative>` and - :doc:`Run LLMs with OpenVINO GenAI Flavor <../openvino-workflow-generative/inference-with-genai>`. + OpenVINO GenAI streamlines application development by providing LLM-specific interfaces for + easy integration of language models, handling tokenization and text generation. + For installation and usage instructions, check + :doc:`OpenVINO GenAI installation <../openvino-workflow-generative>` and + :doc:`inference with OpenVINO GenAI <../openvino-workflow-generative/inference-with-genai>`. .. dropdown:: Building OpenVINO from Source diff --git a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst index b548353b36977e..026a76f2ee86d7 100644 --- a/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst +++ b/docs/articles_en/get-started/install-openvino/install-openvino-genai.rst @@ -1,24 +1,26 @@ Install OpenVINO™ GenAI ==================================== -OpenVINO GenAI is a new flavor of OpenVINO, aiming to simplify running inference of generative AI models. -It hides the complexity of the generation process and minimizes the amount of code required. -You can now provide a model and input context directly to OpenVINO, which performs tokenization of the -input text, executes the generation loop on the selected device, and returns the generated text. -For a quickstart guide, refer to the :doc:`GenAI API Guide <../../openvino-workflow-generative/inference-with-genai>`. 
- -To see GenAI in action, check the Jupyter notebooks: -`LLM-powered Chatbot `__ and +OpenVINO GenAI is a tool, simplifying generative AI model inference. It is based on the +OpenVINO Runtime, hiding the complexity of the generation process and minimizing the amount of +code required. You provide a model and the input context directly to the tool, while it +performs tokenization of the input text, executes the generation loop on the selected device, +and returns the generated content. For a quickstart guide, refer to the +:doc:`GenAI API Guide <../../openvino-workflow-generative/inference-with-genai>`. + +To see OpenVINO GenAI in action, check these Jupyter notebooks: +`LLM-powered Chatbot `__ +and `LLM Instruction-following pipeline `__. -The OpenVINO GenAI flavor is available for installation via PyPI and Archive distributions. +OpenVINO GenAI is available for installation via PyPI and Archive distributions. A `detailed guide `__ on how to build OpenVINO GenAI is available in the OpenVINO GenAI repository. PyPI Installation ############################### -To install the GenAI flavor of OpenVINO via PyPI, follow the standard :doc:`installation steps `, +To install the GenAI package via PyPI, follow the standard :doc:`installation steps `, but use the *openvino-genai* package instead of *openvino*: .. code-block:: python @@ -28,9 +30,9 @@ but use the *openvino-genai* package instead of *openvino*: Archive Installation ############################### -The OpenVINO GenAI archive package includes the OpenVINO™ Runtime and :doc:`Tokenizers <../../openvino-workflow-generative/ov-tokenizers>`. -To install the GenAI flavor of OpenVINO from an archive file, follow the standard installation steps for your system -but instead of using the vanilla package file, download the one with OpenVINO GenAI: +The OpenVINO GenAI archive package includes the OpenVINO™ Runtime, as well as :doc:`Tokenizers <../../openvino-workflow-generative/ov-tokenizers>`. 
+It installs the same way as the standard OpenVINO Runtime, so follow its installation steps, +just use the OpenVINO GenAI package instead: Linux ++++++++++++++++++++++++++ diff --git a/docs/articles_en/openvino-workflow-generative.rst b/docs/articles_en/openvino-workflow-generative.rst index 14521f118f6dfc..5ac880ace110c3 100644 --- a/docs/articles_en/openvino-workflow-generative.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -96,8 +96,8 @@ The advantages of using OpenVINO for generative model deployment: Proceed to guides on: -* :doc:`OpenVINO GenAI Flavor <./openvino-workflow-generative/inference-with-genai>` +* :doc:`OpenVINO GenAI <./openvino-workflow-generative/inference-with-genai>` * :doc:`Hugging Face and Optimum Intel <./openvino-workflow-generative/inference-with-optimum-intel>` -* `Generative AI with Base OpenVINO `__ +* `Generative AI with Base OpenVINO `__ diff --git a/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst index 1f19c3eed7da8f..7e26f0891f779a 100644 --- a/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst +++ b/docs/articles_en/openvino-workflow-generative/inference-with-genai.rst @@ -2,13 +2,13 @@ Inference with OpenVINO GenAI =============================================================================================== .. meta:: - :description: Learn how to use the OpenVINO GenAI flavor to execute LLM models. + :description: Learn how to use OpenVINO GenAI to execute LLM models. .. 
toctree:: :maxdepth: 1 :hidden: - NPU inference of LLMs + NPU inference of LLMs OpenVINO™ GenAI is a library of pipelines and methods, extending the OpenVINO runtime to work diff --git a/docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst b/docs/articles_en/openvino-workflow-generative/inference-with-genai/inference-with-genai-on-npu.rst similarity index 97% rename from docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst rename to docs/articles_en/openvino-workflow-generative/inference-with-genai/inference-with-genai-on-npu.rst index 8fb6ad27c4232f..540d13894c7d02 100644 --- a/docs/articles_en/openvino-workflow-generative/inference-with-genai-on-npu.rst +++ b/docs/articles_en/openvino-workflow-generative/inference-with-genai/inference-with-genai-on-npu.rst @@ -2,9 +2,10 @@ Inference with OpenVINO GenAI ========================================== .. meta:: - :description: Learn how to use the OpenVINO GenAI flavor to execute LLM models on NPU. + :description: Learn how to use OpenVINO GenAI to execute LLM models on NPU. -This guide will give you extra details on how to utilize NPU with the GenAI flavor. + +This guide will give you extra details on how to utilize NPU with OpenVINO GenAI. :doc:`See the installation guide <../../get-started/install-openvino/install-openvino-genai>` for information on how to start. @@ -24,6 +25,10 @@ Note that for systems based on Intel® Core™ Ultra Processors Series 2, more t may be required to run prompts over 1024 tokens on models exceeding 7B parameters, such as Llama-2-7B, Mistral-0.2-7B, and Qwen-2-7B. +Make sure your model works with NPU. Some models may not be supported, for example, +**the FLUX.1 pipeline is currently not supported by the device**. 
+ + Export an LLM model via Hugging Face Optimum-Intel ################################################## diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst index a3bdbfc7c2b7d1..ed28633f1a9198 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device.rst @@ -22,7 +22,7 @@ for more streamlined resource management. NPU Plugin is now available through all relevant OpenVINO distribution channels. | **Supported Platforms:** -| Host: Intel® Core™ Ultra (former Meteor Lake) +| Host: Intel® Core™ Ultra series | NPU device: NPU 3720 | OS: Ubuntu* 22.04 64-bit (with Linux kernel 6.6+), MS Windows* 11 64-bit (22H2, 23H2) @@ -33,10 +33,10 @@ Follow the instructions below to install the latest NPU drivers: * `Linux driver `__ -The plugin uses the graph extension API exposed by the driver to convert the OpenVINO specific representation -of the model into a proprietary format. The compiler included in the user mode driver (UMD) performs -platform specific optimizations in order to efficiently schedule the execution of network layers and -memory transactions on various NPU hardware submodules. +The plugin uses the graph extension API exposed by the driver to convert the OpenVINO specific +representation of the model into a proprietary format. The compiler included in the user mode +driver (UMD) performs platform specific optimizations in order to efficiently schedule the +execution of network layers and memory transactions on various NPU hardware submodules. 
To use NPU for inference, pass the device name to the ``ov::Core::compile_model()`` method: diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency.rst index 7d6df9166f163e..febba3134cad40 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency.rst @@ -14,34 +14,62 @@ Optimizing for Latency improve throughput without degrading latency. -A significant portion of deep learning use cases involve applications loading a single model and using a single input at a time, which is the of typical "consumer" scenario. -While an application can create more than one request if needed, for example to support :ref:`asynchronous inputs population `, its **inference performance depends on how many requests are being inferenced in parallel** on a device. - -Similarly, when multiple models are served on the same device, it is important whether the models are executed simultaneously or in a chain, for example, in the inference pipeline. -As expected, the easiest way to achieve **low latency is by running only one inference at a time** on one device. Accordingly, any additional concurrency usually results in latency rising fast. - -However, some conventional "root" devices (i.e., CPU or GPU) can be in fact internally composed of several "sub-devices". In many cases, letting OpenVINO leverage the "sub-devices" transparently helps to improve application's throughput (e.g., serve multiple clients simultaneously) without degrading latency. For example, multi-socket CPUs can deliver as many requests at the same minimal latency as there are NUMA nodes in the system. 
Similarly, a multi-tile GPU, which is essentially multiple GPUs in a single package, can deliver a multi-tile scalability with the number of inference requests, while preserving the single-tile latency. - -Typically, human expertise is required to get more "throughput" out of the device, even in the inherently latency-oriented cases. OpenVINO can take this configuration burden via :doc:`high-level performance hints `, the `ov::hint::PerformanceMode::LATENCY `__ specified for the ``ov::hint::performance_mode`` property for the ``compile_model``. +An application that loads a single model and uses a single input at a time is +a widespread use case in deep learning. Surely, more requests can be created if +needed, for example to support :ref:`asynchronous input population `. +However, **the number of parallel requests affects inference performance** +of the application. + +Also, running inference of multiple models on the same device relies on whether the models +are executed simultaneously or in a chain: the more inference tasks at once, the higher the +latency. + +However, devices such as CPUs and GPUs may be composed of several "sub-devices". OpeVINO can +handle them transparently, when serving multiple clients, improving application's throughput +without impacting latency. What is more, multi-socket CPUs can deliver as many requests at the +same minimal latency as there are NUMA nodes in the system. Similarly, a multi-tile GPU, +which is essentially multiple GPUs in a single package, can deliver a multi-tile +scalability with the number of inference requests, while preserving the +single-tile latency. .. note:: - :doc:`OpenVINO performance hints ` is a recommended way for performance configuration, which is both device-agnostic and future-proof. + Balancing throughput and latency by manual configuration requires strong expertise + in this area. 
Instead, you should specify :doc:`performance hints ` + for ``compile_model``, which is a device-agnostic and future-proof option. -**When multiple models are to be used simultaneously**, consider running inference on separate devices for each of them. Finally, when multiple models are executed in parallel on a device, using additional ``ov::hint::model_priority`` may help to define relative priorities of the models. Refer to the documentation on the :doc:`OpenVINO feature support for devices <../../../../about-openvino/compatibility-and-support/supported-devices>` to check if your device supports the feature. +**For running multiple models simultaneously**, consider using separate devices for each of +them. When multiple models are executed in parallel on a device, use ``ov::hint::model_priority`` +to define relative priorities of the models. Note that this feature may not be available for +some devices. **First-Inference Latency and Model Load/Compile Time** -In some cases, model loading and compilation contribute to the "end-to-end" latency more than usual. -For example, when the model is used exactly once, or when it is unloaded and reloaded in a cycle, to free the memory for another inference due to on-device memory limitations. - -Such a "first-inference latency" scenario may pose an additional limitation on the model load\compilation time, as inference accelerators (other than the CPU) usually require a certain level of model compilation upon loading. -The :doc:`model caching ` option is a way to lessen the impact over multiple application runs. If model caching is not possible, for example, it may require write permissions for the application, the CPU offers the fastest model load time almost every time. +First-inference latency is the longest time the application requires to finish inference. +This means it includes the time to load and compile the model, which happens at the first +execution only. 
For some scenarios it may be a significant factor, for example, if the model is +always used just once or is unloaded after each run to free up the memory. + +In such cases the device choice is especially important. The CPU offers the fastest model load +time nearly every time. Other accelerators usually take longer to compile a model but may be +better for inference. In such cases, :doc:`Model caching ` +may reduce latency, as long as there are no additional limitations in write permissions +for the application. + +To improve "first-inference latency", you may choose between mapping the model into memory +(the default option) and reading it (the older solution). While mapping is better in most cases, +sometimes it may increase latency, especially when the model is located on a removable or a +network drive. To switch between the two, specify the +`ov::enable_mmap() <../../../api/ie_python_api/_autosummary/openvino.frontend.FrontEnd.html#openvino.frontend.FrontEnd.load>` +property for the ``ov::Core`` as either ``True`` or ``False``. + +You can also use :doc:`AUTO device selection inference mode <../inference-devices-and-modes/auto-device-selection>` +to deal with first-inference latency. +It starts inference on the CPU, while waiting for the proper accelerator to load +the model. At that point, it shifts to the new device seamlessly. -To improve common "first-inference latency" scenario, model reading was replaced with model mapping (using `mmap`) into a memory. But in some use cases (first of all, if model is located on removable or network drive) mapping may lead to latency increase. To switch mapping to reading, specify ``ov::enable_mmap(false)`` property for the ``ov::Core``. - -Another way of dealing with first-inference latency is using the :doc:`AUTO device selection inference mode <../inference-devices-and-modes/auto-device-selection>`. It starts inference on the CPU, while waiting for the actual accelerator to load the model. 
At that point, it shifts to the new device seamlessly. - -Finally, note that any :doc:`throughput-oriented options ` may significantly increase the model uptime. +.. note:: + Keep in mind that any :doc:`throughput-oriented options ` + may significantly increase inference time. diff --git a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst index b3253f775bdb02..b1b6da190a0192 100644 --- a/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst +++ b/docs/articles_en/openvino-workflow/running-inference/optimize-inference/optimizing-latency/model-caching-overview.rst @@ -9,16 +9,16 @@ Model Caching Overview As described in :doc:`Integrate OpenVINO™ with Your Application <../../integrate-openvino-with-your-application>`, -a common application flow consists of the following steps: +a common workflow consists of the following steps: 1. | **Create a Core object**: | First step to manage available devices and read model objects 2. | **Read the Intermediate Representation**: - | Read an Intermediate Representation file into an object of the `ov::Model `__ + | Read an Intermediate Representation file into the `ov::Model `__ object 3. | **Prepare inputs and outputs**: | If needed, manipulate precision, memory layout, size or color format 4. | **Set configuration**: - | Pass device-specific loading configurations to the device + | Add device-specific loading configurations to the device 5. | **Compile and Load Network to device**: | Use the `ov::Core::compile_model() `__ method with a specific device 6. | **Set input data**: @@ -32,14 +32,15 @@ automatically and reuses it to significantly reduce the model compilation time. .. important:: - Not all devices support the network import/export feature. 
They will perform normally but will not + Not all devices support import/export of models. They will perform normally but will not enable the compilation stage speed-up. -Set "cache_dir" config option to enable model caching +Set configuration options +++++++++++++++++++++++++++++++++++++++++++++++++++++ -To enable model caching, the application must specify a folder to store the cached blobs: +| Use the ``device_name`` option to specify the inference device. +| Specify ``cache_dir`` to enable model caching. .. tab-set:: @@ -58,23 +59,25 @@ To enable model caching, the application must specify a folder to store the cach :fragment: [ov:caching:part0] -With this code, if the device specified by ``device_name`` supports import/export model capability, -a cached blob (the ``.cl_cache`` and ``.blob`` file for GPU and CPU respectively) is automatically +If the specified device supports import/export of models, +a cached blob file: ``.cl_cache`` (GPU) or ``.blob`` (CPU) is automatically created inside the ``/path/to/cache/dir`` folder. -If the device does not support the import/export capability, cache is not created and no error is thrown. +If the device does not support import/export of models, the cache is not +created and no error is thrown. -Note that the first ``compile_model`` operation takes slightly longer, as the cache needs to be created - -the compiled blob is saved into a cache file: +Note that the first ``compile_model`` operation takes slightly more time, +as the cache needs to be created - the compiled blob is saved into a file: .. image:: ../../../../assets/images/caching_enabled.svg -Make it even faster: use compile_model(modelPath) +Use optimized methods +++++++++++++++++++++++++++++++++++++++++++++++++++ -In some cases, applications do not need to customize inputs and outputs every time. Such application always -call ``model = core.read_model(...)``, then ``core.compile_model(model, ..)``, which can be further optimized. 
-For these cases, there is a more convenient API to compile the model in a single call, skipping the read step: +Applications do not always require an initial customization of inputs and +outputs, as they can call ``model = core.read_model(...)``, then ``core.compile_model(model, ..)``, +which can be further optimized. Thus, the model can be compiled conveniently in a single call, +skipping the read step: .. tab-set:: @@ -93,7 +96,7 @@ For these cases, there is a more convenient API to compile the model in a single :fragment: [ov:caching:part1] -With model caching enabled, the total load time is even shorter, if ``read_model`` is optimized as well. +The total load time is even shorter, when model caching is enabled and ``read_model`` is optimized as well. .. tab-set:: @@ -117,8 +120,9 @@ With model caching enabled, the total load time is even shorter, if ``read_model Advanced Examples ++++++++++++++++++++ -Not every device supports the network import/export capability. For those that don't, enabling caching has no effect. -To check in advance if a particular device supports model caching, your application can use the following code: +Enabling model caching has no effect when the specified device does not support +import/export of models. To check in advance if a particular device supports +model caching, use the following code in your application: .. tab-set:: @@ -136,10 +140,12 @@ To check in advance if a particular device supports model caching, your applicat :language: cpp :fragment: [ov:caching:part3] -Set "cache_encryption_callbacks" config option to enable cache encryption +Enable cache encryption +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -If model caching is enabled in the CPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Currently, this property can be set only in ``compile_model``. 
+If model caching is enabled in the CPU Plugin, set the "cache_encryption_callbacks" +config option to encrypt the model while caching it and decrypt it when +loading it from the cache. Currently, this property can be set only in ``compile_model``. .. tab-set:: @@ -157,7 +163,7 @@ If model caching is enabled in the CPU Plugin, the model topology can be encrypt :language: cpp :fragment: [ov:caching:part4] -If model caching is enabled in the GPU Plugin, the model topology can be encrypted while it is saved to the cache and decrypted when it is loaded from the cache. Full encryption only works when the ``CacheMode`` property is set to ``OPTIMIZE_SIZE``. +Full encryption only works when the ``CacheMode`` property is set to ``OPTIMIZE_SIZE``. .. tab-set:: @@ -177,4 +183,6 @@ If model caching is enabled in the GPU Plugin, the model topology can be encrypt .. important:: - Currently, this property is supported only by the CPU and GPU plugins. For other HW plugins, setting this property will not encrypt/decrypt the model topology in cache and will not affect performance. + Currently, encryption is supported only by the CPU and GPU plugins. Enabling this + feature for other HW plugins will not encrypt/decrypt model topology in the + cache and will not affect performance. diff --git a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst index 051e83eff184bb..19b3f849a0f102 100644 --- a/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst +++ b/docs/notebooks/llm-agent-functioncall-qwen-with-output.rst @@ -258,7 +258,7 @@ pipeline. You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. 
code:: ipython3 diff --git a/docs/notebooks/llm-agent-react-langchain-with-output.rst b/docs/notebooks/llm-agent-react-langchain-with-output.rst index 7313d4c454c42a..34c81ef6e11e75 100644 --- a/docs/notebooks/llm-agent-react-langchain-with-output.rst +++ b/docs/notebooks/llm-agent-react-langchain-with-output.rst @@ -438,7 +438,7 @@ information `__. You can get additional inference speed improvement with `Dynamic Quantization of activations and KV-cache quantization on -CPU `__. +CPU `__. These options can be enabled with ``ov_config`` as follows: .. code:: ipython3 diff --git a/docs/notebooks/multilora-image-generation-with-output.rst b/docs/notebooks/multilora-image-generation-with-output.rst index f6445e5a2ec1f2..e2da1edafdd8f6 100644 --- a/docs/notebooks/multilora-image-generation-with-output.rst +++ b/docs/notebooks/multilora-image-generation-with-output.rst @@ -144,7 +144,7 @@ saved on disk before export. For avoiding this, we will use ``export_from_model`` function that accepts initialized model. Additionally, for using model with OpenVINO GenAI, we need to export tokenizers to OpenVINO format using `OpenVINO -Tokenizers `__ +Tokenizers `__ library. In this tutorial we will use `Stable Diffusion diff --git a/docs/notebooks/speculative-sampling-with-output.rst b/docs/notebooks/speculative-sampling-with-output.rst index 8ca9ca5bc7002c..8dd300fa4bbaff 100644 --- a/docs/notebooks/speculative-sampling-with-output.rst +++ b/docs/notebooks/speculative-sampling-with-output.rst @@ -136,7 +136,7 @@ In case, if you want run own models, you should convert them using Optimum `__ library accelerated by OpenVINO integration. More details about model preparation can be found in `OpenVINO LLM inference -guide `__ +guide `__ .. 
code:: ipython3 diff --git a/docs/notebooks/text-to-image-genai-with-output.rst b/docs/notebooks/text-to-image-genai-with-output.rst index a0f0af9ef41538..d43b900d9133db 100644 --- a/docs/notebooks/text-to-image-genai-with-output.rst +++ b/docs/notebooks/text-to-image-genai-with-output.rst @@ -23,7 +23,7 @@ the Hugging Face Transformers library to the OpenVINO™ IR format. For more details, refer to the `Hugging Face Optimum Intel documentation `__. 2. Run inference using the `Text-to-Image Generation -pipeline `__ +pipeline `__ from OpenVINO GenAI. diff --git a/docs/sphinx_setup/_static/download/GenAI_Quick_Start_Guide.pdf b/docs/sphinx_setup/_static/download/GenAI_Quick_Start_Guide.pdf index c5632a7e3f96277412068c8a806854d0d894ee50..2046f7d9427421b62da26a220b35f3b6d96ae786 100644 GIT binary patch delta 2591568 zcmZTvcRbbK|L@J^n#m^j8kLdI-S-NaZ52&zt7Vi*+AgUyWb~GU23lyM(zGfg4K$4u zl~GYr86m&dc^jYa=VpdFDSc8E*_{VoSDXZ1H#6kO zEhon#{d+1-WZlUJh?ObrNH(0ZMRr;;j^w)23ZM`fITUja$i|*lMQt{8C-N(F+gyR& zeinlvG?LWNC^xzz>7O(Ul{bMNeM8I%IXxd=RTr6`asC*??kHyY%)GpcA#kB%d`^9+n$A)Ls*Gm}yDZDmJN zd{zNH`vrdKj(m5g_Jw*fJS8j$*A*+=x7rNdf~hfjq)>+ zMiF!998&#)Jqkg}iYQ>EihxYG;7A6Lj-+m?BFc@2$^29yD!;BQBG0AHMlQ!yEXna{ zifB2VE>GI0O^CBqb|9~%DU!AC_GC;9g%whDCl0gGYZL2kw^N5G8Uxm zMMX5{G7f4fwe~$9Z<+f`V?~JB?WY4E$rrgNk};rS$0VgS)YQrO_xmnvp5?m z-;>rvDHG_LWY}fZfyFhKi&2{)?ACb29(^FdTs~KUt^#b(c2yAtzX7`RYi20mG$<_S z8fXJ6xEC5Om!zkl&^cg)_zW{tu0x-K92hd{5^O%od~y6Mdb8b3W&jjNyJ15 zv#qz(u*W!&Pj3xc;`1$2#JVFrg%FaN5dG_Ko5s;V4{vTe91j6^S$0PeS;)!Ak(=*K zAm`>n`asGHK!x3ROb2{r&0R-S-VCnWa@PT|t^%W5?iEq$A@|%@K&MuM3d#FsI4Yb- z$sF)7X<+?ejw0oJKrUtskh_%waS;ens+jA5@*gTYkt8&zq@$HbY1@fBl{e^wH}cHl z(p4PDG5HRpd$bdomT!+nna~wcMhrNcc>x5R1?=irFkwJpnFTN}RUs}<*%EP7Wi-M2 zG)V6U>Ztq`m^J5t0_B1)A51{{OMuGUoyG#v{vjCK4)`c}sDN@p7;>myO-7sSe8|L} ztbj(XrJIw^k3flrG79ACN8_m+Qt@aG`u7HySX}6U%Dd=lsO^aKMDkss11QxADa(Qh zb`(uOqeRM6$uC9n=zcX=ChH!g0QF*ZG-?b3Qa~|yAX0{qhvZ1gm6+jpa3pt>Kw!v% zdHYM8Nc|#5f`>AzA$x|-fLS*lt0TK*gafHis)+h#Kv0h@HA8k0F#TAm18M(w8aeuj 
zJi#H2p74+hDXl`<-IFH+vW&=6Plk0vj(cj42SK4v!LcH6a$?G8k#C=ZD_0vikQUDr z(bG@h#Z#Y|qVhH{_04CH0Iq>7R?p>0g~#%w>vL0__MFJP=TtyBA=iFs4T3X}R5zCy zAl6w(6{Tg0s5YFCBkRj149s^aw?}R7LEo>-i{V63Kmpqb5&5A)5&6x6c^ws?v>_7{ zLR9b#M5i+9u@~w{KUrD}x{MP%d~#Q`@y|HcfpNq}(bZ)YR- z7C4a--%@TYPiDO}CEKbTNxjNBr2YvJ!9|%;3{6yV5mLd6O3JcMq-hn->0oaH++cB) z{=o5ewQA6<3#$YuLq%m0$*+dw@K48yyj!h6npMh^71aW?;;E5{w5^$g0@lEx8Y9Wj zK$#W{P4ah*Dp~c(iFAHPo#Aq%=er41)(hZ6^q0R=L<)usd9wMPDfVtB2`-zgxTFpY z+J`{W2&+{iMT8k2N~$W-pZLE znd%2UQNw4nUS5r>Ks z8UG2aoux9B)cq`v%=AD^-e-t;93^KyL;SkJX5$+aP=o|SfwXU+j=3r1wFW!nA_;*W zeL-4?d2|6$W;8;IqVn|&IDKz7IwMvf!;-XV8b~c<$W1)ez6Nww*))Ot_sfx-+FVSX z8OWs{685?;W|*ldh%f}h|LT`kCuv{xQ28YgaM@Rg-3@U2`ALJVI=@c9r>g{4IABI9 zF~@#WLlOFr0Ka{6pybAk$;xPu2V2aLi$A2yt`-H<8bHXw0Ci_DY*nK&^zK$g(z!;S zjBPa~e>6Ce65kytHRCITCK>Yzvcq>!CBCmH{7^)fX`rTYKOlhNY6rB~CNS$F{Knx$ z>K6(~%{E8UHp78D*;b5_4ucBQe!>yEiXo3Gaists{ zIFOm`U`R*i6tr2Eu0ilf`zpw@zhJr?oFqBF6iDYddGgh-38*X{ZW#{0A>?t+FZw;O zjGX<)9X;Oy?0p?-DE~NP3R&A>H*gX3?1YRpc+?4DmO(UZ ziz-e*sH=6Uq4%|r3H-Vo@C{0W$DwGEGTjqU#v2Izpl%1Ge+3%7-HNE?52!`$uPLfK z0czplRINpx_&Xl;3t)y!k0N54(dA&^=SL3(zjYK zINnTQdJsGK0+xJ!^jjRXOCBH9tvYtc19uKF5q+kFy* zDwQBnwFE)k0g<~TY(d?Nw@Jc^q0}tF6QC3XiJ(A*sfpBRunt=p`O*l;tPV^^vi_?E zqDui=lt*ln0uHckQJ)luv}Ynnq9#p1q6jonKp}pR(?g|UDZVo!Ivw_;d>!-+_hZDT z6NxDKDgjm$Q8)MTGQdj}S6N-dN`uNA-1J(aJk5Gpuy)4_6S6rfgZMKXcbF<7+QoD za-S$8rz#xm74QeCb0L&9h(ZY-5A7}^w2`$MA%i+EFjUZO_@5+~FGL~M##$)rDWQT~ zG*o1fAe907Of`~`SiM3nGSC5`^mGV0rieY*Iiy$)Ymfpoka8yo$pVhx zn@~mtozfxbdMpdzdSH=2fN26r?htJ_yK|ot+MomhYI_FyRZ%5W6#g@W01iSe!W48f zNCFy2Nt-Z0<#xuh3d3~xA`a41C$toXO~QwJdUU#x0g4Y&9)*d%%OYcKLN(4*MivD;R*_K~6nBN( z>J8r5;pKQDn5;usNb*D?G^$iZO)2(naK_Nf8?%hw3tb^d)@vzZC-PFi)i!h?|R|I-P>osS%rT6TYuN6thhVF z`C8Mh)wYHoE}&2MXI}pt@NQLKYFFpK8}}~1>8MZbeEThM=gFk9OZV=1_ub0r-nrtv znEap{IWe{NZGC;`!eisU<;6X`!DvEJ*KBiESoF_MU7u@o;&tDi&GDyOGsG7sD^DeRijVEoWBHYxE9@3`LSCY|1M-|0;*!9MMz zv~5*UiP*uYOE$rejM)`7`uW=z-5l*K6K`*sQ&stn7cS_=-Z4l#`&6;w?$Vedg~(b( 
zS7!C+LWL{7jR)F98)TwB9Y_nlT(MsvYSsZ$>p3j%rUyGl9%vEqt}YLjyL)+iZCq)?KoH9JRaCzx+G(MCr{O3$g9?joGsup1R~csa4X3|NdmX z>#N)JneNL%dhd{0dzej1v|fw({@%KkDbYz1Wrntb+UQNT-j`*9{g#V@x(l}6dT>~K zvs;PMo5l6P0V5meJqP?%JP-J!2H1Mv46yxNE{INg<38Tj@aU!3q+`~oJ#4GbiKOU) zV7F4GH_74^(@TFA8Xt3X9Ns|heh;Ow!zOJt9{OG zzh)gHFUx4WP^y`gyK-?s@@dbNCEGyiV}pMuji`ZW|+YkPk#-cS}Dqvu5fsv45; zzj^y2#Jz4a9H$0n3{Kgrx6`(g{o{u4-CgE#dR2Wl$-I1MYEXvJ=&DSAS#NLpw+k`M z7LTJX?n=Ji^tbmnSuHpVKPLKl(|5Uj3QGGo`c~MlUx!Y?l2yD7;mhH_-TbUDzE@tG ztoG$!i-I!5H#da;-8I1~=tx*ZjlnAMm70{yTUlWiOREZcR!y+Ft#PHMAflzxe-x)6 z@|l6QAJ}v81Y5Unt%|`*y9=C+ZQX>YdJ4e5FP-WQ0LD{;+QayjkCoPbruS?xwq2LI z#rD-FtL}o)EuX`=?Ov-qK<~G0n?KWc?KZZJ{{jw{v@$rLc2~FkuRg`#o#O5S%a#WE z+Z#ylM#U4QvZ!A7*5B=3c~kdB^w!<&Y!7SP)m>oYFcD~c1(&eO>2)_(5_W0tO)@L| zp&QoguB7))>2;yLZ6%iu6JON@uQQkKF4*&|E;wL$d)R5{?0Fs#mSi6sFjrWjv?{8% zE_eIwg0kk|bvZ?K#hewq4p(4jT6e)x-G<_S%cMCFDXU}kTD;Eo)&&Q}>Meii8eCr4 zOi}9#h)qh`b-QoEWGNNhrw8ZywtR?Nop|l1YfRVesoPyXiROMtGP-keV*UL3vVLWb z`(BdhRnrlPhOZlJu0ZfVx!4k2+o{Y(vSSE+WNk?3p^$L|4N17j=n?{yX-qIt-8CZ` znllE5DohB5gn)(E;|X1qJ_as8^T!hmL?27!!p)puz_lm_hT<>^M48QpC|mq2CBt?6t@=8GLF4kFwJDy?q;YYdM=+C# zCXOehkm`7lV=ivBn?P&hcw!tX&|v7nSHPydWSWzEj9&^0q_$pBy2;aa4bQXH^3&;oPPxOiGF5iY9ef@xe9 z-r@&t!CI7J3n3sB>BoJS2FqE(UIw@&!m~2N!ASXB6&iZr&t#zJX);m-9~A^Kb>Ynl zObx2SJ781OU?m6$ElJF{>sBU%CgNfm;59Ue5)N{qOn_%W)PV``QXF@ZuowFI18)i< zP*$eFC0ier|Bwb}gf7Y%>}JT&aKMA}(;yz)7A+QT(3j$IXJFLM^Oc{bYU?cWi0ZtRpl3)o@01r%^QNW-f5uL|$pEq5F0D%gy@6K2mMA07h=$)Du&Z0*UZNiRGMf#}P%9i19B3&!VCGoRqHN6O z;!&`1oRGk)H^FN7*bEjB`ZyWc{{jIlg?M%WgGm$O{fQe4CK7)JhR+CKTm6X;#18}n z6N(;nu#Co`h=X+{Al=k&!P4bZoDtt4Ke8S%q+kPk1p2^xgMpb6@Qp%DBy^?O9N3CK zkeKkM9*-Hil2{aM-?%xTeQ+;m(%3jJxNK!os@$}d$&_Y8v~dUWDh(1L%mWjn{B}wz zY-1Y44Y(=fQdHLn{uEORv<1*L5c1$gunik@A@MU93=z&}G&oIoIAeoi9y6qn!vP2`{d;iMQAd~3UZxZ< z=>bZBNag{PHgH0^&X>^#gB5IH>e7JU!4L-b=20Px%1VJ!#Z~YQ7UoSk5)`Z-2I2yr z_LwWkU_W=_#XK1HSRezA#i!0r3CN;9Y=B=jd=yQF^Yz0MAjO>w+k+2*X21l<-M|Es z!Z9q(finu8HR^3ad@kO9A;lU_nD8vp98Za*twLJw5I_zdLq&OT(bez%y 
z#+c9~z@7uC2-BewG6#-59tvF~gS~=>gIE)lM=HzS%5_v(C518zjm8k7xCrJ&tlrEO zGR6|%ZWX2`@kBdg%T7z9zCFo(DNDa@bm7H9BP;WPeB_=C$s=7Az%U%*u*U;Y5|+Gpyi$hEb5!n zHK8;Bk5#J9gc@pDqvD8soM9BdWuhO>z};jEP>=L%P!E0^MS-(n6hHr=zq4T!msODR z92lkgLgql_p>#Uh6-2t9CWqLru!vH4k}Hr@+6aVJ@#rE6z5sbB!TY1@a}6|a z9;~C7PoD?tpjeFZ=V9K*fdwYc#}9rv@RZ3%72yQl3>OK}$N8`b9v7X^qy;d8TAsTA zOGg8*dc6SFZ#)IhYHci$x$Vyl6Y2C5u5Bz0R<-;tI%nU@|Jql9II88SNyy)w%|1jTc5a6R|<4Rs~Ka^P^}MxYn9 z99U-(OgVIZIn2g|IJm7-sxe&w+pqKo)g-I{8W!C;j!1VUwDQ-&!gDKuVR$@uL?)}C z4@7f9S8)s51aaM=g&&&H9e3Co3gD>R9mr8j?L1)5qc5QPw#Wne%9^2&BA{NjAs@iY zHfr|(_9)BE@EkO}yC;!}%9q2&3%y|DAJIU%$_q9DCxRgGhCW=_L!Z5&uV0c0HQh@p z0-h?0Tz zBiF+MhBx*%a??w7V7Q`R13g#|oCeGX7DoGE63MWM%q2nyipCLuZu@{zM=8oEqaW7F z9AxYZL?9m-qVhPn?qBi6x=D~ng}#uJDBW=Vuv%Y3z~1s35FJQi6bEYX14H8ncx2;0 zz^#)6yc+&o4P({*u%1dn(>A~uF7KnK(E9H&TQK16>o$Vy7=?n~Y=nuF8kYsYM4Vpg z0)Tr8ve6mvzYpO85m|o{Bp&c?aucD3U*^%*O@p=}HUm*AzBg{hN;ZaC@&Vk3lZ*tc z&itp2@}X9P#%{sRswm@tc5T6XnZV3rTVQ7XYKBu>AkYI?2ht2A%t^by4(LE2@JOje zVJoNwrxp~m6`02W6r>phqg0wd6$Hd6C%zE`jQ?nGL}RwWAeA7~w!s3WLinG>cIboD zCW}`45Y}2kmgVpVE}t{}c?&3k3y?;(;~2(HZMg#zhjKe|*#Vs5vLDLDeY(rRnO@^Q zoKTR#L=bT4PGSOeLY3@z?@Ba{(j%tUi`!2-(VSMLIkfV#C4dc6x|!dL;cbT>%u z{~mxjyJ0!j?FZcXf4y0GIrN(ebvk$__GxTJx|J;0xS5d@O_Be+q2-2>ZEX5SbL zl9s(Rc0j*_p^u8eonoNYvK#W#F)=s_hIqtJNs5~6g8j!96p9;#pa5nBc2%d|F-yMV%k-lKQ zUk8Z^$I3v)f$-RV2xL5}56Wvi1Qc)`3;7<#8$rbt{X7hP)Rko05g@QTcqrkYKLYYo zS*8yUPzhfxf-pz=8ODIGx7z@ z2L(}$gk`v9hT@?$Tq{G&D45!3021(`hWzPV6!;UR8P z7Tv}zeK(+Kb8IL@6u`H~L->XA;)y9}28j;^?D8q8ptm3rILtT;1JCim>}<%~pGnvj z46TgbCBT(;K`iV;-5M%m|Ci4d$3a+phY37Rn2~J_;1?Hx;@{&$2KxN~SZIxdO({KZ zMwl&p3G9RxuGynK@mQOHwL`J-upO?z0-wreaDt6a07syBI+OrB?XhLb!B8kphS>?g zF6CPzap9E8`G$$GJ|D|=<_389Mf?+Ay^z)k*b66&(&XD^c2jdHWQtOIn=c(_cSII2g%_ya9zH)!;ZzXl^nnkjgC{H$g#PlVJeD+Yv2Efyq>LlE{XE@RT9pKH!0?i-Af< z3QUIBasmycRDg2q-5~akoq?Hv7<5Em&cOB*j~mVcMVuU9<|aT(;RX1zGv*wjMrmQk zIm~GyFw}pJu*ZlW6m}lEsMITa0agPL%n?OhfIceqs-@ylPq?Eb;MRW;uA2sm%}9oe z0yIztzQCBIgYJ^R23OO;WWl-My^qp~@#yt+VhTWEh;tff=EXtBzjG0!%XJ6o%Aso@ 
zliOaRT+4~DM4?R#4frQ>1=zd&5(tWs1!%_QLC1LxjsGc|TOwr(h%}!oV0HJCFtO?i zj8mbq5=yQ(OzjCvvf>@4S>X;eRjv)D3ZHAh2bH)(uK^#_mG#Fp*oG<-7Z8^4aAAK5 zR(N>A|ITCpD^!N)&VU_&DFrm~9uO60g2WiahmL0g6OMtPNm>^C4=w`=Xw_p#28Xj? z1zet-(5UO+3+&l2TCkp>iKWGE{qQ=Wf*f;+sYvGrjPG~|eq4GVzIjvd&yj_2?Y1O; zUIh^VSgL{M-vm~M?js^}=_WAXdYcR3u7JRP^DS|?oNLcd8m@|+vZUeK_9qY;M zL9yW8gzo|C?tuLGIELLY*aZHmPXb!ByRaz&DbVt}uwvl~xT9MN26Wqf7gz>?r=WHB zaG2nax7K@L7>uw&wEF|Rfp2aGzdwwBQd@W5hd6BgDu?V}K;#(bzy{Qb9-IS9zi+|D z$bq>OImz7ru_-!~3v%Ne4UZH6YDL+(@Evbf1Xfba1Lo{YFmu$c2&Lx%2@0V>Sy3w= z#=T2{!Bfzp0%%MDbWzv%v;x=|UwVLUW-8EqQvf^-}CAsDurL>VX~sJPPY2GqjL?CzNmT|I~*| z;X?jU!5iLzpsa0;!XK(IL_;_Op>S{*_!uB2f=A1NT03iSL!oG>1TBK-#btHClBeIH-yg_p8TI0 zhLr!G#fMlQwjF=SPq;&75lR4WmjMv7fRB9%D}Oli{6|_el!AsmW@t0YK6sUYH8_V# z6SzmjvP8Wv0n7u>(m5{yp~7bmMn5dVBU;9M-k<{yujUL97GTjSUw{zC%hAMFgsl27 z4H0+Hq5?J-S-m3kG}#ucp%gVtg~t-1h*yNx0wH^lz2PCzP=Xy6;XlOMyW!G%;R$25r7k>$y zwdhE3vA1nbb5@@J#Jr7B^RxVNZ>+l8zpD4!Cyt>{MDU#2#M&JIfyuu2<*rT6oFn0{ zzp{5|6&DuBG$I6osCRj{NMJ1LdARj^f)RU#^pBG8=v7Tblkp=Q*v9{Y;9#RgSuvWTsWR zXsJ(hrU74`Q{Zy@`603{`yb>A9%<`5sE)20wf2m`)5=jh zU+2FotHeKi=ISb>OTSFicxCDM>onc5wEQ1gGxtzumfkzn%{>Lx33S^6-s%=H{q59$ zs=?6n_t-Ss%4^Nx&yDW%9c+o>PWkQV|7JwEvwiUIH6^bUSGUkRn;rxQh%H*^zP=u| z>e*Gn0a2!B^pM5ACs5&an259=OSZ~{Pe_mD8Om+Cxv_QV^s-n93>f6sRrS02Q-<9^#;OsJo zqTqnsi_P?&)wZ{DdG!WoFE~Pv`Qi$t_fLY$FUsvMsjIG#a*#fnZeVcsi@1N&RH3Z4 z`IM5%s0)gmf~e}Sx3a;36E9vGT^H@D#_2dK27wZTVb~4mIMJZTAS9x1T~BC4Uvrmy zLUXE|*krBaq-sYat-IG7eRUr2vm+0r*?e1fuxO{h<7m^8%IL@rFe6l0yc~C0?~PDg zKWm>e%jri=)IqxeLi)JBoq= zCRRFGEl?F?pIxn_U1R@5r^YpXZ$xVv&C#gRHLq<_$JxzF+C8|huX&xkHs~xbQ_nvK zG8<7ozNU!{SFk|ZPb<0=}#AzO+MNhU{r0Od9-!ii7@9Ij=#z) zN1krbsi|jYGp(obvcuM976p~Nds(?n1YWi{`C4_|udE*Ve2PwMd9_a%^JaMCnVWho zi+yd~PAu!O|8b?SZe>NaQdCNxdyZc#{o6Je$XnJ^u(P~cX{mo$l2y~`H^Jo|Yi+lW z{MJGjKigJNcB<*(^WcmPfgNGH=B%~7y=`m9+2Y-xkf^A>x_=+4lupZA_0;FqC{;JN zOw;oRzxkc14oIWgBX3}O^PQ;Rb*DNy!n`W!ws$q3t>0*SJ8oA`!KUx9`P_}R@3!?6 zaA)cDm7naWPuI*}nR=(?d+P6qGxT(un!_7?GxV&>Mph`j-d6x_tpm>G 
znW1;`NI`JhNmixO^TJVm?k8Hqi&=jQf~%q?Pu}nRJ8aJ_y?64`J@z_beeTZ~E%Ymq z9brvz?7t~@w9e|?kL;`4sn<&HnOv>(-O7uaz5BFoE4^&`*}l3RUN@3{*h-f7H=1rF z_D^hl`r%Dg&Z=uKzX-$&*ZsRYr|Zn~dR||r|CuMl=+@vaU4C5`4ofz57)F%7jhLNONVoq~Zf zriUF=^DNnj%q-L}arqqPEy55FXry3p2^2b^>nq?7a2Hj( zhSDwC6ayuB0aR-<)j$IGk5JYJsDUngqUTYH)(03A1sURkdMU686p5)qwj>8i-v!4& z^}q_ih$jPHhmTwJ#en>Y^C0rViCG$_Re8Gs24A%VTY+5jN|Pyn=`P7SUAmZ>1| z3hXaX!3JD~2<5eazW~Yy_yWsNC?6PvA~Wa>Q^xj^;;@9!1d9N$1?Yhiv5WAa4~z&d z0=vQbMu2hQ-6-@HAbt=Fa0M9f0|*4rJ_h^%Q_w;I^?(im7YB@jBtU*pMJNCwqDMke z8}`NsFThlwio>9K?8q#r>|?+PI2guXvGw8JXm9}A1?XWD(}a|e*uoe9;DG>U0Im$1 zfn_OZP(cQGG6x_KzzV=mC~TRBB|vdFJbe+OivM;1*?_k*3daLz1dtrN1=QRj+8|mb zA*PT^7RGyE_2F0qGz_#L9WbN-G9-jo8`wnx08Gk7U?7M}veS=i7w090T>3*r)2 z(}ffi1%roZ@CHGJkPt!v!Vol*i6aS+0Lt($HJ3sv0B#7D15$woxFJY8Jvc%zNkA%q zqV)kWvI#=>h#zoxAr+?-mM0njXHq)YApsgwe*#nwxFJ|UMK^|XfnPC^ zT`NS+v75lUCEkL<9AO!-uVB~^u!-R)@Ld2X6AIb~)PA0XC~w=&q@iShUw{L1QQkMu z4;Ov-26|z$1aU1mfx;ij=tBbmV2Fn7p+y6K1%V)%aXKPSgWPQJK9Cj=DDY5I2Tm`+ zhX<2ZjIRN-5PY_T0$PXC7q&LM1R(+jDWnj*tAgL5wjUtExmIxBQEiaw3tK_^5M3e) zd4eqh&)5#2Rv~t&0UQx-VZaea3yCNj!x@4nQqUvtEfEDWq4D)a11Xlq!7Yql!x%SL zA!H~vq=4ujSWGZ##t)2K0UwXK2k9teB_z}rFh&C)2kZoP!+0PG5#VATfkp667)k^l zBpOVz5RD?psB*35PqQE9Jh-r`!;t}#NZsMSaCT6k#qlx&y83h?*NJ2h=zs8ecqEXFY zyijWyKraE@yPePjwr+1+YK&^BU6IIkLv`iEh^$vLa5>)sELE|s6U|v@JKEU*Y5>w!jT11fVrhHbO_Wj z04SogO&!K?4*@;G(M#h2fA};8Y|>cB{Gf6>h(OROA_^%Nr1oqfG=~vKDN<<8D~1BH zBP5LB1Dk`02xb~I;FBAmM`*Hygc1CL%*{7hg-^`<1IiloiBmy%P=rT3a{^M4l+b`F zU?uoQ7sD$XREy`q-(F3gEVm_a^J))&x$Q(t-`ps(J8^~`&&yIvJl;xvy>Tz$&WWl8 z4|atKRTpq)oTcrvD)F<6&b9r$Y~0avQ?@VolD(8WW#Riaw>dS-dz6Giq0PtMrv883 z#o~V3e}Btz`?~Y`|9%Lz5!;r&)$9Lzd*vU^{`b4YoPRdDeg8g+`+w#2esxUx@n=s? z$E!~{D{Pz4hnmJe#-hkLic6bNd@sor%?d{qWD(dtbz^vqfm*i~C7>3tgBkYf^%1p4^iA zr`Mg-ad*})ad7X9YOnMUryG{-`F(5;yYq8@dvRW0zqn7_-zv^4Et-dR`(NM+zISzM z-cq`^YaKG}Ol|I_yS-`O8b&vXqCGtpZEZ)q?%lay>c2lv1S$(8-a5X$RjoHtW%rK> zNvXkW-%YaLV6b~hxA@=a_hA#G{?-1GBWkx#*0@uX3XtAd|%>g0LtQ&*E|?eu`Ef2<5=O)WGJnboMW=`vE3h! 
z+cNJ%t44$t-Ko(_GmAbn-{g|%7*>+Y;>$ZKE$Hl-bDdqaO$6K3c#}6@(=0dVmNk6Q ztN8xeWRK+ziT~@fCwXTn~pmv=YwMTLk4vU$oTy%MV!`%?M?sROy5^>-D#>|1SU;sH)!nCRl1hK= zYQL^+5wp#Hbp%dZ-@jq(y+^Vq#9ebuPVbo1OE(RQnLDY(+Jp9O)~K^U~bh;c3pP5!!#l= zDaS{vhdBD{x7{wIWjEE&bRX05O?OV%w8OJ>_2v5VxYQk2Qq4I>8m#-ZTZ1RwT@{?< z(mJMMn^CZlb%o@%EuD+DSslAM{q2;|X(#f<%_lBC?f!XQeP&UOMs#LOPG+|JL#Li; zu{37xXz__HbKGw0{EY}2S#D6zt^N~|HLL62!8s~MwMMaqCv_f5yuXuPvAV)o?N+45 zW5wClkDkQ*oZ&F;(>caeUr)`NwZX!#kp@3L-P&tCVVXgWEB)7oU8ff}_eYGBEw}KP z+8?uOh4@8EahrTpt;XNIFJ1ezt=X*ds%vwKcDY!+p0;46rRp;eVTwO1di5XO3nS`Y7HkJ>|b5e71`5<+9c-2$hd%n(oBmX-6cIc93AC?Tp!3DT4^!_xh&DL#NO_l} z*Oy6=SEQ>yB|Ivc_fEL%PH|SG7a5vx?>fW8|KK4T_bYo3zAsAHKW%j5m(ZYHSyOK{ zMPvngFs19uf&$)j-c9MP7W)}1RgAyfWjog6YvI%V;$l|7g9BO3b}L?NS8kp0+f*Zg z^~<&FouAMpD1Sk()0oMPjZfkXp82m{6Bn9Lb6V-yOXjH?nT;~S&V4)7&9+<*x7hzp zYO8#n(8_RTYhCHIjR9YN7bzUcxrlzrbZOrnwdPEIS-6cwa&N8I*Yxz8yTww>5rLJb ztGO{X9`D|3FWq<7;_u#yXHJ(3lHSNd~Wqrp6&Qquxpu^>#u$w-nTZ5m0$AOL1DMvgyT~#9s2Gh+WA^3n7pV@OKCc0wTQgwXsh+Ze^q2M$dM zAG&hp!=h%wPSb zU7MIyAn)uFUi+s+!_F(`ZvL~}k>})|$ns)l{+u%6-jUjIi_#!V&%!>JcGCY9nn~Voc&=pUI_e z?ppiec;YI13AcjdGt_hgUis^%FK$e=fb7t$wgYlNT$B7qdUys}R`^;~fj6c51X`9{O zuiG?3-FzHc+TE8<+>6T24_O_WzT7iv`l9p(pCn85gXX~zTfby))VOOnb;NVmDTmhF zJ(y8`_V6fPl~kmeu4h7o`K0L^?z(MQzls~NKFQ2=@%cH)>YB@9Z=ZC(ayCC!|Ifh% zPo}(6bvZXuIzP=t%%6X1v`)6oWzYSpT0PZ!LW-WdzmkTdBA#4Yp_Q(~l_SkGZ%7=l z&p%eGGkrw1{H~u>A!kv3dURH9#&O%M%q~nGYb51_4 z$gG_IrZR({xD(7=v?Kcmm)`T7-TN-hdm#i-dFnX)i{1oLsqx+(b~UWx%lrJv5_-rjv# zS|Rt!$*ebve4DJ7SZ#dj>;I{%R%eQ{*z3SE)t#&L=rywb(>yj?E;!**ap&CUujf1F zznxRNo1UI^TvX0GRTnLue^i*ha*vd``h4|;Yql?FylOl#`=QRd$H7nL+ii^=Vd|!T z#Hxu{{LBdj2aGwhdi=Ii=-v3B68=V>j{E1hcgpfgUFk>Um#95C$C)SfdQyIx{^74B zGA;>=J!ui8@0y7%PA#MEoKqjUXqnRj+v($7JwjqGl`*>}FS^N0|LCzm+^8xl{z2;; zCCk=$7`kbV$e^%otAoYI)`K&qr>7Lmz4{~IhHp{q?WZaR-06m&)8J1{vb((>Xpyf% zA`*5lDJs%@wnj2y#PJ1Ml^T;6{Le*(=W`^QJne+;w7{#%ZT8nMX6<~`=9N8V$x5U4 z55LTcHcCk@bIwrxXDH*nBdmL=m>lD|eA$$$-CysIa~)~n^XY5zRqJ>=n)Xlqy7>B% z*0#1FcZdAcoI2?mF4sW3P4wzCaa8?AN)TQ9sb<)L!jOCXF`Weu-HA5sqB{vDEQ7lE 
zb$h*Dn4CQ{Ey3bKWwl5-aN^O*>Z36?=N)EVGwpp95M&v6?l4NTDBlw~b@NEE#K*|R z7bmUnYTj5=7{PZmaw=bUU9T#^L7ky|F6P8zQSvc)_QW6hcNE4Kr?N)3kp|C}i{j0a zmy1G8PsmNp4=G&rC+OT#o%40q-*GjX4OOSExHfwF2=zx>eC`ppwU7AGuO_T=u3N73 z!JVt79wPT(>{{JxW0FI)9xXj)njx1j?ocWB$dmCDU`epk!2Y!7?+B(h17 z=e1(i(<~+V3cG`@n;Jv7%pOg8m2v3myY|nrnV;0IWhTT1hO?z-{ay9dqe8!-7Q$Jh zXx7NOub(ShH7Zr&HpB|O$DViE^We%aTIWv-L*}^JlN)dS*%W>+I^VH`)--lfRJ-_i zls(U&ey`Q2mVZ|s3gn+1)hQVhx3zEg6T8{jJ_)zKZ)KNEEqpFHVTsiNuf|Z*iw;jX z?pOcTEAv-yGq1i}T#^%z$j>z@-|s=c=P-Tc*WEPwPq%$>${{hyN@d>2Vr$6D1-I8; zQ7gB27r_hldAZIq?1q`1DMzXE(83nuh@)atL&4~@H=Cz6-V!fosHOet&bO-9kVm%P zSYK9Jx0uA_jn0tXxMg*1vsJMDP3zL>Dy0kUe>UcO--?)!aPe#)C&PNSXXC`h9Xm!} zdul#58Ucw+gCujc7?ow5z#hAMIw z_L=EEp0j|R$w*I7oVnXxz30z*%L&&~;15F%iH)++5uF3(EDqb}SD^Fe=qldN4P!mt zT3>Y0I;2`Ory}oMTF+2Y+KoId`TC2pIs#o%6D;Y zIi)Tx7q5A-`;hbBlJ&10c1-4YXf=G%emMPEm->F2_8V2tv<_Wnvx;lnJ$NqM?7eCo za@(?FWpv_lm_@4b4PUnJ{avI|-B|Qs&!QcpS54Zv^FfL9n^gKm83mtnCtr+s{7LQ@`>YBE6b7r~UN^)OW+DCJh zEW;f+W-dFLxsztO*9c=;%~us3=9BY8iVB~;Uh8_BvcPJw-kP10|Ly;_(D%>mj=xrV*I(#A z`=O@r``T`oZzXPGj`z`$NR63y!#`cwQg1CdztLrax5JF`jHu;3rxs1WdBrh9*T3k! 
zi`PTq@TN6K-rD}0J>EO_&ncz*aTPJjHg#EvVHd*m?qA7kH<-}+vBBhxpNZ_->D52) z?dEvA4CcFR@^asi_~@z9w>!>v{#Cl4x#}fu^m1$ZB%gTk{?QvsYsI?fw-*n(>5IRG z#~zK{qFGz>GU@jm#=iMH;hUB7&u4}2R*$url;~(bEeD@?^E~XkW*!g(yTA7$q$_yWE@sf7}|GqU(Ttpxmw@0M*L|R z9b_z=$2I`DRgJ!Q0c558u@3 zSo^R1&i8lA!hMs)&(=*IDXsWndWg@iXK8odJUu_UWnH{9*Y-*H%ZSV?9v)@clC2v0 z^Dmh$@BTLSuQg;Ps6vYTD3OF&+X8GIK}o&mm|3Ii@WTNcH0+jo3=iBmDROhybsQmv8$v@ z>A9=FJ1+O$5Z@9LGpjMUqWxJIvxlp9!pDq0Q%5+W^^3;B3M)p{2h}y~=PxB&JG_sI zH&9%B!o8J6L+rEBRNBSba_;qE|-M&^%)M|a*Z$HI%TGf_ky>2^#jn(yujCu~ms8Yk_~ z4^BFBWB2IB-_mj~b+{)N*F9P}`EORM`}def}{) zUUI6=$Hm91UiR#p{bf(`_3Mw9m-H$drlfYtwtlhEyHmPKf3x}Y^ffiR&opcQi~XAX z&{EPmKhrDxNT%DAAF;3EKCX3Xl$ghtm}GtZ*|ejPJ`eTWJAB^DI!}L9cS<%=sD6Ip zp0$7f2oIkSZ{5D~o^a*@Wxc{XORjs^78qV0y?k7D|V!3Abxgn|t?Js$Zbw6-o zmVC*mb;PyzRxWJU%g28!Tvlk2nr#>EwJdyRVj=JBjGi0DQ@LKo(}Dwb9pQyJ|}uU@#bpXG1?LOxHVvM?WWlR ziCq#`Zdaah+OKC(8L1-j@_%=3r)%S%S8jCxk}FSY><+Rj856=(?)P++* zM7|2I_T*ZtH&xnJ2g-$)oKJY1U3p~rz;!!XkYDo0t!{L%`bd-aUQ_4Qus2G#_7D>? zM=l$2MPIS&!oK2=#TF06b?ud}Iu5TnYJQtrdBw}i&^uw@-6{DamPl696e<&yPG+i1 zArse!#*|9GmS4%wxXdTDe0M$8xpgy6p2fd${=t%ZS3wNHb>Z&1ANtL~c?LtzgJ$ON zLQ=`VHvi%UnHe7*vL73SjcV*CPVeuk|D%8IQO&gNM$O@~O4i1=`&)|N*JlQXT0OSf z^+Y9sRrm3G)1F7N%+DDGp7}9Wb>H{pe^g;S7P=i988Ic?xJGvWk!o6yp2oFr$892~ zT;HkUdjG0a(;55Ms>^O2jecufxjEA2qP$;wXhW!8oyEAeQJjK=SMyfh_o&~7R?jp4 zB8=ZSiNR*-zLVSr>|Kv`z9^pO9n!F@ZsWHfUOx{M`lK)4xQ=n6L!M#q)@`k-bX}YF z$s@7E5iQ3fA+wDX0~$6@#LVDZDVDkC>@Z+iJp{ z@oZ+AnVjSzw{x*0|Bt0}jPK)n-hOP`ww*M#Z8o3SR)c0^+qUhbv2CkK+SpE;Mt8oy z|NVF$?Ah7ZoITgfd#;rZI7Ixa*FJQ+A^j_+wHz8L80AxbO@jK@Ih66YN@smc(DXG= zK-_UX>WQxpqe3MuN&_zx&ktngt?=40TDs#wsRsfynl@kPNl{&gnnb2dJ2V%3q$JzD zAu5aZ%bz@!=MIp?nq)p=u%5=W&loHg>+Z)MtqjSkWZ95|u z2oF9chdEv8Gk5(8ov-I}`(!Zb{jIo;uDkKq<%2=|rPL$%@xAJ%G3zM+j|VF$V{{mv zSgoolD(wovbn_zsy4e0JG@0y`X=g~WP^q4z+XKW1UCX%Cw0Y7D7$q-!>h_E7p=q|g zy$7as?DF`3x;_MQti3ac_0oQ*+qhD$I8*`NqwjOz}cttCXOU-lBTjY`E0 z0V7zYsm{&Bi9cubi~$5gkK26;DB7=2znj5DJZe3dw$`CQ;HkFY4s?E2436<&GV`PT z(!UUc`DDJ@E!}=Skmj^N@?H7n6vs3SSFy0uKd#h7F8bGlZ!Z|kfUNSUrVM}2uoOj? 
z+gYo!AD0>$bXGsz4~7i$`!ylu zLuV#{_{4jNIB&~%2frd4Hp%2dwdNXaKQB2SB=`)TO{y&v;17}9Nw~t*D8_FOiAcJR zM6okU4Va~8!ln?zXlm=hN}(oraif#qWwiO-@h;YAiAWHIP>7H`9Plm;C&5Yg2%1|9 zgU*BA)PI;Ij)yuv(MgdYl(O%|R{O1yS*E;(dw{2URtyFniLIGs)g1 zCFiKTv6otk{1nDp5Mz@&=^-l2(GO~%e=hJ9xcwW%&uROP9aBg!T7WT034EW`S4yxT%RVwcOZfmlCC^Gv}AYCC~PzP4GdcNOL;a0-yLpLzl=GY@v{rE!wCE ztdA^o!Ub|LQiG<|6(hcSdrC7SHlhiAGNs?eS_PsJV=n57CV0!!$O=r#M27WjEDlv} zq(%F`2OQb3G!zF^K-z}nu$YB6m=lH0Av5{!aCnnABBo~N%3f>um1Bg}xX9PeQ9I@h zE~69?Y3%RZz+aNv5N~bPBZmAw6aZrlCnh%6j1i-qA)9 zhD1@6W;F%#bT`l1nb}-CTrp4);*YG5v`*{pkn>;l1|Eqfir{r2bE3d z^S6s*K@HQ4o^&YJeGX$o9cSn#&|&0j`COn1Ikwk#_b1VC_3th1vaNhdx$ADlTNRxO zd-UU8|MR*@sJH>;GjsK{WNn;F{=uCCm9mm4gKfrUyP3u&2}q^ofIE(YexC{HOxxG} zWX?F)1h!Zfjt0&^Rc1eCGsHdk^I3xKt&=tz_Mw`*wW;|4;l4-6_3mImLFfg2u5SSf zaZpT?KUZ9A4t!xf3ps;(RW!Cl$1=^O4lx-#liO47im0H$r`cURwu9{#=8q1&EQrQ} zh!eb0A+eoZ~Ek?GL=yt#&eEMY+pG~`>BEZ>ORmJt_K%QVDe>M3COI$MmXDa_XjBY6!$ z+)Z`o7Ja(a4cT-$<|LN`Nz|Qd$y5m^2#B@<6l8IvWK-F6Ef%5UDO`9{9o#LrXt4sr zc5#Ue0|{T2b2i~*TchQ9B%YaV`l1^)jq&-i$e%| z!YPdE3H{rI(S5yuAWvSGlJFeK0mP+Pvm^5R?s)6lU>~q0@U!6fi876{`y`#}wNcP>Gbhh%*7KjEeRgH! 
z+m%e~?O=FD4#0e|64&^ibU9s~?Gi8#7G@iQvGgQVwSPoqSlg$-mvveO{EV|-y5tMA zceKwXyc~9F^oI2%kvJ}Z=f6mFX)_kfwJB4FbpVzj(k}D{Bz$Q&%jJWxDBZ_v8WkYS z0`3&6?Q|J|?u8aV8QaE|B0-#k@;L8JIX=2wFZUBM(`q{yOIvrkoD4zYsJ!^&sb5*a ziYx{4ik-=Nzu+EHhDH6FBx;Z2@=n?l&s3o1Ni!x)AD~9Sa=)3P?q8T?_V5u}GF$$} z)C4xsu@{-X@v-+_Sqnp~Nz41uG~&=s50AP?pg=W*j&* zQo1&PI$;zfs_j=F4v5s`#y8mBz?p#B`R1|gS|yxmeg|6%$`qOuNHavU({BYoc1hA$ zUT*72n+=#Itl%pOHW??n)y3u2oanCDZUN%B^WqihxqXHhEp5e;MbbTF<1s3Ev))W; z8Ard-yY{2I#+JVEI{y|ZOfGj2-O!hR94?}!P-MMUFEYzpCDLLyA3Cvqn@%oIZZQa& zJie3ksC8bo=Z{uVW`f_D`2M~MqJ1w7(*~Oy;L(uH}G32W}K_e%>4Hbo(I6i zW9!RfXZ}R!*(@!y-`i1|RUApw%g{_1xc~+)x_s$@MeNLF>x<*n;zxS3*+);Cc%udt z!UKNLRGSM;RLsVJuH|^nYTk5|yzwU@lzG>yK@w*IVJoDQ_h@{PN8_2?*VioU%S?){ zS&`k#X2fTUf_AXDu+8Y*=(ENk7Yi^yuuaJPW=KYmmMC|2B|JwE@ql|Ym-lSw58zWQ zXc7^QqQT5!fXFHmP}h1be8f*JqAZlwmD%XfD(No!0^5?{(*%_L!NYdGBtx*jL7Gk> zW_gM-Sr@%v!y8g5&_@ABq?+?`r>TA`x47q=G@KC#j4Y7o<{Y>!!znRZlmzx;&yGrR zgrqNi?;pky#DZEAR+mht^N{9Qg!1&m=Rk2eItbIS!P<?0|8`oCar9f%J55@ckRTosJ?QQ& zpdP(p@ol^|Zpi!)b&`BkRFVF5M!H9`AT|-;`Wg}iFbmRNHCcBhGy!ZpwXZdntBn=5 z{0$r}@o7|0h?PV9#M}zHouXs`=v_{BSbQfNT(tBAV{X;^dnUpkUMKY>D>`sI8k`*9|sFUi_%O>sB^iPq3`q8%fG zn9?mjA}5xJd(eUJ&xKjnupPeCbogSl#;H`L-W?fX_2CJ>4IL(i2yGVCdtg|uQPy$v z-(>me>vK~=)@j|d+>&nawQ!`w<&6HB41OVm4XW;4`B?b;Ab_m?Zh-%5eMaJst2itb9r#$r$N z>IUqBQ-gNSjgmjQ6nSojsJh?vLq}G`SF-rl&tl^B@Y7d)vXgmb#SFjh^BK zgh_9!*-~J_pF@kwfg_W8YL(KQCzl^56RWc9@vT_ruDL$DSTYPjm7*)fy}M_UXRMG3y_Cu0 zX`HKCbc8AaI{QE*Sz28){pPtnJXu*tXU>Ini!JcfTs&z@uWp%;FQtEfAAreGHk34v zJzfJ*`cr+3C2U0dQ=E;Rqcvdh6H$|l=Jne%_OtEzlY!^j=6%#@tNl?mW%~7Hqp+q# z0_zAF=FBdh97gb%Q2he?^#;<`)kNcdJ>drb@KP48ubieNw%fnr4AUnW^pj+sYhSpK z*Ag%qv`U$zv{Swr&(bfH|2;M|d7U{gmFmeDYL##a8xAgW6+juY9Ri}VRvA9EICjcADOrW(hEQr=}$)DJmc&twMe!3re+kL=$>l3V@(TT zxQM;e+pnVZVpk)@B-{i8ThSRGcUZutYPXFb>mN1kOt*oY!IW{jJF29c=h@n@t!BwY znVKx<5JDtqa>S$D_IAG(5>{5z|3Ol~(|}jNX#OXyVjjQrU;o67=Na(NeG~VW!b-Im`GF ztQGIh#iduuS09~&%^@JIi^8U@N1`mtpPtRj2CBo6j8<+KwR)CF)>(cND#nnN`UvX% z221c$|4i>ec^5x(+)1A)zDZ2a1k!4Mt6W}KC4Ibt?tQ#JX?pY4Wbj~bI#&rK9u7f= 
zZo4JKRSN_t5#Wb@h#qn@r>?imPGB3psWw=@kvMWi=OGUTJe?6Aw+4PDcZam8fUsgl zsGZ|NUb%n|xmWs=9?HMU-Bvb~jZF-eh9h|IOjBJ{n6qia0!$x4ut}+P$2< zu};rzK5qm5{%_IDzi&4^VQ%iVKU@x4BkPmyRC}N0$m~Fl2vVYiIcguax08T>+c^zx z;~gsT9GPT4g=sxb`ypFCzJ?rCfkL zR+jb>ulRMr==5y*DowF`c%%VJ|B~Y4oYFfi?ajVNcQ}PrF7l{+tgTxOKgB<>x7#JR zJN|B-{&rh0X+RW`<)jp81qqywpZ6f}%;Y`ZS|(e4uM={%K^|5S2%dz#nO+0kUlvbr zoT{{O6s@0qjP{LQhSt~2#MvD&o6@axW_{NpJ}};&9ccIod47z4tgzb1Ex^_BoW4HU zH>BU&%|4`@BpO=e1Lr3hW2M#z77z~(kbN?P&JJ$SVkthj~B zatzQ_^`6M(NfjX4=gKZ!qs~*}3BGb2D!?Me^xJ1tt)d44!jmDq$T_DgW#lo+uRbeJ zZ9bmftlg=ojA_PY^uXb1VY8`MssxN7g9Vu{n8pmW=HFf%vLK#b0^QPo&MN8j+M%joTQ(eIa{M}4ylIxy)uozz%hz_){a^S3Pz6YYS zfnfhYxk7bZa%56~P{gmqqB}qLEp8eZgko@+pD;;T!FRZ-8h}RUbs2uceNsJtT2GF7 zKxHLihZ&lkji=XE+C4=FoHg5)y7=)IY*dw!HHlw2c&vXE<6ie3unN5Q@o}ytU4+Gg zrHTU4*sM_amW)sq$?G>~mRlfVIWf!}b+>B@CnBLwtnnQqfzGtMP0-||6r1E>U!#vA6j=^sB6%yupM4`p{g zS`OhMK3+^{5}K^N46ey#;J7h5vR32HF`T!feJmM48({7g>36$0q;mzsMLCzFYV9^X zKO~}Z_r;h%w=jk)Q2sr+&Ng9Pp82XwPcjppc%Gq3vk!*H8Jbadq%gzVKb|T?Z~BM0 z0SqTdR9yxmv;Rw^qO%G`VwK7BqOVDN8Xf{>wzi0o6FVbx4vj$|CB-kh~cdl04cMQKpz?hbt+N|ML{@ zf05=^cz2FeZr3mH^EM01tqLe>aenA`0D4Ymf0TO#V?~!lpn)wQt_$WC9m}RITwfJ5 zH1V1o+XW@7&&hgBUROIp7@euK&Ifc+L?m{tp85x2o1ou8&yK;#4hcaI6JXj*>AS(> zRtFPoK4pHUMZ*wdwQZPH_9@=5(lQUk!WmpH>E5rBexq?Dx2jkL;NNO> z)-aaSR_RWbg34uJiRiaQUC&>X0Q&BwS$OmH0VxFt6{KJ$S20EyQDCnI8Ni<<$|Kbi z6LV?9RP0rpKcbhOIj$9%GSTfo6&cU?hW?_33Zxfa~}CFCl`a`l>`jF0{GIHWvQx9Vw{ajE0TeC(=1?h;vEGg6vv` zEclb)3tgX?9g+fZxkEXpWn(rJ>HR<1jQU>#7is$6ZbGe9gGPsdCG-3Utx=*nyTGh~ zpnrpWP8CPEI+EHCK6;r9nA>YM8M;w%lZX~2>|}fe66Wt2@C~8z7>^j>S@Xn-McSsA zWMQy5l0u;RA`>iD@&0ECX3QM;R)=e66C|W)!9zyrGqPYF?*E0oHpL?!_*LN>C={tk1|t-?{p+YC;EGZL zaXm>EoIRq|r;QA3i@adK9?_G3VJMbXA5=Qqz0FA^*X*P`mo!b7wQ*Q~^rtn;nha_> z6Ov^~s>z|5HZcH~TR9Xzl0BP#1hEyx3`tuw<RzN37A{o@14aG!K$vT7=@)DDEA)W_ZuRh%}30tHmSZFuXKqz z5`_F^GWZYPX+vT}$Q^W7)B57%+zO&OI!72wV5X~vExH`mhq4O41{3Z64Ggs%>Pp-) zgHWnt)*NM8Lh-hKqx)|-4-$MQ7s6K~n2}-wZfE4I3?3^?yKrqC_>=PFJD1R^y|zQb}*Uj`gBDD2}71 
zO|cst4{!y^aO&Mhl8C3G)hxK8@hF0T&>E%bC2J2ZbL`_nvbPk4rjE$_bU`5dSfF^4 z>-pnwRA?6VMSO)YTMxyFkLqh$>7eLKkfbRlg;RwY^U@23Xm-@o?WO{hq4f5!PF`F+ zp>!j%k%~rpapH#BX#B@=UOhC{_?umzQN=)UY;0cwy z^~>5SpP5Ke+M3-JR*vJdB)to$*_H8oPaLOkYCW~tom%LJ-KBf98T~*Bs0r?a>eJbj!ACdPz ze~lKJ<)Ow5y7&G*^Chb~D?Hy_y=d5gQN+z)y`eG>qcoDls^iHxM_x?LSlf;I#IZ2# z>sgKW^v0o@?=-_U?51=|&R2{`DNg>og2`eWNK20S3S2+!A{zfNdF}Ii5!pz1A?x+; zUi-c>wUjl?3asNLb`+wELg zEPbG=e9i46FHSPV{R2fjW6HPV*s#?9HGfV^K%63}QRfB7)Cr=_AAIKgkWF1(sB)on zsq*8Vr92)H^rURknbY5d92)c%^wQ3|s@Si@4PWM^5?+a(dn}#gHp@1k*mNy=ydgW) z3(Z~ae$HZQ(~qm1vfiswb=XKZzFT+ zZ+Ui}$x#G=tr``FlG%5@B_^3|B6u@t43>3~?pf<2{<*Dgnk#(48Wo+PhWNFt>uhN< ze#F1T?E#zSS4sQkbCh4iTX6gA8=x$Clop1WkNS+nNsh`lP4|1PU8$hLugr7YY4{tU zl(SS}eSE;*NvF{5^57s5Xr#?SiY_c1Df$EE5JHfR7a{^n`Lya0iXnPH)& zSVTdV;8$)77LF@y%&cHsr|XIvG@9ucOWKb)Tl4$+6ohM}Q0rspBv+)l4ZSbt61n-7 z=4j=A^J#cMLe4v-_uf^QL<-Mrv=L&XF(X;Xt4@ns6g#kFAs zP<{un*97fvQV+@hSO_Jcqf(dz``#=B(A0iRIQ+_O|l^sN37m31qJ) zjMgYEqd2d?WRVFGjAZ%6Yh`Ao7rF1QHGBZL-a0b7)SgN5?$%-y8~mCqf;#NH%%E3s}B2eoO7I{;_l;95NB(nlth`J;t-bN0^y-`Z|-;uTfHhxk=V_n+(N>q86osj z!HgcQu_V^wFHbR5{mZL1=F*nDZ_f52dthKaGg;u08lp6Dsa~RvQ#cndg^G6by=FEr zS;h*#vNU+sgI>K+MQrMlfzJCy7tsF39}oY~La-AT9mSa;+{K+z-lNibLdSYr%)yFY zQCRe}@^dSGiQsf6tvTm8M^$J`OwFxZN2Z}q8f*o3HX4&>OhtW@!fjIXb|@J9j(#89 z^|Er#D8eb;>=7&3wJ0T#tNdje;`ALL-a<4E$v7)qZb77{50MXwb0Li>h7a+%+Rb@C zX|?MgmxjdjH=`WtvAk#R)dL8*ouUIK>hRxLWUy~yH)03NfAbf_+&;1sWvx_c6g}K-03{{`Tl{b_Gh>uh<*n~2hln>RZn}E+ zDC z&FARUoA=QOtFc0x?`Rgr)tB2qR*Qu$=|F~+k!LNw?H0m12Ez~|vm?qoon%>fIv%AI7-&hk{G# zS^Le^i6sclu;Ol%Q#<<6lE~gr>3HiQX?p0ANC87N2vh%_oe4N%hNosn^`Gu&mE{KD z&yt&8yfPt*!?6R{><~Hplo(onNKZ+7=b=6trS36K^2m6OAGlg-W=`_9l?H*8O`G|i zHA?pRsV)dhfLBt6wa)MX-h6n)j7pnb_$YGKPyy37$Exn)}**E`P=$OPQ!b18$oysqgM-HvRjwt+@Q0WWAtaGSB_h3 zOC_Wjv$Xhg-(sZJYOR!YTL_kvZgSB#uI^`tKIzLjO8-)Zt)1^^%`I|ovwZPolwZyd zCe^)4@OW0>tUEaL4%w2_vwMc|acD~Ms}NT07a?7Nv|e2ck7^(}mzq-8*FsA=u62YY zQpmKPsyn}9uJxw`r@W5#Gv2m%9aXqTrS&!TFh>;8d7tN z=O$50qM#^-hG@{PvKhhnW3Y`-7y%yJHg9z8sDF;>!d9)f(MMlb-OiVZ)!-vQ@;LS9 
zXN#b{7wh$Lgc#FS=;>o(;IQTe>5hN zd5{ly$qHXdMePrZ94EXFWeA=wtY%0ofF+X<>XIh1(uAHx3-E}%s#H;K$aT?sz8h2w z4Y4ZpxyKNU+pR`pRF3AT8}SFrx8NKWDr1r}gc^a<;}{mF4YT4;bYKK&)LEP*WQ5jO zZ)+VDSw42MVdu(!+mgy_Q^jMU)fX}y%K-uZ^yjBd<<8HKZ7PjSoY)nZXP>W$&}6+( zlme-tH*nJ?%gx4RPaaRhX7z0?1o{v^Nat=Q1!?!)C6Cm`}lnu`A%n_d_A>gad zgmNMaFtWl>1;)d87`tLQXC!piNlG4?s=r9`|Fg#_F#khq#bi&%+eO)Uqnjq$nmh<_ z|I@E-boL>eJWWOp%p!MG(v(dyec!~5$mbj5;gje@{s%>Kx>q9qou2EzSQs4V$FATO z@h35SZLD&iFE)w);L~>C=jVt!(g<&AE#ws#4{6HR=OO+oXfUU-Kdr~nc%;QIDOj9% zVdDT7uGK?miKCiWELHduhqTE`4;~;MD&if3vE_XF-6u!bd9rv>cEWDF$yg#$myVe#oLL#xiRtA6!Y{a;zhP4VEdDtC^M4&RTo3#fbOV7)195E6h%0}2K^LjrE3r0^IPJ({TyFWdMS{ROTA|OS6Ig&#{=wNTx ziK9qrFNal_lZ4dE$tjz5&cFw0)W8d|>LzFO#H(t|Y zZrU)>NM-YBzzU7P8Z0oqfho~vwAI}= zcq5~%ds9~M4%iO$CSXcb9`|7n4U^IpYH(w=-A|0JNw>@^)&;^%2pN`oo{ zEi03gpu33o)W-&N=U$>1QI8Rdk{vVY&{=X*?!i#IRhV8E_!7%tkJSLF7`mxkanlui zO1=%sXK-JI?RnVBs}MD)wL1?%8rVH%r;3sy8Zf~pP7>AyCV?y8e58Yy1^*g`Kmf6G3J8N_E7LEK|2Iv@U?=Qa4F@fXI647=OlGBjU_H zyYDgu7dg@HHPJq?|6(pkN(a+%sXi{O^R@WQR531#3q33Wz1JY7N^a@7_Ef zo2aT#AW5^7Y-s04(vHU8i4#I!zB}jctS@C)lTv>8Kh<=Ls^g}iz1YGjIV}wD>=GG} zuru}wl5&7zBuxTmPtockp8h*OpG4g1BjlKEV27P!!YY<93-XHqjN73GF|P@a2P|Ln zx6b0zRwtwIV znBwR%d9`VVrDtI|#nvHPrPgqfiD%4N1iM|fyrCXAVYB;{lU+15( z3wF3$QFbA`1%@2k*bS8meQA`Z0;tswJQv_BEw7*JvP`J%E39G^$}afYQ&U*OB$dZF z646sgx{h;M^4Pu*>5Dw6yFKb4Cla-Xl2l;sj)}BvU9|xB`u?qi|2toJj?bl8+xm2C zl)tF6`xyf#;iHkK&8t^}BQX(1!Q1oP301#&mgqlF+>4pBY{_MTO}!CWX$yteISXKC zB;X}&@&kMsJGms}>wtC58S-ei$A_xWdOL2#j}RMSXe`rF+iDyx#&zAfE#b$5J3-17 zVzL;b)(2TwY0I|TV!@wku}*^?-W>6xe~CFCTc=RxwR4tB6VD^7kP$p!c^ffRQmzub zd;HV9dn!O!Ch|~IS9&Fv$_gW2JI8^wn$aH58PAd&F2r$!nhL3qs8+Qp%7iD4(Z?Af z&4-ePb2RToFr8Ex!jpvRWcA}z_-iysD!UXJfpHb9W+9hU62MT{(pSUWa4e~pJ9;}Y zOK3H}=-9$6wl@(m4?{w`Mq{zZfZWI9PhkqHx5MSW*H;*J7Yw z29J|!5>*YWqKHfKx3x-Jqg+JGF0uEK9k9(B_<|;>))EI&6gz-^eTRq^jqRb& zxsp3p31`Tp6h~}Iqd32;q5GdkDbGjiOxE9Y7IVnwc8x?~IaP>h-;2EhQLx$0#0$mU zTe^R`&Vp`Rzc&kSGf3C1LQZtBk9aYdRHU1xz1~vP<%vH!_;81Q-HcJG;0&-ujq1-H 
zDL5k@;~x#}J-khyp1cEGEn9OCV|9cl4KT!qPD(c0Ze=WG8eQ9yugD{PjEo!b;Bg35 z=uq23#ech+lc;jT@!KfPCRo`N4-Pa2OJ$-wy>f}t?3iPT{Z&>Y8EOQPz|cc4tI?b$ z$lax9N4fCBElSU`P%9nkkxlTb`gK@-Fvd5%P8GXs6d$o`KA;0;bq6C}>ZRc;6lyF9 zOJ;#793)a55(kk@BONz(dj?FFIXk&OCXc?5Y0DL0X?P?g>+|gmGBFd;Xx=n597)ln ze4(=C6(b!u)np_2fU|T+J%O;z(_6ku7YVxGHyy9JG7t&EEWQ0J!{)PU^oZ!A*)^Cl zMF8LUk<^w_>)&>uSn7Kup+vvrNoK2=f!BdT`HUw`Kj;t8Y z_%thiuc)uGpk9*W-|f^03lf(tVrMTTGc{Z)r2M5^83C#vtMH%9ZK%PFONoOTKP&}e zmFNR-$cgC=XA)X#TBh7qn9Us$s8A+b$YN!+ZOoqEY8#QL z5UZtOp3JCPO{(fr5pyaaiMY}#97*%7?)%zm~k zqvD&RXQr&f^0;6v#vOmBv0V&FuUG=U-%$==;H!2UA)?NqEK{|Pg90ReC~+^WGH77- zQG4Uy;Vl~N2AaN!qT4OBH>pn%G6xTQAw~80?UhTRaoH5>#QU}36fBx7#Pd-oY-Tv?>~~xnIQPbIn=8xh&zdAcD0fXlV@ed zQ4~|i96o(|QnC%kH|sdOE>^@{vO3Kc#!=&J3|==>3418{nIH_7m4S>(-}RkeIPK6S z!#rl-qb%>HIeq4Y5q&3?Z%ycu+rCpp06 zxaV!*4DhOXqmcFG2moY$CWfl!p~1?7!rgI2QZ?uStwj|K;o>H-hYAd=v}ZhrvwD>i zrJ&@=yXyj8%TG3ni@^ZR{ubLYJR)GjB!A@g(mr8AmjMBlE$^HNhUf?6Rat$*RLgY( ziMw#smm%=%69{MZhp$z2>MnG&t?N{cRTr zllm9^p10cR)O%9Za672ORP5|xQUG}_B|Mi9Th`O@-Ff#Jb)3ITWswd~ed9mp zRRtCAk=%rJs;|bl;>klEY0{mCqA1F+GgI!@)CMyA+gutsgFequG@`{%2xm=#96pAC z&BaRwD{tQd}n9#W(8dmUQ`K>V0MQl%*WbLn7|J)9yI-%-@KcnDxi#)#gn%`Wsi(6#{3uDJ%Dd5B%d>!f_d8th zKR>*i1|#mdxqClT%?$o9xe5%CzBw1hS!@~SbhA^+1;uU8l=7@vmj;E8F6#-|1*9{3 zqhrohA5N;ij7-~?A213|j2Ue)m+f(O83xwOW?l z*nq)t*#}8vmYl6->Lv2M`vu~oyG*f?E&={^zi$4HQc;`E$hZ3cOqc@IS{GuPdKY5B z-#u7XNh|CV7A)Nst?`r?!anAkP5!Q7CP3H~wCJp)l^ZdH*gco7B651wq{!maF}BOb z4^Q`u3^DJUUsWyiTgM;jxG#<#TKwH+1Kh0Abl*w)w9nbmnV+20^ci3~WaqMM{)&!S zZ;hK+Rv7+6C&n;@^MdEJ?d1xwaXLQXKtT1KdU?R^_J%?IG5~)#fJ#jLivT}*@IeSx z`aC9j71F|`OZ6y9@~sr~yH6;#Ff@LtU+}Wu+HPufMa}DCO%{U_714vq&C0M_2q@Im z2Bb{adW0OVMqNO+xUVrsqhaiV;RQL*BB#O zQ;|n$Cw@wJzU$Pc@fd-~KQA((19czZa@gEp8GG^Q(i&mAydotC-GC*5rwAcpG93Ij z9A5v$H*Xegt+^bQ#1(60@#vf#K<&@Us_1Bvi6uX73a#6LUd@0F6Y*R@Jz*8ZryPaE|HF#^r``r5iX@gzda(8Hc8;UD(QD@CNT7DSB@s@;Oe5LS-2e)PA3 zs=0>pE`2(@$W;(=WMZ$5$u4^urBIA+W%1qC*7&;FUsDht45NzI@m)w 
zm5bQx)QByeX*Jr!U^Leff7c-6E38%>nXgcv`^XQ=OY}suM$f_tb5M|D5z;w{J|$|%{S$k^-PNdK*3&k**=wC=h-N|Z6@d|*q8#4~7?OnS8QfhWF4b^6 z=2#RQ_{zz1*b*2um*(vz$%Bfs>NOQ{bCUf1|3Qv9}Mz!9=fn-*Lb-{TpP#=^xZVU*1(d1leZy-EMw&Y$i-<=(<<2pThg z{RFwwMcO-7G~Fq5KKdRpc+c}Z;&5}J>!+`ut5XvHz3a{~*Un2-L(&FP%TIKPA}<**0l{yevX@dslM!qX zCoix{{P(_rsce19AL$jNyyj*wU=T8{6PGTAwTC}KOhwbH<4a}0MJP@~X;pv$UW4|W zJXFH~+B~QY`kE`+=Afd3R+}u=5?|nd5RVGZI6w%CLxa`FEEEy;IWVv0{=b3wAEh;) z6we0jsGNV~j55UknN+a}p#K`cEi$1DBDgmvjldqwGFN&2Tk`2r^Mfe!)-%hs**vO=i-MA!tK=Af;NQ|TjHq1fPOHT9+08N1F4WAR8xpE^~Fg40++6%gS6^I5m^d~)Qq+aIS0#>HWpx{hk)6ZpcZwCs(wZK!RX1NOGtq!9Wa9+YXjPP*pIes2A2-NWQ9>H z8M?jAY=l6#jt;a2qmkubgJYwBu$+mS>%Q{ z6Lo37*+26~@l>vq@~e`63#8T3CSMYB#AEv3aZeeprWic#fY6>DD$fX~VMCebY#Cu6cR9~5ygCC0^(wIIa|wD8a;?(4WB)oY^+%v~_}aJSRa8LD)aSvC*KE&(T05Zg znZ;h5GzDA|>bEKDo<1+>bV**&Wg9M^!;e|&n<@MOYccm=5i}--M927-JuQ6st8X@} z6dUra406y<_}IW6lnAwItFk_M4(kT&5JL0U9i&HlVyHWp*#FY`M> zaMcwUWj_j`#;o&lkEnO;>7fNAJKp0HvZFGBbv`2VUMkQCIm!n8pn-`t(-iH`t=Z>iF$~p6-tMq4spnlmJ=kaZBGM=89OO> zg$G|uuJv!kC3JL>Tj*j<)VbvEWZHOBEaBOrYb<4XkOqA-1BySt`35T;xaw$A`xC@W z;;Ztm{?-`83LRdGEfz#ve8Bi-u++=c0urI7q3!mR|tQa%&hPDXHyq=*Ci7j z9;m1g8_<~Nhu|E0qKyC25F?bC6;ak zk!}T~OGS|IUGMvTp6C4o_WJB~u5-@J{AT7m-GjS?fia-<$a}O;@`9DW^p0oX0=*4$ zf0Dx)Ztov|j_CikYah}d^vw!BeXIMojR?O+z)okPdd->p_i-uIy;^kjwdW)`tVLw# z${L#4g+s$@zs3xPX^#$B4u-TeyeNv^#&1(aa{gjxvbt|WUS-DZ&&3=Sh4l&ELaV5j zd|^e^KNc3^2d^ef_+b|lv@3XUN0YV~^5*n37$I6b;{OpMZU$+a9ga#TuQ2OuX{0-_ z&rrw2i7ER_>qO9`3S{)g6(G_iW=mTwno-Yw(C~A9$#v+2R-oyT@Iv^MXBT_<9kJf& z0m6qU{EqN~NAbIS20!Wnp1|njPmsN}#lYyhv#Q~uP@pEhmz#y$U!yKk)_@@aDui*vTw1L2d?R!?qnrD zn0&<@ZW^Jl%(yr8IqVy(dYwZ&;NP=rNtd#22j^K)tWA6RzDd*Ua}s-DUD`jKO#ED3 z{qTVUd)n!)qhK$%lRm~tc?P;WB2&&C+uc8RR;nmYM6R=|U5MWp4g_a?SnEnMpp*`l z!e+vv@cKf}h+YF#!~FvPmUb3{-Keff9N=qQwo~ zg}`d&HBoM086ve6z~!%YPH-dm1+SkgL)lifyhaA z<^j3IjKV=tMh+|<{qzn^6Q$T%5*B131V_M^CXX7Kpx_}Sabczs!kB}cnn3nBM$Z(Hh@hA^RPm7*CEEXg19gsLA-%Niyp(VlymGY#j=N9(X!|{! 
zUR%%xn2cL0e6gBxQ@jF05Hp(FF^1rayB>55DY1VP#J#{xoe=>%b2c(;v-tG;VWyae zAN_PUF9-+j+49seK9i#TyB{3ugs!Y;TY0$jy?TS<^AAv#V^a&RF4&G_X6Fgiq}}-& zsn3(_q(n=pA)j{w!)W}iLP}Jh7&}9~d_vmvLnA5mEz%N0>;Sc&fEuP)p&d}m3ZxS? zH^-iKXrp&2ulw~{hCP)t7@sD6lEohHfCLhl%druR0hu`Y2I zH%3m@K^7ZSX#ZHr`i$1gKzz`M%ZsckjhE_5Q`)gAqn-t6wMB)q5?J)5PSIlC^koV+ zb=Jwa6-_8|e@s5%=q%ClA+oaXDNH!;R^6thu@W7yJnHxEP!YFxJU_`GCMwkP|;&fe{9+0YA6_=Af7d zGyelcO9frqq8Q%xeyxV4y!39;-0-XV zOFPD$C*rYuCBG9WLhbljjp5yzQGR0rL~z^Olo@}NixWw%BrUofX=$w+Zjgpac1S9| zLP>--Lh9+?(=W zLa^JEzr_|j>8$)$u3z)rdl1}zs$7BH0F%R4 zLqP4eG}`}N;cORLH?^^5-Jw)Yxq7x{aI1$z>T;89SHhKqEQ9Tm(PD0C$wuS2`S%N6 zp-+LRoBK}#AG1%&^X)xf9M;x-tkeM;09%+*6^eQKuM;G9vF2YZhzP0`M0CfH=a9~m z^#iy2$0Uym-EWw}8)%Kt==LrS-Yo)c(dJ*)G8v$j$|$*&EcU?tAo0B0SWfu=>yJwC zb!}y<_ZJvm%lTB|w5)f#)win`+&|DbSRMKkN#pd-I9u*Z(-~ow$-sL z*z}2n2NBsKDEzSDRMRyE;Jk9|n#8&OUbr?RR>>3nXV9Z@@tD+=a zWC>F^>qo!A*z=WP(xu1KfjuAX0DIxP{hz61-9>%gu+RJjZ@dReanS$IRbBr;8>o9s z>t~QuKV}H=?NA3r89}kIQ1YXT>pOBjxj2~<=P2s7>WV>M)7W@__J`qEI*Pm^o_Ui`5~@)7`=UAIcC z)oARmUVI2Q%0NuW@byGceMtd=#bF93!qk<>OdN*SHW$-a!>-@((PyuvM_wI8DaNqP z!*>ktR)#e>uWNctZU|<+VAX89a(1bfLB1__n>#UqbG)%s5u2BBSzh!F`(a?h(gxDc zVi52^>p!p*KUQfmn=D5UF@G~- zV2}L4rdq%m|7zJ9y3y&NS5kJ-VCu&MKYh06VNwc^@bRCRa_UZi&>JY__;(Llq(;CpS8i$%7z zFdm1WW6)NIV$B9Fi^Qc*;;0lHksn@H=5ss0#>3mf?1CcV=r6fF&rDVL`;uzekLnIiK>vMu2WkCO}}a zdZ=upXc$blALjt#`4hS5FQ-xC&}k2Iw)wYzQCwN^Zx!wpSoMb&qFy|;=(Z65d)jq9 zIdtPR@8n%Nbo^i%Q)F8!DdO}$QF#6BozlA9<%mpU_;#TwYHGkgvaCi{pxxkCIsE#8 z|2g>ojlxfF(D_|Ale{NLpv{z>fFLAQ1f}%PKo(}{8+l>rs^|~n&xorN7F+?$@z*Fa zr<0D2j)$vJ`2Tx>@$#CI1FOf3o zt%`!3Cv#!g$G35#z_}lpnb{?Wb?Slg_&k}7|um9`sm z$@_P2np}pcDsd)BYWAwDE&U!|1H%{fUTO0_^94^i{Ho&>L`1VbdPqrC2q=lVxxG>)_8k?0p= z&msH+miCDja5;*JOi9L)ep}v~gb3ZaYmHT+?BmsIR zl?-@W{jsY+b9dfIl9$w@6R=io8=XWUPd$ zOP4E^B=1vtn5~9D7N)TTX0CN&RG)15*L5?&SE)$1_j()eW8rb2MMaUagWKb!{g@v< zeOd4RqF7>xe2O1pYiXu%gvfVNO3o^=(L?%flac30F{X%Zp#A~pWM3jjkACSa@YCn~ zp)2E0mP!x6N!>TXCL>~N6AomY8{d|*LxS91B>AUo+)RD7aNnbBMQmo4H#-LRXpMwO>Rb!kSA 
zMP8B12x_i{_w}~vI1)}?Nf*=vh8K>oE6K`K7~wu7S?@jLCwZo$`U8<|w~ zz#8INAYp5ot@3Gd3UML(BxaY$q76Gu15I?p1^$%PG@QVC+vypm3eIU&GQfx!>F?MX z@k&nC1Z?6>E+V*Cj!u@)O&pSdFOc5EeFS1fW@|=oE&)CNC~J%~4eq{2|4P$5@yPwLFdd_8E_r1OpPZ{(M%Me3(jQ6MBr332P zM55eZ!d1=)IGLG^k7K_CZ8Kf-s%ntfZ3)aI1zVztL+OH||iSf}qx+vT*q zS0#f@6=cT=e4%+ld^)0^rB@@jeEBxihbv1AcH2-#S@~QUFUgxP(ZU$rLGV4EL}SFV zV`-H=B1J?GVfK{Gf<3xjJo=08pWz^izEcaDO3$=IbORrV_=*rL8TDNi1ZcIFtoN-zG7-( zFwO6VaEy(;_v0x~@~j4iX#HCx_ycN4 zu(0M6=>4sv%h|%a=g6AHJRtk7&u8FhB@OFQZ*EFld!yfWinFIDtY%w>xl&gnSP;aEyiuZ411=zdvMNGPI@_+-73+ zq7)p={_^%uhv*_xIlRf{ZoX)hRCJ(Qw`~F>sxNKsx{+7aU%Zd>D-h|^Dif`@GML{Qoob^)@mGtswE7ZT^p;eYYwIEtImbN1QsmNN|ZQeLHDZ$>(MHu$I*G zLCMKb>5X5ZgjXo0%Wcb886q!8icX`R8qZ?x9?Og+o2&H~SSrGaCn5zNw{TYVMs4l& z>qCW(8lL0I2%w@6n1ag}rG-jH*sQ8mX*qn%JOn;xJcrl8nvI^MRL1c4K`QK_K&raAy*cq z?qB1|=pD;^Ca=S=&w*dR8o;4~d>FA!r;6V}iVNi>+ulerQMNLwsUDVu=l5dMmDy>2 z?B?pAS8(!@Wc$>>%tfk_rf$;Ab0Dri`!l^Zj?6N$_k< zRgLv~?Re%d?z$OEU+vp$EfcmVSj{yr@+O4a+rHnvnAm|IqhkFvg<%~*cWvz>Htes> zw_Ra4u0}h0^3{VVD9_m`@=X?B`VxMdAi3;`^!7kUBhiN<3JQ8M2(Rd%EpmeO9XV~#$US6W5T~$3j1wzKfgYIqTDLn zn6l!_`!P6&Adq6$NGrxvx%)2$50jEA!IK$fX;XHQKRzw~^ZqAb(hIH)VSsv%U_|%% z&|hZ8*oMKGnoO>N@9OdxEqTh>V*P*?EVy@R*WiSc_zK|ojTsf)j9J$Z1}oay2o$Nr z538jb2_UJ~TPalYi>djaM=zH!yc`hqjqn>SVcZwcx3}nPs)En-*`p00sD&HA`&Q+H zF*taOpZC_?&qP6`1I

    3O(upO(14kJzEUw=t*o`YOB*Mr|~ROZowTv3`g*GV5vno z^}Nm&i=eWvx(WCG%ri^uh1pO0lI+KzOZz7owYM5ZxlrvzPU-?oD_pC!=s9B#d-SsD z`4Pfyc{ZtWZ|RrX8*){Jzu0B(-2Z`;-k$Zxtb&EywY20m1{HVFAdFSC`$6B-h$RHJzIQ6 zs{m>M?QSO29QkHFmHDk{Qfk{TcVG#CIR83D19@FpyqiCK>6cY-XQC%WZH57kNZ?_5 z*1x>y75jw)Yk|NLiB0^4VC11f|OVdG~A&y^+=4=DYdHpG>JFF2=)7fXiW4zpfWW)s-eVk z-hk;O4))#^#U(CyFqH*cZHR5JMKJGe*O$J>d6x5%|g0_ zEL0f~#BV;10N-Bjx+(T(H*`Frzt}ZZCnCN*r|+vf=Rxgl_m~4stSksehmJ zZ4HMNdM%RPLnvC={odh`iq}V@Yw+;mva~ijl#ORmJ8WjwH4t*SKHwHLq00*;6bX|=>cjk@_`DR75y z>Ir0Pfn>>o0m`xoC1Or0t!G0D&1S{R%DW`e>smx2K zv3uVdy6Fr|W%Dp%Xfm1`Kmd|cFPE4Z4f{0A`Mp;n4Ao`-F=ZV0hZ*}yX8(a45_C2#WM&ehvQiMveHtl#_i%u6FZzR4SO`BNc}RqsJ1{I#~q93*j-Xz|vw?W@+2! zhXpwQW1VdM3FN(ti8hCh24zsWc~Mm!ur=@sR7+6K(TBRJ9r@Su%auJLc{`*!u%db! zouW>ie=}}!ngVajG5^g~6(caeP_`!M&rS=K5|3JTY3jP7Rjs=Q*DgiGN}kxSa~y}s zL_8g#Fr^ksu-!)SR9#CwG@W-8(JDz%{Pc<(5W5|H7?QW9xUTJW(OxeHBNpk{1Ul(+ z(kV6GIi;f(!Z${;ZyVk9+_6DOqk0*Xru)-X0za@U0sb=+fl_oSbch*eXfUXsS&i|J zDxZm==Qk~ue%xH^Zng8x+)BR{mSK%NP6RQ0d!>-;BGE5=kKPvEgp;<=l9T=TWgGY0 z&E^nx$jsn3+OZgUT~O|GyD>|kXAyLAXHu1?RW(HGHk60al5?yIz?%BRyNWt5_J~sylKNxV6oUm0MQ-#8;M-~q0Y{&_u1i-Ti{;eEoo-;H zE&mcOy;Uq25OjZ(gJ&1%Pp<995!6$-#`sslE6k;|yBN z&e_#}U}7c7-bPNN?yqF5G|_ESevm-R02Dz5Oh8aYpV>;oKQ=^L$J*v=5W1Nu`os0# zEEu&dfY*372&8!T9A0Q5tH}T~8CwdVw@>pUOYZVq-sp^onqR!1(_7D$@qQ;?T}|U7 zl-)|{^HV{S*-JVf#k9vzat&(l_srL4i*)-dXe!H>BimMivR7H(>y%`xN{d=&-pH5? 
zqVXOPpfdgCRL&;&S$PCWdxEmy3iyWR%LwmS7u+$cBfi3RshF@$;Rg2?K&YD{>FoBh zkyVN51ko?K^@wS%3Kt3#d})`H#q$Nf%~yQ%ssXi{$y}S1|Kd+(e5d6MJj!&C@i__f z-`=qy9fJ*D?9&t2b&|imX9uBbC(w(-UR+*CuVC9o3XoR*=>1DS)V%|Lv{M@;nR9ih zI#BQ+ct|407*T}J)0oP89X;=EO%M3|#9G-+&J&NR8Fq2KI3ZlNu+#};Pa!&el)4Xt zs@a!5|5zsSw^BQE#@lC`mrd$8gK7>g;IDtl9( zLmut?6nOz7W^D37Rrm1zrbZ*J{gHZ2(M9W0>TJUeIiH8Z%PWg>yV|ZaG(9bfWNi*F zt7;78l>RET>KS{N!oc*#qDXYJFZ56#4g~yOh|Z7StHzL@Hm2XCTI@gNCu~$<11SB| zSr!A+>p#NR_HxFgOU83{$RbyS#n~knqwVJ3{huVan+Y zQ(=rIhKu4wN(ZX=+Zy%pqCuMz?1a697#u>OSr?}8=aScDEaZ4Ekbg-K{WF%S*Jz+w*8K49fj_q)JeAqtTGSp_9>Ek!a1Wo; zFLiw-D&>oAZ&-)H^X%scIX!?kbsijW7;+noBuMSUhslNVZh^XWX&bOyD9lFuD`QI= zpp6hH=E4B#u`@Er#Vv%LrOL-$T)n$LPZ+V_=+!}33GNAlT?+rqxwX4aq8*0Ulj{(n zRYUPyvCa7fXw9Mxf%}TmnCs-eLhIr61Ddv2Q&_!rh6#F@>5C`J`nNwZ8#bDH{rn8K zMe{+W9Yz~zh^}#wn~(-9h@MI+9h8UC))0vp?F|2djkTidj*&7UMAd!dWR9|+%W%{? zBTANjd4nICB~6hURga38AXM$8r{?^r)Zk_GWg@oH(VNRrM5OFU?v)NiROeSnm=DK@ z;U3mk?>A)4nGUntCd>se?_?&!NQ%Dce}Po&S9GDE$?KFZ)J;8kxul%%PrM~YxD%#g zeR>VbnC<$XqbF?tgbgkc<+^@7)^bYe$Ga5@CA*I zJ8R8P_UOZ_z$PWsD?vOh!ssj6C#_sN?Q7Zx3#+71>C|L6+~<+W*w4i3blb!XW2kWN z8YgQf0~ZjN!cwr-_E%XJgL-5i4Hl^kv~R|uM8{8Zj|5F$NoAEh&Z_qP*KVT3+i#J> zX*47rR2-UdL?{wlhvujwo#I~gBbwEhFBAeVOaU-Q&U3RG3;3cM3~uR<;MHB0 zLR6~7G6Q!MqW+0lq#hw4{x1^?d8H+C0y(UcW!_7>aCg(HpB%ScS}xdHZq>1_zBD+5 zcsdI)Y?Cbt!m{kEmqpkL_#xr#bMlABrQ5+@T9YWqWtdEuUo;-WANMP0Pq2X^-#cG~U#@^qtHBGr~ zEZoUOP106IIWC*UkpE~T%#`py@}a&V{qH~s@w7OKOu^5)&;qqRMPu==4oKqpoqn1B zibdln+#;Mu!ZV?sy`ktei>1Rur%7*4;h>R&`C>|ac#+|C+>>pWRnsxT22PgJWnCdH z5uPge8z-h5#JPgXxA;!>Kz4D^Em*&|>`qF$4XbeCn?o~}q@V!9x-|R!xUZagd21(gGZ{)~R@p>yC9QM*{%xxKLfM+{;oNDUykt^^ z7?+4lt=@=~$r-XrbNz^t{1?2_llJI)ouIs=8`3Eg-gwGN_Lt9~?c6y3aio&bjJ%kb z6JIoNNM)fWIt>*X&JjZfr`6Eu_o=y{QeP>FFfeHv%0A=h#QOcaHVY2*GUCcJl|DM2 z|4$xnL-X`XNq}cO_24*E#9;c%;TG`O)%0^|{40h&=4itRuG3y>b;d9=Rs%1&>8EnR z5t?DMf3aoiKk|xgdCyeF^NGdR&m;xhHf>H#PY2|oy>uLmpr4ph**jvuU?@IJvdf$M z(Uk3inDL2VX>`n{;{>THCfy8lJ^?XoW{0pWCokU||IC1k2Z_*y<@c|yU`W=<7R7O{ 
z&cDVLqhlU5`!Q9&&F~yQ?N#ltxq_=>YF11jj2LwGGfPgSp9qQvw5Sld4<7%-N%4?$ zBfu|Fl=C?Cf%+-ac0#yE66RMH60lvk_V&Gl05jHp=+uPt!Qv_tRo89#2tdKufxDlA zZd!PpCItd7xi30H56vpKDPzoWI4z!NTfim+)l6lYP*4Hdo_?0J5vA*y`5D(04i&^Z zNFBw5nItkx!f4}}WGydnDvlwE${3(O6_O&i2`&>dL99dxE!}I|F+%U27?7khoOnBG z`t2zRXm(Gqc{>i>7NRTTYf!Fx=Hwz&&_$fR?egH(>Dv2-vRdO8Vpv(O*bG)}tB3tm zVpVjQWE(YA%8P*~Hc5kUB$-xE0(J`gh)qN`iLL)AWG8(N(-E<9%Iq1x5pN0MsBWM-AXM zKU&ie*JTVH(ksMJ4##R`LB*i7Q)(jZc!n5l_kUVSHHL62o@0c<#F-d%SK>d>!2LZd zGX#s>S$~G*2{~#Q&BTDKhZGhWNbK)1XH12S#Fr(GBF{)IUyWGZ+A@$7(I!K2w%4Es zP0zBZX_FjpB@fVKWv?@wjXsAH zf=qVQ`P=pybJk7^HyD9YKw8ZVIA|2&XclplfzO&{;Toc~!TteO>BNOj?+YgI-wL*9)<=WNqL@U;FpIRGE$c~Ok1HC~zjIsZx zd<4r9xDZw9sVu^9(&u$Fczh&?N@(%iRwS*~5!4dSbnR~|_l+9}nd7*HOm&ys< zg_b9P0Nr4CFxSg6=(ZTP2*ugu(IUR>+^#5+O=Nk4kYFt|CpKw{LWw8DavZXmBF1%5-gAqOGb1{cGX8>GdzNg zsLXZ@7J%c1_VW8!8_7#dl@@M@9?ohE1t-N*D3>UU(%>Xb5en1R*A--p)Y&$y|SE;cg=t75PM9@yDofR!HJ2=oA9#hxwto6oG)0o2?5V&awg016%of}h-v=_YeF7Z z-YOgA8Z3cRET;%HsY#oPqlO8Bc*I-%MXcxrX~-h|mc7oJf`s*y-&H#)WAQ%E&ED&8 zZ^rJg;l@g{1gY%XWVJSH&|+pcO2ZUW-jn3*DMA(tJTN*wp7@?qf8$yDfCOpdOIh5z zKm;p}_{KcM#V*ezKzHL8bN{($*2o-y>Mn$rRe9yb&8<_C^wXD3Ii0OzQ`QeI;)+SF zd1OZ^CHwJizNR_^DgL>o0T7)&t1R}QbC zbKHV{9&zPKsOk?|u_3-0!5VZWKK&!l$vTlIkmR4~{h5|$c}Jt<_iL%(AvJV`&VIr< znzLlrD)TJG@aFWX4kXd+nP8G=sm*xRi=NF1i7FLUcSaC^;5Y35{`afP09v@_x2$;* zvk+*jOld;TIU(V$%o7Ge(T=6+%rnAGO5-i5Y{HIMpp~M(xMn`B4}6j%e(D`N2cO7# z9kNq@UX_K&1Gga0>|(OTeCg$fBEgUZ5-XP|9R?$6-j#;JRt|&D_piBuBkXU^$feR} zkL=z-TQ;}n{7FtiNirim!VcgxH3e*5;okiu-U7$9?ija__iD>$UT;ZsdOngXYf#Y{ zh=C9$#c^oU7FQB#*bE=iY78&bc}|-O7S9S}ex&utL;H(=P_aR#Zk2hU--SRPvcuh| zt{<63$`G0)tEy0Ga7~h_Bcv%EcR(EotdU_|Rsq#`HR#968*ke9m%y8IKs+i0{mann zNzCHOlq$EW<%?M`dL&k}`W3!?j$~srlHiG!@WnV`j#69gA5xTExg`UPfBZjYMed(j zDOvVljE+JkLnG3AP*&x<_{h6LP#n~2KQk8G{BmPn%!yu%nucwUna41yL$yw1%JrGz zexQOcu`K+h;vK!EzSyc^KytPhfMt&eJ*n zd+_`l8KU9*U*`&Es9>d|SHA`EPyCHQm|} z@>=8X8?@(6&aK0>ae@(pA&+;11|PcDh)V1YSuMBI-5jYv*s_eF)79^0r)B+BTQyHF zwy>Hm8;LqE0zFJ_94$4wi+~AXrN(*_aCj~amG&h~oHz0f{>eRY>MfqxKCmyMYyGw5 
zAt5UPMT=T9Qq$bAo5z)%mmIw#V*u~ZkIb(ZQqJNch6=S`?d5i8p0;@=`wH|Qzg4RB zL;B%<*3o1faT|5Y;gwzYOYq0_P`ZE#CHgFKebp1M9r{!uF;$ATw>$o6bv4gY7=BQW zTQY&9;-lY5(lsplwDuccq=5$?s1Ye^Ct$!z<0LXbC%A$ z(eCMSlyWX-SrHpQIPNE3+}SSs{;6`NjA?}8yt359A0rij_$>$jqUXC0%`{?(-Zs13 zn%s110SUmi4~4tdPlH?yM${#>P2pnTa}&LlR76;0Vc z^aA_ACMEqzm#1I1p5`R-eOSM^o<^ zpUM-^O{kj0k&9V}id0wtSa@GTi`3X@xeSx_R_|KfbE3=_9@IO>t5TFVmbDkj|qIj2a!g=CPgE=26|M@&Yf zxr{{7^sHK9VR~DcaAo;*tVq076@6={wq^WFasiEox<*8M%(@+12nYnAcVjsCBiB;V@^3KIojXEUam)=-V1Mv3CS>kkgT`+fuvspVdF+^fyHP$X> z<69CiY7q3I4T3e-PdB$6wz8t2O12T-2#-I|>DuQAKAK!mz=wA`X88vE?DZ7S#f|uq z>Grb&Ab4VQ8hOl^8op#=R3O{tqA@HM@>nH-DuUt^1b@;Pt6ZI5gDX*(Djq=?rswOT z*NT@|sf>8tymn z=Z#x49b=Fs75gm;D{sr|!r(;|Q$!gSzCv;0;RDZ7rlt6u+#fe6Lq=|3sCx!2C$u=XSc!&Vx(}hO zs!VU|DX9RZ_+f8bfBO9C`JjFv$KGrcQ9Q9ostAq946(~`*rHE*PRF7_5T_Y+sWK?f zaoi}pENyReK``sWLqeF2$BJF1^*d?`F9{yv_Cv0iVGG}1h)>3siNeeWAO>FTekPw9zo~@Q)7P|PXmC?% z3>bv6ql92k$B#XvbHk_Ar5oCjQtnkfv`<#L$ z^|uVlF_LJS($YPf!Z2dwCn?uqBK}r!3B@jt`b4-A9pl6P?CsADB;gzdPihqaW-x3) z{)I`dhB1{g2D!0EYf>&scPt6tc-u-}fbw1u^D_9~h`!mM7Iz(zgJOQ!MA*<>J2Wj# zAAL%`+J5Nn*#|eH`pq1izhA=_r^6#p6yBT2d?#C&3QIiQ^sx2NY^fJ52}TIvuiGfT zV3Ns2VXA=lvroz(et#R5qz=vQQcfyD;S;>!Gu?+%aTW|T$A9Zb@L=rh-wru#D>Xn^P5~-_<~nouJKH>Ffq~TnjScv)#GHOAj$2y6?*-M^ z4T>5L!-*P)z$x0`=V1fdzp|b^{-7j~du z7Z|=#n*hbLXI5xcA(LDB0Ag|agU5>Dh_2Au-$NtH6Dnat0&R!b1=8v)j;OIRQuxs( zICAX0Ymo{TSP+LJ7w%Cp#=F8Aa2P#>g}^l03O8kwEe8;nwLhqW5y&2@?bf@_fW!# zI^*^2xsgdwt#0hkLDo2C3URx2zBpQS^(u+NODnnnsPz-jaQ>LVXAo5o@km|pCUv19 zdt(T{P$=B7+?-PXfEvAITAN2km4eJ`>Cbbm@so5QX-c8q6n#Bs+oRpx#+=ix)wn5k z!Ei9L{+hPXXWjvPv*?Zl6$qF$q<3xQMX`4n_ zU;K1U?kx^qO1e|nH_s8mW<-St)Rzv5!voLyP@RBZWy7w~#>gr+^z{*wSvJy*l?V;5 zmuMf&CGenCmc*C(4l${Q&VQoBv#mPgE*$%}YGWqR9MQio64-#}kk1$o1n;rs{H%JdWGfo<22GzYYiQei7fBF12A5-= zW9VN6`VlkfyPY3DLPx1lg`pX z>*4mM&i$#N#`?L9U}h|Ug(NFh6rQEVS@PofhMHKmjT`>zWaRLu2%ouUgGp>M%ZvUG zNZ(_cLb)KcmkL#7II)O;Z{u6CW`Z;0&WAGucu8KjrweJWme=RoR(YZ(*y*jRjUH-~ zr78|gi{mg3;~kV3kdv}4I%R$F<;0JT%EX=n$7@!H!8g*wKFe358B4+VH(Zi;{UVZb 
zeMFoh%GI5!F)ZIxm65B-$Cdr9Is~sANUT{y$3z zCCh4Zp|j%rc`qTZdBwIg^EMYnQgEm+W@uCz+^t`XS4-)1qL@a1jx;wqR}4iof>VN1 zE^MWpYmS*8RhB=n+Zy+T5#ORe63yl?U&(0R^3~8LSR@Qj$u_URNkwHz>h1ML ztk3ER({5DN7@ta+Hrwz-8xZ;H6{h;U{cZ zY5-%lC( z>yX^G``x*iok;TM9R%;77T^97CC`SRYNhuT{TYC@nUlj6!ec}dOrbaxsjOL|d?;_L zaEPmihE?wO+7mAc7sQE!Fi@JyUzX%x5z$kb+6E${TeGrU4r57@Moa4#{^j*)U)8AT zYVeTd7A_N*%v_t1l{5rOlVfd2w)9#~kEjU#xrQW}_BeJjQnqD~WpwG=R;^iMiavB! zRV3hwjl%oiEy{GMFG4?_6S$1AMs)FN!g}aFF!@*4^_{!5c4}t1l5S7RkV&D9w~b%j zJOAY`=fIhpyNH-_OCv|c2lVRi&K&!l-0-Hi^n=|uFMLGKNgtKZQU>txPa2M&T?juhQ>thnE&>!#SfPZRVA&{aGf40#k(`lEDwk9S$Utf>und#EyVh(h!lZWRnLI;Esc9LKuvD%UP$_h7d+tyH2SVkGGP!-+rcahq_}6|w+{YbIT+JIL%Z&J z(HIrf;Y_OK9q&=HNk+vvb}JQ3?vP34q+{tSd{Z)J^1_dK?Z#N zo;p`7L%pZ4|BtG-42vpkyM|@x?nb&ph8jwcF6j<|8M?cqhVJf^lvX;Vq&r1m=nj!a zQsNu$`+1)4cz?|N+2~50E1p2+xevz=?xGGG6D> zO~|4fi=L};I!HR7DG2JMs-^+>LZXAgSMg|3$iwY8wL7($n{@yQ%L!JDA=mhajOk)yW3AzneMaO?jms0M0mV&9P{t2ge{B)Ux7 zu=J+Y*L+^{`6@hpg&0SLA)@cTem?(UM-{_h`0c!e6PPh-GJ>A+xRgA)(jwfXUr4mO zVxNVe)67&LFc@t!*SUk&uRqrWTCuSB#ybfKL7H~RwaNpglQJM};3(sYGjYF9c^mt; z60KUejbc8d4BV+NZiv* z$Gu$xqm#l6wG-R;y5R~7obA=vnwgMQFhl$xNzKt_kf6U^0jGL}^1*2QHNzrVY)L!} zk;OFiQt3!NA_>Ysq$ol^^krmu?pBiv;7kFtjR*2BPYrt~e%(#drW>P09>ee0d@){+ z@CRsqI*en5;=e*-sV*0uae_zvYd+9herXBI!5ZJX*G=D4fJk0#PIUlrHi*w`KX5jjw+aNB$#xKlg-5U-MUVnKKSUXG9V%4lLNsiqUG3$q#?BN zikAlx|7zW&p|pM;=I?@SkU#M(r(Y?TK5tcPM5wi_TYLH@o{YP%FA=Ik zbq`McdNs=U)1aU4aH^IO2+Kt$D3Wso)Wx#6X6buL1>_3-)QbigbROPa8@Vu=0IwER z$@Oot?jcWic?5V9M~Gf| zu`Fg(;q?}EjuA3FnZJmWkT7L3tti~s9qV!qPZtb%VKma|36vEwav_S~UVmLWDWJ!= zkSJ&Ahz8cTiVzv7Lh{ndIadHIq{Nt<51%H9ijuX2el^KFG(WO0sjLWtUkmlCA*Y$b zgcWu~P!wHozjeRHPFiyV-ynhl0F10SP661k0cnGh1LHO}+Lz4AG6g?~PF@;mOprKt zB$Sm%)SvlMgO{y(ySQs>X6nFhAH4 za!%~d0$v{Xi2L84cY`*2<5m&QXlU(O^ixnoYgRDcF0QM4Li(L#Ju_dC5tmtQuNgj( zwKXsttp^VfWH)=E-hT23?O?IY(wbmxVv-!d&>?t5ypJN?Q#NVhzfkvbHWU8J@9U>O zR+>iC{V^j@9&ZP*7B@O)6e$67eP6HYl)<1G#A@-H0W9o>7SUr{!wC!-edy8U6xxjL zon9EzI!l7U@9_sw+*flmpWqw22{I3XK1kp3bI&vrSEeeu8M1XhEMvHeWld7}SQfrO 
zboy%6%>R+}oJIk`C4Hb#wl^^+VQIbZcysQ>3-tGP_XOpiII2+rq6vy#0{>o*9fZQD z(r1LD+0~hDJiH&M@1K&A`tdj)5zA-72Ud>Cwd*U9Nq%LmNt5zv%D0gzKw5qntA-x7 z6Z&ZEUVqbVl}6{Gv_jBWljwLxk_PvFFl7^}Fq88LHq>!bz^iX_4@Oh(eOT#iXa5K5 zps6=SivUG+VSIq-2lI>|-#epa-+W^UK6xzJgdyTye=) zr>IVC<$wIQJkBQaVKJb0?X$o`=UgUz3oDwC)CQTNCkH)~&Jz6QnTi5~+~eKd?_t2` zUMB-*Zs0#UO(rzAz{kh>CLSyeHDgvnyr6H{0Cy$Q~NTTIa7&l6G#V7nqtz6+$t2s3-I@1R z*{k&G`0XL%3$r6OfR-y$$+{~$CEVH+KZZONZRz*XEj@jh&_d`APNoG>#yGen7=k+l zt8mS_O0%-%(h|5!IsS^zZ^0|vWlFN<2*ED(FKl->|7A|yp7WuD^cU*7lo}Jb&b60~ zs5+TdqdsPCc(DlHG|JffCa#ArN^IGh=hJUIY1># zR*8>5{-`^S&G3LZRvbSj&nFeX>`uuT?pIEE@=4poCC*(^U?c5hd!9CP;X(k3;yGy% z$sy+wHEVuM5nhxJD^gBWT9vvPiaS|Ew8mrBrikpk6<~fIN4%0&3PL^&xkd!>(dtEi zRt!M>C8VcW4#7A4UI6ddCxS{3msBNfUi!XcR1HNV19db600aA5v67;%skEzQoWYD) z9MXl5A98iA%@qKx41bKMSLod^@UFtUq_@PYJPCLbVXhP;J!2?im!swScZiR8j-i%H zSZw>!>3wmj?3vBGOp@f~y?DR|dRXjV(wfR`1KRM{zK9sT*o=}EuPF)mscad7eQyEGs3Dh};Ub^?k={M7s4jU7626c3DwFQxKBYdjxpQw0+?5&Fun~;AM;Vtpr^4q~ zPSG6ptEP24@6LI6n7+ODcpjO8kXrW~tkB5TwdeOGqk5x$$s8EJuh_E{uqa;J$ zS3-ghO?=Lm7_zD!(w$b5(vHPAt4o@{uP7=Ot2E-*87aZb2{bg2Fu^^Oh> zNB_@jCz7u4z!L&I}1Zb=3J%ZiBW?<5Fh7U(4Vlc692)}{*a z=)E1dAlq})kJ2yXjl7^T=rKdBN&R;HOwY@RQk}Xqc@5HKuFniQ^S<7B3@myf?2JMp zIf=?MI7Nkl(%KZ{UA^Hc_-}9uwZ+s+-bl+Jt%^shV-S*hQ*s^d7lJ zDeLb@OncDfk@);fh;$oigFap%h?0IU*wh%yj1r9-H$xW>KA^tmI@YqmjTO}u@&5jU z%)h5Eg7JPiYa@od%%aBF#Q7uItc(_8e2P_Ap|^wFv}_k=1a z!v?!y_0LuXzG@(8*LkKnrQZ_uTe1}G=D0=EW&KH@6edox`IjxnxKdrmLih<<<=dDG z64V`X6)E&8Vu)cuZqWB3t!n~b_TeL-bBvtFl*{mZt#VWU|J3-B1}C6xss~n>9gQhz zu(JcOX1_O=_=64ukJncXiTq$wuys_#8MoluD4|Hc0J!NTFscRYCmF8H%5;#xbgKrUMI8XFrGeGUcRO!%U+D#9&{!(bXYlGK6!wW*axC1bL=P+S>QEGc2 z(AA<%A>49GUuBD~ill4nF9)jfi!~QZcze=v58U#y^N3NY&Lmr97OBYCL)YvdjMjN_ zVR!w@ovW&uL??}27k+(?$R)w7rg{p*qN+J=Q6CZXq<>C=|17MU{@JOnd8U6zuAwWt zm^gc=^i%4tnmy`H|LRcpXM9Z+Ps~(>9t_)I_^SFfy7wxU4tXO#!?aXQ_$lhCZ6MO~@-ntOp z+p{$ELymSk0?F;gvVI8By>(Q|oazIxLo;a&W;}$&ttg!wr-35K=y8+gSSsW0lT)V8`^s^&MF&t*;3(RbHgloqoImM zHCMWcUC--_MplI#(}o@)(SI`%n%z&w$DE=kE+2xb&f^S3`r}(OXpJq;U4G^RB@>Du 
zk~lbpoB#Uw_i+&9%TgrnhA@{#rEkHl57EQX7qIt}NEr-~aC;i20VbBQ=oPsfUeJMtm8-Jgs>$X zk(0201gz`@z>#z88D8A(coJbVwdwVx4{Cl{->hx3q10W$4l%+L^8ke*x=?vdMo$3F$@%)7uzFv1~ zTZEQ4Z|?5?4UFxi0lS5tu&TEbc_@#*fhj#vUS;icqFPq+U|IgT9Xhq$11t^XZT~hz z406|K_S7D5+*}B90=80zBh9B|Bqv1inTo3XKhkvrDPkejgjomni(lvk~vKq1On@vA%r%Lq1yxFJr>XjY^yj*Nq5KNMXU_42k zDdQWfDb*4UI&9G%n;y@$cVhj#jI_#pK`{irVEJj!t$~%)$ zMU{V5q-@{9)!8l;HCV`tR0LNXo0?veaEu@HeP>5L76PxJ0x{wYAApY?18!?t#39yC zMqxn`2PX$Z;BGHzGjJ(*5N?R1usExsdgIvk1;3r0Is0krzkbN)R|(PnBAZ!1lNQfe z9sknvIXwB;tZG?^QyDE+38ZK&}4tFw>a zSU*x`Z#g2$H)cP6@kB6yOs!yRm9NG;zt_yN)I8+?Oxz#qsHfz?!&l%N8kH1c%`1iwWEZ z2Yjbb2qrQB!DMV4t=}7<+vHz+4WrtNH+~aNF5}cbaE)R;8fW6aj!Z*X~j0 zlLvKXKloDoG!T;Z34uOqF1=Ri`D^I8_K{lWjlMQEOc_~27930`0cGEj3-NTttQRX#6YrO| z5UAt~dKXKWQ$Z$wzb-M)%V8dgy}lRE-976Rif>?d4CE=NxVY&0&=hS&anqEtLrTWR zTYHC1-D@58Os$|z6FG|rC6Z+(6TYkb7Ae9WAuhx!ey8HqmF~pYdAYJGr{ZAcC><~g%2Ntgs+qQ+- zeit>B`zGubjT8g<>4=A*I}^sSM@i+0o3t8f0vE&M%ca4QEYS5sb!IZRvYLbriDY}f#P2<=U}dq=K2S9 z_vRcM8DfB;bDiq7w?igLZx|F$$KqI3wDSvmTtTJ%O39(SMr5X^?VtX1>^ZN@X+Q4u zDl}s6M^&W5JNOlFTMbo@-u1>RY%(|WLhQOIb5`TO*a5Rmj9>3LIrOF~_j37d%EGgP zSP06LX0C3o^*rCH+<#!n(+VN@O|4AG3Cpcc$IQ;e58264+vuZQ$%p>O0?K<*q0s5f z{`a4i+S}x`4QfB>eJ9<+{7-GG5-F%t7Pxd6~#vy$jv9O{ z%)=8>883ZA7+0;<@*|z1ug33wpF3pUzgD5g}>W#V;$ptnG0k+bcX1FL+0#FhUYq9>oK`(Rq&+fygfU*9^&8&904Ne-=NCmmJX|>*7 z|DD7!F1~ntU=^)t>5XODq3ryoI=Ug*obwp`F||B+!R|vb$cQZ77~f?NI!CJ$_xZTy zUayQo&KuqaGa}Z&AX7K~roF$^WxOb0ZBs#cf-W4EXj2O7aTNL1>li;{DaLDk^_QC2 zJ)6}f*CsUEgY~M#0A3n@QE6-H_4dA`)@E(E?mrinhShg(;t4PKazwNT1g++#OXo*a znh*}dD?BfbZEV1HDAE};H=W)#n67Y($Mb9IW>|V!Gs!%V;Xe>rKFS$ryOaSgI)Yk8 zhQ2~qT0oHx^F6l;Pdhrol6#;a*|(!*h(ylSsUke~edyhiAhabHfNx_r7;I&-g0{6!uxF$C3oW&xF@-;_|ZY=*N&4XMV?2L zi9J-rIXtV){_db8gfg@vb(n+rzW2-w8Yqr^84oed)+1>ohHC{oX0gfSl$-;I){bT2 z+R3t0yHGm3DlP;dJ5`P_UNL@K-&nQ2{9Lym}>m?UhIl!v*efkYcmeu3Op_FO-gmCYa+%xTYX_#uFV|9jsyAPJW z6IQVH2j?W+I%>?80nEVVg2BUA)w<+}4skN8vqK7V{R`64+D^_#j&{@BHS{^WeDAGwvbu6Mu1z8 
z*BOCRaB@FL?pGN6!vLTUArfBBio#!7LgA9G93E(*qWJ==Y9I#@DMe2mQs59(+?{cp8hjT#AimQ-l*6YkM>Ji%)rmVKA2B2=X60+}!`n9eIwQ#g)WevaTKbd@NTHiW6xl z#|d4bGP|3YvtU-T|4tkQheKcOwCNsD{o7F{BC(3LQYGdLxuZ8^l!4^WpYU8INYmvx z!Wu@G*FFs>tYMH_Pv2>(YF|v%;p1N9&zBQj9qS%y!8;!d;_{uuuck z8o&pej4kZ_ye96a6kNg@-*dQ`ZwjBvI0R(x5p_#@kF5H0Pr~{LzDQ=| zipMQlKHZ%qLX+K4(3x8=zQqx^1UE+pL%BDR?Qqd*zP?P8Nb) z<2sg1dC~@=XhfzjyoQwf|1mXJC3`-8MzG8Zi1Fs(N-y+fo64`e{~A||LmQw- z>eaD6%ar&6I-E9T4HcKbP~Li$6%Yz-Gt=nzl7h13|0gX^8#O;2^_NdjO+p~Lr0}@d>bn5ILLjGMNn>I<1?cgDYXQ>}nyd7w)Y|A-#C@ol1&aV?AsV9! z)_PfKDcaZn?QuRCIpw3LHs%tkE`TP;?*dyW?n48+E{&&YBka(TB=~&1IrcwaE&lq z;<8ZXa?NOzEx>a`mXndUa&)3YQ5e=&8Inp~dgL=G?lL^$KL_>HYEKz{=03_NrysUt z4W^L2#>de0Wg$x@X^Cx;I1y2Em7UY-DxPEj82?f(M6QQi?zrb`J&R(9l(5Fo(qy78 z0d}BAJEkv}Wf*nsgn>J54jAa*UD*D|~O`v@K*vA!#)q za+d`K`0l)tonVqv$Tct%Y>Hmnj0_snJ%*m=@?siaV93ydrQCyG?ygBp!l?~HO+vw$RqKRd4&ZS{az&UaOM34gOFAE@=9?o=?Pv%G_8wPi{eN1#ZGCMPR z(_b3>G@Z}(CSIj7Rv4YMfBbQ>^f3L8BBZ$L^{!j!EcylfG(%T45?C;e!g+?BD}WH# z=mondi7WC8U@0y+l~|)Q}-D0{?l+<++0?xxfSac zsnQ>L-GsJ}%uZ3#`QTib$#d1Xx`vynG3=y@McSm)x8Hn6iFKy~nf((XzdC`b^{w`= zf~sjhTfQ1f_4&2ToXyh<0e?}t(42Z5m=cA5^xa6Rl%9%*bOW+pb!U4|1~iv*+5wAH zPh?p{kdkO^UBeHTzID=3vG`(x>FLaSAVT!-VM13DiB&!!Akj&6r@l zfKTvGh@RZL=jZY5RojQUe_v0R{}WCLz+NMYawBGuQhY*NWq;%Qx&h*ARc@ki^UtsD zUlU%{YiL&V&ZCxi3x`I~KnVe@@75Di%$E1&LpsMcJ__`a-Yu}IXz-KHWnAjzB4b8g zF8bWQ4w3s}R~*hp-(%k+RFCAJOBJ@sY7(qmJ3`>ghJ#!bd6Q-t?40b6`VBR0o0dd> z#wi$i?AuXCF4dI|A!WD0FdODo%aS_Mqkbh<0yR}J=g=LHWkZ2W8=5EA_+I81lGm9~ z(W`^;UPLRFhOWoYaJ29ikBij70>_%+x>G z-}5@RuA&nG3C+#HX3V?6bB2}b(zubknYrT9uk06txT;Oxm^1KmpyHrA;TTV3{KP+| zZWoO@WjpC*tOFAwoj}`~EqXW$*^EMSodvahSmdtSeWKJy2dcCn&{*Y`E5z{?wsQ5! 
z^8rg{&SZR4J1xOtqoL|&4asg@#x_{&#-|Gnk=XR&^@w?C z%`hjJ`NuO#wP{$P-G|Q~DUA{92@smlbAIUCWTmTlBm(J?IH2{)L}n>lff4QUU;a#l z%jrw+q2&_#c^0@ne-!c9xzZ`7vtSd> z$w~AFuO}EnUZAsovHzf~<$Sj>002dDyKrvV4V_ zoVbXZAqHdG*z~llXLs_k$nvPCisxWmF-T8LG?VT6OV#|(1*f+!z6QAv=9K)SEy*|& zrWkUHHH!X5^uoZCN_nIFt)&XQg2yjBs_$(GCgXIfZ#|~lfW_oQW<4$OpW@S$qY9>f zqSj*Ff|#Mz=PhrXxtoaLk9n$6(B4h6`9*qFJ=d=d-MPf*YWsj=K8k*eT^Kz{rjz486n>Il@cj!&^$uIM5AIWffsct1gES3~@X z4j-rmDon?;NUp}ZYgl;O+PxN$9ceoVa@l}Us_aYOBTlBIBQz331Ox=wK-QfUNkz0{ z3Zqxp?Rt+mq^f83PQqIt=7jVr08uJ=))y@liu1gQXd05`S$C~*TLCYrUJ)f$?9;6b zdZ@m>#mpO&*B3&whtTL(41Lncy760+s>MSq@l|YP?H5P1v^RFEg$=65*E>TqW z$P=V^!)*{GK^-6jyG#dg!#X&P*XEr?b6Z*Oy7tid=(vl8iNybcj=b zcjL5K5T=djq@&OS#$@80>dYem4fJ6+|D3HFmKl118EsOO{fZS!r6E%WGd}*bU5R5* z@YJCTLbIT6^Fim?KBdOMIvXCdRe3B(2Jk`*>DJywmM{7JlhB7z;QaN)z0JG(I;gfS zh6zgn*>!@)wUCl3kpFe{#=>(vKW1? zj_LQt(5A`FGN~8rTeYJvF;_fV>RNVOWb>;D$X}2WkmFTkeKkB0KF#2O%XV!JOgS3{ zG+0k5($n;b;*2j#T_I3}!q{27e8**qYnY~$=6*oIXC zJO7>f$}5xQhj6(c6P6dGpOV=W>wtfi7O*$?mzmKt5R~;_-{GD&yq~em#I}?*T-B~K zpotj))@rQpZ3ny6iNPS(v|D^fY3Ms#Kzt@&DlUKCI3$yZ-)Q`HiItYmr`xCs@`zKp zy(KG65e%y<%TUGZ3N6aE#mb0LS;-~pVkXX_v4NrF``AP?U&zN?L)dt4xEzN&?cXx{ zk*`|(Tsg)8Gw>CfB6|-XMfdNAYo6k z4hmDt&Q84Qrec^iSL&{tnc$&$sQ>Yik%(arKPMias}m}pZT0f_P|^7|xT-B7@e&)M4ubUGDj?lmr-=|mwSF!sy(tUM1!%i0P59{;g-wwLyN zo1LbyL=}O9IivLBOSSH71|L?OK15KOK=IZgA9a`3ih8%gg{P#`QY@R4^L)XupgTya zuOLG3@hx=5QyWbsQ>JM2`$euu$kWMyZT|t`_)3iFx@t0O+SeN$eI-5FVOhN^iPs*_`cA^!qU??BLUPOd!oJ~&?iza3kZ&3c$su2vK_od1mP$=Chwf&! 
zj!z=&4zNf8lCfYliw7E#(8exA(Pbl}1}6T@3T5cdz^(Sw zj{2;mbuPk=Bpx0%q8SIsO)56}&$^IIvWC20v6&Ow{MuB$^f=km>)q)@Dqk2o_3}5C zB{2Ex4OL10zaV`xXrj|fpWK$8_6s@2J)oq4iB7}i{)+2CQZg}R5l5xun}$l0+>8u1)oo~MiLHkD_m40*`!eZ)UEiRD9&R1+(-=QJ2_matrt@d}M> zmg2*w-0mqs|1}MtIt47#+??-f)M(@v!W*0R40~6|oSh^NggVK_I#G;%TL}AMXnEu* z1cH4xAzSe1*18XyKO1Yr#d@&pq_L==1eN>}6S`q?K&@>xJmdbIMNH9utz}qb&fmY; z2Kz|~#{BYt+H{xnu^JjwN`ycC^n|QiA+)l6du3?D&*gv(chR8IyeSbnwF&>#<8&vZ zlp5inffy?m@+DqW^J^=nrTQV?Ga)60tUouPG7OjGcTyGT%hGhs**dQ1g7F=~vA|W-3C2Lq6(>vdq?<-+ioB zNeM6T_#f=aaCUYLhYCszNoeD>Lxq$}sN6TH$kJ5kCC(5q9{l!Z;n>1wzhK(518mscZz0{3-7-4pBcwJEXMQDfX$qb zyC8H&i`{>teUM;*hYuX~#17z|%P~-0lX083XgG3?To$!MzF8UlgzibE3pT9oUJt&q zi1eJ3S~9#DC&FPp;dN|Dxh>7NkN?RW*t8%;IJ4{c{A=^uR-qrZsHb7PLQ0jz&MV63(AW|m0Lzn zNRc9hRgy&H&qYgO`;vMblqBO9wS%vjbYk%<_h+TmC2?F%OX!fu5jFMr%7cb;swT3y z56cGlBAXdFu_zHcsuZ7G->g_@r3yR^8wpL!Yl!!uJ*jUx zVOC4g!Zc|~Ky_=8Vaj;IS-bQpfoDB+*u9`p22CnfSNX+a?hKsprhTcxFqkO!qn8Lk5hdfX-I1SXio#-}mom5|H|oUxk5 zHtoX7b?Jspo_xbANvBCJE_Yfm&?f<@ij&F5U1lh;)wj@`&%+2b!c;y#b|oc^~BOYm=r!hsxBiloD7R!{u=u4 zT~9IcJ|6yv?4s+RJBGmHCF;*_ZkP;-d|eyea|xC{Yq1k43NqNdi)Xk&Y|q7)|H)GQ z-glZwY+Rx_R_!S)h2&>OFm9z8@n`NVCH!c0f(J}YmA^mJY>MhL|;x~0OBcP`@A<} znByf&9p_Muf5ZJJ@ISPqS>(`gG{ug4GvbEsaiwJAXP)sPd2fN^JKAJb1oyWasW8B< zP)vuSSym@wZ`V222E7qHF1v)(SS zl~0gOKLDsEmWL?YR^j;!%zDMTVRXLoe^P?u`KgmvA0g4Z8jO|c8zK8h>?$FKqkwDs zK?mpF?fmcyg}T`^{+O*^may~n2-7XccbdqQS1tBmy(gMlxtl6Yq5IJOU3XVDZ6`y6 zq*ei@)Z6v6nNFd>uj-OvYEQiNZ#m;+)-zDxZw&)=Qp~1kcKH0Tre$>VKZAK7SVhf& zbxbmJFZ0uAkdxB+n=@G3<;{rhLt5He+RUm4LrYVrtb8p$Ggdp>5?Bdo>MzBDqI;rm zm0F{qklPrOMgT)UG*qnIWLtOxL$Q~=M5$6v@hcu%DAlIbiR<1skDQGn^8O-VORXGV z112PS|MC|#aN|WAKghx>J!K<4MWYA6c9L*#)3or`x3{PR{PbQ~SPC)z_g`N@gGY({ zj;ru=qmo{Iwhe$LCjAL&S=U&5@~ikjn~pm;?BPH$h6lAkSBQf%djo^s*>Ju%*mPDt ze7f9_vuImVFfHsRUkYV!c|6V-)^%$L7dUQr`jUQz-s)o;9PLXb4;Hl3l8a14QAtV$ zmj}Xfe@xW>80b7Xm9P+E=Rc}C(T_H}Q>Yp`pK8aajhCAkXfGy7+4*i<s!#z!b-%HUSJd?u`O7oTo#_+{o=Z1`-+<)GMnK zg(P6vYDa`EwsROd5{PR%7C- z*gv~D6O!)>3@$8foU!TcS?oJ{h`#Wkvk?vOuTHRC}BLF@k^<&24 
z)(bX4!*Mzl9$z{0_sv>vfFg1Zoy; z{GfZC_A2%h-p~02E~fpz2qq@z?f=T!tmqcn{?2Hu17GlWJ{Yr`aGX2k+Q0s_krFTPpDx zDBquKE#D%&Tex`fx6LpPdjm3?*b!eBS-00ax2i<}2S=o- zkp?|iI8LG`5l7l(!BSVz#3i#L9F{CMLs+w(kdG@Dpz>WO?9a=tsX2!%RaYrLfu~)Ip4}uOoeUE+;c%;rmY5isQiF zx>U$xXUBYfoslAiccK+4x}Q`3bzDUE!>D|?oZdWBSae&%m4BK3YcU+Pg1&ij zD@b(gQ}o9sL+{`NzR*Tu$x_;U(B`6j^d~d#ggsHUPP?&&X;FXoegzBpw#`P}q>Kcj zeAa7O`tQYk!j}=3CX5sODoU#c7LKIo)oKP@*Oq>pNiG=J#uFkJ==(q@l+Vr<&p361 zRQ8m{-gfv<5H$#hS)st+Mj9>Gp-^}~6tkKC0DDJ27x6I{*~XWWFjcC=7GlguvGdu5 z+_t-9FE6-$ZZab?i%J$yrUQxy))77ju`RBI^RjB+?&OH?zl#YQecDQ$^xN?9E&YV5 zM(@M3jQtODN(j3YL7=+g_=j*JuAkNHi<^Xggqwv%Hw6>tA62B+g|~#aZoyZz>jyLY zSMf4hnqAE$4~_!^)PrTUxB}tCdN8e`*)C&&wDfPgExJ~TXg(Ez2HAbCch_{LlXx(D z2oYa$ddX$YZ1w$NxPH4$GjmpxvxBo9r6V$ZN=Rr0WC8bl9r_i-F5e$dIN@##HjTuu zeC?#SvwG0qo>tLEFA4ZD_J<_=p?6i60B&E<0#Ooj`s}2%l0MKKKBq~$>0s?Mp&lO-10iioH>lzRRO)SbC^V?;)7sIR#mjy_R_9J2MNSP-$Xj!|N z`!NeX&{b%h1>~Hewypi^DTO%ki@Ixl#)gexz2uy?xzF%1zN${$GA$|ZXwm;-cju*b z6723=aX5>zBpZY_g7p`5|0fUGw-sY5@fu?>dtFM`12=hX-D!$;7LWcoe*7sYjznE! zdT~+jjHN|Pon!yL`ipNgL4_HKdW~RKKqyr3mhh;r`HIQx-|@Q% zuU-JN_hDb3H8&(~!|aWrl<;fSh0BPB*T26yCoB!f66u5LHI>=&pg{Y&Zfp=zzeC0o zJnxiEThwySS>L=aD>^)=Xt|IzpHdBtGwTEA03)59(5H>n{1nWce0VW16K5Rx$jo-8 zhoeNSRNq4>$4JbRC1j_}P42)bS+ehpN^>3Z)ENfZMc-#Q7C5>exNMoj+H12bnw6^? 
zs!h}pp7b13F{AweE!*-aPHo+JPjBOpX+y%&p<$zBr=^z4)~XI1V7{r-=#%@P!pWa* z&)UJ|ak%3s#-U4E-zz6vtft7O7So!YQymM_ ze2@3~!bzE|#yQ~xSc`lko-+g~`X(=SGk1)gkSDeMH7!~IiaUd`m#n46;?QT^(hcWe zoTp$1fF17E3l>TO#e_a5@sBw0^L7LrW&?g4ZNGqkRY3Ok0lT{l$*O4FsVw0LnF#E# z!FC3A%4WLBZHoFo`xT$&lKmr>haWAraMR)wPbOLa#5uenOuN+2;-;DULZmQj43=?8 zLZG|=y*&iMJ6C3-mm*1l>L$|)P>7$isLsX%B z8~V#?@BA0Su&Xk497^F9?pIBwL+1ug?Nv3$kYxa~{3LjWtp9p|k_4O2orp#IwqrL(8DT&u9 zE&HBRW(9ehj7<)6wt`S)_neM@)J0XA=_BDP_y zy^@J393yvY1R4&iWX?S(?$VWbS z4XBvoIa-SCNJwjE_>rlG`4_6s5)T!#rBT!Rh}bZ~cfcb9=#e*dklt=%_wUfh|g>ArnWoj%WVK20RXYhU0Zg$m2P zOda1@7Si#?FK&z-#Tq+WWK&we(-_kMhtI73CLWFHyxrsJ#>wN)1}~Qe#sz&H^EUpm z5*|kUKtBJ1Zj|4aFH!LUOCznYZjp!?Yz{o?HiGENnDxFO{sJG-!kxMXuY#8aakeq| z;!A}tVBdL5=ODcA!X`wrO58u4G*f9af+&e@p=npE|H%{<<|-Uw7fFWneRii$1IUq!4ahF!ygI#?-@5XJzM$6&I(@-g0+s z)%2o6TC1QzFHdbAZXuF+k$OE&nCnfaerN7h_Gl2615|0?+AD{hZa0%^r;;xNx109+ zxa9L$|4ys3kg!=7Qfk%xcA?h)$rQQX_XeY))m}to12|z?FR7xnS;BtM6H-Q5%h>xv zI>k46bf-nV^wbne#(3dQ0d4YJRF+IF!A>+It9f%-BT9*(i+!o+Xt#c5X*0{iHM^@* zaJ(JJ*1@NRsw-dYV=r=7tQO2cy7}T4taUf0YvC#Fk@5@Gkhx;V+ISYL{7;35H`47d zM-Nms@|L?KPtGkzx#qDPEU|ehOH<<+q6K~4XRlQD=a){(;8fW2>mNWO zaS2*mW~5QJXpg8lQGBR*$QzxeSsR#-%?2n8I=zX0qh1-h?U*1gV}jFM{6n#G1F1{U zU;{~-U7(GT-B|D)r!BrL%6iJ<(6mdXwTHSbCS91 zFFwqC0!62R>8+Ii-11!O4Ze}u)G<63f9=2$Lfzb9(E2fFvaHu=$(#`Y0T>hp-sQik zO)4h(_^Oqs_|LqH$0xakR<=^Wn4jqXl}#*364|ybIJ;!2B<3?Y*3>>64-k0Sq%E6o zx*BlEf=bzL5>8b+813FTk4e3hv%@h_Hhrw$9Yl8?fqs+WWJd?<>-|0*!|2BgJym6x zRlfdU?`}p>`N|XRR5DpjgWsvn-*iZK9o6D7CFg$)AL$}hKt=Wc%Go#nE=$ASI%}!h z{@-sYR_wYzoOvmx`|5AkK_)IVAdRxZXfIqWF?uY2&-;%+qDNM&-k%G{ES`&%{H0G+ z?YpmaxR)(6<vzaO(M1T6_N!YA|S?9($A-rOYq{xB3RMoeVsr4GaZF)e6{#j_^TB0RXR{jX9(x@ zez;@|vF=;U+X@P+5X!Rg8d(K9&Ct1jFC#9wio#r(w^`83(`E`4egX&M?>MHS#}HYV{vRZ54iPOzgXS7p_T5aC6TAx>)1Y$ocyQsN;yF@{Vx zybozDOZ*+p$L?GTljc@AUuBFLN4uQ($SWnWjV%>A{ZcwvPTXoP4i!=4HfGCv`{>Aq zLEQ2gY`kJ*LmbEd$&w&E2V#OO#ietF~}=6z30?a`0no{9sh(+D_KV=;CzB={g18?k{*q`0=_ zUHBIsb9}-3K>sHaMo04l^p7z&B$u7iPok&Y^kH(X`>rw~x|>ZK3cgIv7y6Sy!#U&( 
zIguy%Ogv*oKO&m4)y8EoM^FgyoXJ&JDAr%K8eB-cg%)aUWF;tzcvRkC&zt^D)YbyA zEVcc!>kKzp8B88}aVviPs=(4s%!8&k#P#0`wNOkEml^BFzwiCY7qsP{Jim_`r>49? zl6|$9y7~sG8fb?<070){K<*;q)_Cfs3y9u8Jf33ET7clUF#dL;!nEi6+e*a2YS8&N zl~#^k|7Do3#(hsvL#8 z2tSPijUm2-IT@dj8!_Nsmz$tFNZe)XpwH8}#GOb`gkCL|+55V#khdq|wGYdk*)7I(fBcORs)1G9w_tO2Kh$cSZo zYvyjy);!_}$Ode1o<3q(UQK>GgFFkG{CHS}kGee;z3UTRCqrDdi$ie*5FLmB2jKPU zA0*!$*wkM8IVejwZKlOg1*wk{v=($E@*jHhW;)5@iPIYPZ^I&+kCaHr*ilj2Nk>(m zP=Xvr`_}l}wF)jCuEuqDrZ-cBp>R6Yomz6$AcrZ$W?QC~fNa+8z=OJ~K+uqN&o*!7 zU1C;~vFH80dp|GmSsWh>Ad-ci01)D?0dazG9w|_K^5<_L=(ak+WMTERgQtHEN{Sg- zggi5%k5fRiUP+@QYf|KpS;p2F7yAg6f`;)stII#JTctr&W?$h9LLbQBqp*9U_zF*1ge;PY`ioUXcY6V+B~Y#ifN_j$aqH1xVNvPTwF`3-=<7iW{={_ z-=Q2|m^{&$#Z!X5Ms&nxF$h4i)0`_lQktQ>9IWR6C;R{k0%mU$M=`O~t2ELv%MsV@ zPqdycsfoe2Qs{i%PfzyDo;yv@OO?94KgRgT-$Bo-AjD}<*mXJKMBOc_RrHl@01+}0=m~<9h?PvR?N{8Pw`0wy;3=`M^3}x5+OW|>M())-kcEq zd&laXNb@3N0tWn4Q*9GF*Iv1~|6+HjHtU&euzw#8Ca*casTgq|ADBS%+ow;C;V}B{4S|CoaqySwe1ht!{VV zCX|NyxOdj{C;%wHF`k2{E-_KC@-EQ0c?J-_A>Md{_iwHRb;%MckS2^EsRO?<#POPX zGXn`z;U#k2H~eV4r0_wUamO!lJ~SgEqUHDZx$4UcX*hBny(oStNsr1;0{zu+^B&tP zQus$<0D|KC|6y&zNM9@WT_5ZnTTR|C{GhoTtiHwNCw158@{3uSR-Ba1=>zAYuZPL{ z5C_{_w1@_^PvA$cO!9-$#=l8V5Av-Vo^Oo54Ev%X=HNKA`9telb6hz$5*`mFYn~M6 z@Oi?bj7`h&>{N){^S6-+hATjGV7K`ooh^xyW)Z*WQ(5wGx@LE=H!-K^FHio;fHDo- zkE|wY`d&ezndC%VW52jWw^o!d%akVkrhQhRoh5I5JwG&x+7U?Xdb3^*2kIm@v(`#e zWR?lp;#GWfPg|gRpVsmQkIQ*@kSDz%o)n_8DXowS^TVwcfvZqGq*qEm6>+bDC*T$B+c>?<1 zbJPp1$|?WE(>S>IumtD`rNPz;w5G9D1XlUXW(xi)- zgtEMWaM|SV-+vfH3)Q{eWVjCBRyjI0uOk(d2UKf?n+Sa%HUyFhswRTZ=oC?-uBaOO zSExL4s;BsSIJIeRCqH?1O}sky?p__k5W5yhP6z#C(v1DE6g$7hr^W%K)>((Vg@yj8 zK)#8*7d1)q|K8-*0bkEM8SpXWzm##puzc}(y7h=psbYlcy*^{3zW+g@*Z+v~-+8!b zSh9g_`35 z>bvf`Mz!!1l&hqrMRY@X^^be!O#Id)e==KSZO!pBAZZ7`=hfXVUw*aMk!D_;OY9lY z9f;TAa;rR`9`gQE(((%pSzWsTDK>Inh3Z za7nCxoeE%{cLyAy%Peg^Q_Iv~hyBo%CKAB7DVun^T4!X_@-1y8LF2)a@OdjEH2d!~ zD{NS>rRHB%lzx!@-4SV=@OP-d-6iob6zoq%hoM(m9k{djmf}#kUT!MJ>iw_M`jbA7 zHUp5D#pUu*=}8(PUO-!+%s59${bLip$Edf=f^(6ObHf{Nczkp~jiY 
zkSRMe~GE!izV9=;~fqZ5jf?Ub_At5ccEpawM z-gO#SMSC&$J&Jutqh_p|u?s5jU%|JbKJ48S%n7TD#d)a=(DAr73ZJBQ&!C{$MH~O; z+WsrgG!^$?hC5WWdq~)~>*l9z(r1}rvB~Yr?cNQNw|6p%n2TRt6K#b2`@ZZ~OZr+| zd0eI22cCqb%^2>8l?i(k2!*zTKSmE;UnMOAvG0117wk_l!Zl%_{ESLof)cw9ca*BN zKi5HM>cx!X+enX91LE7z);B~6u|=lN*5wVZz>?{Qu1@IkcNQ*`)|!cvwRJAeY2ya( zp)#BT0rZt24+6xW^pBmnqu3EX##=+~;y+W;GEe)jsFkHH`0d_HEUS|q#TQia!uqs< z&0AzaCN3PaCo;LZiB9#$B%)jN=wfBP=a|3rVj;!VY*8`%5+SN*EMtA=O+0enOa8hN zlnA|4R5$($8+t@dW*8cD=p*4;{-DAu0FYd;4lUL-gL@ZIW7I0vA(BcCG#{}l%l{D6 zpoRwi6fr(!a`>d#3SIAc;xemUyW|W6!tCSYH-Cw78;5YSzN;B2v+@PK9W9J3W3bvi z7peY}JV1hZr-p@33|yc&E98z2Um~Jk@sZa#ai7-RrP+gWIpU0qa(3b0Y3B) zi^#FR^)nk$Uk5Wcu{m=_(>qY<*>tCJ#akEXR7Ika6-N6$mWrsWP#-O#KRY6ggNKsJ zPJX;y*-$_R-ClnmMQ^X~iJ$yIC582B+v9A)n8E|u`Iwgkz9=h3Taq8ye6Y59JKiw} z=nig5#%g^3vh87-V|yrTpN>}h6@by5pB|TotX46j@zx3Y;7oRuhCi552bnbMF|#pC zttAOT<1d<%15~f?V{+TFj_YtrPH0oiA~yCkh;ch)kmI&y(Mi2eyNO-E#Ql75Y zntI_FlnpMT;x>GD_L4lk&_{Nzfft1ER@AmW zuXGq^Ii&fwwF=yoY2u3r0(YODCOTvaodtZ5gT(2*hN{*S+FIploGgi?JM6o3W>Sy_ z4^eUdo9@8KP}d07)r2R+nM$fdXnuandrn0cY1Yz@wevh9rdxjVN_k~YpK2aW#)YD7{wpcW~v0l;k{|N|~O7fbXESXXp;&hJyk`I+uN64%*37ls7 zV7h;x)L8KD_g^h4*;^>e>rY{%8cXV3)nvDkY2Pi$ z*S}Gs+dDTlDhp;#J=ODC9B)RL9$YREvxMQC5}VVsz1-vb3UX`zdyE!8aBQ2k+0{Mf zk>H_=nS*zbFg*m!Ek3S{rG4)+Xxusut!O1ri|lG;`>w)d$94o;`Te@(X6w77^Unj7 zBF=j3GI!^>Do5uReOc9CB09(Vr%L1TpW8;`^h9nV$!`av;cme%F=ut)9Z}#BnsONH z1}D2#E3Zx|`pBoF*5K!YWS`>Jbu%lmGV;!n+jxaOEel-WC(~{YQ4Jcc4@>NqnTpnR zq*9`^S^VakFcT+XGsIusAA(>kjK`HvU?Yox0dr>;9jl*nl5aPK19%)(iHUFP6~;jd(t4&eX(yjA6*Y z@+eD6-AYhoDkcBWi#(!>X=l39W(>am*$g(KXxrhvD4SHMij1N*UwJOk7#+JLmyNix z#QH5lXVdLMs#cC;3)e9TNuDZZ5zKIJu4DF@x$JEbqTHC8@ zg;9tLlTij(DsO_BA5hiBsO{6CdwCB(Rv^WRQ-&B{^ci#dfxGu#Xrze-)JxejY|fEX zLkY}I^tsnzzUD^s+t;CyltM_@gTeC(#ZsVu$_?(j52EzI(8-K|b`(Ei`%ZUtrIBbj z`m~&85tZ^4DxK=QA|8=>>jiJA`}_4Gv1)eW;BVnCp8lZs2L}07kj7)=v}!p04Ej>r z{MwybM(je9FoCkyKvTQ0iRbv^#o9r~JD|$*D^}Hg6gjBl#wj%PAb1jG#X?I9wW`eP zH}=GY%q{;7p`SF zDb5tW&W{L2&%d{5evq;_b)Ii%v0BtPXJvVKi0XgGk>AQ`s4ZU3eF#hSG$+_F!6)S5 
zThR$ix~uh!;KVz~!e^vIMu%&o2U?_7QZ+Wq%;f9=#l(~$gL^Hl|LFa4*uVP`HsN6F z>(KFpHLU$_tVDAzd=Wg6LSIu8LYgwbiLFBC3e52=9?v-_6HxgaQN&9Yvmt+O!+UC~Y8D`Th*9~mob#b{E ztp{-6)}>v91ASK;yY%k1&oOD5!EHPMx^$%4n1>1ME+4t0`GIOCIvrKJo&(;$6{1Le z>*RShN9i4s>Pc8w-qHm%4J34HisK}r_AmP~^7)fH%3p4^@{RKfdiMkE5j^R4;a!58 z2Tp-$HqkB(iwUKa4eU_uEuMw=MI7I?pV9!$mB*3~%h)5cbyuc%^&?I(`%%#WrJMVg z&pE{Ir}3rsZN*9#un>WMwv?_yBzdckXvQRFo>ko@NG(=Bo#%R8B>MG{v&I5UxJw;L z^g4~@54?Fly$Wo=D&w3p0TYhC|1r)nZ_(v|^r7nG3Z1GH2NxIr2K^H+_zaPYsL}yQ zhwj5hC}I$GQLT)L;cpakat76!AyMlZRRKJrtvnG^(bvJ%Nj@BlbU-A++M8Uk{vXgp2O%>}~_((Ri{L!-&gmgMp7PCttojOz<2D&A?sjJtU)h zn?2?ziq)iSk?(b|cLXJ_oh)gck@^-|PyQ)Tj-zt?1@_cvNpfZdMvLVxNAPHaowMC< zn&J42tQuec$;K-G5mNl{_D1Eb*OT@Fsf=MH5oX+eq2jfoq_zKoB*bjuf2{#bA>KH@ zK9Ot-cl>^7a}ivk%{dZdpQe8h6}xHT<-Xv~Go}^LxmiNP0nsZZlCR&s$pA%&6UOMq>mp4tz z`3zk=Jomxm_$Bm1kl$u05?__G$!vzZzQ-6d*Svg0FdcN**{nMzGy_+|T(&vU@>tpx zWS3Qoy6hYFk9@*|zKRbmnKA|rC2r0o)*Bl;HYLmtS>d6nN?Ycn($NwOKX$gF%Af;E z{8}tYWn+3RJEkA%5KR*RTd=rl3K6YZtC8fE(<^MD8Qc3%>D^9nA zHeE$$Dbo2IR|gcd?FIB(t2D(-kMN2ATz<=mx^*_%C-7jt{aUwcJ$!OAFYKDcs}(b* z9zfS@s_blT%_Wm+m6^={pFbUog|2$7%$fw`SWsG7UTY^N&Xt+ri<;k z%hB2&NcF!1UtWjm(L+?jKo8jzeBF8qEN`$k^lueZ>1!AFhw3pUa$0l`k?I)U#6~l> z8l3%zV3a1zrY2fH-`|nWFOY?XW2LBPmppsMhgUnitRz)J4+z@@Z1)E;TzEE*3lPcalVUC++jaW{xu?%$ zVhZFVjYMTw6Wx%+4U_ocTU<*}rF@IHT%0vP^cJVHS_$ChM?RqpKH}q66S{F2kGi*6 zW_4WVrreY9F4t9O(Of-G{5`8W=$@L(PgT{Z$Z>J(g1F*t6synGUWmbFk*7R5$BWDL!ZIuvkwEIVH;8U;oCTr zw_p0%RSdF{Yk@YyPAMo`>TY7~;)(sRt|;+vgTP91VfE9DN!*BtDTW29@+Y3CApFs2 z^>|9H8>0d2q5dt^&(HZcKU*i86>_yX+6Z67K2Hn}Zn-9xsI~TXp|7r8E-zwNnKnhO z8r^dZ>MCw1ycAX7H1bYyDpcJbplJgnhGeP&^A{pm&PWg&4mh6h!s)o5)?OfY^QvGvnAGjFQwddLl6bi9AGf1-;b*bEl@3S z;&$(w0*VtkEfe+uB2bpuOx*2kOWp`HM>E4s+q9%SV5*7)zhTgo(6h{!+%5@Sne}{F zicmOarwfs_%2MBzT``H~Q~LN_F`j;b^Uzi1KBYStyVbY`wk+)Kto-{vzE&3) zcb7ZUe3&Xq?UGha<=K6|QrIOck;4CyTMxzqw~4)7>P5nU~T4yGl+88Fd1kUNS;szA_*E`B;mCMqV>eXd>4} zeh$+tcC_^4RXSy*cAct-aXT1h-|sBeAmQA8YRJT^I!M+q?0DoHqGZh|J}{|3=|?Rh z^G|kfzNk4%i$Z!JskYtTqA?CzlGKDTd3ufS9XKxPP_I67uQ+czHQz*t&~euR)0YTn 
z%<6Vw>v3^LdE`KJ*nU$}7DcRjP^ z=8hH>ee#k=jC*sn<@%?rdR~&O$>DX!8kZj!^Wj!CNS?oxbJ+Ds>$3VR)26G+VQ`iY zDwMSuC==^B=}#Wp$9(9S=F{}Re&2bFZi;^T`AsgeA<3HX)kyEtTX%wXDOs1EBGdD& zFS{b2R zMcrFRy@h%r{!C-V7Z$#FXS1nS%49c{XaWmoFITM3#ErSmbv?FSc=zk@p2 zXp0$Osm6tVJ40-uBU5_$X~){n9EPrs+@^|%uDkW5mKZmlzl%6W z?|d}W6&^eHDeq}Jd(jY7fT~(lH>SOTER54qdhbOGppUv=B=Ag$TDq^_Dq$0#Z96^!EmH0dDF0;A>_1W3v^< zROkY{g5N;-;cV&Zx@KjoQh(7RqeMW8Xi_=5vzqxh6%7P30fFr8)GFxh#MpmkBj7a= z0z+q;kSQ#ej`0?(KrV-oZMwRB50ly@IO%EjlrNi+ADYr=$#2|Fgg>Swl(w{@6-^qD zxdZ_y6*s9xr}TL|&|`QP!VzTK{tft>h= z1W7pyKFU3P=9n+hpnt}xJJDnjTmSR^t9~ULnh>kD)ca4biV|&8$18Q+JmU*X^>d}F zA#&-tw?h18a%Z4eoYx8Vip<~d7dJWDKD+wL z>9-;~XS54kD5y*5A~*eDAvsIMNb(2EiBDgf%g#oKkfpb#WUqljsb^|_sRrgsT^oJ@ zqW~=7WH0gda%<|Z(v@6sPPygvc^)=EL{_0-X3x%-L{(22Lrml5&M#F5HPA}*kRH`E z`;|Y&ZC=ATzq3}v@+(E2Gz7=OI9p@;@*Sl;jn|2`ar9pw_s2Oi)v+u*P|EE~3>;$& zZ&~aqeQCGe;!Py^Q_@YuTR zHNd_lw#s{nf7fK*m;y#pL_1Z4khMsau8n;{TPJe{qX@#_ne8#(p{~hi@m__rtY(zN zO2D<>JgT}hllJBxy5fLBK3NrM$Ytn&7tTiX%|xRoxv&}f9BDGDq=lp>ePy32ReNy3 zIEEgEpgXt? z_-r5Z5^^;Be~nOtaAs`wnWsT@z#{W;Qa3xcrFZvvoUlzu9l;??QDZ47>6wN_HMhvr~APW z2{*r|4M7#Y4>{Hy^}x-$eowa-8RP`r2-W|n(5~9SAa1VazV!DldVhwJsHyTo9}^ zTj!dS9Y$(VhNFRO4@<6+Vr*BU;Zr}uhg$rm&KpOZwwuGM0V~MG?H!3A%ZSlg5mgzh z2vmy*cy4`zSOjiofyZ|EX}iJedV~;s7jXeH-hoXcAkVkJ-6}wzun7uz7&V9Q23$aH z9&WZja~i=b5!?u8f6ulAvG~VR_$p!paG%@fd46#sj6R2N4I;)7&;E!^1bh*3(aV3< z{v3w@MmA-GK&9otQv*WyXTp1NveFbwbPsVIi;*DxEh$o6Cp- z;tfzc0`c_gDN(7HdGq);;}82ii1Fv3zM!Bthm9EpNdr0!gYbCFnb^`|=)-L)wdX*x zxDF~K-qj6q-4^sDX#O|1|3$%T_@g%Lh7mCS7f&QxvYpPHf_0;k5PNjXirhZ-5<6&Z z{4lJ>o8;!%i8)(98V|#BU};r#22Z=mkrXLNG})k!E5mME1)ob~0#-rpL1$a@qn#*E z<_5BZ9e3GYkAs$!-S0{vM$dn;{`pyIikn8@%WKp8kRPn?)W-q>P13dwJHDQlqu>)VBfk7!<#M-D{1#EeUsx?3o%l-P_x`&XWo6huiRjTj3NyD> z8ZvnbAttBc&X|ZlhVFrgr>@y^y-sVludz8Kz2&O{mf2M161q@Sq#wQS_guT3vv{vF$9h^(j6DZ2Ct2d)si>~In*}UL^z^vq7}UCi<``2Xh;VTx! 
z+P8Zx!}&nQmha1rWg~`?Z!grx5^0R)l&r!?L`LG#ysFmd8e9q&O;02W6Va5obI?DF zOy)-65w@_!Om>BEE^PC-3ER3;jL!YbI7&2=Fb`i`in#0~uNK}JmGK2&R^pQZxaveS zbjE~;(MkhpLu*30uJ9eswb}1Q;f=NH9Zw2)(ky77(C&bNFpQ9dPow_5`is031gl>a^#2alUM291AF->=7kgN?&CUd_f;54pHsNWL*hjX<|u z!}$B2?z3aw+;*kxTVPHq1QhUVW67v$bci|4T{hMZd5DKLOUalzB^>xysMmGS#UvZw z+~p|{@h&~m9i=A5TiC0QHxAcok$YhvP;Iv$x5GwM+w}|Z+jOBfexWP>r0OVFY^`H% z{qx_`n`YnKbIyX#3C2cWddFQEVD@O49MaYp!jL(~5ux(?fz-4!n_$=ttHD0RU#78F zVMmE3Xx&g*yVi8qUCZyM&_@<2k}+bC;ulBmMyCul<0e9%;nlN5qqQV+C4V)7`*k-S zB5GvxrISqHd>xs;2jen(tKPRVVhi8MD|8nnR0e`Hs){^%H$_Ys->N^adU{sAGJUrL z8QD~Y$xJiTJAb-<+E%UXJHUsX`kvzI!e&34H1MBBw4YnlJMHsFWDcz|9Zsb(USG!b z7;^4Y-iBLb9?`Mgt=BvLm6!3OUC$)$} zFWaPOwsX0<{OK39kZU2!P(DH?RLFnl#KD)?K-r{zavnYsuWH~l9LStNRu{F+VS~4~ zmjl3_LK4V%&SAf*eRA0>s@O_HN#l?BGo&x2hw*_e?HZWd(-t#A!{*@}+c@!QB+G#- z$jF^zIHS5|A{E$iVJNE4=WHaAbA$K!&nl08tBYu@aeDFQa(5hWro^%1fCPh#`ub&j z@4aRi@(j|y%6&oIB}`LNE_k!*AxvrB3xq568gpjf)Tz4USz85cbDEA<=k8zZ(a#)P zT;y7`tU`ruNnkrY=0&@0G*YvzH=>I@H~*N&qYD}Oow@Y%b8G!1;{w}q<8#iF#w~DY zCquqWZK>HxZ-^SQ?^RESkKoezb*GHob*1i+`cAPxedQc4YmR8TsK0h?rbLOk0&Ts{ z0VGe74lAysO)@8HdLE$d&J>WPu>TH&TuPrrZLBww$d07S#V_MsDP9j-S%bZk4ttOF z`i=3d&71QQMGJN9lrlvpPQOjE2ee_sN|wqN{~y}1>8D11Am5(F@*2sOrOjoCgY_9pMm%?q6RwdgWhPQgcdOnaHLvhK8&1n_NDI?2$1H+goN6pkvlo9wVoMe!8HQ*0 z=j1vs8|_{EduxT;KG1&qJQEUR1v8W6xz&s#r75%0n@KsL6so_(7-0=bf5duL9Z3sz zjB{T)z;(WfYzo(2jxYwiE7S6zrcS9+nLiSBcXw={RO?D3yer#j_qN;uCbYZJpcl~HiJ8{z3+jo=tT*VyzJ$g{g7$B!!?&uxn! z!4curwlWdCzgR*gDa)f}tsC{vHT^8f+%yE>16=9$Y6`)IT_>QFDPbQ134$z7tl5wV zJ1wf5J9BZ^kPw2T9}{LI!jO0zddH}J!73DaKKGfBjCMhlA8t}-@^z-;JoXe*jh6c) zU9}AR%GzzcxmHj4xyzz zeH0R46Dy5xv`RT!jAe6lHXO`4D(O`d?_d?M=>d600bah>E)W;4TW}DqKA(WI`Y7+e zsvg@NMF!6-Ja28~T*dQSi?5Y#89gULMb{XL;dVZt`WmRGqSgjVJ^5;E9Pa-AEON?R(h1is5}== zeWXVK8_GQAQ#65Y_r=#-GWv)d(qk{bu`1V0D~erL?bI?`(GanH%(%Yi-1GXpA#k)! 
z+ui*Wd{j7Scu%hYJVEIZrL$SLQ#_Tz(xfXJ2ZC4o$}iXV8*@C%)bopP&r|eBx?9-< z&@G-c|6^i)jtd`*%{-0Cd}-3lYUe--`V0YNHx-54Mgz`EeFNo40A%kBDw$8Q@VsnY zIegUA;pZ#wkBfX@sQL)qN%iEU!(qUE|L4wi4&O*b+XXc(wBJ^U(_iMs@R`N2A>ETH z=uBy1WH9V3;WEaX-|A-cP+caf;yQL64V(b~^=z&UQI44v9Lni(Y1MN>Zhu~rF9(_| zEgDJg%VG6WESUVOh~_<+pMPN{e_}vt6@T3AcZTMq$_1R@Tp3|4Tb_9QWA$d}Ld2h) z9DhOW!LJBO2v_)8HMuzC4>!6xZHCqNpvGu`1rtMbqI4Lb=tMMf$s1(%d}8q9BDPqG zX+yX2=hxko0uWP7EUVXuo(Jaw6u=M6R=3gwM@bVm?40>no=0&3lBv=4A0*w59Fj|1pxm$C!gj??EVwX%65y{@*(E~2J z(-^19E)2qIwj@%@BDXivKzOC?vW4QvtWVpb`Q1VS9=p5IRh(8gL$d>%6TcpA8t9=4 zDi50}lLCYo$4_!fKoH#CLtPX2ycwpV=k=8r>YFXBqxwO0@x z6|j+TB5rZ&PQ*&R3Bt#jr;)(&M50(MP6@IH87Dc6Z$pPmqcDj32_B=~3H5iWAU6w4 zBRiEA-2Qp1{MO;KX5jVDTv%josk_BT<62i+N3fEx4;6X@P$}iQ;(|U!9rAHON_vwN zfAcEJ0!?((a=hMbk9||kqpZIL_hv|@2X%m=5UtyAw3pSlgf*IL(OpI_d{ z<>B3@eP2-kzyK4as_KcX?~Iv4gny;VkgI0TS!lKzoWh>NdA4jNlrcJjL+-t8i6 zaB(~9E_aU)T2SM1ya<$}w%z(hdOXOea$bd14Dsm;9?`bsK$gS1cf5N>J$3iyH*WH! zv@R=UKA3tP_{ZBYQ!9fPg_Yy-Gr4 zB;Y)2dEVy~AVt08`jKn`V@WHH_N1qUcD%qe58l}0sUg$qF%K`k{k>bve9z}Kza_qV zs3ArM_vql2JCZHCD6KQviW@!W|7l&PS#awBHy7I1;cBa9mhlRxx|8D7RXK2#yH1>< zQ$n8-qkZTp9Esy!^L;(<+j}09$mzXL1_azI&gbBvVjb4ZH0#|Jk_hQ*Y{L=%DS=&`Jlm!5n6nsr#crg`c@i4p{J= z|MtS&^+`3~@%Y0%9{aoqUC#n*Tck7&NB>lgxn&TaQ1^GVxWr%CJ#lMuyObU|D+xNk zjI6zXnPgXjpk}dR+0{GM(S*z7!PPi}j)>i<>;|QjD^+~0R@A^++bOV=rT_mk#|G2g z;a5o7iLUx~n>mVv*^;S1Sj@#H-mEIg^$N*qZ#ZoXZKp9N;&JvmiR8q}uFW9*@ihJx z_ctxP+r9E--;)Kzx)H@Guf!YMh!;g}@LV3tW1Zr!QXNAb(l}NAHw^4dO@5h359$Mw zOkS>+6b!%aIeTwwX2&G~JK%gTf7GKLbg49$E97j`n2X?%0F-IM&)@M=`K3LF9EPl( zyAJ9Qg@Qa=`w;QCkn7L&>B0373NU4+{dA*k#8L{01F^z)<9V&60)s@%80n;y;)AX9 z*}Zpq>{Z?Hr$YwZtTdUV7$f1*=vj|BbDrCYp}qATo;*Qxl9;yU+}3E4HiIOG%%6pg z%QcsC`@x|yK-M2ZRZ_mMGK(?N(wn8$+*FzDM%E`o!#Of_f?qk{n;o?gp{%rV+k(`- zR{w&V>(iZ&fEvxbzpZKzT(~Q%3^B(Na zXohQ8@|=!ZjML@U{Mlc;AIt3wbG-+al^>V3&y!;gj1=Ywx(g)>1`6%To&CY=v5oa# zy3PuGBW|WzWARxP+uOL)y!S=D8otgq&Mt^MhYih2xR>9ZD=|782nBbjXI5xiw>}z% z5LcE>-+IUfj-t;FmBTBV;O{mmW6GU=T;8VQv>zxKD_p=Gj)O_HG&hw!2!~mzSj)8$ 
zo$AQI^vImWU9LrUFC%Fs6DjdIC;fWl=nhP$&Ek?p$%mmPH6}`$r^_pjHE3D|XpLF1 z7WkYJG0NG4^2NZVYqiTJ*Y(G z{7{o7YG~*SI-#Do9b;j5b^Q3<0LepqtI#yyQ!^YUWR3ebUAQJj%B#=&K>mELJwU-w zBLH24VH^j5U8hz`DqggwI0Rgox-Z3xrQJ`jm-;Y5Ypzi;0F5)t44gs^rPU$twbs zpebx&SpF4b%`Z`^!t;l76MfuA}7oIR?2(nr9b zDIBBVx4C(H6(UFW*UeL};^Fi(vHE-?cRUpHD4?e8?(9qdzDYcjF2`nYecMp5cdKyA z#gc>1|0lT4?Ef_Z0pQ9Bgg-c&l9XrlDS70OWJeZp>gp$=KHcWJnBO#ycar|^ZBgkt z7MHglHy85s?wnk5nd~~GMbCQV=vnGsrs1Z`hDQ#fRAWDy9TRhNDZSg4=zU5LV$B@2U__40uKB_n@jZwQZ(q0N=_3up{RHWzTCYr;HWI8Y&ih1)&i&Cd;sv?Pkip|G z>zTczKi4Aa9+k|DCDZ2oNB6aCdiy;O_43?#`Q_0UCFA2n2W6;O_1O_r@jM-us;Io$ZE z={vGKTUj2gXHSe0PAXhi{0`SyRO&bTqxCCN0$cV8MUa&(8%;jm)0g+`&TZqCa8k|5 z`z2~LyoIoLxTlVgw<&OgbF;1lVB1iRhwL>A={R63yq0~QaAtorx98X}*LM$?+}^p5 z0tYR|$8Gxrx?1esM^yCL_W4n#=RM9*D(cNbk001J)g=7=>pm+q_v+$z^in+9Vy9EG z0Bui|3vEcXBef8?A=q+4{drhhve8IV)6q^X)2ib{*-r~H*}~qL^f}ELcrZWZ{qEla?k) zIW>3jCu!o70(v)+okhWun!Ht>FF|ch&4VnypjMAc1a1%|> zGhVoKNXtFEyfjD$T%G+k*LZMVxZrDZYB>8jUKQj}p+E2FRZM!rQ|e@UD1Fu2K|*+r zO7&?06QGrrRnQ0yK~!`AQ^xfn4g>D;QB@k4J3WgDSQxu7NIf`*#{3l(KmB;@wAp@z zakiA|o;{uCgIo_bFEG25A`~gi(d19QNZjk7bvy1h>5O9&dMLLDy3Xbboj6xHj3RuD z{_K_wChME;Fq0!RJbC>+AT?cp#^So+*Fw6G|4f4|q2MdtY*0`l&X6u!YX;Br@Z)kP z&9#3Fhz^bB$p}?wzf!YN=6|3Ja*mP&mrL-5Cj9HA9120rO*leFTKHFS2uB@YV@9aO~ML zK%SI04w-f<%&X|U*uDj1;*52P(V|`Y0|3NZIssq&m0P35$iEh71zMqEBO8tyP`oKn0nFGd z-+jsQ62G1!Qyx1s`{6bI7!i#x$K~{995On>07sy5jSwSyzx-Vane+T(hE;Lj#SoLV zQ)9qxd3q9k<{)|R5>4l20Y2hX`(tNXmERH0#-88q3^|q~I62$UJ7xBDihJ^id6j#^ z)Kch4jYzr1FJ{uI{QlDVV4agd0*Gi&$k=@7@qDtt%Ay+6xTvky2OG$mhnXY0S*seN~l zkStgs;!`)p$xSbVi;67b&2A+erc(Sp$e4P~CE<&3GGuD^0x|ecVjhKglFm==o%Bdd zd|g+ymJjrVQ&}T*#+=%e)A5!opZdv4n?b1uXqhmM*_r;819k6f=?%2_4V*ST6eI;m zRGgOQt@)*8F@Z#EUPG<~DUYs>l0Hq`fJRF-jVrHKjFKuX$mYxKr2A+0@cVJTNj1N% zg>+GIH>L?n|FjpM?rEY`kIWic>Rq|0Hi_$lU{o#aIy}Bt$gC^)3;irewitw>A!A-aJ8%?58HSs_8 zi~T8hF?MfDj}bD>xjUjh2TU=Ump7LYo{ITxp;`Ypjkq^n+FfB;18o%v#&*{@z-dL{ zQ_nw8v=3NHKI>NdOc)ENZ#)68>$W<}bL zytL7rtC^9M3#F0VCy_+_hvkPr3=T@Y0FU5h@x6S>A9jJ~D9yEhavxAPw^2<yGd3 
z5B*ql+E`g3Sz~k2)r{c6Ydz&g$YWbCd89FZsa0@x?ziPkl>IYaE+um@{(al0`%^?4 zdkl&`cC1G-&*dG@uvC9Hh^bg{mx%$5`|vyIII`fzAF=q&(7)`(lmi9rq$autgZ@9< zRJClgP#m5L+N+Hvym{G<*`^@{kJhE#v8{eM@$;dpCQFCP@ZRm) zajU|NSlwit9(>U5NCPaS5of>+ynz2SOab$_n-*HRlx*6(5n}mL zNhb72^~#kokMqW4Ns65DE6m*D5`R(rCs?Z16*tE<7kpMOyPdNK!daq3sJeRb8W(6k zzB}`H8Sz91lDNjsB)pQ$cI{f5dDiF!0=)m!z3ac-*jsqaQphdL7 zoCrykD>XUVtM>{M%PB2Yge_0CzQ$KxJ&(Ol;`7)5U66j(Ktg?Dbo*4xHyrtAsm_&# z>*3uQW}g@9$)9aI+d}}(!StVuI08@Fy+!gpfl)rcOQ$#fgL^OciQ)lSw13jAestRU zepYP9+$M{gOTpYybBv-A7>3V_n%5d5+bXsN``&Lr9;abtI4vnnkFW4;QuHo&xpY?% za_krP)2{EPpLD49QEAxL$ELHNE1!lN`Dm7*urYD+z8+Z)TT_)jkEd1R+6 z=Lv`NDLt32DT$I6IRByLU-0_0^pV0K~x z{Yg-TCnBAR75ARnIVVo0oj1Rfdj>-Na(MtxjHplUUIR5=^nNqE&XShN8}5i27%km? zJ0h^1H&Z?`dIKw@zB#iG+Cl4_as`W45{b=H;Q{7l}D{kL;tQrr_JQ`o=5;fte;FV?Tz7XkS1nToHgh z8#-Jgqs|P7^IN58!W50cNy#rz;IClzpL5>et{is5XlE5PZ+D1;06#k99nqOS7WFdV zPkz?~jQ)hC!^RL!gBF#9>zMKV+jmbMrbl;kj7?zoKiF&G48`D=-X^6GnB>%O}xMQq?UU*tLo?0-WuR}1owAtafW)QZf8B=aHd*0U~PxjMc8pTbUQ{fIWZ2= zM`~BIPOE2_s7gtmu|3oHHrM5u{+|R?_bGpj()LwuTH3M#E+N+gWc&r z{apAn4^w`}r0Dko1*#}tK}iiStB==mW(d#NV>fNUpmJC$w=(y#Nx`K}cpnMLw3-L1EP!0LzS<||^-)C;-B4Z%Knjd6o#_if zCjK3$nIl^YFs#s2Tq$_}4)n z1n4ZCDF_Wdb)WY}%eFM>=g5&0bz$urz@>%bRTs;bv>Ex%6nQAwn6S*jRaUk3$n6p; zkkvMd%S4GH@MRma?@nLFvD8>2MIf(TKf$vzGm+5`xHdT$y6q2xlP0{Eu#^G*N!V;~ z{!R5h!j<~EY15Z)%A8h3l0_FgUTgT%;iuuEk z=-LUk4%!q&-fS0O9U%P)2sr$%x{%Ywes3x9x_abFRPk1Ye|hBDO8x?-tDrJ$`{7?) 
z1apkfT>LD8f+kZZAz)jp2vWr6#HmrN`%}(hG0ZKqRIrPpn7j5~AW&5=Vxu;{nk9a# z+Sj5_%H0#nd&7XXmbaw-(MOc}WM=9V?rAK;8h{haV ztVdp-(zN%KDmc`0O`XH%9Fe@Rkk{HLq+bu-9@X_=aBKHf;W5Vx7XaF~X=$)5TEjh0Q4u?P zt%g$oF|LE|rIzuFca;L^^VG*?A+^@Z2IFj79W(Z znj{mPa-Tl#*8%FS!Fzk{(nCQ;rh`@Dx~AZuj=zQ0P!8<8cIismWK7y;W#GkZvS(&u&e$aI%+S=%KZ<*AYUYVr)~#-L zOd#{0yF7s^m=4PTqQM437&|M(dHO>{$O6n zc*Pv-0{fCxO$Eo;z=_pExbh;JOc5Tod zA>CBsow&HEa=gWjGA3~{vTj6~JPw^q~HI`lMW zlpP>%SZY+GmU(D)=jFqq5zhUjqKNI^I_}LS6vy46N$=`d=jA!g7YWAzS>@~ZqUMSX zJ})w#u9d*d0<&82ip(gJUw~y10a3A{nu$DDV5dy4$XV-cB+eWTya$c z?+y&oWWsBWjLEbuoT@6FJI4|QM!X_Xeo0s&J}PledwvyG7gM%ke6u*OwiEyx(l#is zxE*>{j|y?eqt`cuneF{J=Sw}5ttOu?_I2AlI1;yo%SHM*v`)Q;CExh3YnwbdKZC}P zpZi+}zY5`I!Kr6(=!Dpkjozx$&5=?Q6e@PLX_#!!Hn{97YW^qV4hk=FEw-Vs6z|8& zqgr8Yqi71Vh1f~LozgwPmXCn?bt`e3V%J4=rnZ+}3-qAYz?stXbwI;Nx%=D-DpHIn z*!6|qn$07r1hjD0;aHM-l*_*25NBJf?vbAwdiqM6i5UM2Wf)TNVQQVqeSd$1l7nS! z>(eO2#fsK03Z0X)Fjl)35BkZG$wTs9u6c$S3Txp^*Gat$*1B3|9u=4*hB4S-+OIU> zAV%7$C}$)KbF|ilqTJVgn5bv* zica|$`wlT-wHn=k9zRs_jY3~~K zuBm)zXq>g5CISg)0YBg5r^@#Hp3x;Ql-*NE3GyA!UrK`i#3)r6kSp0LG+;_~*e#0-n?za(ckebxjL;QM*L9x`O@mRA-d*l)lJB_QvUY!H_(Vs4-I5Ow<+!rxJ(vPxvfAB?8!HuQZ z(VZ$U{<}2|KY00nYG}QeP30~r)X48!?myAJN%zB(6~yM0~Qt8~?9*tk9Y#d%t7r1B)4jal1zIy7grGWyEV1rNm_V zUQZ|bjt3Uz%MdcCs++|{q?liA{o~TLW#41PxZ{XDm0tSeYMy_cLjw|%D78eTJ#W~B zW2z;oS*o&@h0BtZCX(0ls()nmZ3?{U!0Qe43=l|!pSs3~ODIS-j}xXgN;2UX3ya1T zmh_Q&?e_L^uc74{alZkl&`4zKL#eT_KX;9c=%4c6AS+uG!!NESJ3e7nTE0 z&1~6ytU7wx(W&28RVKAi=gb83g(YiYv`c5bBG-1cbcA~HZa>)tG~CjV$%{jdrhi)x zTLUV@6#Nu4(bhhksx%P0zQ`II#(taoxKc(JavP}sw^&>uAMig2!w_>!Of%WW!q#a( z(s*T+mS1hE|bEN8hF z_-S$M)znTdjjwfB;}pST5_3N-N#jCaS2?pX1{Q3ja><3B+!zw3Kb<^@p@u7!xD8F# z6g?A+s^s;^RhhShG3~Tt{8B9WXzGfR@rWw?kMHTY&l*X`d#~h&`8K>?|ZhztD#^ z{++v2PbcCzj$-Kzwat(M?~!RG+)E}UN5^z|ybQ<<(yaZ8m~A0;BQ4cPuZ zbSOg!lsSAi!Hmm*k^6B-Gt{5NM?wfPktPw2tCQeM!Ie_Sp zQ&(#puzO54-|IG6A(fROrp}zTGJj z3o9j1)|611^D`8p#WssRxh{4m)Sz1tuFxQlboaxl!dEq2Zd_X_ag^z}1IrQL6k^v1=2=*B4cx-oAxC>ZP3}_C2HLnsIqM4KVbAcO 
zwqV_0{L(_(G4#W0J2Lr5P@AC+w&G2UvSrR$@Iv-0yHh3^hxY-l%-2Frq_31et3FhZ znXc!Z$y|GF!%Hy8LbEsaVW^Y2s@ZFeXlbpQA(*ZG9MEkCl<8t&zpox|8htoC9yFZj zNAv{)mD3AX<~Lrf%M=wxO26eeW;5QIdLp&cDGxOvOPmX4xze}Pw^;RVJdS>Jf_0&m z_{M}7PZvu_)E^ZRX8z$PCS`_YHCZOn?T>FE-g5CVCeVU7+xC8Kni{yLsJ0y!TAU3b zo=9HXIBk0s05r2J@KycGo5xR$;bOL&N=iH=Hg8LUdBq+4s z^dVAT5y9~*m?m32>}!||R*@N$&cj^+h3fem$ODgiFDOWTK1bjr$zj>G9Og4=pqoIG z>=;x9$Poi;9YkC|2&TClE8)xjw?;3mpg^9FWWaIt#2at#^Eqi&2xqUFa6>Aymv^(7 zuW|SnDIT?pQT2{fNzge_{Z7KrIe@t=3xV zO^xfcTFH{LNE^+2efI?^KbfMQ_=*FiR8xV@@5F~r1g zOPEqYv)|U({YT+YjUsye5QVfQ(xpz-WmVl!6k+1cfe-$&=#;a)YZ`;}e6=JZK%ja{ z2%A3CUWMESO5|2$x<2^?&L7a^#s_?egpFy>UUiV^jitkL5F5+FMtUh+no5o2zic&M zCqQ3CWYQnt+HAmPL}Bd0MX`N-DqxgA4_Kb z;~uw8&>7O`9`wS;Hhb=LJA~d=zzw?=<(6k@1+h`U6JMSaX8vy)s7Iq~8m{UCa5>Dq zmwv+;5Gdkn?4?Y0m5ALQBy~=jgN=wNsjh5tgG$k@xkfM*{I48Se!b-(a$IVC4MY(! zd$@Ien^rh|$Tfn01mOtD_M>VxOpo5a-2UCVQa^+?l$+LJu;NI>3IWKPE)hhWSUnQ_ zx|4~@V|2Kko{;s*F!x#*LQT)U6reh52>AVZ{fppWklUmlZ&Enq6G@WK*pF|*ujug5 z8;nhoOh>}m?Ob;Cr%$SAcGSJ$DrOatlGq)Ju=rt+ZI(vQy@5r$=hZ~G#*-4yg$kr+ z$$OyaUU*4CVXhz{9t-?LmTwJ7{1ys>O^Dm<6Pq4XyM^|-v~!K)T^g~+hgb>q$>E@+ z>U9oE0-O2yV&Q9HZ~XU&vcii&_N)dGc!RxZcv9?M-ygLW0#Pp;?Z4PA)8n^g*B5Q4x_H|_rYlCAe7QIbtS0B8!p7#gyJy|Of zBD!3SREI*TJ;!AaR!E>}1*);??9T~SmXxk!wa$b1ReO|+T||N^j<^di_9OJ<02z-x zs@ydZ55dN5y|fwt-hX6ZmaQ1pG=d5tj_wuf4m80CbzRNKDiwyDL;BG{)_glvEYO@n z9>ZM6d;b9moz?a1ho*FEv#odNFL?rnJ2_V-KK*{r=73%+aY+p13de$aj0XWKF9C(#JcpDFXi>r0O$2hXFKNeauCC3W8QKIoG z?!qP22?X8Wo(96X`N>_xhU&aHz2*{RuZ z&!2og?v>_>J%_>o7QW0RY^!*Uyf+pHiip%Krptg;uVBSOGtkjjYu(I*XWzT3fe+e9 ziP|rq+BrxuQYW>ERcFbFO=@*^bU8SX!&lWx@2lq zrGTRdHkOKx(_7v@N(OzYg|4JJxh40~|07)=a;unKVy<6c5dV(dK}5TmZ=Z&w;QbNk?$l(Uwq=;~ z;L@({EF@Z!D9rD57>^!Uk+{$}=FaY3`(zJ;>I}ny4TxwMfV0$?-y=%4G}UCxaa9nM z^46dBPl8aVE3Nm0ThqJ18<>UIq=#W~%--cm{*{YL)IwM0)i ztE6`*D;X1{=UwOBe5u~_qyTsS)hQr?enqhRAQ=;6-Nr-V5#vW|@lF#pAACla=e2mLWeO!$1NG`A1_-J!^1igp@(@rJaR+25 z{ji9aqUlsJRxb}VU3V*E0*k2_?gBG6QjmFYO#K@lYLYd_(5up59ykr zA8`;=S92UQ{DlW#e|Jx=-73)yH_Dly9&s?cn$Fbtrzptvw%6&eUaa3Vb6gwo^EqJl 
zVB!I&uA%t{V48=h@ey>yua6-o#f;4{Rs?`r1hcr?ZnkX+ba<{ulttjU znF>+ookfrwJ3TC0J2$Mg0w5C33ErAJfi{Mdw@tCMMZS#W@$Xe^!S;-R7yM&jTdZ6e z?N8i7%cCC`>n z+Z3`}_tjbE{G( z2*OI3?WK|AEafY20;^bkB=i~E3=B|w^M7*DzwN{@e27+?h+Nj{+M*zD4rb4kK0~2m z`tgjQP02Ab{(C@&+XRboJt!rXsTpy&@abUzn9F&taITu29g)e)G3Q*d64=Q0l)#LR z^)C+@{|A;ljYz3d-ukL`vVVfjmbCxXz3wfwF z2xee2%PGss+4)mj}xHFFM%Z-vo>d?H_W!3kc4A9X-@eaL^eo0TDzR zr|;0(k@;qNRctvz@dJ?&?|5o1$Sjo2sOg(!Qi=2qT$SXe@8P92U zjTJ3S6^d_vZ|ZRB%-}mk8Hj8t)`oHFtP{Q+%z}jKr=@_^_nEq~Z&s>aJSlz^OKtqS zY6S-Z+r=y#z?~2`_BDe&4HQObpkJls9dWl1f~14-b9uEg(^9I@gn$v%jNDJm+RG{K zSgc0lxj7^t;)UOICV%@6lFQ`4RGPdY^S{&-ejh?j*$2Fg(l1sFAlS5l;Z~Zd>P$)! z@9w(6yeC|J;%IT5KL1R^Ka-L>&lga{G zLvKj~xzbqLVesLl5q+vAZb{E0boOdothV?8%v-Ul$p1^M+3~OvaL!2P&9om}_)MX# zzcTDYw%#;4;_`>aP(`=!sR5pCScN@nucW!VIECHVfc8l42@#Af!{2&td1hDbsl%Zk;gjtlcFeOC8(ykD0lPZ{ph`f1g~6(T%aCB{h&Go zrx{gd{4z*n=7C4__Hq?e!`)w!8=k}Mf>@P~C->2A43$oRa;$GJBy7N_{wIRIQlmnNL}afUzy&$t_AgGen~#Wh3ClK4;CC8fpA|1V3&UOOKTm&K!q7`@1iS_MAvJRyGMuvX

    6j*7X zMcVk2ZzhL+H9IrhXFGe_y0us5)8-`_nw|*m1W-7@H}q&3VwW+;mT@JoiRfrT`M(4i zJw&9~n{L%Ox9c6viNts8nCeF`S2v$TMR~W{{8f5h|5w6J{hWF%<<=LTyu>NL7IY&= zhD^h*tjSn|Gl~i8?;?6i;cgBQO^_9btmF};D0?gKyS4)m>B=cDnyTynkfs3_3kzs$ zcLo1Y$mIn0N58G!KFc^8*GgS2Vca=}^6j8b1zBi|4ZszT#sy%}#NoyLaGAJ2Bns zZs~F&;Z^Tr(Xl7g%_xJaNEncTyH_l~`%x=Z_Mjk`GHKZtdAi$`&N2g*>|5hyOpVW_ zYT2v^`wxA;&*)-`1Q57Kmg@wr87S0+<=+eF#pmIAhMeuN!q$C7_e8r~3&6c$hQw(%w>$^4zIco3bu1O;##2*pQU#ccn` z<_wCsSh5T@IJv|Jo4T1C6v~7(2lOl*G-A^yU`M-KLL~~Sl(6BE0}2oltgcHEk}>Cp z+3f9Qqr?maA&?pR8yr-S;xM-Zq3CH&i}<6a&%0&d-o73oylh#=a=eb1rbC=Aj{IN4 z#%*5fndxkOGnu`$60@TidoU-XHqO)9mF10{4#Yinm@5-(CF(1j!*m zjnyl~CCJt;#6?`&b2e)Dav{R{x%}^zwv+gn*r0baADt}e%WLQuMH$jqz^Yx#rU7IX zf`%+UHpD%{Hb`&Yk+#w4<&W5_TOQZ4 z-Gs+bDrgOZZtRghK~J7`bDuj885s%W7EJ%;y)|BM@_BE~C^*`(J%i?M8k? zMON_h=NQy(JzOtp7 z3KKTd&!Ku?)9_G>As9Y?n8#z=@6;@gyY4a*ucSsd{|jDMR+cm4#*#6;+z`Ce4=t^7 z^7tB4R)mMuJeW)y`l63kUN~s| zwj76sTAz+0&NI6P%B4)X(hffFLxdVF=MYCruZ$*S9SjXxzZVEO6CyfCL^dfg z^3%wrY4}hxMJIb%e0WutGsd@&d6NF|C_tZ&ZW?n^pxRU~%zcP#5X7XQ<}Qv1b2dPF zbQ8R7pkX~tE~F(+7SAHZ{ z8Wsz6*G`iXSmYY3K-nQ|yOEFK@#ih+5!EJlFhs#xdYS}Ue7D$sO4a|!(41A^09BXJ zIT`nzAY^qdMn!heDi~PHt88Isr2bSSzCFay9{Bk*nvH^}x~W&EHYDSsyGZ9>6xgWb zzXFz?tgQORaO^XK!gND}ejDQJu3^Uy8$@8;x#f}6RYK(7!go)}I=kp8U?e%F8vruw zgi>cBFuH%nFF?G`M!pQI0o0(=@+r^6^K;#DB#S;$+g+XEYT+6o(<1PdYU{F3K|2pQ zGDE!tnLBq>L5|##606uVT!|E3zmFOAQyqtabVE~smg1My8ZR?`MSe3_(MI_G0^?;% zzhuEb9$D61qPHfrO-46PYC!}ph?|Zw(C?883eWk}g4nm%Xz<=60TepM}`l1RspN7?>_a7%NH9haXRW(0%jzY_hAh)W(n? 
z8BkOOTEdnK(r*YoTis#LtSgMDg}#8b?nYER@#^_l?X&g#vmbr1jSBWa+t6Gr=-=id zq_?Obr=`-S&z`%`1J}p%@{7c-;3Z8(XLjU@$B6D5c<3^ z_aEa@b7Ae@=CHoj)VUq>`7Eo6hGCc~RROh@=HCwQBs=%c3Ubdg9{%5K-8FroHG{*U z+1zhQ|7TnFe<%8NXQOn7i5h?jn$_rrTy;M+{an?OpcuBgxYu;*xKvva@IMbp@?6~5lFlSvli`YN;6*8QqStxk(a&zj!5 zCWR?7r=)AndCgB9=Kl$4l{3~1?!+XHgosQ+31ePKSd8w9*qM zM1B%BP+5eSrtG_uemZZH*E!4mk!FLH`O%3p75C?TxJb5;q8rO)kHdq+ilL_-l%^79de$&$A`d00mae=-2DH*MFEC$#mP-@pnUd9MOA8p z1kEnE&%EO8Bd)Bt=99M!)ZcCnA9(n`#J-tTCZTjYM3W5X{Bk2_docU`bOjrcgR8BJ zH%#;k?9Ch8goPup!>#~0jt@f*`J+e8{T8R`K|@@iSbNmQM2K&I4EiMXpqZZ(k7>bD z-%#c|Eb>Jcf7ahLg^(!|W|I^C3%bn|zS9g#W6p`d&!tQW0)_N2Y@A9-`N{AIXj?>@ zxXiPf=$isfYsUO=1C=n=%$imo-1wWyzTQ`?-U&aw#^Xr^A#MVdq@wTaA#E`~#5UQz z6=F13flY>Q0{oa#$*-t4arFkxv!sf84EngEzRO1GIAgtO4zNFl1DoVE%e1|nC#d21 z%~{s3uOdUNKW!dv6a6FFIr>9BqTbD?p9VpeeKB5x9v*BZB}}^3@?=>t-3b26& zQAPj2zOe|!$zKgG#ZOHN*e~l1)P72pfz|T&+3_gc&p#U|SrNH5|2x~jtb9+gs0Sld z(6MRQGmdzDvByIsui^cG-AOwDXe5m^T<#Wza}BP*zXey=u+yt9YE70gX$t%awyAV< zZI02izd(|B$Alg`?y(&f_Mu9iVLC&7H}CAin|+mS{*+ei+mF}`ID`c#q*VMx%gwgv z$^I41psKy%$}hI8zbGz0sDK)={CkElhf0@vQDW2+0UNVg@4=(Xt@K_xU@={-GYdN= z?|ZR6vOc23x602^R5!uF58kbvk5?rS;pw|H>r`hKb#1e%T1$S){@&w7<;`qLJ56;54(d~ipOnD-*v1RVSwStn~V|JTxhULCp4{FXR+$T z-?hO;gcH!=e*WyQO__jER?5^rl+_5ia1a%gjxE<{sS>mSh%&}d>_7zwcuU!hZa;t8 z(DQ4gvEAJ|_vb86eQ_-CPFGg;vXqkdWx<*)xIg^L#1=>jVmGF+NNz8nwHEXe!T zO*%4+)aH{5z-;f4>9y)=bt{yK^7&PEQCah1Q0gGm58tU;PnaPugCfp(!3biBL(l_^9Wb*gi+=Xj)8S zFn-D-_^Wp2CI8z@Y$htp8^7>ur(F){?tDRr259aEKJj7Z9L1R2aqf=otVa^w)H?_+ z)lJ%M8kf=xzH=m*SYxF$RSySvkU0EXeg;7$-o|blm zEkzkkWY~VLM>;_WJuLmk+h1B4HrwjKvG%YmtEc*JC(o zp|ti|V0$0zwo!MuPCG0$W~#qZmJrp#kZ$f(tX#xK)<08;ZE*`G2|s3m90DX%D#QXi z+?hETXeSE%-0&Q*5rF4N=ZT7xYv?|G=^3zAWMQqS7%vcaXWLBk7X!|3mS{j?(_F}4 z2SZ7+IfM9?I!T;1RgSRDI+=})+jM?OL$1u*T~DaJ#|;-H3zZxlz!x5~ z&s?`w7GxjWwE8WXrccRE>-gI?LzdryaR^UWof;?7WY*WrJgI-QKXcfsU*QRR{%TaO zFrj(Lpo#w=vRGr-WaA8HG1FI>jdzzq!XS%}lhZZN?dBZi?)St^FcD9Ona05L=tLI) z9AWXCaeo3WgRr}R7`HofyGvZrxbGtq<2q5z2~-amYmWi`3Js%o8>!9Q`q@eg^fJFR 
zH{?lwz1Ad0gp+f;j<-eXKqy9AikP9iP=+oC__CJeVgW@AGOmgqZn`%%y(I5?zh;Kh z7WFL9&SL#bTt~5L9507va$I!tdpwX)lQQNW8Pl&Q&pe|yEjZTPZFUe zY7u~DKtFHCuT+9yie4l>-Zi*ngB>GZl2u_h)bgoGNYbX?%xqZUjHA6@BfD{Tkd`o0 zbM7#4+1sbac%E;r4f{4}&^|5a_ff~dSjRXdz(z9p3)Q1!NY7}9gyz@!+W}u|?fUYX z+I{|hRxbY~ffo|ENreuLZD$ViterB7H+z@IVT$u;9h6Ak!>+!8q0;ZFe^2aJjAvI^ zTok=sDHoc~|dA zv2farwP9lxdA|?;Jw|q(9}C2#wiHZarA5_9QjDx^bo`7QoAC0`se_DfC#|>fW&J|t z8x*yxFYd_TFiy(=*4FxVWOWwWs2X^^neQ$`k7=O0_!%W09>=g=O^=b$KMf1Mr?uz6-T>IQt{YV@3;aav z5yiZ48tKsfwV_gg_0v6*rr^rqF4E{u$|)3FtH*nN_5&=W!lBRKmU5AX98LGi&TtuO z)t`UXY<8{P^XJTbuvJry=W1x%0iY!1uuX+HUT@DO{)7*Aw2^BFrmbq4u9@r?Q~2Q1 zIF^W2rSmORXY3Phfq{(`7+m|?jgF;9>Wv3EZ-<2%XbLV<0kqnu$JDv9vuVOeE^M|g z{#=@~$aw9WWd}0ko2Kc{780)B~*IAfjRHAo9 zpE%!+X#%IYhTytt$y=T^Esi4w_I}SWIz#ZUOsM_Up!HDxnoQ(d9hD`M*)U9R9g}B@ zjcB^)2vr;P{DzPOgOgc5Vr95*i8Cb{O&OXtM&l&cDC38riWSZ5V$aVK{4E6M9)uK{ zjwo9TVA!q-D{rT-3X9RYDbKg2+A_@AN)9drrWvS@+?XQCpXMOyAh#(as&wNH+eCef zJ|MkOJm7l+sQEl$-foKCnD%U;e|#&a_WGF09X;<6X&!HE|H;zck_lA95J5zmjWLW! zm_E>O?W?g<7zl$qCV-gGDj7DcvBkNrB7ZFn^pR_`Cu5^dIkPW}MG;~VqTWCznzwOL z^}}K!kfIt~J0^3?&};tAK-xkfp{r`w226!Vb1ai~sg#zBJulTf2)sWf1i6kXHU!qD z?xF9cPAK=&U3Vg3w11L3g5Evv^bVLi=hQRGORxbbQ<~GnTzR3RvDEvhsVr19mpJIa z%oug1WBz283ams2#%b%un6kcTGm?`&ZSe0l_KUaA&?vj zBcxhG-73jHUw8NIl;Rf8(+Gf9BLPqauEou;mFZzk+pw9Z3)^y;tnye;P2N)Ap20&J z31J{+wEN^2;cvrVEj*XXwE9eHQ&?0s)5d76rH6)o-jW%6Z%aUfu^@c7oK+(hR($~3 z3zp_`#E>dl`iwPM|NHbeN6ik>Z;cd2CY>Qf^pUn@AbtEUle8L!*mg5BKcH=k8o|J8 zYKNXnvq6e;D9z}X>9!mf^zXe6(05giVSy8M_C`<0h{?2;BfXCkALQ@(7>C@|2NGO{ z>kl2!L>v{7&sKIx;78*VaF0ps7fvlsNtMFDq_Pt!;P^J3>KyL?-u0Ak6pU+?ou&n*+6=_Au;iv$leAPd;Xbe z&gAGd{I0T2h=&T5+vNWWul@hXddr|Tz-?_5LV`l47F5e2aW z508uLD95*$br=#j&(ct4!N@P%4Zb$4d(AH6yHKsf1A`a=k1ST-qh8@OQFd0M_Bbv! 
z8njP}f}ix#QH-2|L8amML?}2#6Og?49BPtXj;vYH%ncf9a~l5+vNaciA@})6&=~$W zZ~iJwhKr?qmgan3=i>(Tbs;67fFS%KkJpdq%g@+;aFe0VRsN9CPyH>gk&=t7vppujd7UM#(YP;#qg~MgEDN|qhwoE^UITz z`~_KC;TQVRcCadoGxv>JMSUr1OIW*}a$i z6HGNz?c~sj4C7NH)JGjAA6Tj}RwuXF_&!QJN+pLqnXwPJLoTee}lIsZrpy7gF79JY) zRY83XB}p+r!=ahJYUuC^iHnuD)S}({Kj?>)xU7ZOvR`Cb*dM0;adZ}E3d#L2-GA5q zhxGG1_K?MQGfN5MivQpT>GbJ+6CfU>q9%mPt*p=yEY~;z)cqL&(>8l-f!@+tD~L^B zVl92{PYYp;yp<6k)Q&+~AlOfPtSVzz*v?04E0*pmDuZE?er`ezRZ-1ZWWufWCWzs) z;6BGULjChxZo(rQi)DVDj#BKF1Z(-S8xw)EXd@YObBsaxa)vlpCIjQw(dCe8f;Q{A z5Zjw-<%3H6yn5Ocf8D0=cWEwK(y)63E2<+X{#M&=JHsX!0yk2a&flX1NCq?hWib1? zShJ0XM3l=7m@LA#^W7JCIy4dXeF>k%@=!A=dp215@&Mx$N5{-4`Cc+C(2U!XD|qWB zc`I*~c2lId*1kjpx*;-WLsG%g&PH062Ez~C^9=TKrnE0l7XY_KQMt~okHxz#qMtap zQiSO3e73ileK%Wv@EQP{Z3-MUC=)rffIPGH)Q!WVEQov(0Z(YGZf z-Tx%y^w$f`69R@Zm~=VEDv#>XF&eS?>of9fL{qBh&rbn#lm|?9kauCNG?8A=aDADVc_P4HCQYaaW7hsR52u&fJ&tm?7dI2 zTV7mOl-6e`r)sezMi)O27ItkW$xVtgPHP`om7<;uL!9%8CTtoczk9h}6s`38jkcc) zRPi_(e1SbLE`yOspyMse54P5dwWo9RtwmA-NskND%tu5XJBW7I_e zpOZ}Y_(MxJA=AOp@T<%K|0|_X0y;q&=aLN;M+q2O_(8?-oPWipA)7Y0T_q(c9fXZd z1nES6^9mxat$ens{uZcW=jOWGj@WsCs@f-Bqg1fMV9YB3FjEjj!cPn z?8whrl{VfHF-s)@Sv50Wl8+mflRpkcy(;=RD;xGz^~FWggbSdACXvYAsq{{V72f%K zFrYDcJD+s5?wqf~@X5`%IwkNj;>GH~9=Yzwx##Ugw%4|*NP4)e`2O#UKLB-QtPI_w zYTSc}Ck;E_JPZv<=2A3=n)Az;;MWRe)QHbr6n-Hp-NY;MQznC2@_!}H@;U-?$ZXlL zPa$cp=0d7rHXY@;<`4r{lIxuPeN$_~3hs~uii&G|aiR~dTCKQS|E;!Wd|y-kL%<$H z*_INrUFtJt>$D9k!ZeozkA!A2|9Ax6Du}|pWM&S*vX2x@>P+nBvwMf%3ayWtD)a= zA@Ab-sXEj6ebF|aRcITseu@Q5TYkxY~Pra$I z$#u}}e0DkV<7frr*{iBoTkq$jbp3;b09|hLSxK{5YfQreLZ2YyI&|;tBC+SuHkxC) zVJJ=vVqVI}_2;8Un(afIiVpN(Ku|#OPBpF&VPgW~+ATU3_j8Fd0!ITr!Z9&vkFlP= z|1n=5p=~kWyYy!rLqY@GtDM1?N#%6)w?3oB+O8R;{r1@s$<|)6Ym_N(NEmEu4``<& zV(1g-189UPo3$ZV0fADr4#RgU*q;I(61@vEBLL0~a%IvEk-bw`XnDm) zE|dG?oY||(o9DfeT(tj+MtQS-jsA)S(d?qEF8}%4h(!{--#6b~(}YdgLT2Z59{$kE z+^tWDTv3Vt${W8}`363S4|z*E??7Bk)t>q7V3=6E@eop?XzcMzw8ZGY;#fC_-^{*^ z9-9(5Tw$`#($l|a8c>!z7YWE{n+R{oaA5bQ;bonj;CT9I65x;u9eZt#RXof}bAc?K 
z&B6RcZxgF%ilBftO@^P_3iKus+AdNgbf~m3jEQgx!youOF8n|!VT<*6)l?M+7gLlqzU;#Fr)ydT}L zcLsZA+Q#!B%3t3y?5sH;>{VU89naUEHjr4lNwgiAMQDZqy{_EO5CW8ZixkGmswxfS z{0Q%52-r!}m*S~&dTw2f6paXvK*j_>J05b_C_$Vn#KNy#4d z+R$ObLQSt*l^(DR>=K$AuY##NVU2NCN+nzIP`5?1{jaBU!9Md;0<}ep& z(aGLeT92-Obk9j zve#R{6>+>PdB#pfgy(#PClD`FNRJ4Rsb@b!MnV#nV$+S`wnb^P%wNuvk2t_#$wR&R z=SwrTR@>u4P|4DME@w8aL%W!D1Md?LLzj0@_pP+%un`1TM*&AiosJ-pQg7Cgtq4su zEL#6+Qyoo)s9@j_-gHcX(F;v6LHyZS=Dsqd z5p7s{2EokTL1oMF8`{MMaP3Sewg-ZN(NcwqqxD2kVYH>gwr3wZ#yy0V?3T|MWBrNt zgyEaVAlu~pSBzhgWKfQ~+WUT!g>h^lwlzbDZ(qOw<)bGQi4~| z^bC>HD^d8jwncHyRynQ3y{LRjk(Q9bsG{uc;*)eD9vYh*GSlA=#-$HhlVOB%~EAbPPl5*14f0o z7K>UNr2c*!)Tg}0QyTghWk`N8vhI^;sddWF_Dj=7-Umj)R=d6_UjYVZJZz+Ihl#%l zb-m0HGHD~;*F*inY=h>crIv)Z>8Skx_HnudamFBKrOvc%vIHFANyLwi8Tb7#T*~{b$|P}s|YROu7Ualqkd>cX|iD(Smq90 z7<`Lhkcf)3ag>GU(#IZv?7rphGrx^yff<#TI`cp@wAar0z*H8o0NtFkGmFPtgW2Pb zNFkz1cC4(+Jpet)?{!R2|osR)w+6|>8iK4>)56cWt)8y3v7@-}g! zDWKho7(odrBzE%2%4}Q4PQR0@jQ>m4lsCSc^N0yB(Tuyp`<_1R{=|RQMc>`$rM|r& ze>=`MP3}`AZa<7=b#dz;7Yo>z%A<=VYcut9-lm~^=-PgZOrvBr{55r&06GMChB7Lr z9i(A1Tg=7qcy(RquP&EhqCJr*ajDkK{xvUuz)x=%_}kgo_Vfu(O2K7p-91y~qkXzl zYx0<*`aceFSqV!6^#f~Ke(c*L67~@MI`2fH1Yd}y3x`sBE9y( zm;i|*-|$)l_8TWWQe|<^$@Suoe7{*>ZXX6@p-p26gikNNq-dstG3McXZ5K6;NSMD= zc5%44f9Z_O@4j3Kr)9G$-CArpg}=PHVOP&qO9*M0wl3zrXCxg&CHTJ_Vyj*{q^Y?r z%6vz-$E41KFWD4`%Md|o|)>ucJ-ux!pj|NaIc0-b6> zobGi*!OE(YQzO?S#)-iL@2)?!r3bF{oDY><L$zqvcWBeV3;n3U@n`$J z^uM4^*WVh@f&HCKFUjuZR@x*xH0d*>X^oW7M4OZ&IY(*iIX1{ESEe(SZe&0m1*qog z)~Yrc|JfWNiHB<>LIgV3n-<{*;~w3NGY4QiHV1%a!6b!HY45$En85g;87q*l>xJ~E z7F%2AJG6|;OQ1MWgL-xVUW+Hur>@1d3S!uu(!x`8=5)p~q|zCvc$>#dFe6oPId9=| zGz4ufY^LR1GlZ*khDWGOXF^+LvvOAU{ZvZ?@cZ5HN7D~#Yo{Ne&#zisB-!y;3V(2> zvk?3ejVx3Ne%;)NX^=?Llzsbtx$^#R_=5Qo_0VBeT*u)<=uFCrMj7oks)*W4q}puj zyZTky(tbmBwJ3L$f^TP-LsNQUWbjesxI6;M{`nv$7j?)C(Ju`@{sbn*Zy*S7xE)`r zL6l7n)wd(G13gKP$@RSz%)|f1y-a5EV$dvat*$d@?Z4?hDo=fMaf2ssd2ct+Z&TER zmjGEi%1=($A?s0Rl9M6lCZLg}fKE7X5s4QZ0VQVMl8-x#a|e2VPo`wuQCJTQs*PCzH{5o-hz*kuw}?&Ea}amD|Z2gKwWv4nXmSx1MXoOA(4@~c~(Z8 
zM~TLzt2r*o9x&-)e{1@-(x~5Xw*RCO1?J z5et*2etAo(xaeNFVqlbZ}cfLg2o)H+f7vg1WXXH$&Xf1RqUU2Egy^wIaj9h2*>ovW(| z|IzisG|`Idx{q+Mujfg&N~= z?A<48H3b)#$8l)dVmVnI_d~EoXxOKwJ)1LOz+)`mr^dKzZjq-5RV zmB;7*7IrAKfe&NoaJ)gn9JLosQlZFCO1PZ)xcv4?a3r1M^rSG+^KUeV4q_^Xugx|c zI_&Kf2Z{0!rc=Vjz2eb+zI_${FToOB8n7t-@FUnA+dciAj_dqVqo7M4FDiU=3o7C>%7ipc><$qXB%(npo~| zO_i*DOdOx7FZ@0$~Jh$Ltn9bOFN&`xM@89)LaX@qqVj@M$ zq`?MA2WuH6op~P#4W5$Nx=lGK6y)3QWJbQ?cz64CAIQH|cD}JN7k9eQ(h+Sq$gRYw z`;^LD2o{^+(k18)Tr76uE}za*h&ko`SOUip;0oKHp zPl{HVeI3PCD!|a2 zf7wMl@n3Q&#}nU=l;KO4Q{mpFV522So}JIZtT5&|Jx#4+9)Z{f@pZYAq&*eMAAc%9 zf8*tv#~tLBkceJv0Ag>97*&tv%q$Y_lQ8 zv8_6J(X7zC9oU0Rvhz(!zPsXl>>;Gd(~85=re;;L_!uSkgK$)wiA|vNClmj|pzA|^ zJ`a4qN>(CxjY!}mWf|0J*;*LAIUvw76Vk%xcKEXS_R3+$6L&Kr@00p@v{ohQ68&mv zL1N*PbJp1zwSO{seAY*QWsR#Ikxn-FrDs7qQHbB zxJ*~S4>|R+>2|cpdzB` zTgvjT&uxWc7frw}(lxZn#vElU3}#olgv$EPk^GIvLd{H_$-eBmW2|t_r}yp)Vc8hq z{oXNwJ?(4!bI#{@G-9s3iAl|UfEAgGT1J455(+}D)ZthQ`9HpZeX;e2{D(jx&$|d5 z2+PM-|JaVubLFSQEd0fvTKUpJLSnApoAt-;(I>OSVv>EUnW2o|>aRyf@Mu`#?w`Os z4wKx=EYsSPeH(xIiDFo|&^=w5$xhY;UBzs8xVjyoZ|$S-ONoz^eE9h~xh}Uw^_KFKN$P)Y`E0)73W0*O@TrUmcK`jo3tH6KOL}ao~ z++fIC7tZs2HVA;-WgHuo{ocxF#lwC@x2PRH>=$8+K;6f()Ndp50TeqHyaTgORUHfT z>>Bg=mC{jt^xSeh-Iq@hW2XO4N1Ld*5BbPnzk{?s|g5W&Y;=h9g zhbOUWzeX(}?|3_#8%=m5nI8;dn!fU>9q=!f@1gU^X=tTzmPm)fH>JXqtkNlPp{7PQ zza3Wp)k?d zpWl9$6B}4y&aUVM8N5o5c17o)wuCa%(mj_lIuQ`Fp+I^z-@kE4yEo1H%lbd-i)Lae zJ<)PfKb%IQP1kU78M>|iiiq2pEAiMRu#iSJAlrm_O%iZgs8EqK{#W7WkE4inA;E&Mn_%2sP#OjD&68Y-7((NNG^SFbmtfPtw&3U7y=(cIwBc z&c}}&kGuV_%}q-riO;3?Zvd5f9=)$tGRu}iU(EX)w`wDXe`2(}Y7Fcpjp_#;b?x!& zpJUGlb}(hX5#n$b;;>89h+-6@w|{ur54rQPypM>GCLO+RUTjGW+9h7>hxmV!@t@NE z`7wh}D>N5GAc_TD?(oykv!dCB~^-roY!VvX87a0nP9G2Ty|LZrZ zncx4Yi@=-Muw;kSmuHCNPhLu+pT1NeelH5mZgWOWfa8DGpBddYmEpwY7)`sgim|6s zMo6GS3C+GzthQzayq)7-mKy!vIr3VHL`XkH@-jQo;XQtIIy%!~*VJraq;RI7`}a@xE^SGf zsWZxKK?NZfXsnMiV`z5S32)j61z&x*J)_`HR8ONqgfTCK^cT_1Uy- zwM+2fw^_Ony5vB$-)|i>s=KW41Mf@9C6h9;Mnm2e(@k=WhqDL|+b_)1>Fgo()Q_4q 
zIk?-i*oR{E)E<`3?$eLxKqlw2_3hjxuFmAKf?%Waxx{=tatEEylYeO6gL+%^#g>lXL{v*!RUib7O;tq&sn0>@@>_J!)e^TY|U`tIcl4^ zIbw4hiFuB`kiw>RKtXUJR;T_qR*o6-w4u1aM_YG63i>hf*Hnx+)%@j>A8xI2f=^$w z@^(c$ib+mw%Ghq)y=N!qe@)6+Hrpk1H<-Kb8}ad^kg?v0_S8O=|9u1bL#RJ`ANT`3 zad7ywE>3+>NBdUUK+gXZC+_&OX{Ud>k$23j8Q*g0w0)U>uqM-6+bQd;-QQ%(*(kZQ=;e!FCjm?$< z$7YI}8FE=M)Y?P&iGurVe$wamJ5Yz8bHz4!d3&u!1G0A-0u1ph zj5wwhdBN9#^`|!*fA2P@)yGh|l6n?V-SO8vcqZQzu4wCJEUMf>!(78 z;w)jy2@1NeUX1JK53nX)RRytH_`CPjV0U-xC?NamEHfn(O&Vx}w)T!T2FiRY83|Vl zW{PwYJc9NyNrQ&_5}&yE1Gx+D3cEUW7uQPZ>qPp(_T$!dOP=bv&pA3?#z-eV6`U^DGOcX!v3C?WC=I zq@rq}r)Ohv7Q=3#ghE@qe^?0>-TrTY)iEa|qWW)vrOLZ|$eQ%m{?P>-R`wxpu{*gJGjg_eVpw!De z!#%gzW|c!K!N#lV&kJzdiQn?Wc#pss2BIEgMokq+pQ(TCFQ|S)IDgA5$~E|O9yar%WIJ(n=TLM z1kzF&(F#lUXzotUgRHb z2O_Sj-|<|1+HLdp@ZjRnvHXIaU4-npb^ES*6;fG@dY(+CEses0V|$6Wq(V+j@P&rP zS~)110gt9n-H@4>K3b>4)L2=~1n`znf{1koCij`TWhJ)YNG+?1nMyvUt$1wU*t&F9jtU?j4P^sslo@{U&1vm|rf0 z5C;?ceXX4W;&5C?wn8H)x|V){y$hTqbbt)ZJy7^XwRz-Dhfilr8&16$c?S^eD5WLp zBg;W_ZQMlRz-YAc+B6#zezI2BLW!<5q01#776DtxG`r*D>&d3~Dgg;5DG;B2LV%{! 
zI-8Mr16a}d3&ad%wNHb?t#Z7ZVL#s5kQanco#O5WDI|>VGF=GJQnbm(8z7)vCnf<0 z?9tHSpA@;`)r6eacnu`dj{H&N29Q56yC8SXR#%-WI@F&pz@%jX{;R4uqRqr1Qv{w} zqU(xLGl|{QbFF&M2A^XZT&jPFLTpKz0m_NrMCqB5l*CJtG|K>$8+z;t2}6mPji57U z+|#IKcyx19e$(9t-mS*h*=aR4c+v%s$tRRdgIu<3Sw-}yFFQWt631#^bR)v_-keiY zZ6v9O4}T8tbkdNr4&)y;_|Ai1jSoQc8(zv;{*j{{o+~d^jp(Hz)u6!zgpj=~CCm_z zee)68w%YXgW8CN(fL^6-9o_4qqV+^v+}YT;{~cy=QY6NAoTA3=FpdSv_Ub1YBA}b& zvkKYgd+^Nzy;q6tZRUj|N@HMq0}I`fw3-LsVjVd$3@Sl`Pg7CZ#DFw4aAY`+K`u9Z ztIoPw_sFOz)C2&KfhrIa!n-Q*g=MybWT`0FD%_1#R)&zKM;4s3x%Y|wBhpnV?$!c& zJ*lvlkIy&PPcZNkSE6zX(+3wJB{%7VJhg=k$eE8fFBnawso<4Z81_efMfl& zI$vYj)YkWE0EA>WdidO-mJdAl-~%Yvx-Qw{@R7J0k+=?wBSecM-^9U)4HNe6vl1ke z+w=TfT@rUoRLL#_ac?y-DbJ7Nr}NPK(@#24Wf7zXiJBbfS;aGtC0=8eY?u z5Cj9K86Te@En5l-ZGO zey+&HNbE8uZ5)&V_f2%GHJ+MoJq)n_N_|hGb|n2$4_5w3_=hIu&agV#&YiG6k;VbN zk%&PF(_fG9B8i?M#819i9`&no3Fd-hs}#q`$o+|!RUsHpITDByF&c?rUE$ZVpJ zQjM-=h)nxTQ1Jsk%Al9VyHBzaT7&4qd4}OD!jRj>0_q7ru?q#6)3X1p(^2{r9Dx2c zjX+kagB&M&2o2c-llAmej;iV=iZ zFo6)%{M}!hVSzT8Mz*W(6dC%?>P=8*U9&RRWB~>Na-0T6<=+8OBX3xN1l5knWcIdl zAHR*mJpHX7=NEJLI}#WyjTr(S9hCRNaqez{U}}8IB#DIP7H}cys&ALo0%K$96T+!j z)aV5{t#5_Gd8W6gM@P}55@1M{rf_|*Ee#^JrzS2P@A8K=<(eP{^y$_khGmV9rY(Yg z3@>|cI7oM6^bZiaH!4N(K>){$ve$kZJ{rzRc9TmW$ZZ&qgA#At*jL1I;PWONp!qz^ z$KdyDAoQBb)w8k|DIP7X;b z=nhK@n+keEK*W5by!KfEU}dd;-VGkFT?NSYZsY{AD83w};VjMPZoWc^};U za3~+-j$ij)lP@A}Lc_H+Qb&W#Bhp<*%($~!^w!|V3?5RQizKs6v zl?I}Ap$6$uS}kU&4kOaR*MIJm0)D14&G(Td2nns9gR4n1uLCVcyuzjEeV6jzBK=tx zAwh*=geD|V`Nm^j1O;8F1^)*huDBTYU3jBt=DK(+d|uPr{Ehy>_oU25hGmeX4&m7_ zB{4chHKEh!_n)hwC#RW?JbsDui#&P4Y7|>aZ0+)xq(GeABBhcoJRJS#)(GD?yUKb; ztniwb)<1oq@gJ9jOUbDrpZg0vp1lu@i*=^ZPbZXZcpequGJB^jT^ZMvOP7|2mx%ww zPUZL@`W-gJ{-igeUE-%@CF+0(6qOPCFoyU+8n1rDZUMah!rAPAW_mI8E2%7Df7Mt1 zwX;{LH;mgZinG?<&jZm9qzb0o!d}9_>%(?I^hjMWgTf&`eo6rv zu;lkOxQ!>6)+kG;5^bIM=~A_F4X_Ft-rI*KcPP>tYM$ZW$qDHvBoux&vUe~dWqKm9IpZc{WIQd z^7`1Qp~M6A>>+AC=bOAZe&xMBljrqOwWkB_@OELy1ReP3>wMw8z`P6kpF(riGT-Ak zc*y$RJ29}q-Iq6iZm3B%{yAyD5NG-pmdYME=d8K64sW?KI(qxq1ln|{&qsAaaaDIC 
zd*h>jQfP4}#ZgO>J-cmYn;jwU7LuT-;W{PdgA;gL4In!18-Qpa?E_=}mDJalsu5pK zMQ_g*@Gzffq*4h{C#|g1G^MIxkD`}CCsKTUJ?U$5_U}JtxMkF&Z8XpcJfogpnbBXQ z>REwhdhq(LP4;R)f-bx`FhJ7B_)$D@6uFOJFnbpP0eNHigIF!tA=@6@dC%m2r86gnXHP``IxIU%J-J{i54qB z{j>aFi#f;Aq-U(0q@>c3aCYD=ra+3y4f<(}^x#*2u1x9=d4&lYxAe(P0EymxbsCX{ zRw2dL7r1n0xsI@e(ctVWn|G8}Cv3yp0-LlMj}S%rxMC>RxWI+Vxqo;_dzlee2dGd3 zj&?ij2NNaOiJyy^!ORsYzl1$?uLEn9MDxws-sLzn&|FubXXRtVxEX+E-~?1Ct%mb% z{YAmFjx;hr!R3etXJzVO!0&N&@BR%mk%1^!3lv_{U)%{Er194!OS{aAQZ(wMMPKbF zg?OY371c*Zj;r7|36nT+E5@U6xn@yh{ zfnY<7+Dyhjo2>08C$JL>pCOiG3NO%d1QqKqqpZy^viS-qGnCLarhP=}FAXgI2ZZo7 zl$QS@%2viqb5Yx=Vy%e@ZaTJ-rmOU3zY8~;an#Ns1GTWm6fYs`QX_G5st%BC+<)8Ouhm3dt-k_IS{x@-8_k4$G<~PyeXx^VwZRxI zn7qtJ-nX?Hye2a4B>Oxx5S!(szcQjl&ua%2;kr@)=RQkWsY%U;62h0R#u^$XXZYpv zu#9-1`T&Sio+g`?w!<=gJ_4KBnJwTWD^`3okB9yFXU{<5Wr8I*+Nj-2zRYplWKUOb zrN}Qu$c?CrUUq#*f#zNpp=yut_RU*#9@@3*?k8Q&X6 zy-FZ_6;)?cnA7kalF{V$6yJ|;2ysJIHm)FFf^Z{hEwFe-LC!dh{pm!Q5e6N6>L%>x z{$#+vhrNv)d9f1DQWxF*;}i5Zm;sOyfvBle`oAw11To;K23pWK~d-L}H| zpmcHgmiH4CfaHV}5Qs1p$tkklQW!gwLmNaMLp-EZB$k8soo>mN)aUw+X~Ymm%UCL0 zGkkXMsh9g}bv(y)k?62M-}m)4$B1H5{(4ymxPMlyL{B+-dBcMVybsq2#@*jb1ET7; z?48sSqww&|_;Uew^OQ*Y{Dp#&?pG0z8pBZYR|_nfw|cE*&2h3Rg(|Gf=?cLI$-Q!e zu5FcaGC*4Pa}MYb@pwul8Vbr8kweGiqNl{KOwkbzs%ccVb>El|K-XD!aV-Tv&n~1D z{Y(nB3}mT3uM@pW;uCt86iLNQqLj7uzhtl+89j)Ws02p4hiet5f_=CdYahqYoKc|l z#Zb%Gg>FLrUot4_^c2nc6ZwF+2(7>H$|oM@opk+h<+Q+xaFr-GB2UOzzM9_3+I^D=^uyK@P;)O{xhYbC4|}Z z^!j^z!6U)plGsIJfDg|RzJY%Fg9}P-_!*QmMmk#IHMiL7#Uc`3OSyayzBvWJv-E~a zixiNC9YAN~apRS9_MN~c`RhuUsD8N~5^NNh@8+IrdKGQU5v8b|7 zooe-6lU##g;P-E5G4KXN^bu8vt!&$`rHOADQa|1ha$+J_B*EU5vRxyFmUACLjfG3KW)t_4D6s3Nh&6#!m$jFPF?< zAtX0#hj+@{Y5-{a{oY)7jUF$+hTh!~BWWcbwsDIgAr(%jwgtwL{=qq;+*PNcH#UAM zpL`RjQE3ftN9_y2!DqibU^UlSWk>E|Q87Se9eFdp7#*+NzfH<$9gM6Blpah7JIlzJj|({UU3kutJ**Ja8`p(LiCC z&HPtVB%wKr#_%-eNCyt|a1>71XbL=`VNS*c)cr+|1raz4o?dp9BA%p0+l z#-b&&>z@K?V^&baoWn3JA?EB|)_x+|l6v1*s8FmM?Z-i?B0B`|@_$$-ZD|yJR0LsH zrs|SH!>Pe>;W_X6?5>Spk`aDbD3LqaC$BmfNw6sFIR#B`*)IL|Pdm#TCNk*Rr1RoOI`LD8f~ 
z4LG4R5giha;++!yid}77OPBuKa`8{;_+1>L#*u-CYgD$Q8u#FM?P`q|hwn z{Xc(@@j^-lr0jN15fI5|WYIw#IJ3pti=BVgH6CKNA&q7512-HO_5Cid&JY^V^(%tG z(Y@sf0W()7P?M$sZf0Lfju^S*;~WycvfOfaJvQ7+OGl*LBHaJRjsz$2ZX+gx8d|9URRL)iy8s zF;B}2WN>{5cit$HCjgQxFiT6ZjJhL3`^}qk%$OiGh{;Z6V!+t{bfn4{gDgOT z({0Q@T(Y-AiGgoF+DTQ1(X}xx>*{O1ffop9_y>hSzW$jM7DHb{NWG38fs)kBy?Ik2 z3y8l406de|B$kXb4jYWJ&8;~Nq-cEDhmfOt17>89WDRA|ClhhqbE&BaZ3I?&6}600 z@uH<5;q~;KLVD(I`HkoYcZ4W@DPye2T7>YqzvrA?F;R@R1YBKyRi0tvQOH)mF@pAD zlG#7~F8DI?{2x>-orW8-x>4)dPWl@^u>Qm)$DDJVa-%70;7-*gh`xh8sDERVy4L!`F;Z8!?yi1jRrci19lH zc6-xwX|hc!tGN2U>pOBN@@aCe|73xvnhh8b4Al3?C1$m4_&<9 zViUm8X|M$IXbMq{t797@EKN#C*E%$PR!auhkVoU-s^*6Y_Q$Cg)n5)lmto);m}@gW=j-e9^XQSHZq=yR*LLekGw^|p4dVNp|bWZEPQN?{JJ$d&ro32(Hh-PN0dgM?JYme2Y}(iG(ZjhIqV==8bLB zX8Td>1n+!W(X zsut&{{+a}=fYt&LaL?sbvnlu&tOceKia?_}BCdiY2-w{-;YiCk0<2IidPM?a9c<8y z!@ic3Q^D7ea{Hk{bdMr42wAPbw%`s6Sr;gh{hcI6kDTN}D^2J9LpMV^AUC@BkJTnf zmEM)V@qhDz^Ox4$3)l|BjAZ^DK-FL*h$;nMiVzH4hQED1sV*3irAC3(5=JLnwZ83d zchN=)UJt{lalez-Rysjau!Dk01}t#18LB!Gs9Ho}R>68CUGPC(*G^kEBq7`ipkH>j%|!ob9Bl;T~Lu4}b9_&;^RJXp+CSZVBXIXY@ifyC%A5KqrOwXr+MP&F11%doQD#98TGf zhD*ZTa>SUR7Qjve_{tw~&PctOfOeEE&;zu`FnxJbt7SsGOsLSS3>7=Pr&N}Ma(5exT7}2UQiKxfmJ{Wi|TlzIhhB<|X(PZ9Hw z3Sq*)xz}`tunC>MV!}vmr;C@_6IIjPG7=`p&ahj-^bYWrWlWx+I}}PW6?#KN)_+U* zKEl}67seAZl`7%etlUAW0Ns!oNq)`+(r(bR&cpW<<%S{?LeJ9ZhMb_S_!Yi!_sHg; zQ=l^j^WaJ3OGF6^r!xvil$^C1Xqhl|mZ7Ncd@WX!#R^d_UEm3!=XJnL9iY188fp=R zvXO2<(jKlx{mdWI8X(ILx08ig!hg61Lkph!&d3qDGZG{ApMcp81_&AkQ(1sw)(I(I z2t+-CPY@eop063V?NGAO#Z<*iWV|oPR3`3RCrIi?#wB9Lu@W?#K4mlYyB1OIVNpNw zmffOsR)($ra)F=1$#CBKbpTThx@-S4e}(HB3T;mk#V=iHHNr`G*r}@1UK84BDG(Gt zj`s`W3f0C5x)qd6TbD`713!OTH^N)c6im3Lf`74w*-LL2NiDUI{EOg^6Qo^Y2QzMB zIBd27fvqn z*`ziz_u@JsQ>SzjTsklT7N{)d5hV;e&78iH1q)f|DyuJ+70Au%zr zYtM}665m7}AJ{Onv59|{vgd^4r4E@pL6I2)u^Dnh=qW8awnee=tcM7ClVXC=3^bYpgS@ zJwS#cN=zn1ape^egMX}Tfp65!RH|7?JeL3tb-IEo)!6bt{2F@qe&+LTMzNzndyK!$Jv16cykSKh%!iN* zOep4oiZ7C~F}jLMCPCu_#dxCU^G);_DbDtgDcFBOs);&##W5_hC`!z9Y8avDPUVT2 
zowrW}%@Q2w`Q#iHJspGgwN)^@gLQ9Al$5au$rr_<&3C=KJ(&D1G{3D)H_Z<8VzyW? zmU`LPp9;zXpVR8TYPO3e9KG6sX znrQSnU13R#GaM#%u*O6=k3MnpqBtED-fj~&C&Y=FO-i>~Uq8MS~eOwYAa+J-9pc4F193uR=+^&%wgELo^xHzR{7?TJLsiJ5;* z`v>L%d7{yCV6!SJSTZ2p+Zw6p*d>D9SxNlNQn_Jxvyn{{#)M|OMwzn*%AU|I`XI## z=Et0QiDrvFSd;R*hg(QQ&+u=_kffN#^GplHVxr+R{GsZFg%fPZWMhs!+R9@kUOAv7 zg+56j#LXvx7je9ciUap|5_L&!WbJ=_G>M!Vyc(g~!vj%G?1WU?I9fCGdM4(A&)T8x zF6kE{<7^+w4&WnH`FgH!PCp7$3=r=pTP{#7}g@ zFY@Thz>F*yH^PuHd!Y$pBY7Xid+g?{xJP+-Z^6nwU^XeQu~6MdX!DGeIhu}I@~p`| z+{5>{h@8xp`8w|;nl?M0kWV`&i3Uw#xX2Vc&FAKaru-g>%=U6(p((WnZBf?IrjOa$ z6~sU>)~HyT4#G0CC+g5hPH2B9s(Ni4Y`O2Bcn|NC-exo(QVNHix4vPtNw!MqCh-(Z z^C7d_?yyjqFsL9w@rvSA4ZC6;^Ka{-tt1r2n?Egrx{}fW?r&jRGpr=NdAnKOaJS%d zet!@VcW8+C-d2*Ul=0@zx{{#dEtYj9p*r5>$(Yd?QGN7@@mPyyLaBcZ@(#R-p{XD1 z5+QF{oT#Jr&S$Nph1Hd`(6y3a`#T61#mO5M$@@xD6=h`B+`f|Ns1ArcYb7nLuB3%> z13tna6je&$v(^XRmtgHw-Z8W3J>Kg%QKux$2P3n8?JKD@cfy2}e`SgDv8FkbbgXj5(4k8_0 zk_Vn7KEQXyGUJB@~ciXH7%Shf){;a|l>dw911TIfhZs`k#M*y6E|mU|*8ybK4E*IWZE@{za>MWF2U;6N=YR6v5$BFs*L9l~0*H z(W9UOwwvz*JiQVWjiAL>&AGbhm+fwq6e7*J`k$BE-8}1N0BNW#EAZr_0!OqL0*rs=-0my+dtnU34V<3o6IGp4 zj;w`b37H($VaMQ#ph4?M8r?cY-LCNF?&wbTd#a-m!e%eD7&d>9XX_SukIvrV0d?&o z0kusQ*;@=P=~y}Oi%byt12WSDi9ppWVPn(@qg!Ab8`u?7;Jnq5z$jOk#=E;F94Uso zi8gMe3TS^ykBpWl;nz-h`ld(BK!rK0(HV8MG%S|XVGV_dHGDX+sAf{oB$I)jpOnn( z>~F)01;e{@$?S>Bp}d&fzbGnrg7Nm^J+aK5QckpJ!yfpA;==GI552oR#Jb#L#}?Fe zX{8@)o3kC(2j`cq)lQ|ZS}@1Flre$R<6RbHtMjyh_IVpov zkStWIc@#eMqW0h?8Ir=fJdD-Jv5_hk4k&Ix`KnS#4yVUKhKr`^on%Uwf@bVD3ng7$ zbb-(ASWjQm)kPCFe?CLix%SIn2UxJOp3EDr3ZKBXj1TZ65_l0QaG2&e??GARgQq^D zuF`)*2a%30$pcRkAK*eG9EE)9=o%wkf2%qV;}!4x&`}L z`A-zhg)XxPHYTu~Sz+CH*SnEm!Np`ixg~%3wSes9I}MUL?B4=WN@;@!h300h?w6dT zyhwvP+T}rk*#|29k>jk96)NfV=Ln;k&~dSE!?NB+DC>y{Hdu}p17nGcT{mdf>p*pT z7zLKY0oMLitkhfuT^bNccXbQVV}2^IG!q&v78W{dE^WvIF7AAe;%}SX)(Sx9@e(mP_0DHwM{xx_KsW3NcP`VAs`{Ndw=3fu4 zmnPN&9|1>~M%Du#3y1Y!HLdYYBORQ(mvGhtLJ)7{Ivgt_i?hRKe1$K1HdmLz)&o=# zp{Np2Q)ae@i#IHir_hT&kC!Lc1C;@fm%Y~mDknbJD(9K> z87e>HkGp#3&JLm@R*-aZfb6^x2% 
zqMK8nTsz^&_B5Bg*aId3GneDo11k#!OIam)f|;%HcJG%N*#j#9%a=vj13iEC$kN(3 zHMSpinwZ)8Qdhj63EhFlpD!aH**wQxa8n6p_LMQRzw6WW6dbl<_h-uheE~W7Si|~+ zt2kqU1+Ve3qqB$I!E*WlFCu{_kqTqF*%aG{q7^btnvMHB5JzhUX>>(yuCn~FVDuNv zl9Xj=sQ$tY4LR^Vx>$l2!w-LOWk|r66y0{3GCq=D9loD052S|jpui;&DXlc>?^0a` zk3@7$R|9B!Sjolmu_b%~xmnxC-Y4A_II`W0?NqJBmWUQu6VXCfB0_VBI@3$ozpln* zkqzu8@hOG-H8s{iqZIT`rqnbe5k=8KQ>F;_6U-SAMx}{`W<)kJ)!2VPH5T*_jm+#l z5v5NA^SCA=vRnF*s3sy~2Wmw!zU3L3!^$(gL?U~3B%fF$vgn{nl!bDIlS;L~6Wnw@r_!G`@-B3f8YL<>cnxK={24d#M+ zrkC&@cA8i;P}$?%-hF>R(1`yq(#`QqM8QThOc^s9OS7v!UC$J}?Kdo!0r~=pl|6{L zN1>4QGf}7Cj>d{kt4{RoKs5@L{Lz9fBU64v*@h`-tYyXO#E7QFp*7ck^K+nA2mkwt zHPiIsRsSkxOA7C%*g}^AL~?B35cR_c*l{E@n*0lOPQ#76+|ieQ+XD*$yO)aF11~%# zBOh$joO_XPgx_mRED_oIhtiGpl+%@4y!3K??xaODadJE+29vL~&HM|p^z8MkIQDM$% zbdJSe-DaYXbx?5ex%+TpQO#Z%w2sspe42d<#bL#Q;oZ5If4$Hg{Ym6tyC}}>z3`rx z*-Ofa7H!xApGI+Fc$0_T-5&PBcLm(Fy(cj5oIE@zunGe|1o`EfHUP9y*$WTdyhlhY zji@sgoHpSZbLUO)1=+uP54-dgagF<+rN#!cy~Y(2PNGw_l?70Pfe! z9y?<+7c{mDTxMYz*>7DU8)0~-u*}$ju7r-}u(HH&es*ON-MZux3r!^~^p=*$K(89E zcN7&?f3^b@o0Jo)h2=syW5h#5VkGPtq{4|GmhRjv#Lg|@BS{6tW=Esdu%Dd`*6cj7 zQdkxgFXCFJ%V(^GHo ze$w!-E2}M_drqOx&4n9wCU7AGRFoM;?U7ztm#f|b2_3E%jdx4MSc@w4gpT938QIqN z07<72O78q-!-B7u(cS}20fUzy-vc%Q@0VBK10H{?9{1C1^{W+uPop?7ygQf7o~T^P zi>VYAML9QEyghkOEVHMS6D``X2R@DB#PB8$y_-Gsg_A*b0R(kjTKa#^TsJwB0Xt!a z((7`*y2&jQb@^vIgyl+T7eTMeIp(HUS9cKouk%Tf5}G3majLPWj1>ab`S2e`_Ymc2 zC-{HJMF}!Jjik>4k$pIF(IGbk6OLlf$REQ9m;`sht~-|L6V<|vxyX;?3n#VET5h;I znMTivnN53Y-Y7WH=s7S;ou2Yo!5jv0u~;*^pkoO(jFsd|*}-g93Um7A#WI=c|8*dq}V{{^|s~DoL3;o<{-7cM}b#;SaSJ ztZ|w$S>~}v0xeb&xekyEB`Neyf_oN;>Dia#c)MtvS@?XHs7b2x`E;dbtjMXs`x5w9 z517E#)%nq3!D~D*UTy?JMjCq$JC%=c0qY+=5n(o4=|Z(Vp=#2K>XMGJArTF88CZYd z1H-5D9fpUscz~BO;7xkA^i`opSO)}cI%ZYi*s7Wh$2*iDb?`Dsg=0XW=isKYTB3Bz zit_u*?pM}YEGut%Z9iXdh>WplL=@q%u(|{n%A?FgjF8OP@tTwIoGtyFVQmR^?B@^1 zF$D(ZW#sK7&GnHe;$smN3~%GCkj#IcC@Hgu+`)t*-ci4(gL)Tofh`R4AV1~&=Wv)B z;1{|)9yDjgfjXc3E)%CXl@?)^T)_@fO*F*_RF`Q{v<&D3YrLa-cBFyDo@!sH;&h^p 
z_wMk_4#hoDEe!8q-5V1nWh^4UEQ(W`#yj#9y&hoooqlP^W(6xYE9gx5Avq*vsEjll_p#qj%18!G>k_ zM7?5D|C&jN)Cp8LZeYPK0}X#z5FRf~)LQmMugGwwm5_{e8oy|8!Od!$7#Xw;#|AY~ zy+X>Mm^C;yn1bQWPS#~+Pt?gjl4iA{NFK=@L%L>mc(bUDz>AKV9VwygJkg?!7>+#7 zoeRSoM(-A{+P*IFzF%2s*D0<%Xs4O307Gd4xxwupT{BQdw$(bxMp!;tb&UPp#0WGZ z04*cJY67{%yR)v<3}pbmGWKjN0}x@|v`TM_XR+RauLQ*0Wd%Ap+_orxwCM(jMX&}3j9vCmJGd3Qc*B`u6- z@knB)pgNb(#yf{Jm}9`AIC;Y&c}t@tKN~3;W;92~6O26iwlE_L7Ba@Ae^O984?c-S z6d&xygVtu`34dl1te<2$mf12l3vtmzqs^0){J9Cq#>atScMn71!uOdiTXf#J1#Gk9 z37U6KvmGJJBTu4MoP?&*hD0O(Oe{2=C7|p!(WZ~tGNqJ-VzWlUa^RIMGJB#DF*%{3 zcqV(yYIhwNYjiG_qS=?mFOKQN>F4_ymg8- z)-mr!Utn9;`fDzkG07sRD=AH~mk0TkdXE!|7Scj_hhk7CNEC0Wgsjrt`g&=vvw=IAIbM&g7Q0-;baQUMtj)rDFNAsVSB zNs3*@>VIDB^>AF|yT;7?C9HnW&86_O?Uqr@cKNbfs?`FA?z8|3EI@*Sk;)hT&A56l z?GMe?B-o6r9pzL?(U^=4{PQ3mVSRdmkNK{^@{Abi+2>&mqaiTgql+bYG5i2mh6F@L ziVE1C+7{(`HFPk##l4Xfilx>$l2!w+y}NWf?F#T%P% z?irsR9QTX@&yosx=o!k92Fo_BP;wN0KBaa>5f3P&c6Z3Wd)WtHl^u^<% zaDS({avz#QrHP?Q(P7w$>hxJ5CZiodW{Jo0JO^Vt5;u*%RfA5f4O)k-9-D zocLks&P|T!+=LHlPf)32qS0#DFBJx@*nfFp@;A9?N`{5vMO-VPQH%iXxC@JG*?!n* zVrKK*1#b~M(TMNr?24s|bnu3aXqYl)wn=7JeY&1@L-))w<1#>BKw?Q*{HjZi1-6Go zD-(W6S}%GUEoG7tN|2!9mTLeTTchSG!?DdC=XE*a{QVJ&l)#zOB4_!9wZUJV~@CTtWI zJ5{TqrGy36l(5j15_GDDsFbHND@P`m)-9HHEZkH2K&^)z?MiK`msj@FkrM9uDud=+ z=%G1OnpkMclHITq#lbuhJGZ%n5M3EsbH%9*x&!4hkj)flE4?9gPvr~8)na~|*#D9MpX)Bs3 zA=rq9DPv}T*Qe>JC{t#t6N>@b0&;d&PwT`T9u&APAgI#a#a#yP3+U_!0eso61r7=p z#a)}g@#F{+PswrWbsu;=wDxkYXPA-th|;5iIl>Td}3(oJ&3+5%T82I6%9sap;Ej_xv;u`7RnhT9wHJW zb%Rtm@k7PVZ3bMzM*_MTUcxZgu-{rh3#$ugp?DED3kW77Id8!BWq-l;!%h>+YOBTnXriW} ztO#|p%4f!oW0sRFFTrwfjU5?G!}h#ZG`)xU~~x$tg^ zEp#bBBn_GuqF;U~qJO~g1sYBMg^GIIxGPv5i(IL<8!Yml^`^97CPo69UTEbKZ=$it z2}MV5Y2Oz<1(PlCX2i^%=t0od0c+;_08g(3MFWapLHSSvz`^=_uuXHWv$fj1l|T|J zjoFuX7re$Je-k5`d6MD>SklB5`=A=wcRZWm_L}Tj(vQ=e2!BSHvr!O-ckKXY+cT># zkU?}u-i#$#r|P6LB5YwYWpv2dwx9_>C~qJ+B*ii#Gh5E@wwajOOVEinjnHEiX$x;5 z(R4S?bz*W=!!moK%d9a8kvf42#|_ecmw^T>2#*&gx?+b6Ct5lEi!mediv~`D=Vyz% 
zj*&sbN#TBO4S%PBqW1en0DT7W9g4JC7~bq8b!7HLDIG~Alu#tkBYxgjH{QBCXV;w* zGn@64QnB+yi#9gA$YT>+7~U{?rz>&S8F88RfQy|GXf0mvD*yb+ycQ~>xzAFXCXdte z@i2bEdXM{9GzgK!yf6%-RtuGd>iNekl1xYvK|T}!V}Ca8_Y170t&W*}=lSiKUm=+2 zJ#RAUBT>YMQ45B*ajidAP*O5~!ZZ}|j`~X-%!iN*Y{v3HS(sl}upgl7xD=BJb!c7z6Q^OOFej4mBIz#Tw)becq0R~R1vveH@Wl! zTJme5@E&!iIqHwVjwx#0hf?=Fe4wE*X$^{=_vk^kN#Q-pm~YV8#1Hm7!&22JUu4B= zVcqUHWgQQUgBJgZhUP?<*#q07JJ};FLxK+n;oleC=(us!{i)rLF1N`1 zbbrF_{^WibUK1k$?O$g$OcgiKVkZXng-^k1#j?zhFw+XYhAfP=s zj6EC6xJ0nkwc{X0HkvS6c{nm{MiOy>$bV{zz3AZZwBRVVk>A-3uPDsG3#*xMp>~nJ zc(LG>$KBasg&giSzvwwJvuUp+y;~p?jh+KrMbW~_UGxDRsVA6TnzaDiPZ`1Rb{dgW zVBMY+pR)(bp3ouOiNmQZmy5z4d zWD%5=^f+4+>N9)aRQ6Xzd8{k;ILR+bwf1|NI0ya6n@)s(lxvymR0Isehvu&o+Qw3x z92m!c4m?In;iR~bRz>wVNeL%=;0>=BtcCc*Naf1{3Dt>4&4xCTH` zYG2XPH($Je>HmG@FT?rf>o=-cZQT_=fBpEozyFbrXZufdjF|Ley; z`{&6Va9_Xu{lEJ0KmGPM|MTU3``diGo!Yniy?whr|JHSlD0Q=T4RC6YU4`Y_<#KM{ zGL`idW^4@5i)i?*6S<91{il%h?cRnALZ=%FzFn_>{hOb-=y853!~=xSg>i4!m)o|# z`S04OTcdF5^iH=+`*v;e?exZw%OKkXungL_@oa$YCv>&;oFav613S$@?3y8_4`PGu zcsAJfA?HrBYonOXw<9r^CGFe!@a@zl$VGqq+g})cv(M5NFzEGf=T0v+$+teIlXW$k zZ;ir#-~6}!GigZHo;MP1YIRs#b@_P^*`M}r!$6Jlf8#$M^zwiC?eG7^KQinDo11i7 za{kDjnDg&r*qvze7q!ao&RCXtgGeu3K3y^>U`P_vX-20jq}(>*+Qp7GzIEw!KJ%>& z_N_@jf66n>@0-@Gwn_jzBxm{H5{i=)*tcPSmYs&lz74OAX9utDK0X?|f4iLeyl{x= zZ`b=JKWAoFN=qK_R;iuhaLEv1e2WhG)}J-e|IU99FQc5p-M#(X$F~t^bv{G6FALDO z!^(@4QqTEjKZdxQ_BSHt+}=_&F4z2jLL7R>(rGJTSdWqZW5sU^T){Bjf(IW_xQ`mV8WWP_Tc7%0`_IevTqNb&*SU>t zQ%k?ZnrzXgodWJL5C!w?IKGS7nD}Xb4Dk~&SIiEVk2vdMGy^3r?%M77c~Bf<^VGLp zNZ%SoRk5g_gCmTMU5V`^2>2(5(?y~9SB8_F6F0(qQIBZ()}QV2(xaL8aZVonYt7rA z9Z8OF!)&_@V?wh=pB6rZ)nT}_F9sHs} zpB^6nj{?ml;8_;dAuXW@_U$0TzrWuVd!w5BNBjLBn?{-q^UY5!hM#J`07LX)_c5pI zzJJ@5Y!oK-eCvsSJRc^h&i@yGtIoY?xnKLJZ-Zs#`b|DvS&*dc%GS*#8D zBEVt#4i5Pn6=45qsJL@z;LdM<$TB7*wS{~B`zFEN>D$%DO{~z%PYw|0Y=8Z+Z=K5i zQx|u#?aw01n>}ndT9d6gDc{KUEw8k|`mg@J9T!}l2@YRe>cYVbUk+bq#@T zJ$BW;zbun3E`)veMIz@!FMNH5CyhTVje`F1L*sS-u{`+&pFb4*&m&&g9_W0+=hQEp 
zr*Ppr(WObq%(<6Q&?Ogt--7eSfs=*0zdYzx@PoVu+Z8JB(K#(Vbr<#@i>1SL;&WO3alpfM3NeS&h}~ZlxvTlg^bc@B|M)R~n8bc9>>$~q_5q$0{#+Cawl^t~ zcjmI5&)3?Y&sLk_b<<(#oA+O_;z>3prOR|qyDtqBoi|-{O-t~90<)VwFK696(mOr1 ztFsTN*4^upKR;JNd+e)o#s%ZHNNnPRC$5VG^jvx)XUbYyQIFIcdf)o1r<4X3+`8dvj?P+QpfBp*T0-hUJ zQzw`n^R1234}3J4UBGRp2I;rclE651ftwaueF4)-Z062?o31nu33Asj+>Q{F-B3&C zdx0WP1=0ozpeVsZ_;z36`?#@5#mKBHuj8X5wops{9;}Vr6?XF!so6jycxp`S2byPH zOJ!TYKSOfpq8_=5%}fs=rq=TmPYi4%f!yz9^1`U|(&O%;Pn0_~V>6NyJw>oWqI8&I zg30*Z5-**9dr(%zjfAG+%bf9vOw--eG}*3%$+**PSNWmg2ucO*_@i+?p^N*EHp)U%ooCIwdk3;rUv#!+1 zI~k0QoOd1mbiUZh1vAkjr@q58HwBrRY7{6zNW52n^jwECi#uV$-wFEKGMhjnS=}h< ze(!+&{Bd)M`*2@k_C!^0P9B|&3zo215KBrXAaf_k(ekSJ_zk@wqR4xRJnG?m3y+FX zMnL7}L6}VRo}|%7vJz(>FxqoACMFlqozxhoPT)KX@X`GbYKbN9P8e&F)Ygj=AdM%e z?A5n_Zh!8E2g1bUn>CclvBL1v8-K?AML)?ZNi^;^|GHgb?}cLXxU`Y3oB?4o#c}a& z$v2t;qJ_LGgdODQ%RbU6*7`MBEDae8k2lgp$_M|p-*||pOOQVEDQ#AmNJDLF2Kn_v z!Q&j4tg=uy`VKo8X>YNS9x<18-EcpDEKqKL6&FDCPR`Pu`-rn}b_S!+!d$wzTscAO z$cN09{L(P45D#z~@u)0nk{v1Sd2L zsIKBASmsYuH1vcQn(mXM6cgy4Y%VW?o)Cp&TIz7>T`^`9YGkc;o7cM4Fp~&tk-+*}AHzUr%s6K^%TAVu3*e&d6Wl8-m z=>!u2`_6k-5NmAR+pjUrtv6$zXJfg3?!6vKdKNg3TK;G?&~8AV1Bk3epk&FyUvL!L zB7YLj49AHTBNUVclW8TQ&?IVqOB-`z4Syxs5k!CiwmSkwOgOr}UW7u+$`MdUux!Hz zbnL?ov#~OY-A>LUYOpQ7YT2l-MYBn|so&bjfikMo3}*AF8?v*bO=1V;9&ZE*- zH@CQe@2W;S4-yw4Vef8aCmddCr^r7yV5V0sJw(3+s*w7sHGiQ?kH{rLua`yp10@O`7e=rTO!(|Bf0uOo z11Nuuw(OH)Wb^_!`sM&4w$NWN`pV|p>5(#bCTFNtt+6kf9|pTQINxRDN*e4OaX%}S zpGB0wc?Xcu*CYH+zNQiC?o#Mf#y5@B9)46$lo>Qqf7~v{f6-{ciwt-X`2bfUm_D0f zIHZhk(kQsj4-4{sc;I00`&nP_{>2CjuJlYpLD+?yaVPC=OI_5tiPv=f|Y{1pd?Lw$;P31yPxx0%|)m1kDTP2 zU@~%fpiHyX!ebY`EBc%YdQq0&ABN`FYM0ae11CQZYIUmFzK&pZ%R-6TH_On>8`a!t z6_v0jQdPbgMeEek@)ft#F!JD;cWXYiH$-kg^?{R1X{l>@}u z6p=y;K2W!9o?=9je}DyKmU@TP4ugnef2xQu)eP6!>arZ*E1SC@1|Ntl_`p)WIAJN8 z-os0sz`~&W^9bA2PT~Y@1#9o!1=c&ffU8Zw%O(|$4INtA*VkytKJ^mD=nLa4nYOqz zY8182027uF6)L>kqnkpg7qCWu_d?}FUA;CP&44-CI7&4Pd+puVD`%jAneFQ0(U;sr zFSZ$#lKUVT$Kh_r9_)@{PJC_a&1|_m-T#6z%|cTlLSq?+N5u?hI3OO;yvfAc)<@)h 
zXu~=$rO=S6Vk6z)zfiIqXxe1MaA_{|Q)JRrTTw~cjNdzLZ87+Uf?Q}PeA@(Yj8jk$VCw=xjVP@?0dCMm( zuM=h%y>6EfAlL4)&8mOlR6%5)n4KO{d4cm6^|Y~{mmvQGAAcU=aDoW9QB{FHZ}zhZ zN7u*gAs9n-oG=S$ud}pq%KU-~lYQiUyC^9v_muQRZ5Ur&%URNFk}9I?oC{@C=F7j-tsThth12|{f78Nm}5z(&_^!2&_*3m-WNZs^Q zn#kw2JhmNsGi@|d>^Vv86GeoqmO^$SOvau8enX;>rp>)wd>5Y0e}w9#doF#sduorV z!pPe8zXBt_;6*0zB=P~SL@>=2(z^rp)ut4b{a*s|RRxP(z9 z@&QglYg-1`os{Au0sRTCM3iKTlD5)5z*WFP^AoYQ;RVx8g=3v%Yu}W@%@7l_2D)*g^hDWrZ0(IERy}u* z?YjM83ylah!~zjek1573JrNC9v7+Cgi>AV25Hv9>AOVtge^WXZl&F#jw9&-M)>d?S zr+>2;!A?>;uiN9Sa{|OJR^LY4Y~=$D-N#j9LQ?aBRGrtr4pL229vrh5MM@2q73K>x zUx{hWI(`&5X1yU`-$0aKhp7jK{q)L03A)doB$q;q`^1__lmTzA?yd^4c8$`!H5MOZ z6*vWEBP5#Dmu~_D8UlF;mx}@fSARdj*|b!mVCCEMmC=stDn%t<+dIdFqQj7_QF&KE zTB^#N?vqOvoWB(^$_$BtBscO|q)a!5nv1I%Eu=lH3AMabO&Cvx`qZ6Cp=i*Z))SCiR zd^zuC!@N0JR`1hNP4;H2i+f;77U3hmHITsuD`}gVm@@?Qm7K9|V1HvrsJYh}3lz9h zRmD7C2()4qeN z3JYB-+$2pFe{e$)%YW{=Va5#;j0~1E6PUl?L>;M;td|N4t12w?Agey}I8c6o`FsR< z;1%QrwkVV+QR(|?Q98NdKGtbgi}LLuOH1(!zCE%m?5x=hMx9!)QV|uDEk>Q%XkvA! 
zQ^Q}rwxJdf0ato5*Vx1deABGaiWEJes$dGU0un$Fw~DZ!M1Qr7K^sl1R4hd&{J-g+ zqAU+mS@DMvdDZNqw{}+QsVXNLx~{=feJcO5syeTM9i*D5l5(FtDN<@wkA?XH&DS<{ z(oIph;F$G=fCPjnzYbFm4EyDki4t_5JxLaYCijVTlUQYn+V4T6oS<8pZg#_Q8fL5o zuknI31b zNq-J{6(ohS`fk41U2yE6E1u1d?w0uxo9r`vqN;GhS`Z;QyvVYumNwhMWfV7W%C!yz8gkPx@&Ib?wqj`1M2cThXcdOMyF3_fpaWad}s2 zp{k`F-&=iu$G4K@a(VRG^Hv~Z*-u|TaXfAXLR8l-1jseAPh>iOf7kb2)qQ?wa<=_= zz((nYD~}S#)2kD!alBC4jg%akLsa68G^=}Vt7;9u1>&zjdpwL4ofD3(+m|*61SA4| z_6GzYe>TQ=Xx#3H*2rhI^h$-XB3=lHb(^}=p%o}nV`@^N zAlMg%ri*#iom9NgyOYJsnKQ*Z)xTmAH|OD!*%Q^a9N`DMr1Z&Waud8`Tu-uRFiC&R zrn~j5dA>qAs#^|uPmq-%$sRFTtk*J`J<$XfW4590Tc~qw)ztXKqHvcO3ojQVCx`&m zwL~w^m!Jp)JObUgm)-~jH-GIRMkV~cmMfU;3>V`vd!ho@?KJ-zDr}kQ+RDy7?s5ON zM%o(BiR0Z{;_f%q>;0ruh*@aGvK~~pRQ$GDdo-7ShRtKMZ!itUB66luf5+=<;otS_ zukNug*1-fRu0_8w@3p-lV8G}uSSw+Lh%?|R@2uwG11tkP-4Rzfpnv9?ue`e@I~>Xt zOfyM9b}1HIl`c|zfTuwNFCy>Z+z0qTq&Ta(1u7+sd0FJFgem z;?EY*RC3C@x^qRwX5EgPEa)tzfijMYEq=xHuU3p*vx#&x#zV zk`6bk>VPIiXp@ul8TFqz9w0&mz;z>|myzKAr-Z*+J;rDOlsYoo&&y;q;|nFezd$xeChNd&w1=SoO+( zFQf3c=D(hT@6H|yVym#g8o~0K1Y1wR1tdWH)^g!OiQ0FLMt>8lo{n~%@c*WNOGh5? 
z=!HK_%d2h|d#;0|6UjtFH)q>NGGYqB!E0d1EU6|c*^JqXBBjRJv@lLuh(sp#HNq?Th<^st;U(H9`PJxSMX4kG@ zrE6EvDrb;^4=|sPAP>BPyukY>Qv4G?kK*tC{)Yk*?rRa4FWx*`B7gXprfWW` zP^qO5vwy4o&A)4(=5y{J2h={y@elv~^52dcp&+`s({Y{hH~%&ea=!g5{~3I?8IDiU z{9hp+Ui-KHu|bnaUfk9WH&VN5ooZBTzFO(k6VD`P!Q@TEWaEVsEZ@#EDU*0DDKYuuk zdg1F6I%ky+iK(FL=B3|y;PU?RmPm!?tH5*2CwRX4!gRg}&-uerAynoMAK*DH==@>6 z2!F@wmg9pyowi@Q3A%_?VQs%p8B(l9w+j?ol*ht*4W>O2$y+N2K?SaZlV5r}Q z5vnxUh~Ck8qZh7-E$xp}EZ+}zOTwQx3gE}-R`wS4(CiH*M6nV_ zr{BE#{D75rJ-M!ma7*Y84p>1K7qhNhUx{BoCT6%Ddf5>^T;{z&I;AYqq*0wtsI;?E z%x18{VBi+vxy`?K9FCA27}aGcwCu~_DEI_|1dcn$wCWiGehC?E2lhoOvZt3zXI5Ev6= zc#Do+gBhLTD&AQ8Bb|Fj3z&3e=}M!Y7nDIMrGG6)jH4v+HmQxAsT(O^#LeQL0f_l*e>kM(eD{}Xw?Gmm1 z|7BP85{q_&EqkA2>M3j}2VN0N2=-R{7`ZxziyHbz_5DieoZ;XkXgOZWEnTjzqsg$i~7j2UH5WQPng&xtzdB@Q4}xp14oB z1V3^72gR=R*siFgJYu7DYJ0?&zpzZv5uz_rPNcPV5A&yY#T}=e&;V@x8%f8=6&G*yk}?9qL5lTLbmFOx!kz(d5Z`e$myO* zfPvqGfxwQ2%B~-??Yz0#9W!Y?4)Hjd zRYpIFKZXeHC_!*7)!IFbeZxX^t+ly6uxhwOwB(#ld>}oIGfZafG7uW;e(*GXWD9gl z1NIOFaCZKt)q054Ue$VpX090wmMkF*)hK;Q&csdr(jjT*KQ{U0rnGV_ff=QZK#>WngIIWw zk(Y%5iJb#<96N_rIb-3NlPfDSc?xDOk21QdCfihh|Bf1#3Y2yhy2<}$<6nr6u3Y}i z+LTMBmGPz$QJe|v^?^R5%pEZZP)V8@i6Z)lw(Q?j)*#{1pkJ7bbG-DBM$0%C3d9JM z8n6bIklgz$nVK9l2@!LMf1Y=>JS$yskx2>$ zZhxGVieCHYl{#{gkUd<;@<$)SlU?D0S%!t^VS}p!rM*NOmyj;l&?!a(5c@dSmd-6G zxV>yxnw8=Fv;ayQx~)fu_5hz&UfNiKY~nvlBwrjI|$LCGly z!T49v;^e`_w}+lEc}o$td0R5=wm;!JY`R^*k$1h9i&eyQbv}4u#`>sDt{I0wj7a=s z2q-P7xAwbk*?_>ExJcAl$~P(~88H2J3OO36CXjB59~!76;1RpurMZ}8V>$gt8{!c> znfi6g-SO>_XZ20O7XI;8^}U;G)kB=EZwv70XzDRB1L^J%g<`~i=Jzk?u%FrnYd^y7 z&_$I43O&lGA-nWWMu@-NO!0C4ZR+=`$uPp9huf`NqxQ%9{?S{Zy5}guzY1$L_lqp$ zGh?Y?RBdY;upd_QxOd;6W&COOz5m+sefOQ~?AC>^1aNQdZO*Hh>AF+7RR+5cF$AH= z5Isim%@t1Q*$eKJr&_Qu`h0+4VW|B2_7=XXxXt9JMH4dlx&g|b_n#s-im=b*pTPKj z4es>8`~U1<See~+%~%*6Y*8w-(cmi2ZbS&i!N7=nrhw#1g$S8z|>aMyt`q4L--6GkN-a= zFOyENBS~^ns4x@%c?9F(s+ghQwCkxN^Manq0G|o^`E{48)r)Y4Z>?6^DGn30;`F6X ziwCGi$x-TeiuaB|$L^(w0>Q7oALc3)xflrFSoAh|gJ{jAFMTE;XBxEvc*I&O{D?@Q 
z?>4^b^#LwJvcru&pHB->xYu-_G^)I#Lxrw0pIK1A<5{O3I>7f)0Y^X!3tPRS0(0vn zJz(d~aOtZPnMN6)?VQI;`?r>zdTQA^rGWWvd{k5uiqb}^Jk5({>r&xx*Vuc(+Q!&` zHd9}7B(7|k37;^XZQvj+KJJ5B4pz`(3BV6F}ijBzudRS#KiRs z_Ty9x%8TgSMzYR7U57Be8=`Nh<@mlw?B^A@1*(EDWJ7&c%%EZUBE3 zvB}#;N&de_uxgBNj-rB9ag-$#@-8vLHB7cVgMB%+x7famp{-mF9Kj%6*4z(_r~{E1 zEm6Xa{h7DSS_J+r29?*%lbbnl>P)&QyJM(w5bqC4L)TimI^jBxEO)-0nN4Y7oKK%1 z2gKNiTP5=k#RW~D4YV2vVM0?0@oj8t?> z$z&kzz?c`|NjhS|MxcQ;AV;n0#Wi0Q6Tc;4YzM5=n>-PxNL8&`^KtJ&EzGn>LtWz~ zk@V7z7qeP+FUrdFR#D4_NBxqafnxkE-3QU(muVdgq*K_O)5|O%nJiVvJ%A0VA3Hvi zhk*ofkIO8@Sk}5#?I@cNB)WvqX|Z~4WWGUqnWIQ!GRmM#N)S=|x<+-!sPZz0S)cI+ z*y(RW{e|g7D-hHakLQ3^W1$MN_{!9wXRU`4gMCyZs8?uRRp~9C z*cgOA3@#t=vET!sX%GXQGJxoT=BAbkZ-MbUTGXuqZr9Br`DJ)DO6VWWNbhAk&PKx5 zG(@9RGi9e}(alu=G;;zz;|v(jx~sX!=A?!&4e_f*h_(X#7LPiZ=F*TT>gMD!RTbqh zk6T{*j0xq|R-x{e?#hEQt{2BB-7&}TT8T-ywvuvZZgP~>&ZQ72hGH*q!E7vKz#4a{ z6MH=r&u`iTdXh|Q5Husdr7F2KUCJ``@+f8SR;NB61F|f-8~W72ZyLK(%!h18SEt}5 zq`C$r*%7!J+UBM*ltSnmu>tSq6iggYDuCOwkQzA4j14RxofcU;Pn#;B?WxTfdQj}U z$-tZXu{XI5LrfHF63quR{X}@Jt@Y7AtbWSJ;EJRjO*WgbClb;e+;9vX2n>xu#+xK@ zr=fPO{UxbI?*w`H3dMQC%Q+9VRe(})>{e=F1B?z9z)xs}Di&t!yRP2kR zBHIv{rhv#VP2`!Aeggg}nH4SdXO0?Q`n{}2(QQfE$FnqFv?W$Bjr(g44?pWUe@@qkNocdJ0x<1RgGM^tYRpoSFqFwB44H-kw)e?^j3Y7@{gJdVuisA0)X^v-1>j2V&cqiHRkD3Jk@t5_>jV zeg7XM*&nJUI0}ttq}LOJ&?A)#rvX#oCd6ieM{oFatx$Q;ZlZ+}&r+aR@Grc?*3rKZ zo2R9e%`;Pb>a6ZU+88_$Q`I}2Q4g#0QsJkkKgj|W50j=ukp71eCyXjr}4 zwKdHjtP+A977Ls6f2pMYm$!Oyl{T$$dV$83bJy`sjWU)Ao;W|*&O6s-0$4$fY(+_Y zbLg==6-+xHO32gW{tlqCQz)2s* z5dm?{IGfiy&=SrO5>Lqed_zqPh$m*Hd8~Z{kMCaue@ZYtw>JRpB(h*IJ9UfF9^0+T z4tM$!h6kTr^P4HUP3P^E;9TToj)t+yD63$teg${POQcF}WWOK~+8;hSD>`~*GKP=5 zDDtHxP^=+(jIZ#pfoI@JwN=R&;VmG&L`3R_&Mw5+YU%$h6-I@x?pF`!fs4 zBgedG|DBIA0~|f{=j;BXi?HBvr79IMGMQ<*)EjMcr>hKanlTPC&XW;m&Q)d_|FS1A z)gT0hR&6&KrRVg_dPv|_$(YBeZ8ymixy&#rfNFSUNUHrV_hx}^Ia+pW*Mj8hmoRXi z#a00h^JF8Z;-zFfk1lRyOv2N9MXidtAx2iekWvl=yn;#_KS2qBPjMUm`3dq&8bPk@ zLya%7^K4Lqat)Wr10yJ1XQ!JZ9hR#VVwdjf&*Tp^_bUvCoR6S;O;T;aYw2sgrs&qW 
z6D|Dd`beh!ONW7J-V>6-t=O~``~6^qB$n(Ek5I9ko}%fQDp6?18*ZBoK*YH^rW?b!6s$8-uY`~N+iuR?+z>A`{-iPW6kNw zttrKw0riQR=A6{5EpG9x{+x_a!l(>+^ZGeA)GpHCM}k-p-dUY8`-=AKM)cb}wH;Iw z0Q!!dNOwv_wT5j`+?z%uQXwt5Nk81I^|`UBEyWGnmhD+|$t-{TYb&mt6gX}*(v|FJ zb1*-_iOWD&^3;3IY#POMs2uw`Bu1@UbBCD!2l!~Cr)h`b3mVHQ!e5uBpjSrMZRwse zFmpxhsxbPGeALS1P+q>UMMF*!QQr;$%9uXD`uPa1c?3Z*cMdIYZf?iaMnx<3efLH^ z`e^mJ4DjV$zJK32yE#%5)ZpHm$Z`61I+Ci0oT}0%D#*h?Y`C}WRT0!^7b4H1dl%qHx4BN_bmdTLyn03gvM44% z*J2kdMo*&TWi^+^7DK`&56(lsh!WasMsU4<}7K!Vs zx*RMA`l{fbYx_Y@Q@m7QcmS>@eQL6}k^54LF({@$CJGS*Kj8{Go+9*vnxHYI#Xi0xaR!95hr{Wn#YHu*$U+Bdb6(gXXiJwglgL!h+ zwwx~4n;PH~dwbPYKTL<|(^US*?fU2;v+LniVw4-GVPJN3xjW3>zrjp%QelOMMjAhE!Tg<72I+TY3`bgcmMmzGX^j$^WSD^1D*-Kgu%Ne7?eJE zzK{7|CvD~)VI6qC*0;a%r{B0wAt6qBTz7{aZ|HjJZbQDm^j--27zjW)RWyE%NkbRKhY$IUw# z3tCPVo}ytlYo@ugQ~g8Te`7}optOcshFI&Qi^Ny0yW#itfPzPn#bhZu!anlui}IM~ zTs5QL&Qmp%wviMzeXHt0u(lJ*m=LuJm^<+p+{ZoQFhCQyVt;Atz&VocpP<6%(rjQU z20+OKpR80t#v-ULFY0{?@x+1QjD>#O%TL3NYCPUv4Pqe+8)wB+-u|vdm$qtBuP5eC zfeSplYw)lr0By&LC9wW+I>E)_{AL|2Fh6K1Rv{or#c!~D-GRKYO{q;4B0OD)svCMj z6759UfjGVPb&LV&*+SVYeYBbEJi1vI==15s*+gLE0+KzgMZ z)dQGmf_p?-+9-QlA>|hCRR;8AHlDpN@3R`Rk^BpjfRbhudS#T!MO{-ZV`|uX`Vn~i zDW);th(E}Y`CQgp{?xgwC(*e)h?|UrSetlY`D10mQ*5UJ7?5Ue{=3=&%?@wj#APfQ zGceRYA%t<$>?^4UzxUuqYWpGNaXS#G$Bf`bkrQ?_-R}!m(2C1j!_ONehb4dfM|wCC zS`=y|farWK70$*yEx}0x#QbU*BYkS_1pK&Tav|F)@HKU6V|NT`B%264RtNdlOQ1Mh|7ol-Ko3yE)v~4})gwxg0 z@7#~>;|s7cOn)GWp`U0W<}DHo9+r)}WilzxAs%ty&imv(Am+YFfxU^$E&$yO_Nbt<4enk?yVi!9zLKt6dV;^Rq>l5c4yNNQ6od`pw1kp?Wg_U%8tIrB7NUI# zaa0>>{0u}-#Y%b=Fbg6ymy>ifd7|RvTESxU8AEtTY<2|!Q%e*P-l0BHW?19$M13P> zwk-RH$c(&9>}t8J8Q9l{Ol{a!NO9&{b+yp?1A|!#;?3e8GRZ7he9F3VZh-=Eq?EV? 
z_{$qhX_?ISX%grAYtheV@pE-+(}P^4{o3vy^CuPJ=!Peh-9KW*;`5OaCcEC&YL7~P zq1xeF7BMT-!B$Jc*b#ue*wzAbZcflW=xoMLJ(F?=BYC_Q^jj@rc4DFqIW~$*L;uvP z^a)zVR~S#-oGu5y@bE*cQb8}%OI(Z~faXO=(rW*DZTLW768ACTcVlIeTie;Sv3Sy^ z=jp5QO@c#XIwlC|M2*dp$R?9cCVibClJkIZ8?FCHkANB%C~)vrurzTt0K1SIKH}J4 zCTdf{T~A-h#NXW;Y(CJ4%ZnjNXQse>^J#RJbM25J1OXC-%?`uVztTR2@%1NvXS`X^ zR!!NnbdS4rb1xWuT4dJEOM9eEnE#$ji-RU3 zi=tIB<%$~A2rx^p7BQzmkMrHVlb?{thYUb-j{0KBp)782E>xHm1>&YsM&;L?_PDk} zpCVJA`g2*VANj2^!-j2Bq?bOKBqoXhrjsd|mw)&i?t(u2ipoRt5D4muB5Ih&UX*JQ zL=O!1fv^ZNPv|0S7+S+-*X@1}o5A!c9Pr?Ymm}o#0tih16t;eA`3$gx7pCtn+eDE( zEm`|fh?1e=p}4L8-I8;;WbXM~+>~u2A7$m)0uWzLX2{CXzP;_OaqYeT&tCvP-D&l{ zXJ^fxx*^jY!;+}ca7rGA7)_y>yFvj8tOCSlY4lQj<`1PBP*qB&`MixHmOJR;n1=kU zFCaOw8EzF%6@t5}LReBm(AG9`ZrX#)gk3%;Vj<HTZ*Lu`2@k5Xl8NX8CEsT~lD_2{MR2?5s~XVb1nC<{;tc+88<+buO<4qC0I zHJUr|8MLmoTQWW|Q*hn>@R)``kZ)^lWKYlCG>_d+I#>n|N+eD1&b;nZB9 z{CU>mBA|cg!N3-hoLqg^-3}iku5@{H-mQ2-?}sH=F=QYS1b+ zbu1GHK^DSiR;6;p|3o%pQCKnAyVf)i3toVKnCPLwQmMlT-?afR3CMO$Y#9RdOrePU zgL^!~@tW&DTlse@k1#FqX@TNDGj}X()*6CA7?+xT?g90aj3mzKb@`hoV;qT8RHMYR zHd}&b%b5L2^cPuISmKG^x-Wn_0a7-SW;(+60b|#1Ewy@~R@adQsCowo3^voAh$7gpb6XY|q?{{>@Q&H{ps!D|lN9I9gTre&vK(<@3IU=wh9(d0B37X<;>K=4HxBJ6mfndgR zySqg~gQwWU^y~etd;nPwFi0AubAi!1zuNK=z&*2oKgQ*=j_6l_9^1dy+KXDvl!Q4b zJVB{b2V|=$k9JrEh8sWDHXI6vJP9-ebb46KX4%8iC2>ZqCBjm<=hAlk-UUfXda%gS zhIWo6FLhepFcrY4n5W?9)JgH@+I4d4^R%Q%4qaHbUq!r`YXPtAiqq$xp{F^LjcFS= zSa^#{9UD=Bv(P9ISW&&>kG!OsRa#ybC}+q4u)*v_n)=42X!t~40!Veq-p-^a(SNHG&6$E;m|WeAr731?(YKn zAG90zRz?Sam!5$$T}MeNW(2jvh$4r2Qs73?${gTD>C?gua!bzu0nNHfp=AOk2TZ>| z(fUdJTK4+}z98xca6~eBkPzBF$w={sM3aCN51$BSN`HGk2j{!zw?62;MeL*HUD`VXQ=0T{iKx@Tj8Ri4L8TFu2GV7?g z%Kc}A>}RgLlPSkI4Hs^BMyEMoiTXi0+bMpShwlTiAXpjFkbzwWl&tjK>I&1$S2%%s z4DxRAbihA5lX=4Ire7TmXgf#*?or0-6#pBulMFPxK3eu zE{#ut9$Nu9&NNXOz^U-KfY0>X7aL8^UuxDfUy!~V0=vrj(1_~1>Rgl2x&#)c35OF& z2uAc>cFClZbUPF0I?SZmwNFw6SL_b~jM%R8KyhT+r%q0B$&!b!nWT~#RxVuyi&+*g zVt3(X+!!#7?*6I*NS6<_0GZQYjJSUZ5<&gStc`k=w8rhM00tXS1j9NXvBI<)Y=SrF 
z;gq%fc&?vD!5zuMK7#7Ui{@lRV)Z|malq2beoi4R5~w%)3zyS?$cU~C91e%0rRR^4 z>EZK^jPrJHo^Kn#cU^m7RC`;K(nA)m{n~Wf(f#Fj1C4-gv@{!47I4+r6I#M#9`)$A zo(|COMNutPK)m#%FyaK#wjM64khjd3r=mxg6Qd#@R_pj`oySgI;RWTZnRbQW3=zMn#(g1gFzM%U5a$B6g}1(sW9r2PE*(q6~R>N#QU!z6~Z<*G*rIQ)uU;dw@#G4 z2vxZ?0!kwpV<72s^StQH?@>=}8KgWY6j$jh$;qwNksc?qDf1R0Z4SyoD8#@Wqvg!I5a)bI zruK8vw8A7zb_nzKjpk~;Q<-b?{Q^VN`1@h00&rDOJuIQYN7oqg;xZ$NPJv!{SVRt` zMCpVqt)*1i_E$GyXhk2vBSBBsfV@FNQ)oAyn#9%DyY?gR;>Hez2%vy;EWFwe_J=7y z+yYfWQ>8&S#rC=C2-7mUeyAo3T?0?A1lep|*qcPLCz_6YraN6>(q&|RR( z3$MSee~xDtY2AicaNET~BGP8HP5B*>k`9N2i9iR2B-@;ftHkL8J;SYdy8t&B7KXxF zp1EM7`&b;84Q(vCFcY?^Y%V(yIN(HEHY;~aL@y#QSxN7M%eJ#mWxhqKTgAF!Y@VQVW5H>8 zx?MvT27?Ou!&#n`Hs!8FO#mx&y*)g>XROv5u=M#TWhk)`&5QHSZ@mxdHtZselevDY zi$q1C7@k#C$@4`8Zsb0fUfYe;SnFCi3B#92K^}m;gNw?nfe20kn(m+9mMfqY`QK9* zG>cxr9imX8fUr;47dnF$#&1$$N_=bdtV%0cqv2L7HQ6LMM%b*fblUXn>YHE`i`WNz zJ#J5;r3 zp5nD>k@byZp9mFQrp*dBIjZ0%)}B$TqpHWBaOMUE&;y+Vyg8(SIO^caC0nOrEh9fY z34nU)^6m&qTj`D^m1_#YP5m9-`r~4yEttMD)0(#tKUksxatWHxI+jKW-1~@a@oDTm zWIk+rCg0~THkBd-YuJ_uV6GqQmE!nPSeooA;+ zYC$jR)A(M9n=FU>In3t6b^!!PUu59NSi|$4K|Rx46EVW&Vd6?II}Z$&(XFTANzrDy zac7yY#U>>{LBm?-+W#FRsDocS7GprOh>*9^31#to^@>H!sFkQ+K@sK)Zp+iF&XNW| zGH7zsa)Qq-H*-2BM~K%YE`@hgG*vs0IV52^I#2Pro!m>^VSUr0X_&&re$g`R=RidC$DH-e5s8h>k=_dLjMvndSw7>6 zPWBUlZXl?ss8K>P&mQdMV`H+wuaq1ie~JQyW*=@ zDsvKmV86+;b7abB?uM$UGt9oXzHiD;978&Ug{LfGH~w;Oxik$R=0r z3TGH#`rAjSe7SV+blQi5Mi1LJAD_ssI zDsfRLa>lFskK3}dDzdRVWV_b2=+5`pU6|+a{Br9l<~PUPZ6%^t6C3oHD{oh>5c>#Q z0t5hwD5eKrcYl+xV=RAHkRWPqZc3wm(H1XKE5#r(LfS*}+$veaBAY7w?zpk>Xem+jm{O98Lohw`v!mRLiVNS(fvI{KFB1< z9xc*%OBbqL%sXG(^VlL#j7x~s6L3_=YnE(sN7HsVPE3BlcJA5LO(;~#mbAKq@mwN9 z#()Jr(4Pakpz`ocSXeF8t*cqmI}0X()-0z|xU4N1$P=ec$Y*+_bl63C3W<@z!w0GW zq?Z!K2TBB<@+(`{n<%A`4^$E0)y46~zjwI%?(Ni~yYrZn|GaY2l^P0z)X?42RWVeg4BnaTbCG1S|}hpByi=J^cV@M*AWTgw?LB z%rky%Xq~m|sAoFC-Z1ga%Jd+s7$6<#uE0KK_onT2!m*PKRsAxr^BfRI$|)lt(z@g6 z8`^oiLctNvqn$X;j~vS$ekcvXZTyQi1;_IY0na6W8tL$n7EdNX>>UkX!JndU#WW}%CShT29D)) 
zB4WnBD3<-Nqh%3_+tRPmNO^k>`|B5j?9Hy89l~nmfBK^Wqr02sIsR@M_PVxj&BRr6 zw9cdPlr-pl;tZ(~$FFy1uwxv6cdm;N)?tiz;dEa5*;;jr+jRl%p)9KP``pkYpK&8@ z!5vn1{)k`ApgGQwl!oZJ(#K|X9Uo0(wQ`V3P$gz@w4z|N3>na`jm5ro%oWjp`(iKZ zi80fTN7Q(2q+qFKQ=UcDkHS612N=xR$p}w;ptcJ_K-nVVd>mNECb*po zfGP!t!qpA3QM4U%A9+ACOD5d@VseKT@cdh`7vLUj!+rwDaF%khL1sn8?<3yKwUTvm zQiaUBnfTR8*#%q+G8oM-q)FY!kG$2}9RRhv1#0kXKtEu#HzB@x8;;IrqGKNB!pa)V zv&Gdfzmgew>uI>x&wnBiqHVi0KG@qp_2Y~H6Ck13>V~J+BbILcF$p%x)A)OFggJAe z>mF3)#+e%+U(4@@K=(QMN@K7(P;rb|)2-HEVA=0LoLL+N$!;~G zNge7W+wyMri=jZMsMGUy$$JiLn56UBl-zDmRz(6nA_|6Haa>u$agt^-N5FL4!CgL^ zxfyLLzaO=;=|<3yQII#TCFt!}?x@Nt7yL{mI;OKejMG^`cUx#z5mm4e|LUaD&ahQD zjWL8^SfsfULjCeQ<=7I}fmck?Q9?@)SA;1{f-#mK4O_t*OFV~U3M(_T|Fa9nB+CT^ zwW<%`*mOA0*R=U9gd4jc*e~-I zbv8?i`<@t@s6QwwKPHNaFiCk*W>=<@QKFlkkmHzL;#qb0I3g}i8bz}ZLi!ovX9-=} z3HaJBW=;Qt0?|>^7TN3}wFOwcpsew6Vrd(oav%?88;Cn1QY|Q|rNB|pHmWNad)WoX zvA^JY_*(tAeCDuRe1mlwENa<_?9xA$y$JS3mjWgrI%`B!=Ual`7Dwo;FwwPW68_Nj)h9qwM-l|Iqm;747#omHzQebKSWmdD%&uSfVp9uKP}EUH zB+jQFZA*o6>vslwwO$WCyiD4EA=W^HRQ1Z|1ING+Y0aHa{w~HsMh4Mz>Z@<9i|cY2 zme)OdDOtUe&8y(+2Av7O#tbaZ+UOR9bDnHKZBoYRFwbWf^V5wINIDS9@ojsS z06Z_TcR4rJY!Mze$tHS0jJ=g9(n4W0WsMj!1h9)V%EFI0;dI*Ura0~dHc!I2KPj~u zn7?F&Q|a2eay);$-CaoC|8C(N-@fVE(pUrA?ugnmdoe}sOv}Wy7b44UK+f5)4rZsv z&Gss@g)Lx&h+!B?@e%>81d2$Z6#aoGr%x`&_5i>Ubt4?bdGO$wza2Wn%jD%_mo5M0 z^~9a;ORAGBA-=tX+WFm^N6pv$ZpL-t4e|Hw-CMs-gYL92gB}n8eax9waP8fh5zx+4Hrd9|R{h%)3^;r0M zN$!dS@aR@Lxa?#A!qD>i5XMAF zd3A1_*J!qyZsv7>sCmDG@9;om<*65-AS`<)z!V2>S!F(WQc0h_Bh>6$P5eDb^wOWJ z5oKK{C!3$mLH!wt9>%wS1pdDlxRh6{eENg-v%wKR~Y4(O|?p% zo`E)DyZkEC1zfjYhMuH^jRhmuBAXc~&UJ0_oVCPT=gD2=_!g-)Y=$m?kmr?LlyZ3D z2$0F>8NFrT*rxK6$3QG(&5=!t1=eJ)4CZ{<<-}mZseeyVU$?Pye+2GZp;V-h^cXlA zjzxPGAcSKFZD<{L6YSt6$@AvsJLVexDCTo|E=C{{07x6QzMY{3cdvs`O7D0d{<8@g z%`s|>7~{#7(@XpL!&awi$KbT`vi1`uHDZ@I#sx4b7~Znq93KakN8?C)MyTy9n|8U< zlf+EvOHuewgJ`SMrEWD2FK|sB_6w*-zmc_ejZ@A~CniCa9;{571gUTQveboeZ*DM# z4?wn30k*sXUh~R?Z&BG}?X&M}SucEJIyBt9>a}?%xc0NoKGyT+_Vqof2CguVhra-d 
zkPQ*ldUi|C(qSZA!!lM5J-Hx0Gon}k0Ur6yQko1s(z%)0E`Nfqsl!37J^c4XngWbc zykj6yP6cimhM;!X{uH5>zTP?b3(PynirumdK*NCajf`4KnJRxP$qSAd>@$*Qm*tio z>~q`j>E|ttSn;%+Ugyi+sdS4K-%73mS7g-)SvpV7vYLX|-|NK;$s3N&`5~1~CqPK` zcB6;usgO$4$5TPeJBQ`t;qc5fhk2dz0i2Jk+AWY54R^)lZysBoSDHqg_jd-R;!we9 zK$6^AUYABhN@fJf#W=T*AVRkK!kPzfMkvPHh58qek_CjgSplp~w5Iw(x`igWSzf1Q zsUI6&)swgWnFssk&${4=;bHXoxutuMN>7TPb#bHaLMm%hSiw=d(Zx9VSnq3Kkt+cZ zz8}Fk-(F*F5oaIzF_zBaofPCFfARnt*gtZ=#SlJS=3lbkB0hOC28@kZW66p?AR4Wa z>E=CU>QAX{Mi)P)z8zo&_8mh_ z=h997FESgJ^@=dKuR3*vLE_<5(q8oZ)|lt6n~?%bg!`stfd-gQQ!`>)E!^bXV1gF3 zLTVu#4H7!KT=Jh~+XX_q=^&RIy?LqBlpu?%@izK1c#)ZES^!zcWnmM{BDgAce8Wa% zD}_(Bo43?`)Zl%;I<${^SubEuLi$V>m*Ys2eO0Fxw-m7|QiAM{RA(gT4s&Qp39Acp zQIbFu-%N99km>nd4&87jVgw3mLYjqzLw|6{KoUwS_!!h77A8oOm|k~#FD9lJB_jY} z(Tla4dv^yt@ICj}L;$wAeD>R7iE%kkBdCk8^OKQ!^mc1~B}MN+J{_=(F+DY(v-F*R zA8h1Uwb=Oe3lB+pY8K5_`9qlNPL#&OFRB)!m2oeMP>$pTC1NcP?@>v|Fpqw3EiWIx zW&!+2x!lQOlk8Y#rBsJMbAuMfg-}eg#qmpZ0@!GW4omd-x}BCYBD+CCt*^&^V9_=8 z&9G3+Os$Vl&fp*QjoItkMVyjeoq7))ina(EvAoRGic(}VuxEfy?ZE5)=B$NrIRE3| z2(20jeROAt{UCqF0A(G=;tkBjoN51ryj$u6unE}RKluq4*fxMdFsA{m+|DVfWuy#* zFn}BO)%B@t1(?bH`p)!{Mec!BAHy=8&ECGt@&lZTF+=VFr>UYac`B4bjNtC3mg<$Y z=?1)H_z5GkdkT$fNJ~?O^C&oHn{<>yK9X$V;R+}_e6y76{w|L%xQ>(Sc_K;p<6(SA z{+R(tUTVvF0Vicn7L)~uAqD&os577f5@wRrEivstBpz3xgGp<00rxRV>TujeR?m4l zYngNB>IB29)0$-~1kP=4Bg>%vdTZ>irA_hgT*w7Z0{KvvA?_<2%|W?b{*K3S{~}9T zx3+fxQ)osLg?8|Y(>PAr({Ge=S}k@eXNE7WFh6hmw%2i>zlQJSObD$}CiZ~vsuT#Q z4(Hs-u}5JhIg|ml>{dkdMUbibq#0-Vnw|i$0oGJl_R5UoYhD_;6+#Lb5D!(j*q)_J zVl0(MgN|4qsvC8coV|}@jA?q@&Uf~@q8n&1@z&Q@Z=Z&^<8dp0SDU_n%Y8%Vew|nT zgB~xBSri;2@XK6wIWNq1{ZxSEE>+k<=hGX=t7wb>IIDM>!S-{*0AZSQGwP189X@|g z*(Ohcg)IZ*YwE;@2+c1!SvV|!eg|X07Ue#9dM1Y>?%UZfzq@su34|>gW*m`eFM=iN z7@Vp3>9U264(X4mw0$#(Q160g^-J5cFsZw!G$)feey!tK@ZYtip9X;Z4=PidCZ@+e zbc+BfSWcv)?w7^ECO%tog%$AwR}H1be}>dGVJQeWQ6>AI_4Ac%Gk4Z)q_;FfslEnl zU6Gf~(3p~Y?L%G=Ad-^xj?B&5g=d(S5kPbsH({!8Ptm&qQMBVawG5iAV+qVv##J(( zhE+?nMQ+6sXGuz1*7Sfg9^a`*1%7>Fxo^=f1r}vqF390SKb{$($3OKqHn40_s>!yj 
zlNJeVULahI_5EY@AA)@G>Z8k6;iE(xm67+&dn!nFmCKCE4{ISxJ9iJ@^Hvl~aAxId zNCAGQ4V`^x?hvkQdzg(d)KbQ3x8XEss^0=h4a_#@XTRdK2xJ)f|{N~{H4Y7sK%jowbpFU%Fq3lQ_ zP}&+Fk7)VF!R|<8fS5u9NURx2&b`N z&y(ed({y6M9oMNO>`#8;-zed$fv>Oe3|TZ_8(p1~2%ba$f@PcB(+xy1{QvHEblOfQ z`FrE7@7x70aZ^}NqiQnkZ4|=(om}6%@xmX#6bP!7RWd)=vBs3@xeaqx-;eoGJy7wS zMg68c+j{hAzlLxZCqqRjEyXRxY{!0O-1+@t3c5VIbRMFo-F+@>Z}*@Z-J#UM0kEC& z4FpWey7V6vV?caQn=uZGnqK+NDZhimPn~ak?e!gCI54Q%#tLuTeY7o+#W8@ZJu|Y! zLd*nbquEG6NcEz@^QKh0g(F?#a~@m6K31KIv783ve;8tGoMt^PbtT?`{OpaL-`P^E zd+^N_c5=)7_A(7ynkirVOflE*i`9{cL1*< z5be_b{xDuHV=rJdQ0T#HY$GLE3ja8$DVL>i6ZUt9Mr=1@eG1CBT3&nOZ%+S?=Uzt= zT%ExjO>@36qQII_}uE)v;}NY-{p= zGjF{$^J%`;s#>?I&e>J#o;rIUgH3Di+8h3jHU&+syXin{gO(I@?z zhCop|=Aw2SAU7MEDG`3I_rN$p4zo>5>w~1ZHGobrNJ=q(_VFmm9wZ=#*m5FAO1bBe z;S#pJ8-)|LO{s3%-fkT7QV1S6{ZZfH!JO z>!wBdx0~4Fk@ma#1p%F&3w4q|BY;V5h(mnCc@EH(wfZ{)#Ip!C&v%P@DxDwkkwK*g%r?Tf=YxI#!XY zaE4Aq>;34P%R504(ab5>h6pCcd+vsF$2L)4Pa&Xbv9Ij%3VP^*;0HM=KV@}UTQf1xKA2Tqk1-aiE^L>)?c#_NH6-04~5#^n)0{9=} zNhSm=UR9UDCv-sUiI5+o#4Xp=`>xb#pZ`u~%xo%*fu+Dg3JgJd0&lB_V>p(vyyj!g zKAJt_sMFrxv7Ec~&A-V)MUOXJ&ULU>B zCMS12K9Ie3hqP|H9?PAp3+`xIx!*Sfqc{_Gq*Yy2r%pe&)0;a3sVAb4i(WAW82@ zNP|L7;#mNz@CEB|dl+`(r*poEnRGDP?p5WQF%7V{)G^uz_qLLZ(?F=H3d?bDE`dsM zfK*J3%GgpF`|828WTa+-e#CpJ$L44{ zxtd_Z{{4txbmi&8j`3?MAh?z>4ua{Zd*7Jmn z*0k07>Lr1#k`LJthE9vn%>l@yb{#7!z=iF!kb$*Al)i<}qOB$gz7kFBOdYB=^l<5~ z>;S<}^ z2MPorppGnj4Z|KyAhvBUt}>_t@mRk^Toe(Ma9GO_eBrL{q>r2}Ql3P2S7{rv1Yh2u zQdC31Y-_nLCGt@wg>5W_ZLo>w>nD2PZT?drhg>%UBa`dV*=#c;osuejHbc){+*O$j zWAjos0}5b^+KosOZs#p#oO468IUGUNsRjY}HB7bAwwaWe2|D}J@&>|;+|RO=5@!() zH-AQiSxX$2TNib#GBT(XY05*y6WDzP1YkpT78-Axk}99V1ghCmN_aP_$)Ow0cYVGkmiN2%#?+xu>c;0$>i1B~JvD zRyCRyvpli+jQ!ex#V{Lg`{@)9n>F{Mfunl_Gmiv$z?mC>G8`&8mud@^W6OnXj<<4lalH8&3 z;C6hzox;Hv)m$EHQN}M4HnItYMc5=&A8;0ajzM2X(T_r4X^)!92H_ zGafs{j$eu4;Qq%$Ly>Ud8F)(>(u1M~&rZ40gTe#cIu+w+Y0E)#T^jZmxOE-t^vf*4 z_&-x|zd(xm8}rvsFYXQA|j$|rBb z)4G1yr*Yup**jo9j5umR;d?@J?qbmt?20~<*%Ap9sN74r(%H`gTEoEn7eoi7hI6>6 
zLkZyNg4)kAcDLMbsiEpu%;di>?des&Uvg+ojZIf1`9G`LooxWF)5VhgYKZ41qo4wL z=egTx&hiqLwt3=};=3L$l;y&&&wt|HC8ACj?8@s@`d5f&4hO&6)j29{{!7yAd{DdN z@zb_0C@SA4RxG=Ry^HaV@=>-3Zl5k|JiP~UTkO4X8vNco=5mQg448K-rk%{eQA4-; zXI;o`UC9(pb`Ea~iqR%MlLj(IQj+lUet+bj-+d!hq)`8~odkLrk^O~O=PLPl$P58JPPBFQlHau=v5<5dOTh?o84^98+ zs2CgWP4)dX!ievoM#-rww1G*ilQczT~AAMNfCjQ5HBd-zG7U$>w?9O$vcJP7QaF!n7~o< zjFw^4RwPuF)~8y$9(NIMN)7-PCF|%HOto$yvYP+qt!`QG$CGy31M?$|HuoO+^1rIO zd!X7KE)1JfGkAs;aNFEM(Beqc9kBbv6-9Xu9|Z36XJtRsdhjM#RtNsqhpS zxBHNl0zSEz9N!_IS|@S)98Jju;@rg*56$O=>&=%LB98qV|M>`_#&13D9ik^bjHNPX z_d*y8F^WqoLB^V5k9`=+#ZTBp8nq~ql(t7GZDt9USY4aA*1Tiq*NA@8am!o!oTsCz zPX!O=oYuAcT46dV#6S#_>sdLYJET$@&-k3wLMlTtX8Ja3$vCQ2ETy3`%1FvWJYGj; zelzlsp-mMh>P-B8_2{`aHxu8HfIx1UVr>lra}5?Nkq(}VX>pe@ulZjv;F4N&ge3hK zygt3mA@QBF6qXdk$LDkV1kcaq*>xkzc#QK>P6K&r`x+jl3wQ}!{U+vIx!7oMHadw# z=lFws+}Ut`ab;LdVw^XA>--=ew*Ta;xp>W1qp)}Mjlq8gz9DU+7q4QcaKvaAMmX%# zj9;zWpvGq;zpI*#G?M*IPkTBM0C6aQOz1T^?sfc*>}wx${H+T7ZHU*ME3+&tV()Iq zG#uKC7wbO>(^=;>*N;RU+^?aPXq;{R9Pm>3QK&4 zd|YS2^!&y-Omn1XEtbVY@^5j1N@c594LcTTZpW($ZpGriQtL5u_e}|>TU(sXVNXrH zCT9DU5-j!c>Xett;C$t*otNO+UPXyf7m$ycVRMt=0it;N(ovgdIMB>mR*2&B)|r*c z4Nb&VKcVy$yLOyDN_u!Q<89z?b3y?OQM)0KSnj|#`l!sL+v#23nZthjD!bdSoTrd= zO@2RTFdkt?$o6(Y6dogxS#_?)o#XE64f~?v4g3bLkF@fu8OC%o&`JrAWeJk>WJ-NX zCn;rNfPJEbEd##0f7%ZOeC)}uMZ*HzOl!N+?k6kUxtagYdX$X3L-%(B_1EE&PkWq} zxk@-&+kK5Z+<$9fKoi2=snhyuPyFL!`O4lO^P5PE%#=eTv`=XjLnJKOW-=`1W?<_P zb0{vvT5(HwF( z@NN1mL+xUdJfKRdk+=QCa*E9n+jza(;dX4&W0ob>^EDv78@1l03Ms@rrEkTfZLl6( zgV>bbRy*o29r@$Di#6d~^_}aftFhKov7D3EDN%Div?WQRvmewjOv3#g>`Rg6X3EEy z7VGo=+?V75UpLd@JA5|he-fK_L45qhWw>bwphIlzT6Q|cuFjT zNMSOA(*3O89c>0B4IZDeUW^(Xe`qp z_HQQ3d#2}f8)u82?sv2}cEfc#Ym?WfzNfZRs7_seJIzb3`Zj{HWZHQT-|^zSaLs)R zeZog==mAh#b7su7vPZc8K2?~|@;cgmHzXWXM#-l(G0a!khqm_9O`H}Ql9_XEPIA_Q zg*P3KP|r~1P-D2(sVQ*a7@QVy7K6Hw&YZPm(6z6c{_8?}wzDL6%&a~lShttgNjoDT z6eT@w-&u-Uio7t9G%8x{_B}*DJ&JIg;dm5O+7GBMlcG48#dsD5kD%9>%}U19=o;j3 z1a8G*un8z>_3xl@?Ej7O9BlyE9DckSAATej1V=2aQ@*?9+?RF?0oQMPZ;+A4LPHaR 
zQ%S~eg+<{{M2+tLOPsudxAavbg&jA2=b7$jui34Zo%kpB6R7m8rLtcvIPOM^jIBj& zi-6>6I*m8ZP@VqiztNz#xoxw-z6sQ}yXCBN!3#|GwkPty$v4XE()h5<4^;&+g&SWd zpRMuhumY!}})Ss*AD)Y6%(uwNJ zP0RSGBJ#LsG6kHiOo`A4ajny~xnnqVU}}UpgOn+`tIHVF(LO%=+Aqt?;wvvbu-vVS z5Cv3Z65o$cpmscuZ9J9zrpb&^jfz-*Uw%0cJYP}wDn*l2qb!{nAW@x|$e972z7dJ8 zh!_K>ufFZqSnf7C47FL3PCv(sqv|I-d=A4Zd_&GM4TC?Hp8nIPdPgPr^4?Y1(D^b~ zT)kk%QBmQ)9GKVr9deyl5UDFp@^cLR|M;ywui)v=%6G=TrWCVQ%l(Skm^Z{gDrywe*`xu%Cjrq0*`+Yas!PCGiEa3HBK@P z#X^(Sp03~((cFOTwH|*gr>UhqT%W{3k2QulDN?qy9m_vfjY``e9W(7(-H%4jwhj~L zG04Z>*O1TT7Nd{G9$opV>+a3L0P1p?LvAw8Fdj#xK9T!8jGxn={ggI(#bth%FwD7H z><8yK?9Ks<1&6d?ueQIJTq_r|zcJhS<*a$7n44x^19`c3nqMgD*Oe$k=a-LFSa<)u z6dcY#D(aWM!Ln!7;rv3;f5S>9QogCr@!c+NIz-NlI&iH+Y2ckQ+ig+C2aXi9(j{_m zI&{@HZ+N)R#OK9XC-?s$ zH&o7#^KK$l6S!_HJ}{+o3L}qmU1DD>5mY?wQ5~bVC6AfObnW>EiQT;PaBd z^k_1U^nriLZeaU(lwwbN!Vhs%Rn$Eyc61DX*u|^_xwa6!Mg2IdgDhZ-FpsNAwE?pP z6>t4~wg0fSIHvJ$FrPqhv5|Ph*{H+ZsRKbP&Pb?Y*vO0%Io+rH4M?|P4b(T_)$$1( zO{q<(%n+vkVbfku$6o$MnTo@q=b?A0P-j%etj)BL2W!B){=@34T~EpIM7Q9^ASx3| zkg#|(#lB_SGzFfbuSpLWzl?@a6%Hd4jI&5F)u2T}j|@pk?JSL_R$vH`&%iOCZevtojfnjZ72d<`~7PNsbH!`0V9%P4}L zU<&#!##%DO#9EnTX%dJM<|ba(Bqp|5T;de;0hvDw*ZWuPyS@|Q`bZ0Qmixju+26ZryY%y-L z!ZJbYik9#GPw!dy9yiAw2u2w9DH_EIaLpr#A)%T5>DdI*VrlN(De_Kl=4$!tphk}2 zW*NpU^IjO!Zg+cIwI#(Q9~GI#4__Az{qa9?e48;Qa5(5#193;IM|uzLvU0j??<)%K%) zcG{#UJbL{(ud9BG74O$@A@4?+Dwr%{4MgP|)nz)9**GRUo?dU_P7-Hc@+8dSygEb` z;t=v6Ae22QZ@1yN)sP9!?QE{~a@UvQ zWq48+oAjF_Tkx>ot5sV8Fl(!)k^% z^(wi6s+@yWF)DTYAzLxL^@F%JG);Arc&qLwU=>CoFXtP3m#?_unj_#>^P@^}MXJ>$ zk?J$e2l6E)?6+AZLp1y>r3|$jk{&p8tO5%jRi{fs(eAIuGPx?1h63yJmtwN zfT@*@v49K+lnrWxI&a2Jl8(fd?ty9l7`}8cl1jK*0bci$uiMTV zaf7?=Y-b`4AO_lG#pb15f{(9h^y#WbKIf3g(v8O ztuzNqi#3)grw~-XiDr0Ahhz!Lx6AE3|JBz0P9dDn;lhWG1)0+3)UDY;Y5Uw$s7ZTF zUC9IdtQJBMufx-;PVFbLbErH?xugs#2$bV$Z)<=y>;osvw@5*^G%=2IIyz3lc@DY^aLGT(U`%B0wLK80q_VYtK z-WD-m@dS)+zuR1X)5Bn_HY{|T25&FaGO`GFT{l9S3-Q`>;(_s3S>VU3{)^NGB$9Yv z<@E4V1ZpA&RYg1u^wU0PtK6Nxc3*+~V~cKD6Vg&FvxKe2>i!L|0q`;na;zo8I_Jq;kY+m 
z`qb@AeKC?wZOzggY=y76u|y60QbzQYujy{$LQ+PI4vZ(YnOH+)aHuT9Hp-1IvqPLU z6s;&kvciD`;k3BTLi9uJl1}yB(-?ViI|oWa7ucGWhnfF~6U7Qwulx!@uvAqcfbl(Y zz{)X6b1yk&XB5^oCU7llB~k=7{HrYvadooM8JQ<)Ijo$xjg^wC*Csx&qoC#Sc;s=O z{medAFnlF`!k?gFQpMKuA+J1?El6ZE9}!kQ-L$~lzeSiJsii3Ifjo)K;x3o&SeXd1 zHA;E%U}PBffzNcD+Fd3h9+LHV4gW~PL`{<1j^UXpTK_TQ_Cr&v36D7krQUVfG14#g zz7Rdo#;ca9dinqoQCkk^-6|8A9_r3L)EHyv&-$X_#cZKf=(hFD*IxaOeWGOBh{C%# zLju!=c1}{T#90HHen0ZC%-$II+9&DH<`>PFwZXoQx%Z3=+bDlh*&S&SK!hPJY)Yhb zN(E}-ezA8SvgKVTyfgIhbR#PL-1VcK=Jc(Oue~AVZ@&vXbTsosWLT^&E zKd+cgjmmK1Zxv|yYjHMkJe*+q1nh`lGr@3{Pkkw@BGekqJ{16bC*K*1MvHVRCZfs(3&a!uo?X@z=iaI8IV`qs{G!<>Zm6d z(@?PKxWl81whwKF^c`2QY7=!Qoy8i?g^(82;HeuLQUenSbE-C~t?Zy6ur+vMOAj_p zsC6YG_{#bb_8)cp5hM_x;7O^ULnVdM);zF)GL_E&p_c({m{f1ukim};SUK=;u?cAm znQ~xY2Q8a_TApa^o^zfdZCBU(lImAVfQ=7aq7xzMoCxY@6R9|~KZFMlQ6!e?0fb!- zDGM&{StNstTh=f;jK>x2Oh!+7QvmyCV0#*c0-?0MDh0-QI#P3A?TV3+#5C}Qatpz1 z>&@OckQvA!YfQNKF*-2c2zhl;j8e`P1)*H=8{biiXd6G08Hd30Lbc8Ql+UKXq-SIV zSL%Dq->*i=P_UKOV3A{ULZPrKC%*e6{}y%dx1!XilV~BdU5gSzQ67kLgTC-~`}|}Y znsU4)!9jy~G938;qRn?E?2M*^Gj-M0I;19F!3W z!Z7gM?WDd>SUInbR;+}wJ_QeXmDe^l?NZqvH??bh*TZN)o@??=9u#3~C-gn*8Jk)x z)~j2>njVEfs?XtpOS2;Q{mcf2sxZr+MT4<*cMj8ht`bm?(7^&j+J@-@Lt{N_F5r6H3SL=CiFbpkybw{VvGqAogMPE8LYt?45kk_=Gh>*f&eA|t(_WNz0XpC!wlyH zTNg*n*9W4-Lq)+ltEZ`&0=DVlUT3d60bYQedQI?ianH7503i;urq^q%*~32*|0Nr-yf1Gn7l^(UAxNXz1_IrW3Vk*CPZcyNNm`BJ-fNs zo-yd8L6MDtZqxrcYdd-8AQoX@+EE&aM-Vy&rGr@P({h}&X%X zDrtEpM!fZ;#A~9cj%OI%6Nr74$O!NJ?u3`Ijy=j(3*mi=)kx1o27##U6KUx%DYYz9 zJlR6GDcHUg8shTkQhLu&?_R~w$+BLW3v$W{J>+tD244AIr1{v@G=xa~oG7Am5T-_w zTs4S>l$a65rhd8ziEL2u*d7+qA|>| zAyN9vQYGbF%1&R1P6RoGaPo`)T9twE`uF|-o~ar0KI#Ks?ba!9M4FMjZUx$(VLCZg zu#lkRiqyame)95blZk4%lkSGVf#h?m0L+YvKE5`#@E(R>UMcE(`P0bzSho*6a74Ba z^b=+XXctUs$c-E=zzd(?RHZN4fqDIA@Y=bO9ds##>Nb7f`_z_(V0KI7s@4Oxz6ofb zI~mdbv9UnW_Sj9v$^GRHUU2_lSnB1>Nh0^tr}NV?nrC%|@^x6t#b~faml$*+lmO?aC9PZv9Tq z5%qSaR0e!2e!^|W+F8+5Tk*Y*t0WiWv?3icb!7F=k4m3cGn6LXp@vl8ZTI%5dOgo5~0AyVEZ6^%?A| zwR{7@`u2MTU!9qr?6 
zE>k!%Q1|%pG11K2^aK7IK@b4Giby1s?^+uI*S{|Un5i~=uUx4uc&g>H3*3L&r|XMh zTo>emrrSlzB;fj8aKV5MlR?MuG{mU;qkhqf`3%R(hhpYWGOlW{JXDB4g?+LyOQL(N zktiB!J{Sju6?!`C)v+l79#A4I%>^&-u@uRrtQl*6dPwn4*@CGqim_rR=a*sm8ge<< z&)&E&ATrC8Q0uDnjBp%DY;?(W=73ZSEz;b70jadrp7d{MGqk+pCYigJ}G zP>*#k(DUk(skKC;%Q9VehAB?dZM5b99T(R~rCfC%kdhcW*q@1GqQ{dby^Ji_Ho(Ru zxhIOyu-k#dkwVL(Z>k+|#X7A#&IwIaB)7^YB2`|OnpyVA)p9)dQ;ya!8J5y$ku%q5 z0w@2>5R@>LqatPg5L^5*L@z4+G2T>&#@iQPQCp;t;C)q3SgTrp`!!B?dq&lHSY*hT zeQJLqV#4-Enmy{^Cy(5$1ZvvFi1FC}qS`8X==ca_kEl;| zKZC%Wu@)Sf70V$>2$jn&_vBEA%i0U=%75<~5?DQ%De4fgzsl2_YbVEeD{L4wMUU&H zR_;=_jmyh6wr8Ix%~qs>G{aiGvzX2${?67&02H$` z@w%zD^cJ-vKvpAl6aq@f$zb^zVba!m3f8(e5G5wh-4k&q6db}YD?Dx!x;ogFSE>!3 z(XnvlFS?Ym{WcLf(-_RtslU9dbE{WN(+)8utW!g`hl*$wT$Q&qf=51ICd1kzzxo*K zM{2{jh0WSMhQJf!lgcch`4@M)^k*5S(C{CZkG!Fi`7j#kWSL)!oB0-K=CX&t^fgh;4XV4l8 zO$s-{$e=meh%+-ADug~to3+sfXGc9sH2#G?Dc&u#x6uG)e5B2q?_N>|P0j(l*#R2q zLNI@~t^U~Z>2Lj7PfYxNHXP~45#uc9t_U}i;qBGI8A{!_eM;eTx|PlRv{ub)8$s=}shRW1v4IL5u>{Pz!r!->k5 z;-{R_ZDM*9r_>9w3Q^1{g#EQcPtfb;PrxAwL2}nAL}lACFpBXD-N&M#y-~V7{l=Ey zOOq6Bgv&lW&!cy04$#4u#n^pMrjGgT`)MNxzC#q&GGy3(uZM<=Lg|-Tl#iYwf{l=k zKKd)N$&TGsKTmCTlB3}1me~cS4TqTq6Sv^T-PY~9lRY9rbQ0jUT+7zj} zW4eWg=QTO_+WO?kyLerkajiznz@(5qH*@*NxxFg)nUL%@GVqmM-Z9ajUU-U&0oi`( zdk{sTRz2FzRVqRm{(DVqujkUdAto|^81hpOkJ3}r9L%qPZQhNExG$c2+A8AW;@U;^ zIz#dxmR~yd^CbFPnOd(3!eVRJ{NB+rl+9nT+1wehIz=nN{J7b~TFh>ub66BpTf)EO z?EG#dtEqsD=>YhO-DSE-^hx_W>9**u!W+1LhIyu%FmZqV<8E$STq~@EEMErA9I8q_ zh+hNWZ7PF>CnBCg6Bp~2p<6qGm3BCq%k*7X%84N{?tqb`7`sw_f+iY-@D@U*z?mz> zEAP5ihASbgZ$-l1kfLg4%!0*y?bD51;(N*7XG;fXn*rn`HR^UuOYjE}U7TAE9CN>-`wAWHt8zTu4ZO{q4COd#Udoi;&rqdr0=fEPt7Wqr2@E~NcF_~xt4~-YL#{Jo z@ET?{-FK+v6Lzg94j@M|USlW!7~sMMcBAY-A7jOzWo~2`E|(7@&5$ph_f4ue|6)K+ z>jk{|UpRtsxaOQN*{Iy=r`5in+b)L7orpR5Oc4l)r8Vn!eEp-~X5|r(!(k$`zP87} z{#UTTx*cxW+lDe|N8^hjM{2*t*Q~~g=IBA6r~X9^Z4V!N(V{R;&tQ|fApQSmxgexA z44eP6*rd|cSx!Ksw(D)`YSXgWmH!en?`lcK_HkI^zl-!SW6&-Y+b>&B{F%wK}U{G~k|mJd*C;))!2&22;;&}#KPs4w8 zOQjIbwNnDH|2(5jg9vAdEwNBbD)9Xekb3zJeGz`ToixfD%(VYgp)PUKRTdo~g6HEg 
zwHCOENEa7I7bV7aT76=$X3GPq2;U*O=7vd}yC9r|tp2dVatCo$piEPoJCp$YWDt6X?CMp z{Td{*$aDRx^AE#iCAGNmWSY|}kvgktpJX+{5IY=lR)IhR)Nfg83Nf~CYc#cYP$jG&x4n;z02`hsdX$x0U(Tui#2>oMFyl^x` z7K(}=xT$lkA4BVptlw3$vRTAeGMAPg?y%jDYmSjizUut`Q-PGEKWIU4W;10|3E0Tw@Q@OI9K^Ok9-lDN`=?6jYfFM zlgjF^xNRwux{zQNSq8ao{Jr6zkEpz5t(R-M z{b@e0Mu&#H86mNBjWtkxRiNoSl~^O>aDZ>yJ4jpVsuNcBo&pMmq67@Ztp5-od)w_9 z>t^foyopNes%r@x-|@s(!+gZ0`VB9=?-%m2E_W0+Jf2R7o=7m*s*^qFXU$C(9k2{3 zK8sc4QilC4bMJL7ODeR`xt#J&)l*Sja07*-H2P)T)1Hal7CISwfkO^ZkLY zDm>V0nn(#GL`s7_wg2QsB9{xfDHOFsNU0&QZN#tWu){7uLnzN*1VB^VTz9z9ND5Rf zN-=v9zq@rn7+k1ET9}LC8ExFT86lXR!kCh@Z)2`rN>L`;=nLr-*lo#98sv@iw6o(` z@;?PMVq)|dh>xzde{<}vbquuOw7_}Z@E#OGA6QJ}Q{B>h-vL@zFkHr8nE%!tf6}Yt zakZE^U^v@pTfNt?{CeHMv<;Tl!O-N^kh)%Op4Vtk+ zIMJ_bU^G3OCk4#8?aU{@d-=+pSBmE(8EX0*>H5}uFa%|6JMhY5G)+t;;R%ZxV+{yp za*Vm9(3%tP>@fN6b#qrBOC#s4l7=_<3@97IS-P=6h=BB+(k!4)l^~@lb2) z$JSn!3zPfBMzB=)EK!tYXRZQ?DtKL<^dWXm(@Dy|!~vx{6T#vYc@cj;AJ`KPu!Gq> z)pCM#G)TU%1&-m&9)|C!3v+8Spe}Fp)*f`1F3GQVkHSNz9Ovr}%&05}ZY+jFcp>Uj z*KHo4c(!85&B-}ph{pA+Pcg!fQmNxH@;)$5LvupSF_J@;Hn?*mEMaoj8mOjXgFRHu zKr=Q4@B*~~l43hDkgcA$*6>*uUiqzkhJgd!vC@@u5Je`PP!bo+jtI1}@5Qg@E3tsM zVi!+Q2xVdHWFHP?JNvYAR;Tjir9 z{@DCsFwfnDxZR$uTsin;44lRB-|jaBwvT4o{_n#a#`ALVb@oLshx_>D-^gsJIBu7h zf`E0{R!<@VR7Lw`RDn$$qUq%YJ~5}|jv8f<;9H)?2Vbh-(@jm!$S=^~ua!ONIt%jb zeu~};r900&KyGGIC9MNn3Stx9UdtuiHwTxa(~ObLoNfx|4-AKK+30%0Eo)vHIa^-P zXZjS`-hIeH=1U`mF@QKdP53Drh@EX|-$)rp!XJ6{c`f1ACAXf5 zSU-WSoPXaq-ana1ECc0wpil$s-N<_fj31Oy?QTK_`FRW^b{qdpxUH+<iwhy$h*z$GtH zBJ_%FZ%TA{51vTD$I*b5gF!Rh!MEZ8y3!10BcZRELy{%_E>Q-6r|z-lSPF>Z?j^IV}jZ~WE(PXGVF_Tld9 zt^?&n@TjvmbRr`N88n^MWyD{PsOwW!2!lbG`=bzz(6wQzSe@?1%}-uF66L=kD-nSp zAB0DMjBk)|)LBf9U!Jmpzt1f#x_IiMI8!~%v@T==>dc^>8$JJ|a^1__cryGCYU_%Y zUE)JIve3K~XWnF@lw>3pl*Uq0lxv_tz}%o>*UZ&il*tL#?Prjl`45NJOu5H+9ZaVr zyEYpeN>bxkzwpqMpY>YxJ+;2b7dpc}YqC(9YrhCepCR4{fhi7Y_u#u{eO69spQ*CG zl>#&Pm{-h%;s&_0$9&DevVpL>6GHq##<(dKI*S=6Q)Li1n{w=urB+|S!xt_DV7Xdi z3e9FPQEFsvO~WSlaawN{GwZ6V)#=P4Y4g$6d?_;PjD}jA#&tk@Xs`3DE_hC(ii5qm z!D=e9AVi43Uc=$Dg 
zChyQ=>3RQWwK|r`usx3i{gSGzC^UyCio|lb<(O5U4Xpc5RL*By1J?yKz^zy2{jJDg zs!KwQl70k7{%JSr{ZqEhDsmGU_CCYhfda;SxHUA$qms7h@O)3YK^g5dwgIq38G|-` z_H*!^y0K!;=z$=k4Q$cY)k10xQQZgQ(&QikA7Z@O9hnaQEd~K1`lC+<3-di}*a)jt zyZB2_N>sQmFP>-2C5C+^@XKJ!KqQ@sqFqAF@xu-)jYFvO3hs1%+7X4R({%rRl&`1E zIH1al>epSBZ9Xz74^$-->>vUco~v1t#}D0ri%W)VY$7tUlt@FPO+}b>J$e|uj@JN$ zX~))L8P7;aO@U}Vp=28lT*xHJ)IVL@-Q`b443))P>$Df2vBe@pK;JY653R6Y_0+^j zH9zK!Sj&c(!jk7-5;c=4Hso7LH7X1tM<~2b4gM9JZ6e7%R7j}KC@iI3D++Ren3^a) z&HJ;EQD?Ss`{c_OFTcMR1gyJ-DI9ZU)FGQNr=FKhs3fVlX*aBxFWUVmo4l5n(PD1% z7|eb5rO-YP&CD8qM_mQ}kU>V+ifa*7qeiS#e4w z|A#8Ky{3}vP4zz5@uZt_lyd8B(_Um70ersq`z|&2UsPk-5E>jFy@KYwV(`-WMI)IN zNlm$$rQb_gW+-Zs-|fbX^~2~wmc^WZ3%IG$Qj9$H0j}M@@=>vx?@t;_zJGDA zV%Pz5R$XN{kWt9A%o_VJ+pH52AkGFwt&I0;dFLD3eHO=(y-9mx_Zqw)+vM61l7sN~$|e9P0_ERfkJlL$=$ zpYxMqAP{wxSuH$`6z4L}it*TWE$myKNkTEq3zNvK0TDtiI&ADaq_OZPkvxcODVek2yV;I|hogCEY#ifoC&K36lU*p0I=ctS6%hYcmu1YF7&{jE5H_ z5zRX6fMr!%kj5?WmaMALuG#p{!$p6NCLqSat=-?Slv}}o5yZaGAxaz2J>7@V%u|QI zWjogGChbU~foa=U%!d;M(p#M}bL`QEYWNvJpl=JC5ZI*7h}m>e znEE-~Nb?iJ?dyTq?D|PXhj?TtOI~d)-gT(^EVydi1)|0=7F}+0QgPF|s ze`*E}7$BVsKS?ix<_wScF&t|tcnMHq;9V&O2~dnc)5#*YO|DSx;%)H01a(gIWdVtX z@mmbGo2E%|e8|@?vgBf4e$^AhST4@L}DW^uGJve<$+Z|=cO1sCu1UA}zupt2&% z4;+9f>*g^K{f4ZBh6TC!dKZ?fl`_d5iHd{F6bISpI>{| zhl)XZY@HB3Egsi!M{c;&h{!-%PONonn=A8v;1Nm_V{1(R0l!;+WHZqZsWZ^xW9nu5 zp3jT?UF8pha8rwKep-#PMzz6lh&{@uUMuXi0C23C{cfrMb$WY>C5|GSG)+=|3v|92 z*S^A5iWRRo)QcPaM{5;ow8xFD52?ARr(1x79=(%7tz2Qsoku;Y2Oeke8h_Ef;>V#4 zP@ru9Ii6`+up0`SXOiPpP@y-S{$l`s=>&HcQEu52_q#lzE#QA`wx^=J&2>bp>02Mw zUQ1Jq629UNaZ_<&x9*|4y^wT-kec<1c0cf)jAipt#dfd@9gmxxf$q5U=V>ucWJJvP z_i%I|{!CV@GUiFi)~XZ3HQ!i!Hx+6ZkefKpuBiV@Irg)lg<(W^982}aj59BumFd}1 z-$sEp|)Qh#fm!as89?mbT6XzRF^gA{4RBT@51HGMkE~TiDvL3K$)Ge=4bp zTk8BRo|E&pp$+UOO{vFyNhC{`93;o+C0=!BQ}c#Vilb{L62nsPmb4S{FFi6k$A*UP z?_DjmMHPWbrevvD>MpRK7f@vkKyrDFU5CNWJ{7swb`6X3VaxzP$G%xLk%}q_lOUwR z)S(wonQ-yUxs8(=ov5L4jU(%+`vfsp4>Y>p)1h{14CWC0DQ~bH z3m$Pup{|awb5t)P+Q)YE;((D+zZ_%TV&8<}n7u9v*U#X)T=|OlsKt#3+>$O6{~2n) 
zf*5ezAJg#(V2NGTK(<$_GY=p|v^l6J$<*T1W~llL?)~>#T7Qdf3oG)OM^+Od6_KCL zRy_{C#k@?TzgC`vkvkOoCqZ8S-xLk~Ex7x}Ex5b8yIb%^ zf;$8cp5VdV-Q6K*aCf(tbDrnk`+Yz57<-KFKfSuv>REGE&ElJ%82pUywi?&M^+oBA z*o*FUX%F5TjJ4r3p-hU!<%EJSW!7v)Jo6U9_9P*hc6&^Upo-U`^m4S=n8VC)^zIF~ zWK)vGAjFK3hJ!&+LohK!bXdQfa+kZnlo z4<1DL3cE!;r}xDM)6@Fou#>-VF>5lCv@#kosMaw@V*!g|Z z6U5cBv0>sR93%AIdq{y=y>fLW=5Uc00eV4Pf@sl=)XuRDzKQIBg(EnH`x<7)6CX~l zwGO|f;r>FVnT-pU@oQJi^BDBR#PgJ~wgk*lNK%Vbme^Nd3eVV0U}4vZ|zF_$xWtT8>+Yu{SnTH?&9qBC3=Y%7do{ukZjBeXmSlf|b z7cV$iKwyl-SIf%qOQVGCZ+$;Cw`wS%`-Gv9o5!Gkc73b~7Rdm+o5`6$~oX(@k zWJBIYCJAy?f|v!Z-By6avYlWiO*0|l%1|rt{>b^Hevf+;*#n`MfWcHAZ*kL zXmoE2uwe@fYYx6rr%`F&m=qfAxOSanS7wAr27VG>T#lj%0a;UusPscdS8Z4)SMCgO zyMkiETzdjens{nE7+H4jzyX4H3Lm-$+jUrN!4L0AoVuv!RkT1yDm(WY2#$k!j1{mw zQu&&QtVG8@i&-FgnPBH<0quKDW59a*(03i9S$5S|z=3%mBt8W3%gi0nH-UL)G@gHw z$r1~4#H3WPh}S6PvA8SR?0ZgA3rYuuq8xK25*7W>KVh$Y%JFo|a>cle)A!n6p&jm# ztl4;4dnmy)JQ(9M&dRAC#FtH=3 zPgK$Wf9cGH=fW3H$I-9=(ufL5Ruv(}DC23aEQA`MpJbw>Wq+R*_%F%v|K&Kg5b~6a zRp1oyU*ml7QI1p2nn@x4;W?nNy*MCH`M=9%RWAJE=c?cyWY9QtY%foO&|rnSn=0zO zTcFs@NIj9>`4kplQ#XP^FPuEIn`oPXCs{~gyT&Aw(&@Kp@n1HR%j93?1NfKvI0JcG zL6`OtEOCdGj_biK7xih-n-Q^>opvp(U?(dyeH#oQ?4wSs(y;pv{qf?4;F*U};zEHo zuil5mjQaB*7DNM!78kXclFeHz$E)m`xrl-#UO5UAHC<#j!PO^6;%Hnq)EFU*t?=84 zc^#9LNQ=;@)`|ompRV(faxq^BuC7eTbQ*Kehs8HWQXe5ga*Zw}lmPx8O7qqxFhCj; z!>j8T&l`+6{+G_15Z<_k$t2bzB6rro$Q1-iYsyC6B2pSbm=|M`z?~mF;8!ng?g207 zqm%@QIqen#m}6rz)3$<-T+=NPtH{YZokwdPqR<+#o$kPRZt})M7H0`>cuA+R;Q)CG zyd&+U8Hrap{syLc^0U%|iT3?Lg$!SZHR2DwU)HhjhIADiruJw-_$aojTN;MJln&Vj zxF<@g0S$k;Kt_yhmdf2&4I1{JW#*vL7M6unv!J~3cmz9!KOa0q1z5sSjVj?$Zv`|D zoa3G2?Lh+=aLw|tDWMhu-%FnzS9AVQyX-(w_iZQP#rWmpH~|O16}gmp@o^!g+(Z5SIaPw%5W z$ZudYMfK;U&>$MB7gNZJ_+{y>IW2{bIS+}(x=?@{oaDm~(jO>`1cnM8s&xc^R_Z2w zfp#{~Y_K5yO}`YfaQhHntrSc2VEwII-v?aiN9i4dFCSa;(eB)cE`D2_pwLz$wG%D; zHqD4LDkcnC^a0hE!yLovrS?hasV+-yY|l(wf>j%3G(Uti$kIv0!18M}wwt=^->q^6 z{CIOCx=1&a@CnKfVKj~OgRa9K)UUZB@1?;oHOL$MLlb6|SlXS@ zVHB|HvzgY$oagx+xSXXiG9%w44hoZ<&<59;m$}Xr8fU%q?>!UfCm}YMd|;&1=R_mc 
z9;!-+q{_~^oi#(`k9-$*f`*Lox5ZnR{jRUJ$U0fqAIQy=Ihm0i;BRJihrMgTGFN=K;6iz%6TU`zYlJh@ojJ#L zU`WvRhFFKhrqRK$!>nsCCi|6i`RXg6C$_dyStAH_5zl#N;Q2sg#)Nl+ON(|X>{Nb% zX`zWSrTdV+qXkhz{D6_Av$c1j;r}SqlCTq8`Pjswg~MO_sfRVQ!fKm}Sk$gVwSUh* z6iLU#{~u<|_GThl?8K1*C)=(H38Vw)pvQD`mMIbDOjLDInpu|<2k$oZPeDEuq&S{D zU{7_kel;DhW=EI`Bdx>#%ZJ5$?+e;>`DLaIp?(-$q$2mxlmO{P zH=Ru`A;r?9WSNFqhqQ=MfszAHxRewvLMe;>v z;;{+iF`{^zF%Sj3{T2`pu1!aH+bw-zcrJa+pl{ERC9OYs4VdO@Mhlri)V(h^! z>c>S8N^1O)a}|5O74KkY@VU?1Qwz%p4Nk@-_U9v! zIoRv~$6BRZas7da-}k`?F69GW&)t50<3lqS9pK+U)73x&_v1hmVL+vWyv;nEexaTr zSp^?tv-=8Io9RHEbk7Po#eZYgqGg=KOwfldoGh}lF?#X(Dl(oUP{|YIrsUw7RkMr$ zC&9gDU_Ib?tX-*Q##ZMTm7Z(@^F5UvvBu1e`FmNVSljWM=Bza))2%JLty;AA?j?(F zFLbvc2Ze32t#<)ArLuOYxSjfz{S&?o_3j9icjXfxggp_owK>G_)U~HGLdNqaJ(QZU zg_ENoXCm8r>ncN^M#LUR1VJK;{b}Xp)_YbzAf!KcnnP|mv78289mCVw%#OmKr#&rD zz^wOP$OiHleul%xZ~+s##XD`s7`^U7EYvGC&=8RQQu$+OI9lvw3={gv1o3f2FBM%X zE&T{+%=J7?4-~lJEC2N5jTmTFzF&8gp!`!<kNg*^W@Cc-0u0|D7#V?v>JObC4e-trC&QF@6O)txHkdBNcW0pg#QAFNvtc#P zZ~m!r@ySYkdhYJ=%BrF{s3BC9xMxeF_IufE8^pjV<5RUeOz0OA#6YcEfg+ixj2YSDferEF?$--?Z@+kN znQHaRhSan=bct$QNAg!nqTQf0`5M`S9J{vXf@0GzBe{Xc8vwGje>G+fg>Rud)4$8^ z1oM1oQPV-jf)^oOd(Rhm!d16pEBjQs7h0%p(1BaJ{ps&Yd`j8va(~fLIEX^TM!m!o z%|~08W6*!gt79#kC?!I0lc_XZabndWFf$Cu4U6_He}CB6C~C?l)O(HlO$D{&Yf01Q zII7hEw$CI#OdOaw#icJpLDlLMbtleRSqz%s^a`=4fj&*?qCv(75#?Ag4M<{dbixK{pw(PDAVUXF?C3b!P|!J^Ty)w;)n%nnwQ$8opj76g&lzu= zpz_NFy!Cx2SNlfA=Sb+{|6-0>(pf53LSle=W^svE-uVKXa1O)alTP+F;$vlgLZ&$# z{pS&5m;9mu3TACf*OXI_Y<-cd?vga9&d|DaSqQz*= z>jZy$&(%unU&zG`cy#(aKb-QUuOai~H>mv;6zl7RMPTGN zSB@RNsfTA;heqtrD;aldDj9Z2!RcAks zR^WR(-8YYRf@n*vXW5oq2qX^3@TlfBJ_PSV%NprNHMX z`I%5!S5d_Ypl3C(vo4`kkV)Qc-zxRIz;r9^1-0oM2u@$5j8#q-f%KKmW|o_xkA800 z7C;|1aM71T@?sCCV_P%9PQk=@5ec#P4&dQ)wAofy3e!tsc zwWnD8hMoorKX0{u^jhnn*o&-lyENFh5fISLbG*MGxK4UgGFomJq|i~x;b4{QW`BoG#cmP zh+gTka?=ausv?l3kXf_RpLw-yuEP)ZAG&qp-5SX=c1P@kC5$Yba77KJ5K3XuhA7B; zY|ztzZt2i!rIaFlw3z$>9U@xq(PE?3vY-82>hqfa1Ai&fcg6Ay%!u4GQjOm6?{APV z$F4gnb{W-;c+1GjY(nGc{$qAB)YZNs`M&^|`iK)uhk5dPa~TjGO_oS0c_qf+<|k4Y 
zr3m|#SP~QVQoxZLV-Z0$0w(={ZD1~+X#5=jQwn7rje=DizDM#2Q}WI#UZj`>EH^vD zA1f0s;H>nGO|c9@Ag0IEAvY1G99BT{;C1K|`80(XdnnOY1|^()#@J^i$p2dY528&G zsDw^{Xi1r>e78FzZf)OE?;_Y4@*$qMHm-%u3jxLN2&$6eZ)-3uO@ph8@msPx7>BU3AdZBsb>>jT3(ZF~@ z!Iw`S-AsF#1}WiH&?G>I=BN5o*}*%CtYcraoQr_^ic8^C=j#BsKpVP{-*g;!TH|m^ zf0&Z0Y=o^oDr5!xX6a0E`Yv2KC-{EaC&n6by*aSk+DUOCu2+h?`PYJ^$^Q#I^{Kz( zy+k{dgC%6jw!_)zUHY^L$<2CkwI`Ptz>N-r^L2_QKU07@)(X2(Ml$qyxx?W9e3lD> z&hIL9KS!!}E)!y$S`pr(2mj)-{*H6 zy3MPnm|gpM;vW=BAu+o49*=}@lI3IimJe0+*qTm>UhmwOjA?_tSC&nftC%{#XL)Lq zYL=--&WIsCTbz6t)o6?ho*{1%-2$y-(?A?mPlyFco6l_&(VDijlP4b+n9K(9*Rc~A zvTYlF|KS&2a^m4}()oXIl>82X?qMO@tE*IST58{SO0rz@I`GvPIa6&PrGZDk z9WbT#*RscmTUvr^tLJy=(Nt>IFq>`WdZB~q)JSQWI3p(qfK~UbFmmXWixRZOzIhMV zQ;-QizwiT@%-hPo!7N1c{pD&wZOM*TI~j08T$xyvV@v8DG_oty*D#7(qlz1PBIr1U zdgg5X)K8+@%<+(x**{cLycQv=39TjG@?=waftgowDx({ZEPN&X;0z<(;BK>+e)ujL zXm(8*82+3DoR#n3o_M|sh3u?TT0}#HfEjw`zt>k4-C&zj$3|lffVVoSzx=Bid>rU5 zvh$pD#u=@S59Emx`U`E#XPE~hjXV)>!rVEOV^HwX#t{z6F|COY^?HXM5=M-XEkNRv zm53sXx>#BT7`i+FqESCyFz^QJoe{vcMGQYP76Qt79StR@Y@ob3DbYK#>S2>(Y$COk z+;ldi9St+e-W@Jd_r9h<7isC$pedeoJCoL>nW@1}(wcLdgyq6sfjR zf2y4O4prC`phf2Gzzlm87JVEt5=9ibh=rwzG)v-tcQo!CSD)6|FI)8No(72@3ajCE zs07$U%)sD4{i5|)-AkR7`T$qHD%0CMgcq}9B2bBCzhgS-*-!ja;w;IF2ENtB{5QW4 z?ton>EN96Qt5AIKjn_pT?yq@vS{XH!SdHxu{H6~-867K(*)NwCw3?MWIS!4CN+V_m z8FcD~NtP(%QV6a(BtkVQ#AA|ztsd-6lp=wHS<8I0-enj^)9wvPP_-HH5>fIlX1#y3 zbENp><#!&l+D>nrOqp6Ah5^L$IP|*4L6es#f@-A&P}BP$f;zp~39;-JS51tcLLk}Z zAY1qU6%QH`72rVeo{4baxeh8>i?r2RBD0#RS(8q z-qnK|rg!zAJY})S;tDo^M{JR2t&|>Dpp0b-eaOD8OTj??P;=QLBTupL*AYRgY3%|P z{t^BRLHM6pBs*85v{pJyUOgbN_`^I2)k>QFhT2ycJDoul8x6QqZZX`M6>~^eLG*X| zU|12DDJSWoAi{@&F)o`+gvVE--pj4iV{FB3Bs9gq4s4kvvs9J&!}xWyofhOG6%5&BAUSO;^^@Fz?7)gj!}2_MXzI&JD%Du69jrI6dpt zSAwUi1DJ^4UIbr(xBG^-Ct$v{WzmoQ3}{Jr z85)^AbDP%Nc*Fp|G`_NTv~>2FJvt$HWv%locpeRiaUx*VIILa*_(u;WH22Shs|jDH z;;F2dQs_)%x4aOWkWz*fKJ^H24bt z*z}6`exk!RTp9bm7UhgsNNr7eZyoBVx`p+=)YBP$lpp^!0gUxY_QRyw=vW^Q*ync?V{mlXTU2MD)` zMkRXA5)Cu8WMiT2?3{ds1djdHQVjXH7wA5Rk9t92Ya3R;X7V#bV;k(s_IhB$dhQ>Q 
zchNhD7YmNM4`fcsg1

    (QL<{qzhasHmYhJ(yvNdrxG97qi5`^{+ zC4g6xE8~G(2brtrCCgRbZ+UB$M8nvXd>^(Vqq#@rr@}Zrs1KHJxhv|(04V24L>SB$ zz!(4~hJUQ4Qg?=|ioOI5NqxD$)b$a*|p~vswu~1#$`O$O!<^JtH{~38uu;j<1{FS z!S{Z=GhukEpTbg7FaCqx&oa&BPlXWGw`rg==?7q+$%{`YZ`FrA-#6cx!gU)M{)1ipFS#t&mcWGi*CYm*1BpaT6E zkEekYsWxa%U~8r!yV7Ox>8XMVsx0#r;$<)35aUN6MOt;|LIz zHl)z;GvaFqRWj_OxjL>Uqw76QjC@i^%{q&>arJuwNBjxwBY*eu;jHM}Uo@}Yl0`g4 zI~w`#`QA*FWR3RKx&a-xI$88tdJ3-Dtx(?0^Gx>#2v@05C{!B8=oU;)dQ%H1R&&z* znn%&cnl^&bUwH2l=~&O?49q%K#mfMl%t|D7&py(2Oqo>o)~@^IjrPvGrp-bs>|+_6 zP}u$(*c%sph|P4L-0~BT{4i>f&v?kFs@WzY=5-BfU)W1geTS zt!LV{alYKg5+p;wiMwkOIvFX^A=;ytM4OLEXsb(T^f!@f8zls*NY5^xe;)(>u%Sl6 zjTDk2_+HGSUMY68H-_x9KZ-I8<3|?i244OV59L?l{2E300+A^D&jG>MQl59#yFn_h*6b;BoL`ZWQ) z{vky_>LYczrd{ttq^n|jnjzrnuRpiTvJ!OrceraK8J@}f7}WQwrrWKtuDQKECd;SM zulrDN;g^Una?9chWB*h<$?Esr!mkG(v|j^eCkF06N8;VTIAm!k^{j!OgU?MoWGRF` zk_b%-2`Svtf6jq6vz~1R_?fIm20v{oDc{&jm_0-+Nb$~CvU~LwRsk2yI^E76dZa)Q ztKa7aA)Bm*DmNze95VzeGbx#eskl_STJj;2=b(im>pO2(&cPws<&S&06_UsX>tzlU z2|Z`8T_`f?i65n|p9G-t1?wX^t);?|x$+swNy9(fqx2I!rZX`&gXo^@U=Gvc1AkAQ zk;LL4A_s&N^XL)gQ2=Ey>XGtCcb=hEzlc(oW}8EO+r14eYXf)hurU@%IW5^4(&@lC zT0GzoO1Y8RXdW|1@(6DD1z21f?tX=YLCH3kdT6Y0XG^?waE22Vx2!6za`=aA)|zYX zJ&C*u8c!3;X*f*zXnj{xL1C6~)`mNaC~&v;xc&D?E;0d1bRf?xj!+)D>U!J!Yw8O0 zMEYE4u)w_Ppx`TE zYZ$aWDQ{FUBfuU$yNjRpc}$N?YG5hW2G$CzwLc$d2o!w#?@a^+@`te#3u5k;tRi@; zTAcGtU!R}gcw4E`*o(WyEp)$<<1ReDo419*<3r>=_&%Xr>b1kVLZFsn5^M@YXR5}M z*92VLeGtjnOfvn*V8oD2&ESFH_P}RiF2Qh22)y8BYJ2+VRZqr}L(!$vx&7Z#>+90$ z_VCMS=g7knyP)!HI;JYM;W!y;$G7`!vOvAyuYg}ipPhd7VOk&=q zzZqC*DvzO(pOcpD;&<5o@*OxkO> z57OPGsvSn#LycYDxo9e?qN8!hS4RFKxsKV&jEGtG7r-GmiX|8`x7mrNoIUtj7+OjuF zEv!lM6mgR%oO*VnHe#kdWIMLhZArajnD9Q?%lNnWPFGI2-&GST-YV^l?)A`_cysJj zLs)OuPbb#K9jhoTCMVn5uqffxGfA;ZayHv`qhUf4dY}eh5Ah>{Qj2|i@g0(_Qn2{^ zbcpkAmt0p&IRszNHdG=S`$b4raJ>KRaOU&mekDePwVkQ;Z+d!D*yN??ENC~vT;}K8 z`M^JPU#9#oheIxNU)x+}#mcosLtJkB9TbS+AN9#wpTrhX&j;6F3~K+zR{ix08He=> z>l5c=oCUlXe~Tm{8!X|)y2ZY{SnHm?mzmfqT?g8eRp6!La<^;h;Trewnd*8tSoV%; 
zhowpy3xFmO+m!suV2D?W6=v%sjXxnF+7fFzQuC7NwWkhGBT4>%LGMx!VLgNUXL0E# z+51Lk>5d627%zQH(ik{$4ZInRW>A}#1r?tI04L$fp_ZvI+)~j|$|kyN@dr(@md^R7 z+|h`pdXi@AW1F{#mgg!C56ic<9vxC;+EDfh=IfxsM zK#IDcoDK;j8awp3iIu2w#0NnfC;#tEC9-^7A2l5_z~WXlr;aO~t|}-o*^m85(at_F zo~FE_yIWARxG|mV_RPjb+ns1lo24kX3|^WFWd!jinXtIZwHd~*!f8ekV9O8yuRn>V zr~TS`)Hb2o3o?I7j>0LmlFMjOj*-j~Xu61>9ngkVIFOh;v^Ve(I<*w4xafj)=yNG* zO=xxA_X$O34Zx7F#azp0oyyygK0r!;YX7<)-i-|<)G=lE-|gHa{D>TRol4#AN{ymt z^?T&e|CZ0ofrOX=7mJ`7$$kb-T^mmP0lHMMlS%@HzBIfI%kI5G>)?n9Fq!2L_-89G zPoRp$o|MVBmFrW7FH35aLenCz=gWPzR9T!nCtE0MT~xogd=(W1F|An7*^;0on?^eew_*^c7eK3f1^Zqr=rl9e z+*qF}KD19QUI&$n=iNi`dj9+;%09ZpuV5m6MbG?BtS&XMOa-iT>w15*DqQ|9xId|c zZ3DK>M@--EIILnF9AN8{`IU7v2GSCzv_@rM7Xu>*xEA2&2WJ&Y5tHd7j&)2$bcbS9 z^FP7W6GCvf(g=}@u5$Q|GJC)|Nce+p932yVCO_%+2xBCo2^jT+Vvs?cgkNn=E?FLv z$NTKxTQEJwNC8co{1%*E=-tmd7pxz^^c_b0cleTzsvi+{3gfk6V_I_gAg33fphge5zI85ss{L@2p(|@~Gd~8T z5*l)T2sKO{<6`)z$I$tNjq842g6hR=GU;DrpFGI=yQO%)1mU`H}d_+;QOraaB||z-lFbC zmd+~-fMvB#t_X3>iIUXpI?G>q`WpNsE;0+-Vr!C?I!bOBHvF?olroE%!k?J#eNSID zEfck07gMk(kso(~&@}LQR6c(Cm86HL@1jBtlOwsToIEF7!%Xm{riy~|;p2+(i5gzb zPb_O(IWQMVv`o>+;hm)l{4yp7`!jU`1h}0K5H7)SzMNT~gVNsrskD#IyKu-EWWqm~p_t43s6o^W`BazM4=yRWhF6FUp=h1g>V$Y)`CR>-w-U0j*NGH3}84WzTI=Z+a5X>IbV|tUbZd?69 zHgLv({qR^Tsk?Wk*^z&3P(Ax1lEmrH(t5w}=YvXtHfhC0jn*2<^!=WlGj%=8Zo?)+ z$&m%e?envPxj2V|SWbxT24DqTaW^&Q`w0ZjRXFW647`Q<$>{{*RB&}j_&r~_zcjbF zq#zr}<2GE_EI)Ai5i85u`(_AtuL?gE791bRI2o{CHqdcs&O-?&I@Tr#Qw)hIo!+3P z_%lWsNofiUNrKEG=Z?vLbeAosNKKzgpj+nJZ_$5O1>Xy`y33MQni>IVtNYXbl3{+b z>K&ai0*UsDpSKhn0hpti7Nh4SuPRxbuO}b=2Sj(C#xOzp8NnxaQ^^YD6N}5xB#elFL}n-J8sXTcpz$`w{Boj; zuq>Hfannd|<6v6u)KHB=togS3u90d7@Xd1`kmiDMM5>h6jrua_&MUuGVCdnqW&E_4 zx#7UZlS5LON%@0)>!it4!^!RqVqc8R)a_QGuR-d0BGY6b18NMg7Fb#)aa7$zvQdpH zgsK9RU7lMiFe4N=&e|?q>zjQ@pnUbGc|BzL&>Xo57A;eg6poRD8LDLcJK;wrUTe}A z*t<(i!W?Ca<4!>7lHO?(x%k=H=yFn-N|rXUyFNwON_NAiX}amnmpdudW&EDGZ6tQp zlM@3XUhEuTfO7VC?DWTEc*D<3IA7D{!uns?GaAh!=fp*7l$MNXu`q*ktn%%Oyo1lB zg=yGXo5+y>B~)Biu({rt)8_g<>lgY`AM!GuXY9rkbc9LxW-U4?W6O)yp+)?7BmE)K 
z_T=08Nh5;>1f~`>do{dvJw8agrB1{Io#v~rB#__J0D78yfhst?%=PFK^7yH2@jbt? zMA3cLeZzSq!NApM*JE9hX5s`?vF!_(b=|u~lM`1Xj_rte{FQ{>kOWg?urh4l@IzzD ztDypAGlx0H1ydt0m^0nEnwf<~#HpvA+(~$Z8_I-g!9SB6Hl%^9MvR-KMrx@B$xLyd z;qg2i*vOXpyBo82j*uevI<+5KSaW^Zl?fkFte-$i!XCJ5-!#cgm+hJWuWszdLqadY zKkA-6OKp@p{GoXMSsLA`os<+s&u2{%Q_2&>qC+Mq!@2cIY3Yc1A0MRwW&*zz;!Kp| z9>_s&=!@2fsCw(5yU>d&09#*ds|k7#*4+;PA~b0YTKod4v1PZeygk+s8{XW-7S{C( zf;dkeu2tnef_}>a_ea+OiSrlt3*!6O_oBN7z%$bu8@@f3ZpcySt1a4+WsRYr4yyad zrdeaOaJBK!p8_t$Bg5$NQ4eAM$FmltcWwHafRq-*Z!&cFwSfN+oag04DuQ!H=8J3 zTQ|+ycKi^fh?BUdGU94P`>7pxR%AURTTNSYJ%{}g5jqmcjAfJ~_@-${ifu9mlY(Sa zeH3tk8f}uh0lOC>6h#-ZS%S;)`pB1Gv=)H%O*{QJRem5x02Yo5q@0~Ift!*nj7>ih zTUru`Vpn)+Djtjzh>&cw{9bH@%`Vx=?H2#%+Oi)hk^*O%A&Tchr)(d{z!^0!Wp^ioqp0DVb=+R{0mxCouy$?r# z(k1)$Dg_C<@?P9EyiUM&{q1m9wB~v;bKniOrtYh&OzUn+x z`H!9j&3)eGU5xb1UBD}F6H4@?XW@Q0n(;-@CC_bpN=@dY%f)>Q(O0UUMe~c<4@eZ* z*z9-l90L_8Ye`TQuA3_q)>!5eNpN2%IU9SaKH99Z;7O!#774$Zh(4hp4mB~d&#t(j z1bhp6HrpsPyXY!awLPAowtpMi4rWz~cFoK+lFNX(U^v4yvkz(kq9zD5nzra4gmD{x ze7W9<`e&24d%=B`U$yN0wak`X5&CD9eL-*A zuo4g;R!?K|6;-_WO%t9*eo=W5UMMpr3M9VmQPcY6q7~#k@4`pwH zo-xFBa(*Z7tyKq*+^ZtpdI;mooa02k!evNXv)sFhtBnd`u1#wbF>H2`?$y!fRa;aY?!j{|Sb*MDAiFn7xCe#Ez=1&tGyJx`U+k z&u0*Nbpc0{UfOPJrx6Q~|lhb~SDZx>; z7oz}{-x^pv-nvvlA=O>pfd3fmS=+FshX^8*c) z0!|1a8cpoY6%ydMI~0(KmD zs6QTEC`#GRg@k=X)4PJ(DKBOVzr^%Ky^1$pf~LBRx0W{`d7njdvWJE{h7qGg^n>bTh5NEszxPb_qhkO0Xb%n$H>K_$HtLTE$sQx&E1JI^4HBr z^MVmqH0@gnznrS@NazO+tGR^4n+Quls@XGJ$xn1RjN#j=H+=b7)y)tmH)Hgu3R~~I z#dO?wS6cJSG-3*Gv-oP|Y8285EttGwlRTr$qwDq38D66J%asAAG7aZ(Fv#Emb@g~6kXK)dLZapLN)9tV@g4d{hLWcPJ z9idwKc*w-%dBC@LlcBAP=6<7Nf>=~2K@d=yFJeRf`~J&ybSu#&JI> zSII|Yx~MoUY*S`TJ=E({r9N#RwT1xxsU|4h>p-e?cbm*(WAJTu(GV6f_jB0m$qkaN zU_E;RUfMq6x_KF&5mJ_xTnw5EsSmVdz_m(+^l}d$D8I#Uw?;t$6IAsK^~tM7+lKuw zux8OjQ_vCQ%oSq zu!$R7)~cAB0_IAkt=4N${EO|n*A#ybs()d{IDz_eT8jOG)QX{0ftMHbjPH|BVbv;e zg6EQ%rO5j{%65x!LEq?%Zk%(j{5Z4#D5UfvZFTo`D6wNV%c6?^P2f1^9N>ZDzAlF2 z@L)?%$4#zlf9K7r@>EY8o(46d_bO!C*PG3d+vPvsJXn3UubgxyZ@VW}i)riP@4R}q 
zORB$x-I=>`zcMMPXzMccJ_Y~6xv+L{Y$4J3)}4G7e&bj85upTRVp6wqjg6~CKt`$$DdVoY+hlugVGgVV--yLta`?cu`aL|C?W~Hdm!)H zjB;%p{PnIkvrloo9sGUI=NW!yFT;$wOF9pc;9w0GDB`+enkFVbpiq6iy3#4gxLfn^ z>J33fG2;axL5P+!)h9pWp)?EtHzSlof#?I<^UgPnLfT!1ZtOkvYTOAbsZdW#XQc~!vqp>pG!UiX8PS1Tx1_N zaF*T~*;}KeDvKT^nWs^7$zfMfLe(_?zhr|H#rGcQ@5C%;veiFl|5WnN(C2X1)~O;F ztKVv0Wge`H)xI?FbcMCDYbSodl0@8FQ@Hva1v7w>t?Ct0B+$wE@4@dOu8E60PNFVJmD%6D3(@zAkNEZyM7aA( zpzMO0aK=$`Wph^K9p}@g{Q1u5OAAyKMDZ6n&zvsZ+bgYh(rEjh2j}CHTp<>9(y70n zdtgiE@-Kd@QwO7izmjE8x{O!WNuxfzC(7!>ewh)$;xvNW8UTt46++0jsRC+?e5_lg z>&s1?;b2gRweg%!?n-5;%e;@TZv8U(x}93K4Lp2uJ6x0RqNV^$3uIhzO@rC0MJ3<; z4SMi=n;QH$rz-IS1Gl)*^*vwCfYlqH+(*6Ok(9r5(;TwAYMEd@)8F24pxK-ZaWIhU zqZj{$ah`VM6mTRR;_k;0G zYq9}FY&VBt?aJ3e*A`tOh5xjk^tq!}N2jxQWOrozO5I&Tdt~?tm%+T?P1UOCaDLn6 z2Ra9S)ZBLM-Q#RLHisTO>-)SV?i<75Qqi9l-$luW*Z{Dm8FtnInN!9$7E-EEI=Iia zSds#8;=3e^uk1?fn}M(~??!2+kW=8h;v{~RyYubrG-dA6c5$$<|>R<6zK@KSOs-q3&%%y|?lpVG@-=3LYzHaw4 zi$vI39ad3&A09+ZJcv|ClQ49!&d?#I_p%TNs4uf#V(Yq+fa+g;hxx$D?cZo| zhT3hz+=Go`vSWXoRci_^UnC@nRYLM$C|AEiGkeJQIFFsEFxAOTQ{*-p6PUqI`axR8 zj-D_RO`ie&zLY@FYiYRzg8+UhiclY4#?i!rn5G2^)+xz0*=d#CG7rnaP zRclstJ+m^dR;j6CsaCHQzW%H^WRb8^TV*{r>t^%GzxC$&+9IW@^G|)#j=aX5aDizn$W7%crf^$qmL$dSQ1)hge0KjK+ZRl9tWX7wC5?U}AH zY$6DGq$XmgNM7Psay8)WuM8`5B9euldsVF6!u-|@>@0VWpR~S!JP9Ae%bwOS;3>HW?O)Jq=#ro zf+TjSZ-A53cs4Ys1oq(17RRYIy^07e3P;@_5A%K+ADR<&BpY1lSGV z)$PU068-uwjFlaA50YpWPTAA5`qV2VcYJT;C`@R%d_pzUnp7U*MLT0cb6c=yF9S_jzl3T)mUZpjt@kI>O{UV^?lg4|%fBPJzk! 
z7VlvVNy5)&Yh{r(?PuO)4-qAb3k&t&p}ezQ`#Q(78N=!RU!DE&LQD7fR_}zFEEaG9 z*=xEEIn_dyb)wLhXRz?k{SwHX76fD@v$CMGf)0_;Fna<>MzHg@1QW*rvVC68cgV`~ z5*MX1+j~Y+>vvlVy1k$YElQ*l=#pPKp7#F6_H`2-*e8hFj1;%>5qgMDSWPoZ*f^dE z$#(a9Vt;YnwG8ql7_kZWh#7tNLZ~!R>>W_JIb>ksDgBM#12A3qCiTLI=^3p*FQfUE z-GvWQ2m1pQzyOb3>;s^+3hh9%&(1kA3Y}Z+0{npG+O8lw&-cTnIWj1maIvep+l%5tp;t#%9}+&z|l=`fhXTZ+n+#&5>y*JXvw6k^OW zgzV--yr^x){r7<7gn4|cZ$AQ>xu*{gq=hcKj*pZLmTAx>3Jgz@8uoIwfTWhTXc*wo zVlu;g#>Z~QN3Q+=B{BmL%R;f-Ft;Y<0KZ9*NJVwPLr*S}Dt>506-&)Hlr(+)NLY*rO zcI7GB!vBSCDf80sllm8YwHl?cy0W70oZVqwkDGh+2Vxav7n(7RQ%Gw!HYdjtukWJ6 zQmfEqEJ=|$5aZrX8iU10W^OA$g4O;IyXPSE&{eaIxIT6kH zohQ^D#DzUaV{Ci5U z!fjB^KXg?wvVQj|Bl_{!mboEPvqy6L*R5Uj)A}t4qD{7 z!>^DNkqaF(i+Y$d!9W*0`Pyi>=4`>FkSykL?l`3TUyV}}0p{+F@20oR-RNW(FQgKFR2^Utv|pIas&r?fh|O@G?-#lGtJ5pN%t;Y{^g z4%7*I$^pkUP{=_Gq~QzEWT{eDO(bhWus{D<-oThm3(+*T)$I;?g9r5c^3QYVz^
    |2EINRFw`Zh-gReh|yAAl71H{ZsPjtF|0}{ zblE>#vwAIVFNyQsZg z7r7hzxOt1DszF_%qKAtqwyKT=*oL+syF8RzGOS8cD3-eut_KwBYh=*VHtNH;m2SjF zGWzm00e&4{Eup(Of_C=inVe%pC&4@0Ui+u$Kp~!_W&>k7YA&%sjbfR#-M;A8mwYDe zF)HV39Y;wW8%WOg=UJSlVfq{Tm?0h6PIga<#khxwK5!R?xEJLybbz5NSgBq{X!XC+ zY?|vio7*h&=dL7W9SLrb+_@C@1u%&!fPp6C-gknIz8foJxDCciNb=5ykk zKKW}y`iqE4<>kN+5PBK#jomf(ya@7fb@>?IbgPrzHdI4SptO)dGJ|tzThtQntA3I8 zwd^U?kXPFUe$!|1pQ;Agy7j3!2qx5ken=0&%ir3WFlg%EH<<3fnv#)=VLv06e^lEjG_%}$3K_vKe{6_x1PFZw5b|DS%kx#_tr<>3oouI)LlIrv@NXxTf` zyzi2S5`}-v6=R$AJXMR}|E3uO?O^YewN_R~!0#+a6oH3zeSonySHTLr;srkek?S|B zA@fk&Zago@+!Y6vK&8gfOE&IP`1wJ}!I}EAImB6t3L|L=&Q`$C_{b)^LOl87*W{tz z4onA~VNi-BI6J|x@a!5aF7j@T24SQE|bpu#kMVCJKkDy51>X#8D-N$AsE542Id7?}c%M^LJIGRa5M zk#VVOpU7%Ff?b(EX9L0 z{r#^0iM*xq7gyY)9d+%xymC@l3PwK zhTR^;x-N!0plPJhrT=obss+5%ySYg! zG*E=7;bNxdv3sN%<5SbVQ42Fy>KZ&gIy46Emtpl}XTjw|jBX~(ri8$1=7t#xw$PlU zO8=j+!`#EmnW1YtR;N@aNS0%3`z$&nlXbPa!HLiRUH1chhIjb8LL(`H_j46;IC^^5 ztbwkQ%qr9jj?)5Py0dbA)~@9yt(bNA`gj|Z>e7~TJsp#<96pNX88jaXQ&p;T)PbG2 zVAOHTn2f9YKXqO^1E)g?zj_3dO6PCJSDOuVOaCL5Z@7fj_Y5b%$ncL(uT^)xDH#<- zQ<75%6U>dh7F(o+<*hyU!jTO+I{96mLKQEYD*FLt`dwvg(9#ik7*0abW+yAiK0I%7 zuV4-jVrhmy6uv^%!N8s7sT7gm0xWbc6%Sisz@G+&sLqC&qA9i7ci3NL4G0w*CnAt6 z3N{edP?F^MSbzA~-C2JZe%<-7-wg6%f_%F^YCKGR{NU^RsDHok=T>_$@Hs-6bvWw! 
z2czpU$!0qL27Z%vzOw9&dlP*O?fTg2dlV+G{?7d8@DzATLjLji;qv-|1SWZg9SFO+ zNdP09f1C=R*mNK6zyV1J7+ro8ftN>V?tbEQMmc<3z2+WGy8~y#$Mb*){Dc+Ymzmhd zXV4%7o>{dV>2l(V|py(rAaL2dP3hmuuHn&2_rr=Csfc2f=(FM{Px1ZSFTqL#%BrTuXE7y16aV z9AOu&Ka6|&T_JpkeZUZc(2=vmG{Yw~1NY_{GCGucH&%?0^8^_JI6?Kdth8GMx>7&x zaYEJ(GIgc$Z#Zr93LWw0?Rcb<@A$5Nz>xLvyH?XUCjo*jNh)3~i&DU}<}2GEln|RT z6qPN!j*2nsmAQ0qc27XekziKRc#Clbo|i)hy}Uy9vpHWvNbFkl+w;g0$jvh|3*pz< z4lW5daUU`N(d#4{Ya`HQT4mN<Yeij*7Q zU*2X^iib=MsNR5$_-sHB#cAO0PCCnzrWDp2z#6hPJhulXK85s=$60fsDJn=ql6Q#* zwd;_L8|L*o6@$`wg0KWVN2t@YcDP|BljL2(T}7IdaeSaG93~c(CyI~YF<3D{&7vSl zAsjV(#rSs0xbjIMe7~6TCfZZzF=n7jF z2f-%QKU}~8cH{V*7Z%m#UrQ4YO=+Al`}*wSVJw{sF2SG-3UT{^h_EX0`&q&2ryU z68^9Ei_&dJxpI}wGWy3syv?FFwo8gkWtmY#m~AhS1W`kRm|n;8=#cjVgv=+Uao`XC z-fInnC&(K**i{ga-s60G0pWgsSbj@HuZ5o4iE0hWT+!F~cKUZIZTxKY$egqw;B-sO z?-+bNOsrPe!`-wZ{64N;%|Mgen#U+4c(cOw#&n|ZEZ_6pOpetp)~?T|p|M#ebkPult}^X?W3|HWToT_HcN;7>4O zg#@)wckFWtSBbxx1nBz&YlTAx1y|@xGgs1 zh)u3X5t3KIh-F3$QSgD1;?8Uo-V5LZmN-v9xTt_4S}+Zi8h0H?jx;6zyB)c|y$K`( z%PiVz0b8;&GJ-5F*<1jk7p8~lfFE0*<03HClx;xQ8@83FBu*gu3z|R7XXNjxA?ZCl zd-8HMZ0X+!2=%&+6tlM_c-ELk!mofTx9ZysH94wCz$Crx;fab?Ucv`!abkfPC zJ0o0fD=AbmL*9|JbIq!9%swOy4mT8HthmAGlm5x!JKsECyWqA8B8dsULU;FsSbW zSxDxYF$=$c^r!MaLW_d!!1MBQ>{wD&w|e6D4Yja@hbz!@E^OFtCjG_@q~&&wtj(f` z^lwbff5~Z*EZ3O35hInp_XF%I^1i{oAzN3Ma$*GUYatDSVok5LmHEQG-=4)fE=ClA zQOaScJtwt-;xOms(6akCm^|jV;gK)om($sMkcHtTqqy3DIeYLseBD<#ZQoyITybAQ zseuH+d04s}t$zaqC)FMS16H=1Jok^OZ_!$7Eb!q-}%!?CW>rV7*P2ZqSBHZ+$P9^>+iH zCGR4}hD=12(H`R^qJ%zt$F2f}daPtiucj}-Q1Lo$?YPbkPI^l~VrXG;ft_MlGr^P_ z8Vtd4nCZz-^T_umI%pdNAc1ek)c9+x1bFD+;PJ*oiUu4lmOAU7ImmJ?1ttBqxF1py zin`QHtg9^e059>a%#hC;E4jlcu+(EVP*FgtEI6J!)Sg z6|?I!yfqJXWm^YGki--tSmPtWOmVXEKRwXRrT<6f7T>m6byyicfDf2gfxZ8i5P)u; zHr7<&$3baYmLmcwuVn07SOAeVWi+>00OPd^rV8(_o1~_@7f6sxEC^%zknrjs2)c|i zY{czYTbA8?MlYT94*lOycX6RQ=hio8Eda%^o)FB91C8_kWH+%{4SWXL4%Qz%yqh*x z-%Pr2zb=GgXzvCMN^uS)jsSu}GI*+JbGy7Oy=;m70w)vkOnWMW)mh(|rZ+|s%uVgD 
z*TO{g@t$elt$SM=;55xWd6xbI)Tv_ezAAtt8}DKb7x@Ayo(IsqEwsrOaIzt$^NXytp~u6CMETG2uUxb-eOD~Wq00%eT+$ARGCt%cG|_maI+_} zYeyf%HWV;62v>85;k`DsKX);a#Y?=nIHo5(j_(2qBS)_rO7at%AP?^QP~3iy)$V!2 z{$%>b%l|cUl9%DRn^}dzg@m;8>XaJG0Xqq{c4%rQ8;vpeL=#V%^j5g!Ihf(&OYWTiqXN;>7^Dx z4Z^p(m`=rXh87uo`KJDBS4n}9^otY`RO+%}2lv>z|Jc8s8#2ST&g`5!ia95_ZM=;8 zc7TE(&Y3Yau|7N)skG1z&noCKYl{}wLCTvjK8^WYau9|lK-<&tYpuSr?3Ys*an2ip zPySC>DcS^m zIPoXBQ2-z_Vxmbd^~?oSHj*vss!|B>!)@ao??GLCVhfh(7@Pz?jOr}jMTd5)wx7SA zPD5k;GC6o`X~nN9{6&xgpd3i1Ul@LyU@DwAC~)td5a;%J2&uJY{t>gZ zlFIo3%>-?SaPU3V^aGldcpRCcTROe63kn6EZ;&52nWrreb> z%_N<)GNgPA!fhU7;mcD~>bzTU!ScokE~9Q=|4HTD!{oU={e4BBI$u?uF5Y+P0^UxQ zcxPy)goi^|Oi&+sScl%vToHpgeK<4@Xr9i`+mywG2uMMPm3c0xa81{TV{DrOkJC<6 z@lPTT@01dz$@_R1#SO2mj6ZIh zyvuwsNqyF_tX+Ur!ql|_2RUOc)GG;xVI0fM3|pjJkdx+5gNGLICk&i_8i$B&8BLGG z8hq&eGrIuUDEd055qL;G9TrQ0K@t2bY;{*6j{W(d#4&vnL&P#Sk8cN-pnBZ5;2sv| z69$e%Lwvxp%>rhKXkybWy@h`pS4TvGu!*})B_@bLsj-1pD_h4!sWXJ+Djt8$)lsl< zd3eg;nLsikvtR;m6aH{Y6fM|?qW=2l!2^33xzgH;QTpQx)7Ico*kiD3DJ{p=yg=>P zo69J&m(5Y>wD~8Hi2>qo@0qRCTfq%?#!ZJS^q-^0x|76lndy5uY(tpBebH2iDA_=k zR>DXpUW^UZI0%5EP%)2j8W|KzTq==>mLkzo!9N|a7d8OXSaD>Smv5QJADo@NBV9`A zLI0Np*eT7YlaQq;aD(O&`6Y&mD%?rCt@x`t)HoP}{YauN6jk(q}J2mj%+BX87An8hlbyW8a<7N~ke z(OZVwlyK!)ez7@Tz(iL%4jQfkc3pXQP^~=^67oE)KF_Z~fVePN$-ZRq@4#d6taGkC zJBnEG^qX*Js>f#AK>^zTd)aik3Q2s`o+B)-1``Sr!g%_YK&hv#sUUg*Bnsyc=qGqH ztFvQEB%LnI{`Wo8sj$H1ximj9eLVZiRYza!NV~4oet&ZoI>$}OlunI_6}rgf^^Tk2^B4)c3F!|(TgxRs3pZ!1>zsKjWSNB2 z$LURv&u1Y+h+8mXG!DS1QI@;%JW;KYCvvoc4kJ4$htgRh2LDo36xqM!0}`%GG6t<5 zzt_oce_P^1T*8#`!w27`@0bS=RJ?xUV7lW4o!9my5Ft)j17&6xWat$l7)%R&f|Q)K z3dtIQQ{;gti6>^;C9>OvNRowkXGkMLP$*lUlW15g2IjP1qM!((kZP7?GFiwuqAp=N z6~5J7M09tAnrR1c^PyPrs1g$6iqB$Pd?`m~Rh_K!P#C|+#s_l-k0&hH8kFQjE+yC@ zes0+anL>=TC1*deFkV4go;YYUcxW4=Y6Lcu+x%SH=Sm>2dwNRp@DON3s4*;70JIGd@-9|B*@w??;*&t@D=%rz2kp zS)VQmg;A)w9J?G@M|>G=GDv6A+Pe(Nn={u*bBUe9FMK@!+04_9$J3@s(D|BNcQ6y+ zZ9nQ8{me{|-eK!hL4>hHboP!uLrAuW4MHyfNA-AIMVFdcD<{Lz!y$qZGT}rh%wr;g zQA|&6!((Rq0 
z47J`&8IsgN?#pkzZ|^Clg!dVXo5u`a%?gMZZAbTeEf${??CO!ynZS8XT=(}0$ zDsLOCKykCeGP^fse6gOxS$q_xfO4cyC@QL9U`VVVES6qkgjk%d2V zi2R3{TQD0+`g|R5<>8FhD6+PHU9hL^L6gNS5;ubiucNd|!o(e#Ydm4|Uh)U@D6ShwCHy`bi>JT!t-P_(;3}-ELM@DjRrIs0~`_$V1L5HnbPB1D@;O1EyL! z^H+V))3aSM_`ECfIulQXw84Q&Sv%d+s$Gq9qgo+7!@02=LTQ@GOh1KWKumm59m)y? zc>|v7X)CcSlCS}q3ni_k33L<-QGA9J)x5l+Oaz#~3@)WcHrxA>_FK^^m2{&!DtrA3dWe#>r~w^&#zyg zz)H-C4eDWyLA>JaoJ-8>n5g|O;;#Ny^|#(65lZ1|-46`61tMs9o-eR-ANh66-?~P^bCq z<>{fjh zX?_ajppR0`xs=}+!Sj$?T;e$Mx`krW6Ow}|Js;b9obN2g zSr=JBx2e5I0CI3A3i9VHz{UNswncFnLO*3B<_Ve3<;JU5B_Go2xtnHqVlgwjfVv?ji6*N=M`2O9pwS&6k#rKAaV++C3US9n zU+X9s?%?AQ*RCj}27G+IF__?O;kJM;<6lE~Xt_cjo4{(O-1Nh1ywfOSnd>0VhI;_f zvKFxJbVcS25aNdhVE-HNnXxo9lq!%h^bs;%uZE+ch)f@Kj66g`UYZ4h0$N8ih0dqm%)vVFC4;U#U3LQ78FOf>38^2*OYE`%wp zGtCn3uJn=m?W)>%>Z@KIfLNH32Mm^| z+Vg#~QaK4l-YB)hH^6+DPT3E>N<|5wsU`aO6U6e_3!ZyM-Z_2!6Z>%priO$-sRF#s z$Xmi5Nj3Hezgo@%sOfLXW4%xN{%|+Je}0~#`4i}V%@j|x&2#xV3d&7RQ}TwE^ERML zrv5MV=WY&86Zj<5{+#6B-qn;MGWc@%yHe)OcEFqcZ|6VxG)p+%39hXhAE@>ey-BC< zF)OACq3JY%{{m+h%I5iq`kWT z`Uu#r#_LLbddR)MZryg*Bv7=Xv_QlAFxsW9WNe|99=`pIsdiG8<8xtStpg7?lbiP< z({p#}@lNp=9noGBcdo4hz1j!CM-!dfw_EOW&JLnQ+pnb{Bl1(Ow9*QV$Bbj7;y3i|5-3Szy5U)m@P*3xm3~GO9C}cw(I@L273Zps=7M=-Nad!77wzDX zD0Nf)8R4mKk4ZcUoN^M`xWEUm&XB~fst<{zANsn!G+vVv7WR`){Oo^?a0jNGO43#9 zi6bb%@fM^gpH>Dxta;9umwScy-rGs!jHB{azt$1%sc`UjO2n>r9>6y~Izg*d+r zIJEI8PcKMGsvjS*P0I>*2}i3*c_G|~kET`F9Vd4j1SF4!yEt-W>4RV47h9wl7kL&3 z7?{G@MrmPg%@;qvu^d^J!zLjs3gb(lYCftt>sJ2LEn2at0lH~mMh7FKz&)*u6qUDO zIIkk+qpK1-FHwKgf_V76|NU4OBwe%^6MCi#M_;G^B;}Lffs!1Y!xa=Ci4v|ic0U=`+NWN=iz8}vJj=^*?hk)<}wIG@w9OZUS!DVwff4-0u6LKX`rl3{W@ zyXf+C)>1yV>GF;rLt+(ogP@eaP!Z2Jk0)qBOZsS;Z)&JGD9vJ@#*4`P_@&<5f!)aP z2aH*1?QBc}gp6OBZsi)O=uwL%$@Ck)G}sSjqsmGtFc30n#enZQBb)+^{oNpeh%|SD zWY3fPXN>)G8kr3A-dg;vHbuxxS0h1Qta20n zj%aGcM5Go?qBdqBfMcg=x=zCUkk?U&S&!baV=FvRJ;)dl}9~u z-~WA9^ORGR4zhfw7qfI`8*jU?8p7G4F8_XLNWRS&R2bILq7YZSKw3s<>CdI!nW`a> zFU-!_ji+CuwDC&e?sVR+5L$~Ny6oq 
zg&(j1QXsHbKm`g!at(6&Kb|IYw4`^Azv4DkI?CUmSIP&{{!y(7CK;f(#jEN?^euu~41dKCAL@xQy6{}Tp z0-^e9rXg0wIO)Hq%~|a!@*Bz@2BB2d)CpjdmO;gcF|YhzQcNJ*kl>dPojI{I6Ltcv zD|78RYr{$S;9{{%3}clF#x-cVx_iQe|pJo zV|X(3un6*X^+j)C z@hH>IvFu{oNT>V)iCrqwIBBpB8I{rn|7Pv0noX{8aD}cKHpppdr^DdrCWIS)O_QOh zel&9Z2AT2Y>nj_Ww&N%jgacq9?|YDHASxy;bih<@=F`2!X@c@nP!>yP_CU=4h|43Q%yX6f^p$?50lfDou#>rnGbClq;Z4O_iuHyn2A*@ni@cNh+9E!dZeZ;^}EtDmj8H<83k z(#KK>!=oUZlzYsyyM1Wsp;(7>PSCs?>4vm$L6qny$_3 zR;3rl95=@|T}zD+YDUa_y&axx$(wWlb}O%@j@?j3fcK!(JdJE>M>;D7&3_9}$R@2( zrV@F0`a-d7$Ieh8Rt z+jh)Y2?7`mxUnhazCT$O`IpbXyn=g@%7fi#6H8O7Y#`1Jv4O?GK| z|3iyuFvG9FXDg+rmMi(;30ZL%u3#h_NdH@-9U)BAufU0ghrV1S*le&gLrtN3?3Wp}6P|LL-M& zFyus-!;Xe59WfOq1PhDm?%IQ2ZZOc2%xSU*32PR$SPX}Q`$?~iDyX%s;}eb}wAw0C zUkLze;FqlNA`m%F77`T#t?WiZ!%n6FVHqI@CgrI5l9N2=`EN4Dwhh*Ru&yZa=*NI! zdxlYU#V{rIW)bD(IrjZqG@>x98oJFCyVJmQo`>>Kdy|M&$}^J60D zg=#}4N79ep*5yX-*)$yoYkc^SF_(ktMaIK$@Cj>~_N580S~d6W`g4Q(vHG4Bbxi{k z@i5!MEN5X2CP;%oUV`X@lv;(S-V$!C>`jM^Ov`jJjlPZUA-xAS=Ny35RIEKu-k~a7 zb=05mOP?C?3OabPwojC?94EyeKRpT-*oce6Hypi2j7W7Sb3$7nF1Vp)qf=+Jut!M( zCT?Uwn(X14BtzfUctvGQFyTj=I>)jZJSZ}6Sw(gNCjE_6l#=6tq}pf**29vdf(>>v zGVKvn`{&cJ;tr^d7A)Q=of6zMkI19}y%OEiY7$J7<^%4IHnIAVg&_rGZsP?%vgodtNYPqOpc9O9(aw7 zalOAoHepC-Y2BIdSJB>u(f>DB-Ha3!zNWoD6qE{GEd-s+mz-#giSZ z7@B=%J{qoKiRb&+Nok^|+FO45N6=o4F;l*QG1mG+eq$`|T|8Uh0-7W;7;c$=F!1Bs znCGWjt{QMiUA2xwq;MdYl5_%_Gu)lL>j@dCS@)LQ4wV^0vuW zX7Wu9M$PFSB&5IDRD!0l7TZ$yC5}o4l;g^|l4VlgI(k!c3e!O*q?GnP1zz&VD+U!Y zy)dH1P;hH-^Yvd_PWVd3J*4P5{fUp-)+I&Ip_`c{P6^n-NdErlQ~=Q~!_A)%3>s`+ z)a;$jQ^u}em9?+ao7M|;uy{E%@y$vx+81EM+%Y5Uk-prnVM*SG(~f0mWP;2Pe!##P z6Edkwn9IT;r_=wDOfpv?jV)f!`B5GTp!M*QC$dDY;ac-Eh^I$}lT+NYHqi0{GV%-W z>F)u6iE2r*72)~cl;LO7U()bX*d_bFABmey>qQRyhA=fw2{Pd@d>0fyWuAS0?(=X5Fxx0MZmMx!uMp7L8H5H2s1qV;mgX zqY`gZbjp@IKgtN22`V5O+R4nX>qWH0i}h&XsK%1zDgr-6Rz zSaUz-XD=z$I|;{*EjXNJs%0I|OTJ+uJMv|GHmYezJ zm#Va2BTEh%S9cg*FTgUmV(W~%4XV5o6)>$BACEG4NR}i@5`e<@t2d%uQY{-gKh2PY zO(hwgU7{D+Pb1K(BbqR!$FV&oh5TlXyAonuMJZf@lja#SR~^+AGoTTdQP8pRrZpcaIH2S$uGpkp{Ag58*E^vsk2{>cInzcd 
zohwi(g3*AEs;iiNdCFRp7N*~1QcqU&K9iy%pZaqG>@=r6LC#<=u$32nifH0+H&wy3 zDY*>IU>2Nb9FeX_?Ko?uM(0 zo$uuTZzb~aPyE5lE^J~X6Ca$r8-03M*!&LIzDJ2x{oK-L;rnd9v{Pv2`t|YWt|ryB zs!3q^`$j6s-HeLXU@NPOE;i(QO%cAqu55_7LvXEZq}?&Ni`X%%|`7%y`jbe}HF9j)1Se&Cwg+ z^x;b^#=Q~XrV{!-IRw8$)o*GIz38Nuz{4m3K8gI%;q>g2uonX-*Bce93{cgi^eA}W zh(9bT{Q~XSVUn4#@$h|>8EBf{hhhHk6E6g6zs6|ui2JgCJ)i1^Z&qJ{Erh8`PHzL$ zr_IzD|J&e(esF+B2sC!dT~WtZ3{&|R+(2L?IgY5_OUf$MAU!UuCrqxWOge}oM{(6= z+~SgB-b$zrY2tKXX8e_B57UFI>HwVHh$sjf>Y+CKbC-!`%&K(cB6J39N?o*dcf4EW(j`JxAJFXOFrP00}i|Lv&!_&DV z7O+Ue;yU^QkVp`+5E-L=uIOcy2nU%K z1VYi+VMEX1^|B(`cw~ou8wX%b_Kf`T`0Z$9?c_&DagmrWM=W3tLe@;bDNJn$qU$;w zcqC3o3>g-U01BIya58^e6?`@>X6&M0GOWE>oRNM;43_oPW$^wyy7;;nlV3O<91~p{ z3Gq~v6mNB!la)C^g?FthFu>r^H;D~nfwX!% zh-9O}k^Kc}3w?xxhCL{5jRxsT+&^g7Pgl?1ht*(UlAbmXjj@%hUZ)Hxt>eLb6M(<4 zz#0!F&`@X+V1fQQc{SWgG@sHu7mBfpUXs;#tKf@iU~byOCYZB^2-ZQJRJZQJVDw*Bh&efQlv?tSl#_fL&+MvbaEyUyBk zF6}jeRp5L;sQuC+Az5Z4$}Vi~^a5>2JUdsGNalfrr_u&6bmnle)Qm`fA7;G(zS=!} zq?K`7H7QDDSUVj+O18XX3?i6%gqd97J{x#_k&IgexmK?mDkXuMKip0dWD|s@M!sS`cNsy1QDfGQDc$sa7>;VL{C74c>JaP~N?jVo_x11&c;{-=K zq7OBmrvTd^XB4`24tFI$hm44g{DvP#@&FU)g*OM!5i$$F0H>M@(M*{!AOJpQq5|pB z_+=r_Eu{=pf+v|gRGH1Ai>F!kDtl0+LI&kL*a@zGfrVXsd!mc0N5<5l`l(V3lvt2w zIAs+7!@{{0Mte;nd?C#N>5_Ua<0E~eM0!fs-g&5>be5;$#udOgP1B)8d!|$nVW34O zEP4bdknW=AVF**WL-q}+iUS)wQH#|BxsM^W7M&l-b!lILM*yCxq+_E^C$rUMjFod&#fapEVRvlwMtn%1Y$aM=b1qkJi>E6_0 zW+(|!-T|mesRXJZI)?dbktJiObIY_8bcQz!1!X*>`8wS+C{Eg`)o3$xxjl4s409tH ze0cSa3*xu)ggMqsG}}{E^fc7eY91(jz|6GH3npF_S_%qE1b^;otM9spwf$>=U}n;z zUDd^KCg&fT`tFnLksdzAp{6+0$&m(#wFLa8)sERoK6%GGqdUQu_TqK2!p2q6GE5k| zoh43icZ)iW63S4krO!O@KLHSbxN!&%<$Eyse3m1L}j_Ftgw~k-~&JCPD=V@WDbIGCG1v4 zN%12ldrN5|HpFW^#;1wA+@vk_c_o@S`>6g=;Aq>kDCA{qU^!*67*E*Ajx8rD0uyR$ zCmtb4ZS9GuGANU5L4(7TDDYC60L$q1RwgD`Ub2=#Mi5X{w1W8c;tkSc5a?8?6HVjk zVz-kbh}^~ZsI5lQu`q5Ky2ppvB`uz{p1JQE%69QTTSnrUsDdZLNMuahu*R{h7bZu< ztfGV@SV;o3oFX@yJdN2tZMir@V%JUf5umOIy2X!SW&g(h=nb%Q`Z6mo87g>e`FQ|1`O}>F}_zo-pevp54E_0Li_AD~EIw}M@ 
zzx=j2U)Xm^#?b#D+#nB7oxp)Zp8LomLo-2zxz^%7AJQ#|_DGc(T;&mIav$NYFdYM{ zZ8Zw~2oiNILYck@)?_hj?PpKb7`LR<1>~GFcXObBXhY0O=s31T_*8si&!1Q~)L()) zbX9Js%x3^KI*1By?10=05(9oeI~Xt&YuZBNfpYM*gd5O`K5byYV(-!}CIjP1du04Z zS~29#Nx__fr2|k=C(%f0cg6hmJ*dD&mikGj-c*?0NsY2^^{L3@UWmp^WGSJ_KNo*6 zU?yzq#img8o5Yg^FEJO0+i2egh?s$3PBtgq7SOVdOflLU#;igc(Hf2Hy?)|_JJTWz)Wb3(nboGkqSUwO4AGeR z4BrnyM`i8}L5uot-+-rb5nP+|Nf8j(L3+{PYX*`s52^b@zpo;rh0FdgfCv0fgukvK zq?jhi^#2dEtC0>g7d3wKk^ARAER8MI91IhJce5!<7GUOY*I9rL*6Mle} zG9}+&e3b!T7@8YE&p_mP9a-e*`bRH6m}u$^8L+HYHcxfxWff!(lTbPrc|8O?)eh`r z;JbX~L>GCW#;j=Jen~!9&0v&Y@s42+HH2ciU&S-uVv+d&1LBT4Q2$>Tp1t;SNmK0% z0WrWtb0SEx9}ET4AFzUa?aK%2L$IhxeoBSqDnQnYSJC>Wn`Kuuro6a9_oC@kP`jk-|)DHC9X|U5OeI1$4$>6W5qM7 zL45cOT15#j0$BKr=qd>Ud8N|ay((l@Ua+F7RI!;&QL=w3JP#BSU|1I1j(v9>dDjVZ z>Kc5e0qoz6oUDYE16G|IB)B;H_$VrTsh<3{XLO^ALyz%%9hm@00#2+vI!87QSq8}z zJ7O_B7H*psUf4fkQ+PqONSOZ<;S<0A-w5xZ`M(gJ5`{j3jir1a5Qu62HZ0g))G$#Z zmdRT=3r;^7j7c|S>V_|^>NpkGQEdq!MANi`KW>wVxY%pKu1U#2OoZVnSnq7K>A~0j z^8W_;07(Kf*D~a6SNsp7CZGJl5?H!#~}?KZmVDnUKVI}9GIgB zfL6yuNd6n&5Zx9HvAP+D`q!i#0?$+$i&-;(^5ATgT61(o)9fuEu89i;eG+Yqv(RpZ z1`SDe?JS&&gOwGbv$%n{HUNAHBR6`L>>tHJ030Y9Sv!y;dod%0&QkUY)2S^QhTxn0 z;c};-vfi=Mq4;S>+dM*|vo9q|Z6X1wL{?v~*`!%$Cqbob{LYd91~-xL`@U%sKl29P zzP*;^ygKIpf$<=SPi{?=?hgI0YEQAD@;A4ICEQP zka>JCYC0yPR6zBx<8JdV)0eeTMzXzH_4lWeh)+*lSrno=j0zY#Xcmj<%ndyfI#NnX z;^GV57*>j;7gx%vNSedFCA+4$6U|gvV6yKuXKxD+XvB#8Sh!vetBd+Dp*q$-VB#je zHak~DCS~T+=pp05As~C{aDR|S;%aE<-}8mNp3ZIMsh`QrM<9~K3<9CyBQ2o{?88%8 z1;U*OA$2e#y!wx$p=qGY_yQ8a)Do?tf#cs!>mSVZCvTQL9zPRb^Lm{qRLaL~H+N|@ z2_d9|q8!t!4}Vo~lX<7H2daGquOdgB=dR+XtUj9K%Ete*0E(%4Owl@7Bm;6IXc95g z zvok@gWctwNW$4>5H`;gKXGSHdoC$?dPP5E`ATk9d;60~*&(ud9$LtEL<}3X2dvd-_ zKtI+xuSRf*=zd1Me1>veD{UZ3J(-Cb?E$$djh!ZdGc8B=c(t=L$bBUN@R5R`7j-Z^ z0lYwS_Camg_32H;W}9G!8`0x2f{;#LfvHg#jr^4J?&1U+;^kootNEpt*?I58&w9xn z1wjo)B`kjlNi5)Ea2kKt2zR~F2$xLHz=7gaLoj#h%VMJ*nHBjNX8===n$ZLH`#ehv zGGzy#Qj(-e@c`fQeVb)h3~Ck^#Q(vPIFiZ+k3jHPahpz1oa3jL$y^ZUM1x%ROd!u- 
zz&RU6`Hz_q+|OXvLj$PL8x;OOqMXFAM$`bySj5S?=R5c`4(UHL$;l2ewnt8&;_Z*= zWvZU4wEy-2GKKUrzoA$bKldL15RyWUMx+gTha2I5PH@-9B48@ZB`dqP**(+N=mz{X zBxo{a)}$!cz!V)!as|wll&B+X^zHw=SBzpn6a@-SZUeJ0(pE&l4-FJgU~uN~@qwqM zpl+R{kqP8v1UECo;tp`C0Hpur0vuwc;QcVjCwxxWx-?5Y;Dl-g8&1vTf+`1pZb_Bl zhMELj;_yHbfC;8@@IYlE?fi^CV&l{Gk8V~eAWq%jff@xPNiF4tA_X%~9p#160sL=n z;Q!_Yx)XlaYkkJqXfOXi{lK$}MmYB0*<(K`#6D-~f)8Pe`Pr{(JACowu>x4+Z^|t~ z8N)|t&9iId4NfG>=1L5Q&vQYtwQm0p1TEugek;- zibLzsVBL$6<{R-q_KOH>(>Gf8|HIy^4-^_y`MrWy&O&kU=T}td(3WVdgbP>tftZ;Y zaY|(lmNO`ku6BetlmQwQg;)PQBcZ=z0UDGXGK?=*NLgLw zjhaAG%I`l)ii7^eUJL{hC8RF0u2q=fEYgp5q9BZ-=cje6ro8%EkZ=C8N;ERFMh{Xn z60i-y(;Ai02+=mL(ij6JuPoJS*SbX&D;sG$BsIuj*AebOfJ0pRDy%hwO0qj(Ot4H#Jse~A^UNb^cEc)lG7eLr4+Kra6&OgX64gge|? zgg(4LC}tIb1&-WN)rR>-S5IiJ@(h;ULi&FiT5vB%PhIiAr-s7r@oMve>nnHy= zPAketJl}H|N=al%SXC*Eo)V=t*4Bcq?*oN^4OncAPA#;0Y{09pgYa9j2abiP z7?A%Zj>wj4?m%IvPZlp~c*Sq5g!6M`i0+MWS4WtH!P=a6QszkT*{}dw6y70-dX11r zM`2g7nwb6WhV+xj667+GlYm34oOlWqqgMqI=E7$ZbWP!rJ;(h|A`o;2pn4l zB^sm97z-8iZ;X?*nRj>5t5TpxvF+dVB|;Rf;4FaejoMI2z$!b>p669z103+9_q{7U zp1*lQBAU42*1Rfxx)S)-!qQVB|Wiyo+VHQKsRWslzrlLJEliEAFFXILdJ^#-AY12bZZJpx%^-_vq z$f*OwBI;AGrJkd?fpoJOENnv(!O)bVe_)?^x@W|q4mhe0^ z_=OV@>*G*Iv|J`)IT&)J(wP{aEsUaqi_j$BzfYdem}H#5xgza^o-8IQer+tiN3c`QLH#$D`uctLalA zDcXcS0}lB783%2FE+#P zZqdU?v5ULWId5Usz{-zN;YK!HCL_^px%wkFW-}P%vtL1hpvF@$1iOyislw9^&CDbP z>gi#GD)i{{^m83Z-aaaLdmzyh-X);`pr)2;j5 zffbBaIO$4K*Akt|^y|1Rzqw6tFV+jAemJ`EolgEY3@6gWbXKIE$9=&rYcsMF- zDXXWXC>)uEB^*_HCLzHFBt*@RtgH!KCH{IH_>Fm;Y>x!I(%_P6e7(EK6GBm-uoO%6 zF$_XNp&TyS|7H=L@Ef3s@(Dcx2-fHbl+-kd>ao*GQq)C6{= z`XlbQz0xq!YW_2u1h(vGL_QvzYQ4L%Klh{18=qtvxy;3%}oC>Bgr4Y32pT6~W-CnFjxxLg2fo-Lmq zE?<^&)uB!`pp8B+s5by&=(QGmo}5$o&memTeM+Z)zAT#&oQ;Dr0(ahmw(**A0_l0f z_mCcZk9#w}x61qFN~_6OmJ-~YA(-XL!I5UH<3A83BvOub1MOoNyK8+A5-JSR;58gF z7di^VLmIQe8h!dMLw2fQW_~8>(F)9v>{i^g`X^e*lu%}~ql!IQqcCdpU`oG{s2`e@ zz=<6XX&utE*T*o*u!}OmQd*Fpbn6ai|-r zmlf`tP_F-2pd3zyG{Ci!$i^p}0zbSVBffVm!3u>=RmKEID0Nu^iX8?j}Y@56Ej}oG8abi2xe7$UzmXI 
zs3RC!HcwiZnHV{$oy^l@{e4K+qY?w{F9Z_R*x?9hx-w9vBZ2sT6rGOvNr?n+;z2V? z%*}{Lan#SgOgM%jk{%TsbDxfFl^JM!JX&g-JY6?g*LblnDxfWIuaR$e66u!{ZiABf zo!#<}wwalL3y(ybP6-{4&x|_)=Yk%N$tcPmGG(kp+&jL1I6fE&*+XbfB1-eV)%jSN z0=A!Bk|MXAAh_vSF-QBCj7Dpp)l0rvOM{8#T96v#vLyNTJ}})uR*fSXaMu3x*OJDm zU4frZ5Kf-9?8D3I_o(Uhcg2Dz8N;Dt)?V$&%J%;!If&z7tZw;pGP_;_z8F2Nstxwp zkb;C*!CWk~sx1Cj=Z7$M_|^3 zu3BKp=Y^x8P>Qq_mxmoVp;JIx3DM*%;d*?HbCs8kl>DFdrhm}?sW;^mL%8@LE`~J* zk}g8R3qnPIP>2p)o<0y#+U`Hz6JAQejzlG`*dW}$=)`AY_7u6?!ep`?sFjlBrGJy- zLjCZUUJnnN;v~&GRa6ET4uz1X`DT0=yHFT?#(gWOEQFPd7fJ&o-;Z-+ScoU<9bRlO z01eB9h#DOw0hT-adzx&hH^Z{)NNn{oTDdVNBWY0wH0IGQN%-T$V}#+_4M7upsgi=* z@b#iP(2TC+U(y_t2dKbR@FZ#3=kto*k4#jV#>A5v;42{a%XI;wcjNP(47Xu>;D|=Y z#t;K&1Jpv&MKQV6p<^*eAGMBpXozYzZEy>RzAo(Tb#mSOF!q{r3=>DUXt#}(+PD2* zT$xO@gcIS#eA5UI+Zoz)GQsCx-$G*FdI$AqVUsFUsW%V#eJB301Z`TP*awhb-Z}BI%YQ9FI)A6k-gY zkfq%hA=t6VxRfgz@R0LnYBnYwF7&pjx?;6p&nrfiy=Whl(R3Sz=h;3AR|_=cs}v;h zY9FiHQz-xL9Xg%L))(uuNVYNo-(HK+>|pvF`JPGkwh@7dNpxNjqqg#nR$4N9kb~!>B#ZHz~l zj%Sn{`RNSWsl&;@3wgUhp-F)Uko-hS)<%fHsL~~I`N})us5k643-XpxOPshM@3Q=Q zG3Z{s4g)mSmf3O`l^=3N{6a65pqEVvuEv+(Rba6Ch_r zft^g}tu5P#A@j0E!v;z-7ER37^Wdd^A4UU@r{F<;zCw#gk)U_}15U7@9LvF4FAeRQ zvz{hTKUuoWX|+~?yIkS_qHWZ8mQTDW?j@dw4Z-X|mY7HFDG zOD>#yH)x2)WO*DpB*7AvXPE&wn3$$gd#U7}j#8+^HM=R_fW&|R{|^ErqK%TF!s@2` zbJqYsLp_cUSnSRkcB=C|{yYETXB6}9j9bf259X!tdCCx`9H9o%cSBR~3zQTY?CV#) z6+plq4ZDwx=)~P#Qar(eiL+2Cy*mGISV3l;Y`OyGx1uNy9aN^=8YG% zU9xX2ta{G^u76XU5veF?RQ#;;AWyxb44^AQLkiGdcmQJ@n+ljLNWt-8`|5*okDdc= z92V6eW&~9ut}P%$YM+ssCmo3N697vEe>=y9pHp4YaKaZ7X}6VR)uor5>lgyx<|OOn z{?eAO$gMg-)gn4Sc`02{T?#v@FiqgYVA-4o!(V!dW4Aow8MHrzaWk_@)3-Pu08D0z zoxVu^z@_(+o9?$|xMxG2+s3vs0p%(cF6wfyAeN5cG0DymH4!B$WCk!GS@BxVVK%#q zGYV6U244LkG{nFQ;W0X--2z#j!E0u|=3~F!_W*Al~XNRn>hfzkq=j*>;4==;d zimvo#>YvXIBn0Muf`0F;;@16MT z+5PM0Xnd{vYWQsU+41AV@>6sB?)m1G7tj$!Jbl-G^mI2Y&h>R)r>>B5HcUPZC)g>s z6nLcn`6cw}^R|kzb2$vr|26*Q9rT|2^_=?kGW>PT^z}tZpWyfW-e2Aa7*+809{cNk zkc;v)t^f7%`6^_Qmsqyi82xuz=pIkVrsnJ2Q2j3?^7$39;=b%Gx{q>4we_YYU)(%b 
zQ3(Ug$HN|U@jTkce)L^m6Yl^S=}rhLiX{q(=?{l9Wv_x%%3r^uVCueJL_ZaB@Za8s zbTZueDskU^(Q~)|R_{aS0?f*r9G1Qy%!GdZDEj3S7$TtKpShuKCc7a86HVRt`d)rU zB!Nv|yc6*NI?@$$)qf56^wxrRuqY%W=B)2g4w_&^Rp8luX|tF0e~-Z{cE}{`k>(2 zv_YP>rnPT8z4TYMePK%%6Y@XPG&e8fQU(0Q7brS!PSLX!bTKFOW?!Z-Y$T`d0LA-k zEV5@p8JnRf*mWAHQhGbTP}1k3@o*#!occ{6q4)5}7#q*tr#qDQ!PDGa;)YfwO48K) zdzQnQrDb8py_eWxJ;2&0DYDg{OzVr$d3TU}wQTU?x6UzbE8J&Wmz)vS+~S`8Ic&Zi z`nU@iDdjxriz4@kG@a?<}Ar+J0HJDHtrK`zcT2^(9Vf2O;Kc>X+2Lv&z& zee_q?;b$C~0Rc?2liyl!2}lfIPsh(>pXM?}Wm-gEeF8I?C<|gA8XOMapcWxGBl;Va zBda!T3Ao%`;_x>F){zdT2d2M!2~&J|ffL$({3VtciA6C8d>mnrw*8E-=FDjArZ z;eCXR&srpe!5WCXOaWhO$D(1d*Ge+ZsL(dHV2>yO|lW7zba_3 zro6xv`=N%@nDNZTJ2_J5yE`tVI*6lN}+b z%kTDEr?X?0odaGt6}wUW=73~?J%Mf}Zw;VJeE!C>vBYLt>Mlr+!&PBFg-HD#Ona|H zS^s(bA%m>KDb}J5K&(>09@#Gya~C_2c?FX>pFp~7Q!?MrdcKc+E{^z?=2Kb^=QkQE z(#VovYIY3!C1NjH5n;I*5HC>c^~{ug^>D}GL#^fs$`-vKmM#VmmzmI4%ls@OpRrDr zOtAg;Fm8dc=eZ&i{iz$eRom_N?xc8&KK3ViZcNL`0b{8F;M=3gQJmMMlv0)464FRXw_jj29XE3={a@uBVRvHl zPV(O+V~7KR|2~-I6FXTJ^oA)iKk}kZlN+0EUWgZK26#LoUC2-GrgY zLr;zoQiqmxt}^x{D1(q8UD5C7?e3vn%bGJ$?ne*i0J1ByZ#nDwop4Qj1JU78vUq=! zbVoLi6W6fqnT0xSl4!6ck6M;m&su?Hlg~GDk0QBQhx&aI6*>fy7>I;VhX|q@o`Q@h z(K$J_DAuoW=G}&Lvepyw@mtrAL;(KqpZ@?Tl>?*uV4N6ER<{Q947|*g&}Lno=lVF9 zE3bE40QOvdQ^1$l(&Ok5?AFI&5V0!9u%GD)l8`2Frym?05Jh}n7igxIql<%SGCgc> z;Tgi-Xop}Ib-nfnLQw6(p7RJB_ltw!jB0NtdkLljR!2a$`)5OX)Tsj)xy0u>wx9aW zSU<-vrU$RU#vN)6?OU7osl+yo=d`Rm87tFFz%Uq_qbJ%tr*|5`!$Hr;uw}=M7S1U) z(sYhv!DZ(I(n6z@kX0*8ii3{@#tV{)3$MpDK`u;U*w@B;)7FhR@(ihKIX6nTOJuPA z6&4r4k)4Hhr7ZMVGK9}B+kOc@@DS6?@?>8aSwEs{55qFusAzo~FU?W0^Xdig&mOc( z0ChB3w_yaDbZOv`q4KV! 
znBvSBx`Nc$ipvPkIS%iR)~Fj#=yVMJI_NBK+guPS#=-JCROyhkZ_N4DE@5@G{6k{9 z`OZ)1-VYB@<1CTY;u+fx;31a3vO#>P2AoRNk1`}0pR@M_uyng#NFzjg6|U93nD9Lc zBLsGOTg8F30-p(h6!AQorGM-msILEkWFc|36BhWrj8z?oU$LxugU!04Taq7#2XBVZ z>;!h=w}2bWr84avCGp4|F#7}T<$jPXdF8E9+Yo|sbm^tV)KYvD3LAo7; zGzJ=igXZ|$^JwkuDsTq3!j&iw0qhVtuzgUOMDQmUg2U%4V(`g3O8Tcp>{kdsq7+Hk zyxEX+=C{6uGg)SxQ-uXo?q~M?X~lexRBBb@j*!!fpU?`1G7Gr*AloDtt>a?krLUO} z=I=2Mb}aO?79WCjHEh;{+&^Ezva>fPGCaNAHJ5RL?L0)3-u^7>*-Yq~0a%_|{m`-e z{LOg<-LeKwZg;PjgC*mT^1yX{_`S`OfF}G>NjUwZm*-&q7S$WP@AkIx^mR!8@^PS9X8*E9bzTwi z+cMbJBHTN3=fm2vSv6e%B!J7rK1(b0Umio7Ys=2_=CzwI&`2+f*r2p_o9$QVvcqP@ z#&*oQ6d$%T*_AU8r`D}mlh$;{PR8hBtmfnlEM~OPEruMlndGhv?p#lpY32Y`QWPL?2zc*kJNK5>^D z+i=%|>6rhc(DEZT3&_6fPpP1(yoNm&hBO((rj3BC^8>P}_X8T9i&$O&SyQ&hG6;WJ zm^A4ifaRG3(qS3Nh$KW_Kzk0!d`OMzJm|-@OjI>J^z*0TD)VaNKc;4@PQ>-DvZB|} zOTjJOWStoLX0_9VH>9rp|cuiuvs8!fS137s8v$1h*=vc9{Y@jse! z+?h*lxT%)4YYwgD6yna+)rVlJLV`r72Jf*cJMa=55pi%{^oY7+cU+CibPqeaMW&N~ z{Lwyft@M8>nvJIPvHCCklrXub@NM(YnWxw-&EV1m@Db=ceA69|0AXm0rw`0ucVqO+ zEiR|{+t*$#R<@bo9WcSNrkzfC0=a4vEzR4zo?6#Dx58Jr2RyGr&xhbz^|3G^V37F| z;ur{=Uk(mE(~>;s1k!}wcMI+pPqa+9&*dnZvxW5a0FR2oHj0P9SKQ~`vmelZ8>e0b zy8{5FQ(QhlokKAAPt`V4`}=3pSuHFJKUUW<-Z8eKAFp6L6qA!G`v^7F>~w9Lx1Ivz zt?ZuxkJvQg^vMz@0s4P9B-J>up00Knx_^E4m0`?7^xuzKI#x8ye(F*jrRP@oXVHH4 z5PdlI8)=%zG)>+iFQeKYWIlq*dKRxD^Pd3a{tz~4Xhhj3{;a~Rlnw+LRZ#?HXm|K) zyk`Hq2oBBrr|*lN*Zb`SPC-J07W742+zANgokq#W={iij_TAF#G>_QSJ`*IWnXsO<2@e{>bkCzBwFn&GKtAkQM&)JZ?3TMPJASKmI zXddQ9pm`knrsYp+om5E$O}vA1T7ZJ4x{(w)r^8|8Zud^lfyj2oPlaDCvMu?;U~Beh znk|k{$P0-&DPi?f6qses9%86AH)Re+O3DjQp>NQ;-t%vlQNftLL%n ziyESuQ)4^5@EV+o{6u0gI)d_+(zezSBu`|>1Z?C=1<0^{%2ZL?HCq0cB`J0vd_y_6M#Tobi!P(~p znp^ymV}`a~wv&FSdjhKO-5^-3-Fw*$hB(&V8TNb?|BIwPg%60i^OnQNKSVEMN51~q zh_nSUIJqE`f)r)MpZA9gs}YR40jp!?6AU?hT_ziJ9&sm~WkNTI#D2&h>1#X|obvtmKh7S$fz3A^HfYAG`r^US#OFw8 zQ|46ox+Y3Xo@Z{sp@;JfeU5ZGi|r&H>zv}k=p!5U(pnud$0{1 z+%vh0ZEbq8Yc(^r7Q2nxkAGOTn)q57E~|CWw;kZRBmKG3OC9p6?pnBdFQyVx`xgvm z$mdMLT}yKTecD<&qVkKgCGopJ|zoaX_~mmB*`1pN*=(n9SB?ojZVi* 
zdxi6@QOC+q)TgO}Xp~{5e4wRvhT^q+(L#)SkRtST460M$ioU@C4L^GUwvwfTy3Qalt zd^U{Pc2OvjHl^0~9&g_?QnNJ>Uhqmtt>F*`A;wr>#-h71Ou8SuZizlWiaqpvZyeFD zucA-y(FZ&Db^r()1R`9a5WaELf4u>L>=-;d)%rS|S6ADU6whW(Jzv3qtJSIF&}n;s zz*G0`Ujh5%VSR7AzhG}w-H-1=(cPl&@Ap@PZ$8F%9-qVAZ~D|LTP&ZkGNr`E;lv-d zdwy~bAL7SQH9`W>-J>0FOJg_9SxjcurhsAX;O-mj^346$$9*rQ8cM~x_Ey0Dt6k0e z3)q6bpkL=H{wKe5jriH{g5*sU@rV4S-s$!D+x#@(z5VL|bSJ83I?II!foDoHWAoGQ zYcshhD1g#`wQDHEtrjvJpAP+i+-tw*H&8+o`J6(i?)ul#%zi0Kt6TjU3e4vdgx}e= z?nLhq-~0aEU!l*ZuMl{Q764HGUjJ}z2v+`MI@Dg*E0EHQ`1RPA$frw`D-d)&u>V;A z>o>X|gc44jXulme8aVS2H}Mg=7SwVsSV6pe{1)?S2PF2zWf!MbnmG?ae~gq9m}p#tbE7$5Q_QBSkZt)2XJEWF|AXFrq;Bi>d!8GNnw>!e6;}_$mxb03^LFnAY;#~- ztv_F9!6uqF2;5Y@IkQh>?QMO(Y-FtS+XIco{czYC208sCC2xT*-$vN8CaX=r~ zX*ngaR)cBTNk0t84_HyCZlRgv`(XP0`Pv&_V%!1JhI~S4X&rgo(e5KcwI_@cjyuM3 zE1lX(I`(-W4D*ul=wX&a$97yO?dC!!Fo<#luyCMHl>;_Ua(_B09lDq|s+H8mAwz!D zB&hLop8zy@JSCf3x)Gs$MxIiccrkt-C|FAvlZz|)_}a;#GUMEO6@U94WIY)ZLW0V* zUu~1ZQ7xm#bOg=nhe_6UiTHfLq@~S+c$tJ)zutb>TaGTkJ6@U-s^4O4bx;-r_Qo(R z`ZB-I7k@GRgXg+N`IQ>sW&Xw1i*SBa7d_0ou?-Mk{L=-24su^Qe!tJMXI4yU z!u6&D56fAt&cO@VMKknevdb5Hyfc!Ck+0>3zEIxy>~v&Jt3@57!l&p0zBXJk*6CcY~0y_IOb3`IMa-x#tW->%PMN}fU@dNFcEeH6u#&M~y{j(-up-|Syo7YBPdhC7uIw{% zNr$vwA?N$bIoViFxhAee0Mpn!WmTm=l7O!iS>xSc(oUGeV|I{u7)G_qYHn1&Ckf!@ zTudygSTDT5fNhSk-?KzgkfA~G3xLS99~(V4A~Cu)2rghR+P}-Qe~+?736tXFk+9L` zMX(?qsdzj#0pvelQrt{vVOC81R|k~toITisXm{Qlz#~8AE|V;d>rZVcd)yDWs*fl5 zQxznR&Y`b>*m%}FQ#(hv5O={z83S+TIR;>a%30>h zmo=w~alNCYnUkjZ1!jnC3f#@L@g4Ybf5lRDmQM&RV{XLiG5{~yAtu5*Y#^J z)F)@b-PB{g`yT8$Xk($?Jv74Gt+46$jFDRlouiRu{2^UlsJQOsRPw`R!$P%*_|kFg zc>*!8vcLS>Emh-Dbd3}@X&E52m`^%wT57$s@XJQ;afQ4p<4m1tU_MIJY|gYwkEgto z{|P6>PNk3x?;+kfzhyd#aoiHJy&&Cus( zN@^QD1p6GfzFl@#?>jvN#it9r;C~fX5qwLScF97F^=~suNm*@75v&3`Ix?Li;O@0L z4@Gp@eUXFXZof=+N9GNJPoDO;c9`{55W@seHAu05- zt>0Y5eUs6Mw>Rm}KSZhK^7tSk`p$FO2$Sja63VI%uGRfuM!>18^8?!J=2+!hMb&+L zCl}|z4tF`1*QyjU;}{2_w-p?|e9mD_GFRg(QSM9|DMreCL_zBKGlu=EHplXXCG(dV zO1r#1R$SrQjU0<{t6JLem>7eAk?LRDDb^?r4}MH{(ZJdQW#i5H)~d~`*c 
zN&W1+xB!($7hvJ$_b5Iy8nvrHW(o?({riFT)Ew=pHbnPH zDRq`FMrJCnWznRpRd_`xR(v?Vk+=&gbr*hdqmBm^|5{6Oqw{M-6ABeL;oI3m<8ey% zJTlUH7$$Z9_E1gNHQ6}#irg=lzo$Y$yD8N#qQ{i!)&Y8`yk6Ej)W%yhOFk!U}Q!|ODj{CO7oVZ(`#B9F*N&(Y|&I@(;}Z%!RTC7ZMrH_ z^F){xc#=k%2!wFc`5c^vL292AoT_oaR{tB*r8{Bn~#Kj()@6F3$eIWNLI;BT*u|csy@veEz*mxO$MNjB4H2 zbcB5Tcny6eWn<^w(X_x;^Shnzb1j5izNr?-m*1q3r*Y~ygW*$j{CFEJK-HR=8T40W zwhh3tLXuPY@j-y1XB?=W`)&zKSQ#Q1oo}Lo7Q2)_RKm+i)piKeG$CQyEZuV*XxD>Q zA|*fGF2|0ia8X2cr0p(R`h*e}L}}!hn31`bP|-|OB+G{%PF%glITCGydS_b+Su#qo zA?Rpz%{F(zQ|%T3nmf-Kw7_<7PlbZ6iU+Vol-WC!p!|S6p-sN6=4-46n7B1}(qfdm zM6(B{ek*dLzz|y(+yRzn!!X=tl_Ky;V|z#%Ik)dM7Uwu_{M)!GRfSJNyP=USBeD$l zFh@uKtzkW}SiQ$Q;c+=Nd9i~Oz0f2i7c(?7s$|uEWpBG-*s+Lnmjqul7^XcOzyM(B z(El|Wm-@!|vxuYO!73tz(?xG8x3%H4=Z;L=yNS% zuvf4F`z&Kgqw67zT2|Z!wfx+vel|{k-=h)S!cV>La*`6Sr5a94k(+FSwx7d|qme-+ z%)XbCH@W%CwN{0C*G-OhHktxwJPZH}BX86@1!tYisAx}DE=>qRc|+PVAx}P?CFY5uu)b#zaT7OCXFwwiu&3R*`jaT4H+<5a1#0;9gPX%2w z=~#(vj^;+5-lq`PaP8%og9TXm=MuK&CBfT$OO0?0jB`3sUYZL4oy==(QGgnDKV5__ zw9yJ3#Y&eMYPjPhvwDq`M|n94!-jO9kAzV%e$@@Vw`#YVvu+{Bsz(OI2t&-=uUo}a zC6q@ZPqF55rSH-I4Da%YhF$v!yMxC_LyOht=9F9qABPq;bhygwU34>hYK&*QI0MuI z58_sc{`1Q{5<9qMws3+q0C|H{K-6DPTZ^ewmq=~17b3+K;$S!lZB(J!M`e9=)vJt#9IqT;ZRn-|ih2PBd=of1E_vf|Mj z4&H%@%5V8hiQ)%;INFkJY77{x$hE7fFNlsO2LK&yS=DRkp<3Pkiy1in!wfP1gBbvh zwgu)ok^p?3NJVaK2@ors^LE5zHF3OgrL!_h_o8s%OdK;?{#Uy$arbh42 zkWIRtms8qc&`in$VK^*Q!@q+O)8IR6cMhsXDc*EdE+KO+=O3o_vb#KBZN!*I;Eq4s zxHQAvBBXm-JUZTc=s6ZCEd)~AACS|#y4#ph@GMDvYj4->Mj_(BSJGKn&Z^mKPT(@2 zz;Z-arT5$$2H@TS0tV0`ixd&27j=4VsnrZ^&S2;TgfgsZV4-o)#Lo{3e$`b+q!vYQSI4&ct zZA|)&P5=X~%$N%?ptwMd{IgE3g0rChGF6#tJyFQUfzQHgPO0>ywNnwDwOw68%B#8b zxb=ULfR0|ttJ^ph%NDl%f000pHTqJ|F}@i#qDD6}YW=p>s}N|N zs$@>%5ZC*{e5(88l2B+tm4RI+wPZXZ34qr>O$+!*ud6DsA#AazetDXvyvyz9|4$P* zpT2Sc^!uMAz$zGs#Wi-9bA#JZbovT&GNMeoYpM_0t{efWQpfH(r5 z7XxYoUM#4z_dTY8ZHDpx;_Drw>uSSx?Kq7a+g4+vv8~2-R?yfhHXA#Qt!Bk$V>Y&J zH~7}`yx9BQdyntm{4vLv2Fh?L0pLR#pY#q+ z&oSwj_zywnt$qrVv z5EPPR+x$#1nuPyn5HOURg=L84hJ*ETDg5Cda+fUkypXf9{FPlVy$NL&J9OojFQSO8 
zw3z6?IVNh>idPTClvi7Rl53y`6?e0Mp&sxAf3bR<7${fVlR>SK*Oa?FFt9+pNYi|H zv9?CCF)d^Nw|~zXIftmY$qsXhb0EAz^BCWPYG2mQI1XB4Nd_jytO^}c&^18r>GeD?5Fls)n73$9qj9;HU zn3}3l6d~TT_n1ped_PXfcKax3p7?0t_Hq*>&VzpyfyEU#+WcxLAP3PcPW`>fiU#ft zv6$5tp@Je0!PY7JiTH`^*o;)LOR5=m8eCPjV4M3Jgk36laLSi%;`T>0Teft zglG&hg~^^aN~{656C-rtkhDDlPsc7v{DU(kMPzvBR)WeLc4!*JUTuPiPeo3PXccKf zlHGAz2;j@~@cA6_SEn1?#APh35vAKirsd{aMriOEAs{RhOJ!aBKl3<-u{Y$nOIWdi@FbKH!MOL#pt8 z(cSf@C3-N;82T?Pe?f77nUo2Ziprx56NXL=7k9t{S+$dl_{(BEk5N2Y-8(Fd+gc(L ze==zSlx`_wg+jTEA~emX^Zx!lc!Fl^H@%Xv`23*WMig>T_~hg-Q5AJK^vtd7z6}^? z@>wkW%7n~tCc-c-*keJUqtDRcY&o$tY}AZ8gqjsxy4$Pf7Zvr?=gd!qrQz_u0jKcR z316;I3A@!JTjhP2OAI$ScU z3g@19q7>-|7ilLz{IWs4oCdZ>Y*{mhn418>hLU1G1y`SBro#fi5k z-q}7az3^&W$U2%wS%YA4X*ahI+ly?OSrN6)@@Pjm>q35bRNK@Q5aV1G+S6HpoFCZ4T*?6=ah z(5^Y$@500NEM61p6yawn%w=u{UOf9)m`Cm2W4Jp3Xi{E^1_k9^DCjQ4wp`93S}G{d6ne;hNu)C!sx#!~0$mFEiXy9kLU zCqdn^mU~h12~z#b4!{v~T28G#ekVcWvg53&xq( za>&~?g-A`c7U@AR|9l0Ti(9j zIxflHhiHhS;#O}2Kgaz)ewtb@zc&aeYu)-UFgpQM;W#>lX3ESag(AmCN|x%Cm)8iL z-t3WBEKwI;SM(bme^z=A_~&2Q)5B^u3jSMC9_aa#|1vQBzX1YhNwq+Z>+aEh+BhhM zh3h5`==x7V8Qo&gn$q#zRAO_U(0~OB3=n$n{}UjBb7$4KnT^H)a-<+OiLh!RC=hW> zm*#&c%KU#7r4IDszl!pb_7`BpGdQ+y_5q~S!JANyyuNDo)l1X{STZlDj5BaohbJPt&23^0_ygoM$+}WX=#Jy z4r5T3+nM2?!*hG;9g^2862!zvk1 zgXnm3HAXs3lI!0AsxzQ;(W;B?IToaDoBgM9tI>=BK?N0?Mz$ESXvwv*B2w8jb%uSt zMU%JL$@j!%hwSG63o4T2B|QI6s`y_}0p4R80YbkbBdZw&T-;81kvVePU0i%T#VJAO zXi0@^!1+hx@^c3ofi_{nM*_24wZO)FR3(4Q$ZerAkyK=_Wl5i+FKaRRGU4CBx^fJ1 zBp~0-eH%p){nDuO6GRguB&;B)oe9RBQ{!2L8}eH3Q2Vo^0y<-os2=vqu^@}v9npp5 z=Xuc$PYz=I<2=I=9a8V9r^aWnLL%iS4~;DbsSwR6?;BgfNvi^`(rf{IOC>ZZ1a1wWyJN*j=-r%@%dJ&tDz_Z2Z9+3 zyjR4$$hdgNp&JB#qA3Q#Gsib!nq!PWHeimf^u5gOMjI7Ob+>mndC^K|!U`m}HlVzw z;=4HOPjYY$AuEAFy@o8P&;I~ZGJf?JZLIAIS)qyokD`N2PCe$&Ba~wQIJe5fmR{p= z=tg;CFfB0Q#J%wJ{DRpybEO zpFc(pcXK^>mf5|cnF6)`{2!9`>s2=TA1{ywiZBFuWhmUa?7cKpL`r_}VN>%6_mKxS zv`9qX#Nr?kN+6%O#dG$@O&ZBy-I54hW7JN^5u&B~mI7SW-pI{-Wxo@q+(QK|3`ys5 zx{>A8>_+S>I@>UZi;&76&Ya8 zL4A$opyTQyW0KK?N5CtRC9VM<8eFwRX1xs2$1T|yat&Ec60RYuYheoIoqoFD2_uKT 
zmOST*sprzKf|t%i#Xkh(cHH&7b z&-XOR&O{J5>l%YzoSFn0c9?u}9wNPvq6_tXUg#{;UF1b=;uYqDP?7)?VX^}ZNL|I` zKD~-+23B;tDfmtf&AMU(pKgt?Sj+{PLGxa?>nAIZA+}#Z45Et8U5A$1!?Clg#$i)M zUD4v#5PCYYbcKmym5=>Q0N_6Kf*TY-#i$I4S-$jao-Ms+0T#W=gU`hPdR@}qAQJK3 z#YX35CCYetE0e$-yRMWT7NkWcR)VKUjy5msNjEOH8MQK?6&vE^Ji}JDo4c+_)h@ZR zwD-5QVJDv&@Mk;r=Z4U`)L^$Qro_?@=+o|oAM37Ej<#P$W@qKNzW^sZ_Ip_Y)PQ0% zfENbnq!Q6nk+1;^QBMG~N{35%CGm;DZax$K6wd8AU%^5YU#la9K$wGLNG{V+;cHsY%^_b0oelU3FCd6M@U75WA4`QY_9uTb45!W+>-ns070z^}CB zP>l*=^q3S1b@^^Fn7{^lF`3j~+#wowB@BdEPu@K6o;?MK%3Yer+$(ov*sNj8%ll|e zL}X~?Y}gK@vfJwq79~?uZXkTTZmz*2E9_4@cZ35(`3$o)ySa{V5)9lJmX>&Gue@e$ z>y4s^8ck!fanRW(dQ)|>y&i;OqpE>9{5Zddzryy=LER*1i~tv#bEs{>Cy1n2bui1b zJ#aM3(<>M8qhqrbNs(jqpL^hP>3YFALs%V_9mp27zNH*28w-QNsw*JkqZ?JOh^M6F zLh$h^689f|w2>1sPO!|vaw|Jk73@9c#BS!F&+x2tu{$a=He?31{^;V`!M`toZ$}t> zMrv~K909w@1_Q->3e{}r-G1x0>ZYY9W1p4G*q)c2)6ZE=H3!;}`FInHdw6x3v|Cy+|yUo+T-+XJo???d? zo2qr&-e=AI1d5;W9~RM$S|H}Do<<(0jnwk*@NzE=aDXiRb@koZyhUro?4wJ9e*yMp2hKQz_u0=!$BH9+iPhX8vJ4+G4T$Bpq==|wU2a(J z1%vY5qtHXX;;m@uM=VuzWn5$RCyKWn>}RK-Tl3eK)q!^u=j>VluHfYck&JQ=+jUDWMLS}BW35V`S$8c)M214N;1W(CLo`0$ z$iAjt7AdFjva^_}N$9Kb>J4x30W2Ork12lZ`s?j0vYrX^o#}GPO@WStraXP1cBcEZ z)E7wMo;sr>3X1C^WN&dOC)D`Kdu5M=E1(^v1Q-duZ^94rW-Ph9bqdZcFFqR-cneLXrRBV2^sA^I4p z7WvL_zhI%e#HMMx@NE=j*=`Stw-_!jYqPR$k@jKMOH}l7hN^Uv)rH5qxwF0aXEb1F z>Gj~8vL;Q!$`%TIlNApiU_IBL!M}us14^CBGlewT$87YM^4Hc}_u>{#2@Cst3_N}w zE;LrSFFc4SCIS(GnklK!XAuG|a-N7qfhcq+&gM>D#i{{mrqA2_7JSl5@Wf;>Gz^?C zu`B2qrksMFvvKrVVa(Uj9nlN}dsvNzxaG@k542^VTz-tP+~Z?W7ELJ37+L0_-hnLUoAa zUch+ta*axou!UaECott9KLML0`pAAl$MhPkQLQ1{1`@&QGdI8LYC=>wq{7X{Ayt?T zGRJTyvR7$vy2CbcH`_{(nAP>-xlSFr^c8Oqq(aS+O`ZJUfaf)53U>yLT4nT6 z1c}+=vh=6-lXY=izZjz#nGd}oV-*8DQOgff?SwEmXM$z(&y0*_Tfoji!>q%nrX%n2 zfV!iVQVt4Y02VcoiCk71oON0sbNH&zQRkByax;Q7r*Gx^4&pqlAPStXh<}Z$eFm%F zn3)duLd|A35o$bsI1>GP2&`cN_flj9vR+|Yhgs2p{o_)mJ!ddlBf3L!aQzQq2R5%5 zG@J$dB)9Zvx|3w=9N;p6#2EvPiNYdWn_D?iQy4Nm^OT9Iz$u<6Kf7q2+nW^Gi}6|iNu@l>X*t-BW4cnaoIaX6S0VF{@U_cw3^r}M{n3| 
zd?5~~@Nyfs9#Mjq6O*_gV~{N7)ry^C%g6Lk1BPep*QtUXAB;eMxEmr*V#7a{xv*b_ ztRPA%WMVwKyn)cBDTjgnP3&+bx7X!1RfGYhRzBKYrGd4Mb@k#AT+b!9gT|yQ=a{D$ zWC<)%JfN8Yk>3u}3xgu4xpzohw_B(vdV-p)Z)3Vk7*mh_PR~V#nGMV0eC}CPnij^b zpY#@P%^GOAC3Y5*H8;9o)8?j^l4KKHsd?`XXkLrxg2e38r*`=(>ogew8k*tcs-+#hY zx30Vh_G4ZmK!ORy&uXKW9zR?FnOof))t|+Uj zthL)W1k(Ob(ZSHS@5=m6LYB})cwd9(T<)I;#4%r58l_--_&loBD}+KOp0MeUlFOa#Cw}I|7+!YUJgud9qmNeqf>-BF#~3 ze{HAn{h2BGey-A}Fn;HW7boTeZleTdvxLa01QYx55hx=WVs6=LMy@i>O~7qi3G6-* z%sd@bv;NCZZ2l&Z_fok#du!GDJ!f7Sz_}*dsoGw>Qnrm6H#(42+Da^~ z$SuAXb-OxHP-0Q`Xy&6H7;7@zUS)FD=WYHtRN!7L#`gf=1b_y43B1o>>h?(+(6xEW z3HR1Qrkx8W6$;zwEf@5=mX?Cof$(jfBt&S+4$El5o4O=3%L{@MZ2e1}_202PKWex) z_*jBwT2JJ6qqjii8&~Sxn2ZW<@IUEPlf?`u+UEziGMz%(}@AgZ@I@F2~< z1f0&p=~oOIJNU8HK`QXpt>zF^d~b&T#d@{zg)qh`2N~I(J9sO11^ek|G_dVCOY4na zTwoU^*XM8SO2#ujzvn@!Wv^7Kl6jpkx#~HndTjq^Q`}=bfw*c=7#Dhm1mA44qIH!~ zQdtX?kJ{=+NjA4V<9S3~Tr`bYaEdSKe0JWYRAGi%!NK=|sfn>02eoKCmspsFupSc> zsR*j+p#rIpPUjVC?e1&QCLqf%y_vpLpu&E2R>P)O@NfUttxQUj^Fm$>UVCKu?>2=6 z)MQMy%l71a^C|S$lvRa4ezi@qeGAiM(M@PgMF5AwX_q-Ko{y=g*RZCKv9`-;O-oFJ z*0WQl2};%q0v5q(SI-bC#Hix%5svEDK*@rd46uoGFVmJ6B^-CR8SuP|2lRD|ewXo+ z1*>M;_DeZ zVi}<}%r%HW-S!Rj3vg2jZvW}jX~Hr;?}vcmG*oFfa--)P5e;WD6WnB-=B07ngj+b8 zuVs)h{XuYV=d&a2Q2tmoy#jHOyq{DCIyp~Zqn$nVtMuQvUfip6c;EV{RNMMn$@7g4 zmip-HFzCyAg4*xw#zQMZ0b@-t1h#y|uCuT+$gzKf_N|(0l!46BHJtD@yM5*fz|EdE zDy@E@703qg1ioKqUkwi$%48B&OWd{Xx!(ztVOA1K0-Ja>itcau%4zPCgV3PqIuO!q zq+4Oe4E;B^wKJ)KwlWY0Qit0fc2r!-_#xLK&TJB?RP;1SInzGXII|w3Bq!=-C_KT! 
z2)6XG^Kl`N0{G(#VYF_9csrtw_0o~iuE@n$AU(&J0#4E0eo^XVqlRNw$-Z?fw`o*- z>S~w}D_?+2>x;8oq&E(#&ywP*N%O>5A{S;ggtMm1O$n(LLcZc&!P0NTvp!l9Ha zsY3B4`EDC!DEODV?RAapw%3(~bP`{omr0XmRK8Fo_%@-t(-SuR!4T;*R9IyCG?hCf z7~oHi=i_LGzTdjCc}_cm&qa=ZCrT&b#y6r=zK=m0rdJf#X+Z9BwuxI>+nn!wNf2bd zs_-Ncm(V~~ka;Jo^lX0bdQ~Z%3klqXQFrC0LvC;0uojq&+CA{Pl&PaO?7wkQXHnco z;=8|Er3(D1(us}3c6u(8i*xUujI%hp2lyJlqV8LT{OkAOP(dz-P%JmW0^FI5i&OH3 z_5D8l@=j>PdU$G#%sr~kD#46ck(uCXuVt}QTdDIh!0&x zi))Gw*cu;rF*sZXX%B6QB*M>c#ed$_{@kH0;}IsWoqNk_$+hLSe|0;{>y$H`y@-%A zfxJ@GFTDd9ChXT%l>!ZvLhrrW@8h>t*VNYIfg-KYL3JwOsEghSpo7yDT!9>?P&$%L$TCQJ2aGVdhu=>+rApYF`~(J3lmaD_i^Bq3O)R zsD@VxhQnu_RA|dLCV#!5G@QCBc_Zs&L}Z^cyt*UM#!dAHGsx`zV- z1*y6fA++UE*G*Y;p>Uz+K3q3ZtWq>^5Os>Jg{HaZn*fm=zWNG{Uj@yt@iM#q!rqn| zeKC8jj}hqN-J0uvWbyQm9fi%+DYN}OWES>p8cv`@1FVhzPeT>5^?B33ibgItH3$N*dlN$%bQK7cCkw z(x*j6WS9PA;UZc^UNWVP3(^Ith;xko&ee78Hpee)So%COTKA=Uj};!gEqsZ75kd0N zM6e4?@eT@?R*?LQkTs9g8LUH>{b!H983?`0X7Ltgnqp`Iq^@1X9LA9~wC}0939LRn z;M^+3rzFIRs-M*q(SELPYG74{VvXK)7|9Fte`D3p&_`LsdbkslIXMzqhqwG*TvLn0GymxlR;2r-!dDFod^( zZ{#giCoaCKES~SOvlf&X;hW{$qwsO@7?}H)B|KgkXryt`CX^?rquF8e%jA)~bXW*k z#Z~YGqbWdW9e%jS9ompK@R0W1nyvaVgIuMOu=<_XsOXv>VkN8z?r)KGziw$Nbcifg z2InzuR5p$C7r=Ew-b%@L+uD_dqvEawv-GV} zH&q_$C-tL;X<-KEG_3?!CLp(1p3zZ&|4Tts8SSobU*zpxQrCnLTv^ZApTB`VZD>j+ zQ`uOkg&QSn{(TiO@*PYvw8ll^OP|TU90J3#(rhhl)sF52xjM z*t(6)-Ckx2JQjv(E&d9ZbaHZzN);HQ?UBeirsN@bE_=k(m(w6 zdD&3gqQZkqf*-N?1{fol^XDKeFVBYH_h*G&?-h9G(+#TiPganUAx} z$t7qOcaWxBn$f)owaxI~&)=VM?fvX8%3C46H9-Hp=PodHax7Ak!bFWZ>+CM=S&s!L zpN)QrEJbFUgg>M{7A}sO!6aFAG{nUhsdpCzQcHTvP>~zV&jIb$M$z_Coa<<4X{*cY zQl_R5>29JtOjJ+Z8#y-|=T)`PQui&HmVTW3!TjOHPdz*4L&1`-->}wR@Z|Z_?|t-Z z*o@!uv4~q=eaQsxD;B-gYcd3fl$b&_MoolYojHlArzpC{-sGe<%jU zZ@mn*jBEwj-{7#I%-VK+(PzRE0D6{aw^ z475-*g~9?2;UU^E+7#$u5t>q4(-7L_0@Itgor-asytv^QK)UD#Z)2uTTl^?cJ{CNJ zKAV&D@@QI0a2BKRCL5LH1YY?J*O`1A3zmIwT;fqOPqN+9>{6|YsuMO-9p$#vcjm1f zeKcN_+q70-w^Q{%iXbAvs#s7w;F(D*o>yjC7Hi%^1|Jg_CJ5mc1E1L zSsyCEnJb@>8jApXy!Kc$!q`fzud%JX72yoo6t3McJ0_P}k6FvorHkKohRPV^NuI-q 
z6}d~r!COz%iS^|ZmWbx@U@7am!(IOw;6;+}4|pvkIPr9zhzY6Ay%G2D;bra=>{ceM zOKAiWMrUr;neI#7dUvNzk)(%{>p5F0zgN>$N9z(G1_%zVO`#(o9DbK5z#^Ql@k5W5 zA&OnfDflYl75q1RlOLLLQoOT*WP$hF=Lv_wpox-6qE)7E(&yyJUr~%8^&5CBS1orG zQ9`&|={{5I)48|#!}GupIT2z@Yp$Ps=lt;qv{Q%2nPe+j%9&o35BSTI6HLgMWk{ui z%z!@u$-P+9^etO1J+r*A6Mv~tEr#`F)*`t>DH+b2++#SODupC6*mZCovLjEL}mL|#5{ z;;2VLxLXGLW=?_@&7oQdF8KK2zC9w)L*1xiuqUiZVR;)_-wTP#+Y`zYzuHx3n;&0B zdIR->zWKI4MXd>tqRr|!y}%UkB6&y1}x}ln|jkWcj9`i{u|W8-%EAVZ6PHQhafcK3+cotlk{#S(!Nrxr~Q&Ar&v-_A7UNt)wp-~yWN@+er< zfi4wX_Zw<|amM$Q+0x#L%x$CpprcDAp_yvDfyaq#!#U>If%_G3*q-`~bQ~0WGzsq2)gd3&n_xROlEoQdm$sxlXz97Y-I3?V z(x7L5An^5zsU;^x7Tdpi4(k8VbN|Y@jVvnO4`J|qi82}gA>}rZesenIDhK=HvbDsS zWJAR#DR5EurKY}eiwq%yxUI=qs>N2p+tL3}blWwBaX>d!J1ZQ|pJR+z#EcnJc8B0E zSlubtx7GkslX!4`W(PAJ9N~Y8x&P8~Bu4*eIWxfReou0Sg(Z&Ki?f=%m2L%((VTOm zJ|Wwn^q74=Nji~8k-i^av4jv(xX@Y2dEM&4zAENpz*ODpq|r2B5zcs?;|Mql^>=$y zqu<3CCyqBJC#2AE-1GPI0<$eyU3gd<@y+Bl>6^O)skH?teRqHwEoY~ z?IbDP;8MlT%(COf=4kH;e&?)Rxl~pL#MlQZgi{)~45npsUq)(C?KAoJ`^OmU*WV6Z z4_m*=p4C`gBVrFw$VdAX$+Jj?S_#IuwmLn8c7br}^PCM{HJ{PGkO4_(!CC z(3FR4R)EHcCue%LPRRWl@Bj**Q2P>`4^;rIK zQ<2TC;wy@Zm?x?l;5Mui`$XUSX_IU2jrBFf&Nog+z5H--SVk|C)kkn9LaanpIx*CF z6Hgm!r+20&hpyd-!~(eDIi888%b!B$nc=6silg1vM!75NX-dne zfMoz?hdKjAiKwN2!gw6-=8T}w*5FcEO21fsrU{fZLmen4WBt?k1vRJt*0KlL?kk&< zS1JxBr)Tr}b^4?`*gzg(&A=o{WinFqUd1`i)BcQ{n-G+xW?r?%#3dW1jo(yc7p@~% z-dx~-`gQwdflVDIS6pHKnwxkv+yd54@a!-(DAAx$CDH z0LY#r8$`uc7cYuUzg2qfocz^1D+b3!Ft~=J>BcFCHUPWu z2#3RG2mwTgqPu}Vegcc0l<9o+@rQA)fHjg@LindhN2@^$L>0= z2G)p26%PEi3%2YG)jbz&510-!;qJ1>BY8>y4i|;9^`&V{+E}H(G?5fHGPoGC-f3+N zUR-JK1}RuoY~MgoZucDwIrz~4aX^@iwx!PaGF#JY4vvdXl9i-p9GuOtWT~RQy&;n4 z79hj;voZrV>6NE8= zSAAdMHeuskDV{W|p*VOzg1S2On4z61Z27395$G+Hc0Ot2#NA27zd6Y!wYNH-xfpi{ zpi#F8fDj%wJlSRG-i^;d`ZPR+tD6M}B(On=!OWk+D;8qZ7F4paR^8_v136Q5coo8G z0t#HBC7{f52DCP8bF_>1=GdOJP%^Ueo#W7SE+7@|U^KqIX|vq`nIdVtr!yM&$^UQ? 
zN&L#a_~s;0*8y0@F1WqG0jJf#Ud;lR#`>m>F&f=-EoUg~y%+$DB1KgZjW}V5h;8$w zuuI0w6_%3Rp>L?$XRw*Nt;E}}hV>ov^@EvW9VYoGO z7Cx)C1uq>hK`;U|modzmSD@6xHRj=qL6OhHlYMz6Q(pY~RIHFDBcaJ7cj{N+FH4N|X!e`Yfeww!p#xc^e|OR_?Nc@b;j3 zcklT>!`j$eAa`UJn0>S8@aGWWx)?YT3QeH~YP_f82zO`U2y?*#tJDx4IlDaH8~^OP zX99Sg&a|LA5YLll3M?96HXPu2g7L*{)FNE|9=gi=4LXnoEx>sb*vCwg1hP44{$8HK zyV7;)i_sI&6P_LeZjWe&=l3ALzWD@hjrHdSVOnmD)$Y*b(A|FO+u>xtl(MR&J9W%$ zWGty5ZW8tXN}3gIQm7* z$IV~W%=|71_$~Yl59Hv*W81#xScW2;yy>kN0Xa8F<-~`X{yVW3kue;Ci_afBWu(b} zovnYPWH{bAQQCY2m8;QPVr$)N#Xo?pB|0l4tGBb2rwI+@3WM}a!h2$AOLpEO5~Q`( zr+u4jqIUhJXaS{{rN>o3qIm#kHtoy)?9ehV|9u+`s1Ph@2fPQjM_xi=`2JfFCn8La z8-=+ba?em&kRuKc)fy6cP}6@81ov;lg@v_3X(!YncD$3IO_}bW;>j#@a!*~`&$vZn zZP!)VmH!N8)9o9{ z+Kx~C8-K}BhqYn1y}R+{^^cK#5)ThR{<>FsV+|;DVV4X(TWVK@YZ8fal6aZ}gWLV( zC`+~hDs>m&cY{wje?|;6s6*p~7w=ClM60q~!GkzuIB6D$GI{{h*6iJuo%Ip{^u+|0 z7OwF=_-}4egLto-sRL2VGO-aZl%XaxB?^O6Pj#)aXr1rJ{cBGh8Hu2`O z(1QLNAl3u3{*DT=g@Q!lY@A3)*uFnta+l+J!~arI(NN1+JBkdtQ_W6l>m&jxJ z-vx>%r2Q~H<;{#aqpE@Xp9JYHJ!?wKY%reZu963Y+9(NM-3H9Kj%#P;a!cbYM|@+2 zo$6BR*N^VGGHrz{+AjEXVIKY99e)Ko{z6O7%I&`0{6)F&v=-&E`HLo`({+vRVyZ1S z#degblKwb6vni`j@P)#v6@)_UT>9r98Rwrp|FZ--vOZV z5lWyd^ibl7!iw2n?`eDk&&_=^dM@G!EXFWgc-0E+H^6jHFGM6kJKEhjzjoZ}=YYDq zG1xN%sDUd!X2a(zGXc~buDFA(CI*T*H<^66nEo8?KNmQ5`hpu>c_ycd}Ou-Rc+k~yJ)=Q=~ z33EaBN*^w=-}rdA6w9Plvy>MkJDJs-NEE2Tuhq6oHvE4?q@A6s1j-sNJ-iJSaN88k z9Hsh;0oQY^afQ){EOamYJHBfrio+k&P?G0hAyu$z(tk`{sJD}=}C3ofB>)jt&Iki-A zlqH%DgM(u5NgEMLeFAylr$MzF&>Ctu0Doa+{Mn?_8lhvd>jF^_0B2VPQ~NEapO3AK zL~atjHdbiBvM?6gZr{=d(KI5(ME+$>!bH_kGFE+Wp4EiS6rHCBZrHw~aP?oZ-8-Tc zg!1Lp$bFd|W&Ppjz+?+aF&sKBCHqIzFX?PT)%&1k(>xm4%F9e9QXU8f+gY#OI?Lu| z`1V0W`K;M(az8TLRz4zDUWc>ZLqef~UwpYHYkh%qH3^PFQ+G>nIgyR_@FcR$>o6PO z<$9IexWA#qc|Toj)&adV3)-u~GW@m?`$PEk+26c$^Xo?LixA&_ zi*;`aa@Sw_U(UVt0o_Q!t-yt2D7(8H%a7m_uKC!Bp5RB{G2hRl zY$tQVoW8oERL7yyMSTUVBl98{&X;INtcm<5^*XBHTsyxirW>30tAOD@>@ild`qbnS zjnL1_MR!o<#Ms5GSr{{V1qIEPaMAJc+K?Ln;mV`OGEgn4R?a=nQmhD6(nA$Y)j4WM} 
z>ZkVjfg8eAcCH!?Nppd?+`XvwJIrdr!}w=}JicfSeX<#S2yDz*}uOe3V#v}W)DjIb^CYs|Ev`aBA zW6u`|oZ#P798Stuu%hpc5<*6B_8R~Se9?}iT0-2L+&X^naK2M4oP$W&DP)3IVycC-D!j99yIm_G5 zpU_d}oJ2Z;SFXdKGZtF!3RigI`gSyJEW|DNaMxKfLBUOEUO0(DP-TI@7lv36+l{ z?1K5E+B$3!y@|b%9hD}w$%!iEXY_?2t+=uI%{t4tEbjuE-pG=fyND8+YnEdih2uW* z2?n?&+j93f^$;j@n=B-b`;bz9T+`BMkI z=cM|9so%b-DguaUu=WNdkaYDDo~246`BmawN#_^;EJnt@8LK&J+$OT^&u<;okM$7qPJ>Tes~iS2!I! z5(f;LTm}@P4jL9iC9dAju0}X3gsrvH6XdQgyecMiaKdEFJmYMuXPp`>;fLQyhaU~r%6@D&TNgY13X1w(>V`WmQ!*-fHp}yY8TD4Ln zk=dM2ICPLMYHw!Id_5;_q-2Zqv?u^Hq>3?M^kKZ38CCW5w#>~wmeZe~vy%%*eHQ`Y zN7mi9)7F*IHc4&9N=_g|Pz_70t(cV_5PiN+W|p;ta~9E`oz(#bpT!rr z#5$W)+coNgxXKraV?9Y3=>e|`jHn7yjL;VX<$OG~^udu8rq=Qrn}&oHSWOSKE9Ekp zb;Kx~fNX!M=stgA&ps9Tp;m`cquu#+60fApedAcy=l<8!e%qGfYByCz=Z3GgOO%Vh z)W>bi(t`&1;>Nf5gsjX|2`_=XI+6~J0N8zxSc8k0GTG#7*}QVI`3q(`9%nmy4U;$L zzTXQa6{4}|?cpr^EZ}AXlPn_L(KvaRLkjd+fk7fS>14vJS7g#J`o|+ z=~*`gxo%RFbhhCdE2tl{+1ocp?VW5<9x)h`S(!EfSJIO<8jNu-W@TVo!F9zW z1Obm=dxdPCEVElwwI9S8q~eyG$(Gv$U7X$1!s4Y5c^#!gHyLagTpF<*OM{+kB(FOX zcQeMf!sOb}uW%2{4r;wT`UGQ)<>m*S+dZnn4>@Vis^zh3DC6gWBO<`KZ0Wjygf9eywu#yoS9ei%%Bck)#e2a@3NTAh7+gXbud%c%c z8sBojx+Ua7dqzHh53f}<;6d&F+2pjHJYvGQrM8&{F6t^>j0*wFKM3Y4->H>`_z(nT zm+Cf5YEf~UlDU0<9FW_h=qo#6&q9U+N`00pn%Gzh@+{)t% zS;DzA!~xTs$fX1G`ur0qBs8|{opNHfr2}I<;8xjhbn`s-TjiU72{Kuzfl+s>F#a1y zJNY&Nj-2RbA2~p39@dHzLxQ?5&5fJ&Wbe2q^m+duUvC)|N7R0M;_d_qn&9pl+?}AI zgEnrByEYKq-3d-`cXxMpcXtis_WREL&)k_0^R;`es#B{@o!WJt{oBvn+Q2#+r?JJT zGRXvKFvvjrb0bSIm)+S5omE)+@MF%y5>)Tvfd8QEbj~1MX%W5%2S|;TUy$R1Ui>}3 zmk-BXHDXoq45Gi@Q5$SqXRw{5mpdHVxphc_WjwM^3s1J;(pAsP!`-u>lC(bv;n~!$JW%0s_!il7DuP^ z=33_ig~KV3byRUXA+>;*&<-$HubC<(MaUxpDYef0s~e@^|D_*khl8Y;x2m5)qSBA& z$ryVFTg;69qaU?LavNh5XkkJQXH_vqt&Utl!G|~`HE4&ZG)z|9lPP<%(W?pCytxN2 zlaLpy83)^@>zPVuJN`+py{a{sZxuF>i@C(hcrpMN;=1<($GMp~r z6rsvI3cQImS>N~&fRS;C&6s$|iX9N5_-1EZT^`}z3XJ3@Zr@Bpa9r6FP*=`mp8wM0 zlLs_KGfQ$5c1M@{uXt?I{qiTK#{3sMmi}{}z#K$8inMd0GwD45zLfvZ1}Te88=cdV zZ#Na(GCafBj-91oACU+Z`+tZ;HXevbJe?OJ8=Y&A_#chPx0x2qj0r|IUM|>N 
zCi+D4K%@@WG-A$`0nlbJ)wN{z*~gAO^`vV3#?0t(M0GDPh$Fz(C5PNUebUf?dI(5>@z>eg z-JB@2^e55($V0sZaA&7CFLV3wSNlR=Z-;JWWUjpS=({|t7eL9d&b9bL)IhlWrnhN`N}9;sk*upF1~h+QDodhscINhGPj zkZuNMppKQ-_7bebrT;IQ7jbGol=4}_v)*Cbf!3f3lO@SlN0gMN*FZxlRP7`Toi5cf zTFwd-lfSE7_5Hm9rdNvGTe0d7eXqQm?&D0;qg+i11;%Ii_%3^07foCM3xIJSpNcaX zN$tar@HGhMx+8j)R}>n9I*LK1&?uKbR1f0&JfB*p)76-^?wUWS&bA_?`V(FWyU;r2 zu6s1Ba;aW@hq$mr4|NXWy{#r4a1}qe zd}CI^Q`xGoSfz_5q&3&D>}JKaD3dL(4u@w+>GUEFya4IVC`Oum=n>sQFtqa&;W?0r zoDxHvS0T8$6<7h<0?gY!8Hp6r645|6$SlX>CSym#VWwx)4lJ>tb`-ma&Y#ApvshV9 zUGLltQszdi{s?_7p?qofDN(s_*8RdMwLT?gLt+|z(tUrl=qu4g|(Q` znbK$#e0LvoBXUZwLwi*2AiAnu$~hR>729aqS2Tq#16erERL-pxa!2$x22&CVTN=ye zQ+TR!uE0t&c-7oJgyHbjyT|r6$IzPG94U z+GaTDWWir)nW8yW4woyw&-XGJou(baMH5wlrAJ&{Jk6^dvu_T`L9$`myvLXx6B`q{ zB^6Zd^q6cuJEg(kejVCq;HayyslU+nk?zmm0(XEfe=>JDFs_ zi=9TecWi{Ah|>pXDk?g}%_#2(#*p&!6TpCJtv?5E4-2}or(}f~Ll+rJH(#r?V%UWE zGas!A?krChPPd*kH$U`{KL6tFU=4kAd6R->!v zla_dhl7)*R{jZ5({zHTLzHPY)^)S#SSqHa(Tg45ou=Fciy>Rs#8ik7W(ZOMHLdAlV z2E68PRGr^z%LRkeN782`Z46b`q|51wkxI)DMdhm$-Km^EW=kp`;H9T@*HQ~sZZuQa z$x_SXiJn=n2r5Axh|L!S;)lLaA?vgS^a!8l=?ne6ZYj@<%w$U-g>bKp91ED<)H)gZl>k2%r|&0RNlGW8=_t8ESP z_iNTut$LO|<*4p9{beg3>Lw#u%BvGIjJ;-TByOmxGwpY{n^$cq?jUOHx2Ey6J^hc{ z@(=$#j~wvtNaGop^{m14J2=8qK;Gc-&sTG=dWjLhXS28|h}aVQV@it%v`ED^p4cLg zIf+_4zy6ZpDC+8EVXDmmBf|?#X8(Ed+(KyC=1o@FYwvMddm0?z;DvauNm@k{Wp1=0%xy2AO5*!;gl~ZR)k=y$>ih2YFo7`9sGPx#vtm*oOGB!%!+4e z)O$VVsu}9)1qpm?Tz19vaDvl)7F=cl%rXv?vy9|i0kfeFA(w1KE3lZv1BloL|Fx>z z@o-{RCBf_@2Z8&-F_@w^Kox4*^Mhtr9N{aso~c0_LYK?7AoCszyliEauAKukF!p^S z>HnWw1!5*ClE&1ZC2QViHwPAzJ0S$WK4m(riS9%GLGm2`e6+~wXhwglGubSn*bOTb zDILRaoGNzjH7RP@-z?!q7)9nJ?TvMFpl0L=@zTWb!{UZQTq*;QXnFsSI8Lx3zVQAy z#6FYAnSQl|etZ#OfcJJ3RvMOA9MiGU1$yNEAI#FNE8y`l5bAr#h>~a=v5=IoO`Jh~ zhQ#rqea=2)D}~W+)3VCp0^;6wN&6kFa@Fn0E4W1qhHulqb09)$sEVcg25B&8mPcCVU?^ z$N5AF2}aZ8x>WYLRJ9BtSP9qYf7cdMk_}19(~6Q7Hh_#ZY?EC1Q(=Jrkd*M8++%2Sb&n_OwjZHsQ{*T;pCE)xFsj(Y8} z9a3WXdwf-CgUO#lLFCdX)ROWu7GjGD_VhL^zn>$}4D3;naNBI%>G!ym+j0>^BIyAO 
z_8|QE4@xrcEigWPpM}bAPO1WPFf#SgmMoY|tj_$*aOrc6OtNTP9V3fKhgR38_!@NS zq_w{r0?s|>>8FnpID!^JC@M%-uk}nz$eT<=FaFw4#@^?%B_q>D zjmACq7kc*2H+Sbz0Jg@6+MdLS{mB+l`P7aZ_>~PmtEAs7k1(aW3my;5OR>4WUKx2i z;C{g_55Lab+P!(5LA>KNWHs5pU*TmYe?}&n33CF0D2tknr?r0rL*E>Locj8(Mw*S# zLiHR7>7&mk&RwCuf9XFsZ}h!s0qqd*kB#b>{lVz!1hBLwG^tyG$BqKkTcoE48Nk8q zJGg-jSRI;o@ujsiv}LfMDbD*C)iO>Oj;RT{v*oB6rs=uOYy@Lonp3r@kO)pwG4@Pq zzTbQebe}foqp@t%PM7_GBaS8PRo{>gmkO6_y~m@c|ALR`wr;~^_6Mp5~wopwlk zmwb`jp$e}=Vjfc%L2Zx5j;lNCe`d>!prg8B4EIIpC)h;lU&kfN#_CCIs{$h)$B+LT z(rs9Pd7HSOG5^GJJL31FBGP}gu$;|bVV;NZX8gplq>^xnYr1@fz7paB%K8~`qPG(a zyB1L32&5@U9^b2sp+Dv6Vc#G}YumQwO=x+yicx2_f9o!@fuW+ZfA4ffn@6G)p&LX5 zJ6j&ZHva5DM>tEND4oJkwETjM1V7jv$S+($+S5{+=6KZ=n&8oifS8P{l&>q|BD*{~ zK4cSWswq5hS^$f29{Z2QJbEI^j*`;qKiz=IhH9=hL7g4gR6-d!yg{T76Tyl-j*fkH zn?Ll$dL`RW{yiSraAS@e{zT|cquG7{9};=DB;cRT25wdA-9pu*mkw+PRw)j-JS>)q zw?~lJN5yofqpPy3cI5h8ytGd);-w_aQXUwfb(&2i zd@^74Y-q81e2CFr>|uj*fM}U?tE|>~|Hk*l$oOC_0E9WXzMS?^7Xm;(;$?4T!R zo{Pk7GYauXm}@%dAmTHX65L9zrPmUECsC>pxq53?zjtSG4qAH=A{A?b)ct6VmNc-%($ zUx!VgHewBfgQ#Mi68XTXMOx;nN5ppZW_^sz`*-SKqS2`ya6xsEIs6dpTiani!o3X) z09i0xHti=ve|T|P@gRn72E+91qHsW&k`e+zyIUM|_;QFaFtmNa3e4&{fIrj+<_CjJA4U0%_v-(Rl>fXZ|nRrXLrh>nao|vrUN2&Kq z4%)P<4ttNK?KZ`vnk)D$%^FOLwXv0pSs1`q`=4 zF%DrViEBEPKMGg(N4UALkb>jI^egy4)E9$!hrGqX^)v08UoC$(2%@dT zCkW}{#C6DdlppEJmG8e z{y|QW^rNYKUdY$K1$vhF7)Fd#Mwa`YR>2F8;d8Qth4eNvObF546)Ly)N-3u*w0UA3 zv2907{x*Cr#yP-kB3SN#@k0!04n(#Prp*kr$MTR!1fEV}5;x8(*9yp2%|RWzTqgk> zc>gzq(*Hp$gl^uscZKP=PrS@`*|(D0NQLuqY(0OHStCx`>U9mN2sPhM5fy=9!FK8a zcIp_ZC*)f%1qONDp#%>~FAASoT2wz@ifQ${_Z@4{%u96sAKy{1o9z@`lc~KPKG^L= z+r++i@#|*;M+S``gRSk>Dd_RKFzB`dYfALHZdn$<-};;e40JW(d`jt zBGSSI`hS57V!Z7Cz3nn}G z8c>oXQY{Cb@36f6?0`!AyXjBOvl}S0l?T!qVsGhTK;tC1s>jan*83dN3M2M`1RVqi zr`)-eLC13c83IA%7~~wD5h^N#tW_lDv8P(jCKBwyDzmf4nxAsuYrtWuz&Hm{a9g_i zw*Y*K{=>C9l$57GgeoBBki;r>TXUip_*+1+cck=3!DEA;eD6(w0<>=F(@BfhDSb3c z^MWTtOC#N`m7eIV+cuRy$t`+!t2Uy~g@;l0T#e8ajVlQwH@4%!&h(~cHa@o&BL?+X zgx=6oJ~^dSD*udi&du6*82}5wx}9#@zI(pQ9_vW%aklrDu5MEu7^UM(wQnZ@AabHI 
zuTi&e>>1zzmv4@R{MWyM-6>!fx$?IVBOi3$S!^#vz7D$m3!bY2v5c~%I(IySXGi~sN|k8u~Hay zfw-DD4e2ke&nI)fA@Ngh#Q#42U<|&wuynC$MhX&6PG{2x>AoM0^ozvAaaVSSKLXgG z{eEz`)-(_|sEq(1dB65Ieo@$vdhmQQGkEmAN0ot_DKaxG9V97@McEBW*Py;TL}H(* zj$eFRY&cT}2}%n=+;=CDAV|7KIcdV#E?w9HGw3Dc+;XDea+ z^9X$MXZejd)R&<1_-m>uS;o1M8&j1em=u7hH1?qwYqj_OXEjAHAN$r7?qA^O;ns8) z?I@f&OCB2Ii1-q}c3OzObp}Sew0eON-G#wU| zFdhLV77$(xYyRZ=Q?@JEN7DUOyJ`m5w@L)!VcNaTopR+4$k5JVy0-0JkA@UDJ;J8u zyp8CY#2}^rVx=}-BD0i#FM0IQLPo=nG^lsEd%{71k4-o2l|>#~(#)(QH>2TEDYQpW zwjxKx^rmW65j4Wt^HMP1Z&W#mMsy)nXKYa)+eGi0dc(A);3F_F(YNF#iWlK!<3atk zp$45N9i1Zi+1`9qL4O;ZuBpoRvwBrEcN|P6e zobmy>?w@drf$^Y-MXrio5-SQ-pN~S}L@?WeTvnG^+G}doG=2ENs_M$)1_BN-y5BK| zrsKgE>xzW)#B~FE?Ii>dt3W;zBlg+o)VDfc)hGy?|ySCxr`KNGt1Z!o>!!sn< zSomt{k!^kDiPZCmtDZn}9sC5dEZeF)ekM%;B4N)1Z)V6IUlN^>5Yc31n8~JaADqRl?Mvf(aox4pY+e*D6benAzrl0)>l&6w3{gn(q&HW#a98<^riYd-8)}ABMe95CRG)-$p7_;s|MFOZ zXQ<4tr9qm9R`}K9w>o{P!?j;iE~A|v6GvuHhtXbz3nG2#KBOrCN@s~XZgsTmt@&@< zE1ndXN?ah9c}MMI=VZ3%W$A&&Kx850968uUdOZ_KY$Jk6II72 zNhaP$b*shWC+lgA-9dB)slUZSQ%$jw=pi=uSDS$0+TNJk z+u==HyZGUs{5qez0fv%g>b2kvV!b_5`*wGX@sN47T@oHK6v%drc)wBr{je6;z-@NC z#WoJcqfM23c!&VDNk*~eiT~XWeR&9U>Egp?8+RW{9Eg|oFQwk=B(7X$w!eMK;D)&k zP9w@7daE&@AjlPlC>E-LM;@pWz~MEY`dROz<9EtF`>kX%L2E1QgQk0XNWDOmbbgz8 zL|It6a{Mv@k41d+9`PXrQ@sOYthD;%N~pQu^R~2TYvgr=NF&@atd8~T_i(hKQO8_) zbCgUpqMFE?rQD#7ff&WBFGhNF3Ny*tpiJi4ThXnePH3#agSfWE} zG1XLbYu~H03{nsmtGCEZq>42PP|O>UL$b7dDeM}dN`!z;o%@!SULz=a^+-$_-E@V7 zmR5kjmI6AP9t&a)v+}0+5uYiww^a>mZHeOhFqFLD)>DJ5+SR}CYD}hDXczazNIA{l zXYB)79i(=B9%D7yx51R3Sr{D9{Nt;DG%|18TH^k`IoKUH^bw;YzL98VvDsm!jid;a zgUnojtjBPQWU{fp`ih%@5^YLw=E4agB21kr?*}o`F*P0mq z@9Tx3^l~zB5c!tn{qcpa877c|ji@45(!87wlcYdA=Nt@IiN*Y@vPZWu8>oYwExhQk zb-KSTW>h4Ev|cJDPjEiUeqdedxZRwE(VP*I3f*`6n^$#CO6{wha+D2ISxrMes{Oh$ z;`);q2O>z`&YK%2P7q>p+_L=xdr7Y_YObjv;I9jVjAyS;ja}2XiE*g7qFU17H+FF! 
z;Ao~*I0H7pUl7Vokl`fu@-O|^t&6l)g*_G&$5M zdN0WY%2ddtD46EaS?P5ZANxJqV!{(q7j+lBLF>(tF;rZp>{$BMb`IgISBYAF^*GPT z*o8A20Y=E$39wH%fMxHST@qXJ8RYm?O>;dsuHas?euwOf{Xx-Jj($A)pL5lV$)VIw ztDkr#^2`*0aDhuGB4A?Ri*grw2wmF1Yiro%ME222p2v{?Eb$DdRmhjX?p$~W@jTdk@ zd!y}1kO*pjegbYUOH2->c-o&54Y<`{ZE(j53BJqLy0SU?g)I|haCgSqiJ#YRUA5bc zdr}y?ZP(E2FTM>HVT;2L&Q(jsiJpeN&|MqKGEwuM_%!y%_`%wFk@5EN@F$y8jhDz^ z$TZ>WB>^;Zw$s*44LFUJJz`{gw@7zScQe>f)ShXo1@t7rP>wl8TGv>yS~Yb8M!qG{9SPlq*&sAX97Q3b+fmJP_?T`lOv zJwM$)GC*^Bl-VkAh_*YY_c&54MB}rx3Et991S%M&O$u8 zcL!lrkK<1k)NhuHtEJ;au0kdSxbfXYt^V`l2nZEXp4=0+>BQ!4bPN=_GEj^Msu$xI zB69tMin_l9nLZQcH6E_<$wa756eiw#ujl@QE%i1#K~yS@@P~O-L1y2W~L^zY_F!< zUjgUSy#(&HWSs%kDposSHdWMA2IN(_1ZNF)j%6S#jX?4W6}7=x!es;!zJjb({T92Y z^96Hxv?-5-i*R|2o47DOp8BM8d`-wtqG}Q=Doi(I?7XQ%qykW3DdVA4ozf^xT(lEM zaNNy19~(jP%B*ur@$}n-S>2L7l503m8G!qiDPF-`XkALBpne?;w0!QE{X_gQIuCngEO=epaOSgVfMGK1k5))tv3FlI@#BY~E% z5E>t{M|K*cCF1nR6T@0h;56??&vzMM${e_yAZ=9Wiwk!^lVD49!CXhZppRD^Rx5_^zvff6`u^*%kwFH%B+QbP0|NB6J4 zj4Hl8oCuq;I0I0*Xd`rQmjiD81PJys=5LI0<8D-pbh|iGrQm2@$FfhxWJ~gfb+HtW zf@@qq(F@4M7r914&akNv(J+lJFs>qk=8pGI=CUVbhXX{K$4ax>GTeiZ)EzU_UKZ>OoV*E^ z1U5_Z&*kv?3Ra|Jpfvkzp=ls}pl(|}p-%rT85kGvcjqF=A&nU~Z$w-_b=F-583$28 z&(@^cP_6+UZ$Dnx>-^COw50QCcJ21gsHq?nQkA<=X0K!Rq^jUVUicJbT^$ELHw*0k zW9ZQ$DHaj^ot#ZI{qhn(Ud}di%Wx1e*L7@S`6vCUdQC*+G(IN)Ri@^HOfTl>E}!aZ z229y@E-?s5C*M}1zSc1HFZnourSV{s{L2abh{yDf%2^1JJVJ=v;qLlEb~j-lnsEFn^y7#hh8g`pDAENA>$7pd6}(n=D+1x7QCz>Vz96Tr^LLwdHPT|R z!8N-f{jf)+jpY4<4V;ex=;y^!fT0)*BGI6CkFQ=}Gns`m8iqGg3KORYVb^iPhv;&M zV9)|8MY{8;rA@I1vw5t?sNudf>nP-nX0r4IT1%M=IsS_^^iP16Y$-H*$;)y?O*4T) z5T_|(;@fWKMl^%>8t3{X<>!In2;j)u=@Bg$T0o{F3N~&`q0dQ+r=|?|a`d~XgJ<^uQ8p`XQZ&7bo>(QvA;ccV zIV!}|sNcu?XxEn7sUUsZ^pdvoW<%rIR~5T%B?y~y5RWf!W#-U{XX|ct#@?0Mqtu8r z`3gC|IRQ_2g|L@MFjdfvdOUi(^f5Q|9iAL>w4Z8o(D*TtGmKXySY@|*{R@zvde%?M zZ!&FBI$GHiXgjU@H82FYE6kYz_4b8BsVRm`O@8 z4lHOn8xy2r+~MUI{CriS>GDMf3ODm)x|haV9igWaPK;8qZ)sxK;}UHL;VCCIG~)1E z28$fIM)Hz*^`h?dU>dJ(!xCFGQ;uIx6^Qh8ME+Le)2!%*7yz+?R*SFIy*8*oLyf2Y 
z{hLsPZCmam&srcJOkFgt3ModarCO>+PfAHde)14kEJJTCJlXuF>+@+Ez;vFb8CL`q z!%VqXS>AAjJ!9duU|Gyh#vMND;B+#fdzjDz+@gFYj-R{&3GATP)DGIbtz4i0i_%EZNWd2XEs*3T571v@9moLY%gyH3oTD>%Y6@=LsvQ98-A`93IC@3?sm zLFqYKEa`N^Pb7bG&h&JrxZ6Xv%tnq23HmN*2`+X}&nq2$2BqkQQ^%?JV>7QKTK!@QyLr)fZA%UJK*(^z?oXbk_3bikJpNZ}3Mg96}zXA)j87)V~3=n?7 zMq9ywQ>~NDeg>h|sJZ`4)+by+rc$e2TV_+TasXV<6k}Z{ZcPpQh9& zVmwu`9L*Bi8?kcF%*TJ|0O_-K_Tafj*icKDPeaXo%MEXkNDeCFC%^Yy^Q`F8eo$!t z8R4SZL5BajlX3oNul6l+^+O3FsgPECIWjhKaMW^ZUYDwVBPT~CmR$TyXky!kqLt|f zuT1O#55fWg85N_0g&sz7{kBN%y}O3!hjy-I`BZo6xt4XCSc;pgKM`eD!Lo zy-iTAn?Wm$C4f4_^wB~EY%Psu-vD(uCVL)+=&932G93zs*{{t?q=asb90MGKllm|u ze>P zyQu_f;=Nbf8S6ZrZv)_JB)jQI`AW?Y4I8@MjHz8%gLv7!-uTmgQ8eN(~LOp7h%fsfIh4X!vOc#{ zl7WnRZOfi?z#izc{nj%5S|2l!OSW$N1^e^U@?6Q}vpKdsxP`;F$;C{;|4XU@PM*w{zyYuLK))?q9 zh;Q&=0c3W&H1gn%n>FQRUIDHyXTC2wSECINuQT}$tnO5pxoL1YbK%4#NSzns98Yfe zO=hi8qj@l;-v6d+Dx&oGW9f(FcwNuamfU0Ix(HN51(mB|9Jh0Vb)w^gO+K><1|tv? zN4vDBJMUk_Lv)X;B1)Z3ZqF?13}@Xub5&84Nl1efu}AxHT?I=Sl#+l#QNJxd0dDcJ zq(lk_fI?%n3Ggjt+zb@=o%`@C51trRA1^)*aeU^RczqE6%x6~zcri|ks6p6B>7xhu z2S!KuF(b6>lh(kBdqaVtF{0`F3YIolffdhAOS& z(q_xYWzPH09*<oXH@d+&o;N%7@V+}uLsb?xA!ZsLI@!qdefNK@d+QVdm%e#WemoETd)@=T?~#9ups(&AL|I%hm=%UkZ zSU))1N5K42&1mo6KR9zR$-iMZq-XwH0PmTW%uD|Ri+sG9|9hzuS|4cr{T}`$wmtCv zXm6>spRG6l-zny6wHjmB2kSqNl@c4nORdAD>RfZ$uYq@E5y!5??t?IUQ()66;#1~rNUu$^)X0_D@S z4DM&V77#N}=p;*F!6@f_HQKMtp}k?XaG_cw%b zO~OetlVw+djb95VsJx97_*urIn6H;$XoxYu(}q$sA26LHH9$t$$0AI{*9&bH!hA$8 z>F2On!GKM|W{00a-jr_9C9W@zcS93m<=^!e=2Sv10&W zN^+>_2}p*YR@A)aogQATfRWk}7!97;aal0PX0G2XRO(PW^7y3l2!*XO zdwT`;YU%JbsP2oA2CjpZY_2akl$qEGy)7!y@k*Kx`Vcjl*23 zALQ1iYM=h>Ns#Z5POKEO)#15|O|aZkY)C8AsiH}bL5>Ny8<3=qgM`^f?B=0^H{;9) zj4&V9d9WJ>^>@wG%dL-ndzy5&MIV$=lf3tBiW%bTWn~bGFUDTKpC6`nzevBCg9C@5)IaqW7_qO`U41Fme<#QTs|@ zlb01t>AR2BAoL#ZX_yz>G1lAUl<(*wLmnnkLOl@ZLzjZfvZnWYbB4KkvPWuH?P% zFDu2!p61Z5Lim1=IBe&d>+C7{I0)$%iEA7dou)^}8krQd%jgXT=5bO=<-H5va(#o% zkJ006Uy5@E5q{fswooVBq}-g)pNTsIE&)QLiTu+xYOc*9v`eVWd&Y~t=l<~5B=vDm z(A!1D8N<*ce9fNfuymKG_wOa=F~J#W(EoW7xAL1{ 
z^sF2g`Sq{t_Ky)T(d6HxH-RvU6FpxqOS0yvatw0)%*?zmEgF+Xao`!b2q zEmkWn`t*D353?iZ_v@uA*I9`!?S{no(T-J?_B)_E>=obUHv^ zWk_q~&TOIz%vDRFAN_&pu0u~L@$qr<34Fv#hw>)N$Zu_$OaI^?Ujli`QJsjkQdWoT8#J zM!F~P$eq?S#SNvbP9gYvwI;!sN%pt4kd5E+iUZcY_~*c@NgZ@|UmjMBcaUDA1yDf& z*2G>1e7KbRNfnDXy;48_3#}rEeMJ%;Vs-oNCWf2%Iju5nVcmvP%qNKcI50yVJF&&& zzB)#Jp10PC2#BQEtVmoIYaI*NZti_%LFAV99>9Y~jn^$9B;U~hEAh=F*3{%xVDwpN z0rz7eBz+ism&p;5q0QLw*HONpATSy}H~u`BjKHC9goZrLeNa4VaFz4CxDz8AIao*) z7BxD6pipHZiQ!d+_Edy`y+51cVxHF9SkmKw5Z|fO zgNp7LNQx6j>CBBEn=?3i9tG4h!Y9NAqk)8qXKMb``!&?W+ddJe<`D3B}&U6%KX0pq$| zL3CKq=^c?#^|~Z{MRCafHLPmT5;>x(T#Nchip%U?<}_lvF+cI``cX7-pg3#0vFu;Q z^u2j@jvL|{V>WP8l@0FzTVS5vQmKv_B^WX5Uy%f}0mvI!S)2yPDA)DYD`icFA(R=w zuhjv^j{DTr{TlXjOmJ*-^vDa%-T{(m%gBm=T}l|fZBBW>ly}oFW$a%`g;+2p!Fekz z%!I#d%46d|WBmE6dSYu5NX~PLfMNs;=z0}XR)2#sfAm%Yn&lszhu~7q`#qXorqp49 z#dO5AK3HGIH*ITF&O5=|~ONiazmnKH7d`v)-e zm2zHP6y2ln`EA`lRO>;x0mB!;vc|kSWu0*D`SLx`O|f4F6#pUG2Qt#*yK8#J?ZWCJ zz_Utj_RX>Sk2#WWQ-b48%Ha=nYX$9^E;fx!VlDNjvdx8rk4 zLo+D_M&47EwWMra>IO@imkv$DNXHk5;wL6fBCTujInyd$^5t5F4a5HxeSu>1W)tjI zG6EJWgd1psQxzP6b=|8>=0J{IJ;kZ6-@Q z2O`bT6SPpUD?n*XM%8~}wGTZve&GtPEhd^8y>?A{?cdbmKuwx9b=gOOzFrg)1El)p4zI^+BOvD4t|-1!tm@ zz3^7zrN1i6b0+wJm}1Zqk1er-WLvC0;HSy_BdR87XmXAU7^HxD2_5W5gu3qa3L8De zgJRB93Y~B7ANK|5GzDX>>Sw%>*Lo@y5GoY})8?RGzG@r((*~VBO8I)P7U63dv;7E@ zNe`TduB2lF)zr8BZ~B;IVn$k0I?pb!+DhpLupF|XB&LD zrk1uDozkSQZM0H4EV<=D!G7n6>~$u#!>TFxCyF)LuI)sENr z`}tRE=Tk-tWoeoy=IX-VF~+|nxZNz$y9$=)oT$LfM$}b!q1|(Z1vOlm*FUr0h-$tq zEMG!N2MBY((dg1sX}CC7Sss7ypn(2~$zw5MCL9|?$SmK_7a4=Jr}$is{^AaOz|%%{ zk3=S^v+W@&;X){rH%S~-=S(TaI|+wvUuPc$G%DV%8w*$uymct{MUTX|YkNM#EM6MV0IDN6?r9kH_yZJUDIEleJm+A#AZ{R(dp{adB| zU&Ft#eP-*Q2gz}N5Sn-c)Li`9H+tx5%XERC;%;Gdn&F2!pXiKij%g(iFQDST@QiT) zfHnNBvbmmNaQMd5v$AZ?89*j6Z8mjf_Zid$AA?LFpixdHeXm)sOLAW{0;OA-Dhq6P>M%)kD%BG$jY_zk{zaE^Uyjq$+GS&jO^}9qO*aI%YQh&i znOB5EKvV?e)CqF`*cC%y!lg(AOF$D$7W@FKr9Ix4qQkX`&2Z;-AHbb?$X>9KcNP>JEe}=_EV8W}sX+L*Uet=nRI0+mg;5cc4<5Wg6 
z{96}W_g^#I12fdg`-6*4nW)W2l_re-)e_EVWE@!orzvrbJ*p9Gb_2rjkETXAf4{sbs~vujZo#Mt{QQEpQR>f9f5) z5(!fo=c!kyHmdPW8tc{(k97_YT*Wx*8gFwfIP?o%>4%6*QJL@y{Ng2}nE?B^yXCV9 zA4WS9XoGFJ9DffVBr(Pvs53aB5=1a z8`~4(fTMCQn(Hl3wRK1fgJ&2NLeaWDa<(Yu+Svv#+e`jj=awZW;-ePf8xmNEEgY# z>UWR?UDgmNn6%?y(Wpi&CZBpmg0Na`(2Vm`Zr)$F{uatLq&&ELAR`cN9P1wG>$9|}iLTcmQB0z!%kjw?}G&?0i~dQ4OE@TZF3P1GWBsqwLZt zu!L4{C_h33WyNK{h&I84Xa$Stao}61X5Wt#u^0(r1&gP^SeO05e@X5IC*D*T?Wm4w zKnr4mksx4q&xEbM7$=@6s#C8fLBv{Z5aV2(sWiJ^x(KYcgp`(tOG6LPE_23e_IX80 z2o)S6`&i6FJzk#RBJ%>r`8}jU^?9<}%koQ@kl+cl#0o?}1$$#{-INJyjnr6F!;DxH zj071Ov#;!c-D)1h3Tx~z&RJ#ypISa1uwINSckD#)45I{=6fdN@q1tQ>3<~~Aww7{{hTh^B~6siF!Ji$m1u)Al%6;cb;MXyLetb+}j zadH?@hRF}CwuY1eG1?W2vxRSh!4^by3GCFYjOINf12F!@DO<*nfPMgiyZl`V204E@Y zrr~d3w07DgaR(g92*kx-J4;q?YdoOnE}*C zd62Dlg)3&zf6wUx9JU|eA`*BKS>Q0`u#*RNt8zwXs-P7e3*5;Jc*-8QhG7V55AcDA zBge+TcBK#SLBL7mJxuwL^p0>2*x=hA0rxjtI6{PBe?ZxXDvHnvN>5XY35V{4nILq# z%+v*k@&T5hz-rkEFSe7;O0)?g)qU7x!E;u&3q3W(1RZqig0lgZ#e+UBp*x^-PfvcW~DL=r398L;6owUH5 zdZ*R>boS>Y)4c7vT6%KtnFpgr;%(=;O&eZAYQk41;eG6^6BlqTL?+B&dz~1nq=-Si z+6hP%8?c4>01y&8*Zm=SA&ksu>mEw#XEj^PnhU8W8@>;yM zzq0w`(E4Px1(y4JLTSH$oNwhbcKsHIR5Yu&0fZeDO61(aN0ifkjAT4uk z9s)8z!RRiq-6`ge+>Ho0N#p=OtUoJl9o-}bMhc~6M2i#!vi0bNkz#?%8liiWU9c0& z@8QVYO%xcBz)ZBjOsX&7Kga!re@tv`aWbVFf#$Rk4s7kV^JM%cq6)VE)>tdxJGf?p z3A0Cy6>*AKExGkXpZ+btrqKL)UBLQ#v2g))xkIl+m=}y3$ zG~U3qssh`EZ+W|#SAkhv<7Su#-7&TahOXQ-!D!}OIRq>Cs>PxZ(_;yilXkb+89bC<`MwoyImVfL&fC+2HK2YQuaDqnQ(Ze@ z7nC)&ih>nfjJd$vIkIx2e@Y4lT4IL;_svKyDi8q;7LAElyj-yLU1L!VGh$6J5@clJ zVTI-95tFDYONJeaz#AJht9fvb6`J9PLm4L>C~!<)a45eJ(K+PO5j`t-5Ut=Dl?&Aw zzfA&G$c9xQ0w-Mya29OAv99~PR$ikTkd_mS1OdByCM-?IIIKIVf3A9wfLI3`bmPc1 zt;Zd!9YV@hr8Gr*_8?-lg6DX_?Kb-s1y?c$coGR*L{=D6{SZ6DU1&`*O&XmX7bkaN zFlUfv!gD)3W>kPa=Vf$O?z4$!Db;`+3zLG-)&^E?3v?*s^ApMGl>t4##!_ zGeW_eensB?C`ttIf2cDw$Sj8Q1Iy_+{&f7+A$g?5XK4nGBpI#mmv^0_JGSO98}EB1 z+z9swb}~{|Zi48#ea<^nyORgJxm`a~;*9D{X$!ap+Jv)W6Fwn;DrR{DU*rZxED}U4 zi=sdTSm8;6(+Wr39b}|kuL4OhGDZlj@Ys6;y5@6h-|E8hD=z_5D6@-3U2IB<{+yZ7AY3d 
z97;zXAThU_-rg))Fo~HmZ-up&g+$~)S2R43;NgYYJHfidHlVl5Y_pf2J)mO+3RSdw zPR6HJE_l;Mf7-{&A>Y9@ekRP*HC7dAIf~%KM8so@NN@wHD+)wFSL7T5R4I5P%&m(> zzde39NAvKW0A!OMD4l?ggWH9`)0kkLfO||d>lMwWVIFkH*d`dd8x;yf_bQbQ!3y5= z!R|7O1&f&>UEKnYrkQZo6F6gwDs@J>ZW0)2y-g4Sf6`t^n=Dmu^xEh}UCtTIVy`n5 z*I2V9=4KkbaU|?y84%bjbccI9-W?giA}yyE*y7AYudS~!`-a{L5i8+E!%Cy@^@mLZbX-^Lf`2iR)*0Ix*iP79S;Kfwn9 zCz1Csf0>5IkMQC$9Ve9I1WPCky^;jTLYxudw7OxoBW4 z@M1e2Ezu^(a@~i`>lnUVlzqFTQ2Pppivw_MJvISeU*HO0l2}h?aBd?$z(pkRB(lI^ zYIk=Zez-{JOrJJSw-H_qxRV+1ls#|}`2d?&e?-F6R)2O|^DXeT^&YM;9x>{^R3eQn zNk*sCghON>s~U&_2u2^ku30bM|F%v zHOz=L!AOviEfFs8EYw4l1lq#rIj0uU ze+nK%D_CgCt$l1!<;6X5M}k;^84E<lC!V`=H0lRx9T=m{U zb=8Xm#5&lZ8%J1MRJouzhLqmq>FEJQm^0et!3s-09Tu`+jm#%WsDetf#lwotSow?d zp;1Mb&0+N!s})EUEi&CjZL0;|bo%V$f4X5|HiTP1wy5_13YQ`gOwL00(62heOZ$I; z8Gp3@H!ZzWD0er-$`MVzuxF;DRA8iV%BP*1u?fdGdS=hO7q;5}3rK>tYbIP*c$N3A z>g%Tb0JGX~5*U%)NedjOI*I6we9xiUfENwlo+(Bo@wPUg*3DhuPI%Ly)H56Jf9MDKm25}-_T&l}dBlj@Yf5!g^*23Zd8J~R_L@ZQEaMo?fJCq7UfDIg-;{W_CIL?4@o{zT*BU$e%S# zgJ0kas_FZLM)}F|16mZIlfr`jo&OMoBF8U3f8>p{L+YRZmVXCg`tZZQ199p9@W3Vi zoIo_v9nduZT(&>u3z$!*hXozr{4m0z)*XC31LWTv$glR#5?FKnf0t2yWwu}a-#Dlt zm+Q|Hh(#L1EAvgZn?GaOwX9!cI2-K1J5LleD8w0JQRJXx8V9sUf6wd6q4l zUjP?Ex#!U9INQ1d7%_hhnft%Qpfqp4E|XS%4Uiaq4RB09_j%FIukq~MM~)$b!~Uy3 z+lLHBgF~ONGwP(zf57%HKfLjNy_nComsIk+xpVp&8~NvLjqo#LgV=uuZ7Xanwz=p( zkv?&uC7CZZdtB~UEePPhnIB5~GWlB!8|{DNzmQ>HagCCH(aJ)skbmeB7Q1xCcZpN(9k=cgn0?VR5;0sC)a&|eCP6O3ajC7(*=@qQ~gMpyd zWU!ll#q7L}2l+x;vkTRVQWH1KeHr#gjMWXq+w4MZN%VTywIgtY#C+E@yOgU7oC`8l zITpqXNEcwfe;gRURz?7+H|QyoY_MVBj2g)CZimoQmv&PYXszvEAM8}zA*ZE4`InJW zN`p+*)s83*Cr;YHR+bsiL6xgzB_CrktV*Fw2I={BfeS*{fUSG7UXq-UBKYS)EDHyvDTl33o|;JdNDJn4vJ- z?H!Sl!3srlq7q-w#$QxLNhV)W;r?|j{<~|FFJMik9V30PIIIPSDj`(M6)iRfNa|S1 ze|*%to!HtyCLz#pSV2uAbzr6zc$b(mbwiCO8nuFKlAVF61xj0P=;@UR8019H*cK<> ziJ4sjPIT&l;x#eeqF`X9aI=Hvii;j6Orgt-8JfkTVRdtTbvwsy*G~3#zLxv(gvwln zYMMQR8Ce48*%GY7jZm|8FD$7vJn$8IYFJy%nav6d9Z*9fz$GE;SY@HMqS9ftu~ z$aq+K6QXb!q-(Q+W0eJB8M@gkGxZ+mgzNSM)vivuF||Mjqe`Fw| 
z?v8rn(u4rqfhU}-^z3Q6izn(I&@8{G7Z5X__g)#k7^vS+(y?chhbWF6gfXWu+>3Uu+Tz zZ1Ns0ekc^T_vp&4eP_2DtQ?Q#f9S}}&$6%wGS~88g-O!AqYD5&f$!F5tq$d>>|v}m zFHwcW4Jd}bf$SI$8b;DZUPen4ZpH1{f zSU)G)6?%w4W2+p_S6>gB+}^`FIu3OT3r(w{TeZ;FOTqEbLEv5^C;?Gn9VT*_y%WKEb$7rL+rlTd~xR}8d$3w8)9%jpS=dWA7W=bYJv@(d>= z4!V7YEmk;j!^oYR>Y#J`9r-oQqI`j%VK~_>(0zg8+{9KKw4kz6I9)UH70ezZM+A%f zdZm%nzyhOyC^+J4e+DOF5FNE~z!3?sh}6yZU7@aMN|>jVAv@V^AnFYMQ;!CBGV@e;X|FpY*d=EE32C z(JBjSps~m)6mKY|r-ILd$tdC7w4dlf(3A46o9_c$UI|)WSx`RI)>N70{gIPMbMA$u zFCMUtRg%DI-d$ipno=f&$Yw7C%dXk5!Mt(Nu6_cItukB$C~$yHnE}#K^GC?KAy{CI zJ9}OZjdTcPy%gZ}n2X@mOcRygMlaC6VKHG*zE!SA)lRlO71G-2C} z4QMadYQ7s;e;`e?2xXxoEG;VgfEJ{L7my|sv?=)Mfj-eSq*0aeZpYrhg4#_&p;a3b zWwzaxS1AKoMFhRLrGwUSRABW2BSHet9>+%oH+wNmY@qAh&A5Pk9RXS?*vAI4)M+c^X6t>zN(;EvK}-y>o1;Cm(zNAf3U(8Sm4pXD zt_ul^K-=v46{MqHqs!i_TaO7xICb%$DqVdA6(WX`AOs3#x|l;cM05yi!N;6kD5c5u zk>kd|GCL(MRBwGpyH;FarAVFDWYA!PCPLX3!$%nwterwS7Miw_ZrVZ(;wxARjj>rq zNTY!_1a)<_B{K>XH{0nyNMEaipefC~8So zu{kl;=)5z#P%h$(L_|cI5wXCDAZmARR#WGe0FtnR#!mx{R>OW$f3V0yDbR zd=+li5}JygXe&-^iWwX84^vGr1gsYP?^Z&=e{t$FW33ueC8i=3?+47LJtWQi?^-o& zb*OI_*Yc|w+CsW%Qa>NpbVpb-v|CZKz!XVRXb#rNyCV}=hUscc9e=rQKp`)FIthze zRe&Lx3nbs|>Ld?FpkURw4f=8ITZc{X3rK(-^O;|$WCrH@g{DlFk(3YnU>Z_8mf0i? ze|i{6rA~3iEbVWEhpk1)L^-MRW)giYHEc}s9%U#R#okAlqxhlO&vA^cbqQvn(G&n? 
zWPy=EWlLJ^g9)R%z*S}hc0#9bYKjB=EQb{wDRz$wP3RD92dNP-UhH9G(+lPDPLG;_ z%962LNLYZ~YO~ZJ*YILt(M?8z=5xL~e|pv>f1155o%aQ^^>r?pU8ohg7bTOV$Qt{> z!hM1vP=Zc)(Fj#bIDTBfQob4GZkM<}UWeVSEi#?Sr`WPqnQ`=`m`i8U*&^S&IN`I7 zoeS7}+3h06L)>gY%U8OIADYN%PF)aa>NzchA^#eEGFl{~yKi@x-O&v*rtHE+e=yyt zBZ#iM+-DSNxpctM?m>yj!0rM0XM#)9aY3`4LRkvU7gjRcTzg?pCb|}8HVbUHKNd8a z4$Qj8h;9o;`EY4R(-j@-v0?1wUPkSe+3XPvZ-kUh!5VK;j@gAWDAWpl*_Em81kerI z!YE3z^6otvee-mW4XQ?j*4Bxze{I2p29%72hS~5*H6eoyvD6GPNg^(WdS>>55*B(R zf|Ar>+M1z4oGPVvXV#E;kQkWGp&AI!t(uC&tTq2^lNrJuC@@ zM$1TPF@{F2Iu}&*K(Sd-J@zpch3J>JFD>YSou=$5>^SK?JXHbfNMFxje~vuB$w%N9 zJLiL8U5+P?57LRRb?`LD3del5shjhi#ag0tyJ{pnJPQ7j7)C5Qf28QK^+eMYCRSAH%@oKJzUb2#=zlFe22?*!kO;`(8_5dg z1ny}%6itrqdNsM|FFh0%o7!}tO1VPMTIqMX8i`^s>uq8Oj}#g)V-;4LB&R~8{jPkh z62soJkUhYyn=3)&0GFLES!0GBb4_@T{{^-`iwRJCc-jjH`5I>u=QE{0ps%Ko7_WUXBy?4`9>r!cdpq=hz> zB4}DAd>=ARd*fiXs*YuLp{NhT#wJAS1XehXFuuzm>^y+S4~44KvKYh9?oF#6b{QE= z2Exs^EphOv_dm>ihdu?Gxq@giJ;HQqEIvkPTVsQz9vXjMiR zEz;d5i}&7mf4ap+R3(D@6wZmTZNY>_&>Rs4+L)n>gzY7mJqb3%LO&)+#Klmr%wAB! 
zLLU*~Y{}SkqC%X%na`n<&l`!lqdvttwL(T7mdPah%}YPe}OcV_XT0ShjNQuV|DIgK1;1` zk}p>cp0KllRckfOc96G2B6JCc8Cy>RxOBti{MdMNLE^c^5t@S&AB1M~k#<wqWf9VS>jshc(=7uI43Gj+~s&}(bFe^N3 zt^L4lU|t}RHRVD6$^!|fZ5;Uw#0vAI7smg}J5yiS|1G$P{QALP*`Q8>`U5;=0qaub z4@5q|2O^7r3;fr-g?YQ=Ul3TyL{n`=O+@D!57Z)A&dDCysMk9fp$TibZUK#TA2W7j zf1Sq7eiF(uS~G3}OXKeP1S4Q7uAxK-YjBL|}mh0|!w8RYhk9ZJ`+p%pw9}H9-WN10%s< zEOiT%opGbr3fjQKDy!xQyIG51zhYqj07D#2W&P_itGdm7H@XBXcl>8 zinvFSp!FWjxQD@aN18Q8PB051D(_Ncr|{sEXmFztI1CGBv)w^dPNaw3BAP`P<0Nfz z`zZuSF<}ymqR0^vs6`IW6h$FHxP{p}!6Gpejeo&3@xYd$oWRx}Jh{mD`aqxcfAIn< zU>LtOpU)&Q`m)2^V|a(fg{ESN9AIw4G)aa!?Yx#{rcZbc%hW;3=rn=fz?#$d9VST^ zu#Mi(jwDlVqb(wESRP=K>;g(4fwM{L4}Z4`_?=CX9xzqXGyD1=R=nFR^B{J&N@K31 zhZR;w!~kK7jDjFvukjIH46qV;e-F!HLa7^9OH2)tZ|V31ABc$X6Zk931zrVQhhSRbsg1?>l+UwLDtt{9J?f9t9K6gb_Fe*sI0VSD~V zv(awsW)G;jma&%uXsu*q^5g5@G&@v0Ebw3Rw&?NiE-VicSURgWEwNKXRM|%Ee8}~g^aD1Y@=UCH$yI`X}WAKteRK*G+VG*oAbe}Rdn0F^kvx;%)HdMKM0UZ${K_jrh zazY4jj+luOk=CIsW1DkoWUZVearY1B))yedVXe6jIOh;)e}>ZnJWBswVPP_s)vq5f zy%O*_dEx^sP(mc_h;+6GQ$fYi5z%D>TQ+MT(XM2HIYt8(c!0Og7WEJGxo zW`fu&C*U*v3&>(fuD4ea~`pC^rEYdb%y=xagyT^E7 z_bwg3$3l-EUWg~dH5-Q~Im0$O!DFk!!*ll;?Z+uLXh~S-Rg$yd7CjU8g_9iao}u$( zib%u5;!@kG>$7ltCts7C75A(vPM9{-TjVP#f38_AC{1nY6gOtY(rMy(gY%xi*l~}1 z>fNQPwl>$uC-BXhZE1UreB*D%M_07>$Vb~vLFYJQnS5@TdiE(4ZDrFn8nNTV6{(fj za74Jr(1Na!zv5Jgy=a5-7LJjc5i%M|j&r1o=7pj`>OId-Jen6eBY#EL$UowH!6)R;cQKtb97`U7NJIX<{kOF$9SN;>t)8$aiT4LJXwKA%PJHn zOdIMg^8M*9(jMEMj0mEW5OyW(E^5W|fA;Z=K1Wlf<{Na4EPoW9^bgNy70cEsQ)BqQ z$#FN<+MFmFI^gb!p6BRPc;0&b7}C*nk1VCYhH~TQSVqk*htXQ-&t_Xe7CD-2(bfmj z(}#=v;3Ccq#BM9J$LJoYu-ZsO*X3sHhI;!A=VEa8eeBf1?T3 zQ4?Y8ULa$Dii@D>fD`OF0D;jaC8=5zeU=_E8}lo%~|g2zs*jmlUNqm~Hm z-D8B>z3U}?zQj|D!Q4?iHn$V)38t}0k3l8b%@g`tH1%Y~`3Kw3iagny;63`sDt9{2 z4}p?Lr0h9JJ6iwF737N2Rd@9Xe{VJ@e7s$_(-0g%`E5moG?;BeW8zaI@}Yf4J3K$( z=#2RMf-6HZo_)1?@2D#sqvgIoxM+Al!*il!VUy>}=mUPRpt~j}#N>e5o+C$mwBiT3 zU7d)5#`6-QYKbpC`-y@HcdE2+IZ z_JCXUp-EU31xrf6Mu?n-zreHL~!E)wa`!94})JC>m+sX#i4u!Sf!&jE==Z 
zIUGs1qKfs5ZD*|&Cp7`H9zr>Cbej|natxU#+HzK&ut@tTAmfC3?kV*$=sWT!cigyvJ0Hu26#NJcLS6J0G6Wb)syb+4W|$Zdd!HuhBS`KNobh zV}mLZI~p1jpZ1QO$Tz60 zcs!(Mw@V`^J@O9&lk6$>oDkj&v-`@8eP_a*WRwFKB4UlkoUq;s*`Ie}1Hk&lZ#%L9^OeN^@GL z;av@*Gh-#`3VR~ib&c5Y<%ITx^XSCw2+|rlq!C&B3G&557iiaWti+!`JjDu)$g`dx zjlUXCgwWpid>kDDI!D158LemXoig@-vP(K?(MT4W_*qN&M4Kjw$Pdc%E4oJhiYr=% zZBf49_@%Em49Fym`3rSaA0l z?e`NTNlHS-jHMEti3YuoFL)%kWJq(i?yWV1qfw&de~!vglK1jO^1Ru6&sgGwX+u4c zpSMu&Id$fh-v@lo_yxCaM@IOBPddRTofQw~ocBokZ8heNx4SJo87nC=&2=|%=UiuO zqq}aSz7O?UMY7bc>l<1<_pput{fqjnM~G z7za&FaKOdV;E3{rRofYz&%i;+p;gjlE))&+xU(LD3m#qc{hHsQBkx&++!YqxpQn z6MMnsNmkOI*NA{}Mko(ChF>O{JYgnL{p6#N7E(A`ch2VSI9v88mXlj;~}E_}~6L$x4es<>%k=J;|#W`~3C=vbOO3$^Hehx9gu` zi}4lffJgrx-pA=re`w!=Rqzey-{gCP`*rVM)B9nz9FSl8ocq>(^=CVHBy>l}ug9bR z>VNG+#stsWSKUrUzrUEzRwG#7Ui>X9e-O>K^$>Zla|7_S>~81uxEj#EtoTj{zqbDC z(G}ae4w*B5ZAkic>qGASuY0HR=o23OuQB1?wY_=P)cpF5zB5wlMLl~wWSsW%hn2m* z6f-q6Ewc6_tmo_bO7Pa}n)GYVuNMn8XC%h|XEFXS@2J#^zx9l)|KWc*|MB1beC|gKp47{5tv{CWpDb7CLL+$nalpg1L@RuW*uoaMSMx2?KfnvB%HIDd zVm9Ob5)LWSm;n^?WYQ@J9S{N;MP>5f4QQ_|$q73mx6jc9?oHGhZITzTB>LyibVlte19~R{ZI>CPpXm}1}Wugt|1DPwTCeKa ztYTemvMQ#|H3@B)ACv#~BFS4o+oTp%;!*^{Rd}~2JLb)&u20$+e@E6rZNnTBEaj_9 zhXeL>dNV91iV0qFHxUp$#VYHBd)mNWg>yQfIsfd~&mT@gX<;-m2Ph)4#7OAcps(mj~hp{w+}NpH1~iX;7yy%`BO$nlJH~8uyfhDLC&IAFT&|A zYBW#V)L_70`Qm=AjYDPldzR%T{|dvFkHn^#wPz{9f4BkkvtS|(pVpRH(2a>AcG!&Q zn^2@jIpR!^(OM#XwGV=I5rw_Iv#Doa504d-2?SEEe=LSV?;2%kKp9EZgwBdtXGtS= zzkE8Wz@*$U&W)>AoAiO)BhYw9AO~e}h3{rpZ;iPLIug1iNymvWoyjAg>%2c!-BCk?iPXR6D4UYe+$Ws zrdOL0i%$4|(?2?&dZRrzL= zoBd08b+@cx0E>?qeUZ(6aE!apmk-kvrBv#6jvq?T0clO_tD3Km_ zD&>B82P40N`&+`*aXzz&u`1B^6*wz~BgjGv_r2PJuCUYkYr}2zw9> zF!HNmPQ+R_oc5lJo`!^et0$ADWr=Jf0>Um7r^NYD_VEC>N-JhTe;`(s-MU^zKYPkm zkL{*j1MN*vCh0lfiLn%GG0hUjJgf3~x-Jwk@>VA3UR;!+?%{9ga1PYXIPV~l980no zO&{wom(BlE2^r&>8jkCi8=9FN5K0SUm5Vwz%Skar&pLkykZZUsGI7QcL^c`awO#T) z=z?=?pRXcSpfEXofALx{#!cpLQccvWtnfGj%^@m_k7J-V4+Sw|+5!={WEmQ}!qJt% zFE{SZ)=Yu}xEvY}sHDA*d_3sC92*-JUmebkk<{9Vq=ho7fH*Uvu~vRKna}lS^NCN- 
z;XEe&>gE;~i>l%&F`xQY$6M(G7Doj)UN;gf$Hzq~Gh+YHe~qP1?wq#;B`aT#v3B4X zzZ?ADZ+ zsI;17iJQ{Mf3j%NU^Qgv1>}P(6VtXpGN`7v2P4RK1;=1xtdhOiamAKX<$@k$)xSTz zc<74?6K1eRx9Nb$?w>}3EzeF)95cm?i#=MXOjI9W&nN2bG~-`e-auk$=&3^yYp26u(OdkY>Unr?1eES5>%LyC(y^ zLnjIFen*|8qi5&L2*<>oZcPi-Ei@#3itkpoNXR+5#^;f z?a;klpra1CXmi{{;!gEF`A%$VXlX96BTy0%d0vW}r((V>g`{L)*gwxk(xO1!XeEn6 zJ16i7UlnKD9VjWgtqM^MX{qCxyHc!@Wr*4df2ee>=vwZgvlm@%G)^K^A7GQ7L{-mZ z8g&*5WfeC?B7#}BkNXNnGcwrTlHUrBX8HcGD?d9YCn{o^2`b2q1BSqac_{m3Oq+#8 zGfLQvx);oqFWvizDZewnO`oWfPmE}` ze_Bu^MOVR*--06sc%8)83P%11C-7p6#sMc>F8qC$bCGXfa0BF8!^MzNuFI79+`(T~ zvsxBJ_dtME<(dSG-CElPHs%!x*)p(it@0_%DX z4DVPjMfZXVi4-R8O~i>E<8jSbUdD87fA;s4l9E8vjzw}ni$&I@<-Sf5edJSW%Yzl> z0=p$+2~EYRtuQNr4RgU{v>SQR@7k(m{{1S;lDN2^zONuX1f+|Hg#3iBi9Pr7lso+e zp=O$iDFU)Mdtemm)mX~JvVd(#EehYmGDY0jme)in91}5{QtHV(%{iJr8F^K6e_b6d z=~@^G)NGW>IxIBCx;hNgl_lab6ep$}lm}s%UFbnj`@WWdIQ;}O*6>Z>>6-=SOBY{? z2ToZYG$$({PInJjJtrRza{0Qz2ev8kRNKzXpI{qr;=h?w?Wf(RgP2@lY(`&ISf}lL zI?EMueNQ&mP1+bE3uF^TjPQItf6*=Y>W1n`pD?3Tn4vmB1q;Z)%r8``F&i41XD1n& zvBVx`N3t0Jf2w>|l}9d% zKSc(L6T>^f-?Ga@)w6MUMixaL7&UpAtJJy=cgNd%FM{s%@z%p`yp~&>Fy&7q_(ys(_OO5 z6@1m%6lIOsjNA#XkvmaVe={2znl3w{8B6TZs1kotSJIiF`D7KE<9syWXf=cadYk}^ z1@>de8o3Kh?QY!|rmVZd@kSZ4+!tpQIwO}9PY$)&Qf8vm^I3N%*hp|Gh zF4)G&b4KpO@UF<6=#1P+kq7o@4D*}=xv)UkQe=Tx#TWuVcdCa{= zu2NpR(sqysqYcAY`mJ^dYiNP54k7$+0kOee8%%6rxS-_CwqnhOrb@vcoH!gEm#bE- z+CS&92_o>sHLXwKS@2a$Pp3iM5UQ%LX2uB;pbS1yqLK>QXlmwK{) zOD4VKVwWfL>!rLBe=q|LU7Bx_@C>Yp9i%Fh$M)Gpky86YoXjU^zT=yO*V@qq$E-@y zn@o#ns`GB@fni@>nJ7W`*+p_HG`SbnO!Cw_7DC^H$QeZ>Ctvc`NN9|;z)k=m)bgAL zxZn?&B(wxGIn=V^KGsK~S&Se`*~jyKIt!x$>7klc%*Un^e-7P(bp%prb~LRTt@O0E z7gQ$Ppm}rSKqKyWQ;Qxxf1iSx!uc)Ai*u)C?)C;=LExo*j7p^Oj53rbEX!-}v+aK(P!t(=sAmaSk zqy7`T2)Gh?e-Fz{A@Y@Dhy@MihO2eWlUl-Ua|W?8w&#NVG_$V7%8GKv>9YeuDPe40e>#piFR5^vybcz2nh+q@$c&Nc zu6=X>k>yyKF9(weE3BOhEYC<~H^5-Umc+E6UM1OcXrtLh^=fPZ;jbx>#>WH^pnwB# zJ0BOU5JC%G!4aF0-~d|Cwm6(Je?fCv+DD$8%JYwpI7&s-U~Z?=FmS@;2II`u7CE2I z&H-+qe~b#1@=SeoK$Oe(w@WN7oi5$dEZr@jgn%?s64D?cpe&%2bUuW%fYOMxiXa^V 
z64D_`NuwbB&T{YlzCZuK*_kuvd``{GGrKd7X<*?a;f1E83CY>fM-uVicLKAs-!VRx z-Hc3Nxp~QIP5cy*c^vz#pZ%fU!Wt*`4LwP}?u+~^-MgkM#wJ`Q`L^XJtZ+fgJR%b# zHTUOEU!7K3U0=AqI2?JUsJqci=!E`#Qe@xU)=}aBDOCeE^P?}+@~9@~U`NF$)J#cx zuOo}}liJr-^}|+O(@RbdLdrewIhOrCc5J`7Yvz%e>>0|qdvJ*$*eBiM}&g9ECU82wVY6MR5lD_r6FF@3VIWTe5@yRswgyvpZX@g#Mjym6HR^{KGm(`QinA%Xw&#>AqVvh0r_cC z%XD92M9)#!@MN$ecH&~g-t9BJKs*TU5jHj7&75|eGhWkWB{(})PAxg_tl>f2%TXcE zGl&^^En740NY1YDNFcX>wrYiuj&f`+WvMluSMaR2)SP|dd130l^*ibAE~Ur4)R_6z zF1!RX6*oSo_!vpdTa~z}v%W>$sjcC4bmgrVtCNU&)7Cddi_sTxKVM8yuTk5JLKF3< zz2>v>s*1{8GPVKg@=1q){JI*Eq`7F`nTF`DZ0pBm)dL({pEC^B3P`>t{tf1PZ%f9W znDBNfGKAEN&+A^IVW@f7?(4sk8~Y--gDTw$^um-YC;KMHY*D=8Ek$YOr~r-c%%V@o zCxX_!1V4y-5% z-y~T#K_4pev<-idzf@4A#kurt7gv~F2yP?KZ3keqk5z-I4A3!aqZRW()7< zw8jL5&4RcLkL4oLW~#$zgM01CsYMT_`fIA=tGbN?JqZ(z4}bIj>RXG|)tWBTd%JZK z6)gH34NF2G1{66Ld1E6&YNtarN} zr$H;=9s5x=!>*RkX3nF)B4;C<)bd zglYx|-)3~`wEQA=Qht}G?3sv>l?H-bB{m%JhD1?pgD!^o(T_bmzddyj%Qhz}wzcafDI0#h&tPLDThzl)c~`@XygY_9!k9s3~m(ju3|?_e3ql|KpGpCLP& zl2ghl>o`Q>jYW$O;i80!8;BE;=NGR~k!Mw?lV!%B)16%H)k7`0%bz-Z?BevR(BLWK z#UrL?=O^(ONaN4SCEu=}b<3aakPug$8=YomYp_){9P-r8-1@qpZX~c$8=d^6;Ye0= zs&h{6b^nD9Ki!4gw2ngy`K_SB^FjU}#eECPW=&P|7e;+gk6TcVH+HZxeE%3tFZ{Tu za$!KNv|f&)E`JXl%lkdqGI}(0xg;M6p(1eIb#}F$i*JlKkPIBFzP&%-U|86ByT$!o z>l^CS<})oyr`fX;foF5Ad$)-n6xLHV;mAH&54k)-S|VD1D;#}tPkt-ou@p@e6(| zyr#)K3yH*YvI`~G>k%zm0jrIG)7 zW1=CEl8OBDdzG6Bz2uQ90s*PD?UPZd3fMLIEypx04;_N9%Pza7ygv2`rWvj3@(Cps z2p?9x=@m`)7o|qwL@`0=2~8aE?FbFiar4vuFym}7<=xb0D!J8Jo_fPQq8(>gL-ejE z)*nJCEo$X&3cTEJ?ke|seS7{6bwjKM94{n^&15Z2<2A^!qF@Wr{vlvB?}Joi-=-$< z?)#DA1>OjdU$bXru~@M584CZ`d<}v-WZaC1JSrGNKCKw!$h5RgUQqksfb>tEs zFRg?>zvQ{Ryzq?Yi*cKB$lE6L;Sf7=5o}e4-6PVqGbt!>Vq~prnNc4@O8akbhhvu{ zzqogJTU#mlGmbsfcgAPgzE7Cz>nBv5E6?!a6q#}s);v3VY#?@D&@=aOoCQ36*xRVd;1>sarzEE@J^e>dUq4;*!%wj{xxp!iZ|anbS4t;EFeVb;UZ7{z2;cXJ zz`dgpzLbWs?h)@zE3UMge}hteW$NJB8>d%aeBL}AzH#T~O|NXathK|s zCc5^7(>6b+*Q4f1M`%#d=Rf2QEHvGvRy{&;j!k&B$t>c3ePGlDbg{AHXM zc9@3x^OvmIB0T1Kl-yQr%VWM6_*?NzzmfZE%4{y#1=Xi7YVsN3VojgeF=d|%y~tIF 
zKwaK(&(~whiknuv@g0%G+eu&Zx~p6=@ugVU`9SN#@`Wn)+PoSPGpyR`<~feESF^@h z=8G(^zKj)`ZSnOee5TYrV3jiB*NI9%kp_S3B0PGkEhji7ks6M2s6 zFxDRPjP1qN`_NAwAVDsh@k*J-&lO*o(vInpT7r`F;ob#)YJ0CR|I}QH({|^_AJM~j zeMaSYC3mK_jzanO{NzlYDUz%1=mvPxN}X(;-Q*wQls=}c$C{lY64lHe{S=g0QQou3 zI~bBPr%Oz9XMIC}hP)87e^bf9JDjZ$C7STFDHhgFf4rMvg-arWW!U(TLc=T#F+$@} z()@f4-}YAo)`XLZkE+j__Wnca!Y66MR^}KAKO0ck#`|L?u>^x~z0nuY0(DWb`TSh8oC|b^JlhS;y~+1T0!+M(LbVxNB>v?;7%c zu;WhscpLrvQAqJRC-#ql)yN^8>K_%p3mOFU3$sf$hBSP?*>>y~$OJHn29@ri#$~5N z3$ESErPsaZ{kZ5FgJZIY@fpTRsKz}7sW4RjC>C?6acuRA@*|Hab}l+aYUjER)o6Q* zsFFzU15F=>S0kMLJ+fPE82S&v$+GmS*r=eL;jmSuCD{!w6AJGe6;2JW7>83s-R$NI zOU#ZLEZDOs<_irNqBn47Vr*CxQH1aF18d&h+ap{Z$gEXx!bk5iPT)c7^pna^<#`n2!%)pFKVbCN5CWoVs| zZ?o}jYo^nflVj^?K{G;n{Iw3x`2bAM6;ntJTK=4DgO-}Jq9h(AR!-|GSLwK2Wi z$mrZt;SaY8-E+2U6vrbIKFp_~NJ&)`92F$1hXrrQF)ww58@r5ZDH(@P3(pXB#eox0 z92nYqdR{M&S+v5Vm8sg`9ZvCF zzWMp~_8VW0L$LqSlZJ3TuoZV3C78=muthot(|Jw0@|s(@uquE2uP>KeR6sSwIIR>w)Z zPZO9a%MnV6H-b1nXBfuVHEgpMwqc8hSklnWVw>limFY@nq6E$;VPBi3%VwgM=C)F% z!uoVJ5f0)>q>-rGk+9f5pR=_(H?c?~^Q?+v)`t%{jc@BsWV~S7HMbN3_7Qh*E)_UoX0}W*7ha+G~C3j79%*f z>3r`The|$4x#WBJMf7~p| zjQ`9+o@|M%3>#7*4i&zTuH$&$CqKe1AK%A5$H|@wd7HAwVo}Xezw0#SP0A`xYOQ{D z7RG?>qi&yq;!Qj9Q1#Xo$zusn>})+;zW1yvG*e}|eOM5jQxYk*g&Kr^Q|Sy{-GtjD z@ay1OWrlab7w^w;j-`g@@h-My6M5f}TU*#jv;JLw`2vURvKWdPuJ9okt4^2bJb7%o zIkk|o@IV&KH*B7JVP72#Ml%^swa$GvPr8RbaiOTo`LYPVb!qCt>?-ictThd}I6I_A z@*Q7fSO{#c#|KzvxQ;%15wA2i9X%*U*6BXk#u}zJV9ahq)q9V5mX$ONf77sr^8OA# zrfsEYr1v0ynl{Ra@55USp>N=!SE#hXFEAvFXTII}Zl2{yqqU}+zX}Uh?jMW>oMB^W zMRgR`m^;2S*q?3;T0E-JDIh8pe&{U|T(-m^6^$VhQH~t3sZxpd!AAyanmX!phuY6P z#8wcTzJX&F9U7(mzoTXerOkxxU#GU5TV8Ua|OOlx?X@pv~HoS&DzntDT}9?b7Z zV0PZo&zEbY4o;c1;hAlbO3q^{G7)ov9hyt{?v&^?Nb7GwuOFaW4aF zBbn=a4#{DJ72=^Z&dNQLmR6iTg+p8AR<{Td_I}=<%k^}z0`1w_@Ci{C1j#IJ7@UZ$ zoRmmH7D+3x?!3(@$MP+7B5JeX{4su+3k|E>=LyXS+NAM7Cx67-s!oh5WkveE1N`DC(Ts%9p&t)>7F%v73vV0a=wi&{=J>t8R#yPJ=ueKkn47% zB7)zAyn7v)hzHO4hrZ0*gQ1Dn{f)kzL2>h#E)(GnqKDK#@tzwBjXU671YO*9)hnIYIn)x-BzAsQPP32Waf5~FB 
z?+>I(re<6TGg;qhX!X2ulWK5H+`3S1k)gX~0=Azs9x9|p`uWuLeP8wG{B44^zwipm zik>W|rCh^am5%nauPZhjw%Li|!ooK!_Ctkn-DWq8i5a&_ZVlHVwyy0(ed!rs2%*2H zBghi9W0hf%PBC9Iz;95#`NV~GGa{b)Q@HFOMo*1zZdUfmUeOtE^Kj@~8D6=t-og$X z`Gs(6uY82+b!nUP=dlWqIl#$$rr+L}A7kT_%(co*6cLx$<5KocP(;%0*;}*xkyW>P zRX5H#ZYqv+skEsuDeqffCWXZx{7q-OW_^EA_C03VlnA39eoLE~{E^Cy`jmGGGB)jb z^K;eh-?;?$9i?o#)iB-{Fr2y_;?+}D-hMLvdO&o;6Q#ATd0&9x$LaB{mAY3PrKqv- zNKCIbj1Ny0qA|SnjCIdqKISg={|s97o65t3LVW2kvz4B4MiK8SVcv?JN35PpM)-|a z5TD~V*fJ2>s#47m)i>>PNm!m#1bS>8&Wz&qft#ZEf-=@sE{{BMZa-rY!!ngpJfiT$ zKO4Y|s6W9N* zjyA`oXtrR;X?CP$S)AH9PmKD6Z}kAbixww&-I=ePq2DHSVx>d2M9(`I5n@+Q#BXm( zPTYGCXJj+rT|gu`+ffOE)>*}Op60NK3dNNng(}I=-W`dH+R>17RAZ^N;Gv?*+(^%B zDyDNB?cQ{_UK}Wuk#4nNY)^d?{2;{TjR#9hi`pAjV|#+1U$Dg-q&LB_K5OyEqi1xk zDO;)992|rIx0N>D?DATi{rp`;-H-<^lPEK0KJeCQ?u@^qm}Sgt)0qL&$ZV`X63>_O zP_9~h9o> z;s@u+f3hw64$iz>r-LkXP?|qvya$K2c6>J1y)Kg9`+u09^;y}ul`E2CUug;L8D0MV z_BS;n$f6r*kgILTA76DG*y8;-@+%JNXY#Y4Eo67Cm=|?`2ZEf6@}j($j7{oh-0!w& z5$4mKGaAznbA49whG`v*+kY+G&ll!DH0oG!ewdTLFsCpTbgHu4t||#$a&nwU@zHRW z4`V5*QvVLq{KaO<62!|y;HoUS{_1Ja9erKYvD;xl%%in zS$VphlOlNE`0yK8BFXq;!ZPCOWQ`I}g%81s0uKWU1D)}>rBrV=F@_sivkhy0V|X%8 zB|=5(>jmCltK?}TGv*JjNx@OH@NeV*U3a~2FS5LCpIysjv6RgcNaYe9Y@JSVW7uw} zzqG&QO3c5m-)!$wCBGDXxQ@AoGG%-%yNT1Y&B=sOCM{ys3Ed8k$)*xJ$r%}|seL0@ zJ~%v1<2s;1{yJnoIkCVxbZup8h9a?nv;4f!J>puamyF@qeFf=iYq)(-e#%4TSc9k5 zKDS}-aTpF`NGu4DS${ca`I41lr-X32mky&OAA0j=e8`)^%WHf=pra>kjzR`nJtk20 zoDizjFsa4q%Nr(mY920#!q;>vo$b3iqTB4M!eAch(a#a|s}WbhR^n=@1`vpKcnybs z!tK{}!q(JIsn!+G^|K2uk~4g!9{9i0x!NRpTNZ!1NVg(h`{xxtI%2}Z)|Uqlt=P28 z-X>y4z8Waxn``;hMiMtK%%X#3Z<HDZ zY*~}_n9h8}m=Yh_)WFO4a@Ry)1>35;H$Rl?{BM2Era1rYP>+>QCDVU2>*E30Mz{z} zR+r^r?6aA-Hv~1XMXJN57}y?~fa`}@qflQn*k<|PYtw{&W3zr&v0HG{I0>uy+i<4~ z|C7+B8?6!)+9Vd@c5e%7J25QHpU!bD8Aq8UM<;S}VGtq(4+yd~Fa~i9=`0O7Uk#?k zu;{zx!0pBMwgTuh$+c?4t25X*x*lB3Qy$u%CpG*(;bzPUos!XreGQpdzG-v=CclO$zdVb-*L zt}nGaRqzPz!6i1+TRHK*7^!sR6XfoYiE5;-Ol46rqte`GbX4rBKFsn<%yPVReshL} zzMibLP@7*@=&-6X*+}SxVOff;Fbef|isz@Z!2PNZ0q={e2L!O!r|xfW9H(y7EnUWy 
z?lb2{GR$zwIxQYibKm?$3};ME(h}+Xx>SepTmQ_^qzRMTAlG%1Svk%@RxU0YLQpzQmqSJ}DKb0TCighNs|6F6~d){6jZL_U{$2Fv^Ebzp- z2)9f5eNFR;R+YooU}=`_17^Q&Wp2QkUn7X?*E%qZF<-j*99!;-&sX1`PMjHKej)7Zb* zw7fs|WG@i5YIV05Mh@d?KghT#8noO%|tnS(t#f~ z2P7@1ZOGh=qDyPZtD3Yajk3>m6;t)V<;HzT%aujUC7y@b-l)PWnCXpYr(qnCqxRFR z+_z^8qd#FlAw|v~Mr)DcJD(^m7LDY%N!5Y#RtlP_)ciQjo$w%(_#S@yOD0Q&n=_}# zdntrY*kP{m%#S#4Mt4WX$*rM$?n?Ol>gqD)Be&qNum3^ut_hW=pV;AKX^A9Ud^vBl!l$YcAB`Yld;Rn?Vu^TjF#ZTfkQTEH^1Fn4? zYYRfP1gUs|Y|M_t*A18>54gWpZ(h$Ry-VNqOX8<*#LZpK=UeJ~E81gtucTRxW0Ymd z2$2#T;hvwpSc#EY7@~oB1Ce?L*0>RG8bg>noV=I9*DJ;2w3t4L5_~4`$0yBcCB>zQ zg+~r~JYy4LB92hV6s>Vp-$#`_Ef5k&N#C~u-|b}{W&|}f1nUWJ-T6pBPFv@r_GNLC z-MB7HjjGJ<0(f-M^7!WSdOdJc-!)?_DiY+lOGlB>T}LH}cd9Gl@#&nM=Ja7so-dU{ zK9rYi2N@hiHWVm}?!Dpp9aEEk*fxtiWBaijy=O<+==(Y*(%biWeJ5iWK+M2h=X9Z*)a(A9b-gq7Y9pmJk)mn(zls zRD>S%-54W+Cb6Dzx|3=dkF6B(B<|e(O{FRr$6MAX)cH#}d8sq@A|Ogx+H6Rsm3oW- z9MN74j>L6}`hX2Bz64)eG(RmSC!BoNo^ip~sBL7}aJ-|~p|x(Mg(Bz*VzVE|G(F`j zuOpx79r@$On=alOirSx33p+P&)S(!RN-N4QmwsLOT8oeCB1~I4zqwyT$h#TJG@>k? zzHE2-5^prb8Tfmpt(iqcUfcI{mD5lUyrG+Q zMg@hB{=0$~MNePh9+5|JP_0<7j8doP5n{NBeEyIT{L-RGd;`^ee@l_Dnm>M$T?5<2 zhXTR>gJ-9=UhJ z?Gf{P?Q$#?8{#dmwgNshZA7+BD141-ImOH{_1zS`dxMpN_PnX~o|hWijdA10$FB|t z)oCDNQSTA+0j{Sg4mE;Ukt0!tI|k4Co7*Gg0)48jVsybht`I!2cgy^>sU)M-?J{!7 zPk41-bh_8$1e=_8Mc}F{~kyd$QtCgi#lo}_a(YL9-mh|~Tf}ybX zK=0n8)#zgHk7O)IE<0f3Fm2$^nU})}Ojg|xY#f^Kn_x}h8>TbbvUCKCdq2`GrP!IQ zaC*`dXrgeDC47fw4x4`zr=IzlvVWN9)=Rc53Ty#GLQlE zHs%;+27U4~QFpwPzixQ+*#S^!K6%e zPc~7YibwVK_^<0!df5HQjo$85_F&e4{ig9zJarkKLU*z7Ef zf}veg3}Mun>)_pbLGUO|bc#K=>WO17P3x9Rvr;J6_ZtsscJ$;Ef&Nbhy@w4mSdp@( z3eI9o7;M>C@PYEh_hDaX6JTnXLX7kCvdE|E748>qw4a{c;HF8ddmaAhdvIbtb3>uE zm`ouOS!2OZ^nynEfi3^m7V91JT`Fd;Qij+LtdJJe1?Hunu&9Zswp&$1CYxflTCLib z;xH@as@;+fmDOGKy|L?vhji4%+4XP8`_yu>>Z}Bxklnw>?+|cW4<>IQhFTJwLGC?sa(;Pnoj$wmi!U$5@f1 zP3hh(kp)lpH&o-?Fp_D!;en|i4;6#E-^NjHqZ9=Aa>Nd=nF`2GJqXJ0ts;sw`66U9 zqob-RxH*SZ9c~|(c*Ao4?4F}RlH~VUrYq>hj}mPPl?(@Yu$*d- z-nQ?%!F6=_FuBJ?qf}7De+SF1MX8W@RqnuR=N*4K(!N``Mfd5w#7QE~JyJQ73867! 
z$(3EU5z8cbUr8ujGw^VM9ghD*B;q`;#cxI!etT}xl*QuaQ9PmkyF1X^WP~8>XwB5YnXNDJ*r9a3 zkSDB!O(3ercxU9<1JxzjzCPP2`X#%lKH+>64^J=L>)J>;W>E1izSn4qb^98>M+QSo z&)ORX>#Vz0dKB-CiGGr~hW>0?aVBbX2rL{^DmXSR+4R*D!#5pSiU@vEk(*xSE`zrl zdT@O5)MdNyV~C;Nr!;pC-p%uO`Ch}=6F2|3ophmCC%+a8nc-}>`3B-FC>-3~(Wln1 zM4`%!C&@b>(aB5heHJ!LW){J&|9DHq$zH|aA~0vr*IBq*dlOg2F5K4(zdprY~Ds9c}FJ=%WqvR_!MIqqQ|dU9&#Nonbd9Y^C(;jXy0GNgrpg2Y3*&oj3Q7-{YPJza>A! zM<$(N@W|P%KYu4&b(>}S<&)uUhCbsYl%wD_qo;tNFPmsH&ffyhW|Y@Snrr=C3k4JI z&`v4>mk{4u2?Qo3A9#n!^@WXN$5m~xtuGO{Y`SXY?ng|D5EtJf27^6FXyKENc;g12FLDVQU z=f{W^(WBr~?#Mz;sfdUtXTKX33t-tIR3eD=^5Z~~4^ah|o`Kv*2$S|M$H}Fi?@>?e z4|SUG^N+ois!?R@^HvqH#?Hio+BY^%TMnLFyO}u7!f6w(R`#1}Lz!hSSGUH`YXigK zh`5n@EvBM=#aN~$I9*p;uU33AVGtFO_wGGyl!8BJOQM&e&w-QRWzIx-lfmFnS=SAh z4|PlU+$!F*&Q=DCZM;~;elLEm3)eaJc}EpxzBw+Tx%GUiSZ8Hji}K4T;zn+Y?F}ta zn(|_7ZUyu7lZVE+g64@qTIL4%F{qP2E?L~4wCux+Z0?ZwQ*pKj|6lc#X<)b~-da#bCrqDyYvJq>R_*K$DL;6A6bK7bP=Jy{zK9qTg zQPvsjdum^6m!G^ez9#CrBAD3yOY0==;LERU_089Vo+qKR8J|oD z$5!((yH=pDK>A9Y3wwq)?%j)1j4od3Fn8UutT~P$>Yo8x)UhkEEI{ZV}eM^afsFC~CfkN9*R^J3F^`Jdw#WTeFoXL&^bf z%PAQ2@86%>j;WhyRHq6phwo%5V^)5n&@1w-+&E^Tn$2^X7IV4Nu;lf$ni)IJtGj%d zI-D{r>l+tiv8b`A_=YLC+kM-Gtgv#SE{B)yK-K>}so(1jExv{&yE-^cCaM9OaYPT7 zFH9|&@d#fQRWH#sup_(h?6+AR)tnj*)ImNgl2k}}WO41>42cqd@^n^Ce#VKrm}0KC z@M8V-(hBY+Er0CrS4V@m?+e9afnNk}KS=j_gi$dr*;|2={&rnk+k(qP^3M;4MiHgk zg(E-G)#p16OQQ(&J6zv=7Ux9movY2;wb1VG`;dk85`@lKXP5?*t}c0Vl1yChK$S`4 z_q+SIe^24@DQh$m`q0v0x*6rvUEZHjDV7e;+3z4EmN1jxG&~R>`<0VD?UTo{#~F2& zO>MW@D!W&5W;0{*JuyDqi2c%IEa|V~T>5=VyOtJfH$mZ(o)jZ;rDiFVE<%fz=nDbr zmTAjQjOvr&JUkxm?sp%B{Ai^o#Z_pn&5ej^wUb(rgQf8t&~HWW9j8j4Bsy-vRoH5(5%7&SzrA4GQ2hNv-wh))J?=sVwQ|K z?>`Ex_n&c&;?IN+1UMJyp>CQO@2|o%J7vx@c5?T;)2O1m${-4{Xp+j4SoQ-7XT!cO zLW`iO%nJ419F3Ci#I3j{QCRnd6LN1PH4gt>(4VQ7zPMRB`$)7f1K-H}UYTm8;lP9M zRZ+}S#p&sMsVT~ufdS6Uyf}2x=X;;6tP^h4CYk*%)9;Rdf?U&8s6v@wi8xuZRMjY% zc^kjzUBK9^enTT3#~}I3V|icEpqB1A>V~QqfA{mR3X;VmxrH?1wTAP0Jc)mgZl-yQ zdS*P5aGyRZ)2E7)b=!acBIJqGB1xI!2Aujf?9oDR67r{Z`)iL02fTQN6%uWm$LHLI 
zxyV1XI%U@Ovy^+44KH_5Vy256ZiAV{%laGd5G)@$vptSk--*!ul!e=~I!r9Ee6CQ^ zr@pas?3d!))K^7h6tHh&_+84rucT2Wv_bvz# zD#d7wADs?!UT4M&9}bKbIu4}fvlxieYv!vsmd|pk&i9tpf4Bt=pxzNwvI*$>tki7N zo77buk34%D)EK+4nvC4~X~!Y8PqRy6EN|H7Af;?2`!;Cc*2Sqn7O!Un6dg<6xZh)R zBevlaV`C%#p=|7KU<3nYLZStJ?mgJ$abSCV=5LaV`?bq~iQ$I)ydQ z@e9*LoRnK#L5F4Qo2W8v9Ts`i+2w+1kn>SX(>K+*8J1JcmMZ5*#l7k(>=#dEKgPas z%QB1aKTNRATi*W_(bgR{^vIkz$S1b>PKJv$I7OUo+4wAJZ~g4o08iO&@j;Auw*HsS zWcLw^q#lEM9gm04)6@#(8;Cc)8$B{=+y4A?-25-eL??I2SzDZ5uH3$puV~QQqaQ65 z`{};1e{0D^bvVC$S502E9inulTPpJW-dpEV#a7Wt7le>)*J`Qc ziLlv_f)f0q5|R=kB1}SpLi~b2lDHTXQbe3zLKq+rW)cz>gRGvxHKEVpa4XO%1~BRY zgy5fmvb&qNotyU~CeZ2sRO?!Mfm$X}2{Dm>Yvtr1-42*ovQ-HY#HUCq4)u1xu0yWr za02Li2aFfeO@^yM2AwcvKtTxYIv^yVnNFB0M8btblx($(2gTWP@M8!GLL>4x)G$HF zx*SFYC3nFnAuCrLYG^15E&y$H!3ZHDGaLd4s~fP~&E!ylyt`ql$-7g8kOv>>4QPl5 z9Dq9O29mYvfn>@av}A1r8Kj#DM?&E}FcnB439bqe_kxnuOHKkPxPnt0O6vtZV!whO z3c(0!sH+#qCLzKlg?3~}g(1d1m>Lw6flz`<(51XKQe{Z6ABaV3Vg;iohdLwSoKSN= zKv&^_z=wwWVSG?9Axs$)m=_YthEqYN1EBHD7DfQ7kjZS zTJ7YT6N)r>QYCZ^DKF654g-TE?KTVtU7^T}L!O3(s|;~rVh|-~6%j!-fCTW!m#1fh zhF1|1XewAR0Phe1Z-4}AKm^%Dfpv><48i+qv1r*raCY_M>Oc~PK2WZLFhUQdR%;{ zG8)bg2`0h`AzfwQe8w2SE@1#i6?z>5CrW<*ngIF~1Ls3i5d0rc3B?7%DvEan^!mp~gA4PiD1d|5;Y!9D_lqclM9 zi2z(>sSSa8w2d90|2+E*0%o5rBY>=a15$W8fWd!!Lm9G62MW`}=qJoF&mg@ho*V$i6U0r+%e!I>eedVrArC7{s71j0&Q0$_8q_&}C*Ca}=NO8||h z5OCqo29^L>rvx=*1F;{15r8Opx=IxcD9f-*hhgF4}T%$jSu=2?UY007QX#aNhq03JGWrC!0c zp^v^C5)H+#;D#_rw;v>$d@;zDK`IbzM#XS(C?N+f4fPhomH&~hTLKW7Lm)pMmcZ+v zPdhlukQI12I^;48>ik~=luRvTz+10@Bru4nKwfXaAFKNS`N$7iz8vBZK^X{>tqpvLwG3VlS=o`QK=kE+neGEHpcR*c#`}!~z+8!X zpxd`{IK@A}1S$Y?B7MMIy#g@bWd~kttN^O;Lf3)8T@_NS1aL@N!JpnrU^)SE zG9^g2>MAmcAm=LhP3ZCiXq>JB0T9$e0PRrnQ6Fd$&m zfYzM^^cX562{FF{9=wYBS2e%`!(*gM&{++j9XtTRN%0oYq74C*PKLdr1|ebn8;1(2 zdkd#b4F&^mo+g-Xh-v}-)r7=R3+NYNAktE60q~(Upb`B$&>Fi6B#XZLH~Nd-0W|~> z2xW+`4m4g(JZg2ItKc+;5>$__yBZIL)s6n?R#^H|R>;4CjF^#eixN&A?eQ z*>D12mpxlBJJdG=tO{me>>_Rf*y+A2 zh?qB^lx&p%Of3wBD9}?unje6BqIC_aG^^e1$GY+)M%2W3qaEK8IULx0TTKSKypG6X;Me 
z0<5Lh2?PO!m7)4hv?b89pL`bpd^NuqbpgP;vY@UTU1yGigLX$gE-n>x*ah5iHCgL) z|2tVabpJOfed`AN(b<;_8>NPDdVqdcgOybesJ{w7{~jRsY8+le*Ih}2_5Ldj$YAC~ z59z&t2+a9v&}lDlgvU5s88iuTfu_tpG;VXC9)3SM;DHyQ6!ds?HOL?aKnXoUpv}>C zfJ22PBrGY>Fh2nwhC%a3oVXCl6dWH4ri77VBEf1$W*SaIC@L%=C@9Dz$S*D?0nI+f zp@o9zV7O5D44e$Y@g^mc6c!Zwhd@X|^j}I*p??d)!v7RRMMVB7h=~8&B#ab;6lFMR zpq&y9JWNpuK`3zwPD>#EFE0r(Bt%aKqk*#jaNxfa=wGSg$o~re;XwWeQAGUTCNZEDGdC^`^n#KMA0hs)2LEF!BK9A5 zFz^n;Xes{3{C{c-3qt%ea9Rv8QHX&OMn^C99}kItMF>m$3tUu46nbe)PeUjq`7c*d zadF6s8}}x}bi|2A`9GFOBv9$f=3qHrGYF%>M2bm5yPyC;G%zyAF93%OQ$$PGY&39q3W7OR{7IbzNBz1J!z4S-0sj0VEe)Idf%twdZv=-D> z(4Tk4K{*dh#Lng1PCUh31cu~W>El~ZpUS_iec~0E<$7Y&vh{6zZ^miCVYEfy$a3XU z`{IfGVTNeeM{1|Rlj?Py?(t?uqtErZvpBZ22h*ns5pj(z zJ>vv6+ntLNUs%bvBTZC_pD?X33y;4{w|mfjrn!LcPqB7COvb6VP>G@LFRy>cR^lyJ z)fkylbcHC==Eq%)gS$P2@EP8Cy8XfgWi7=Rgj3Hsi{$gCkTPZct?P9}LcPo=y3Yw` z`4R;CoR*V{w)8)AHTdC@W4fCWKS=5|u8WGqDP9lrU=I(KdAowi8jsOgNAwO!bSUqy zvPTL8H1%NIAE^7-TSV1u#xFW4-ZYl>$yn%7v7#D(2K-YGtBVeIe6?kfO#7rj@-2#5 zPy(^at_8 zS>HKcQ>wF7lDr6Y>RH=+qh3by5`+D2H=LNWwwDgc>x5lK>?oI$5RHpPiGId`s(bqp zD`QlGiOk*Qk&@49^m`e%Th3~31r4N2B<44pw8&4`XGTjMEAQO{)Tq04XKe^OFV*ms zvNZX|;`-<>c?Y2C#DEjU?g>h|gqCAZyM;P*unC5s?FV1O}F_7vf z(G3{~7Y*z?`#7tp+ZR?AVzBI*85k=nq2@UqZJ&I_Ro53VwTnsCkVN@`9>!k`g1wA= z^7XHy2s`sWn5%``40CLNJEY6kKe3@aJ|@Y>GIsPb-yZ+e!#vCbR}f0cuUURM3<;FIt+sfEsU+Yxi`Ugif$$`rQQ2phHQ)`M$5 zh=+g)ozzw@j9aA>S>}27WJ%+*UsjB%FdCc}y;5MaCj&+h+hMo3Ej|D6aSq@WN~ z6+kM44el)gi~Tk#1Oh_{Y!RHm7_kJ=HA{;y1eC`L!vfDLdt5LC7J6p^YFh;B9^EB4 z7Bm9tAde+D9Q{nj4!eORiEh997D10CjxHr;;vg`QOhVAz9t1rc2}*)UFc-lPnCMo| zbQ}bhgcxX6>;_Nsc`-O>Iizy93|OENwBNy{#}Y!9?&{;xV+#Y}V2Q5;qeqJYPcYH| z#xVC4IO`=C{S{Auh#t&$46ATPXb}L1Y*yj)Q1CJw8&X<@BfyM51omq%L`A{F8k`YJ z3@u&g7T_*=CCGCJhQJUPf&@V$Sa2Yr9YBpGdWEJ9U{rX9g9Uk=f}wVJ1_sAQ8wm;R zAz=uxYs0t<7z41E-+^oaX5W!3^MFv1oCJOSlzP`R@PT$2PmjV*Fj34#z5K{FEatU~~Uf~OlX&_(2m z`Bgxn&7|l+4u zprlPW1C|890Nr%~j3od-;EYqW&#nYBgS3Q8qBkdzP@6g~!&Ta$2NS3|5kxsyI-}hW zHYGtfwEH2Fl&k99SR8sta2JFr2mpF)FvbCbe0f|1wELJI3%cBb16CkA8MoogAaoVC 
zK~{beBSV|94TJ;^$Q95-6oR2s;i|JWNLNUH?_bYDR(o*xKiOK{%7KMm6{C|FTL`Ek zj1B~PxDXn(7!-oGh|rZ@OX@(rf;jAo9Hb{8zfuyN%GknJQvL~!`;`}Bze zB-$<-tgtvFw2MwdU{Ua33xXj{8}}c~5hkge8o)06yd>NA#BxalMGgvnuhf^V$Y)N6RYlMuGt26wD3Q0)#D%60W2o zGF645F8+9F@~%Qr=7nZM6^i-@;8p2Mm5>+>SteEgeH~g|jaHeMaYT+e>8?hdiT!xn zm3~5-r_sguKM2f3K0#Q8iEFT*;+>{24lnHSPw{rdyf>eJ%7lXKiBb(-5`#0vh*1ry z(;y99^8)ZvlwX6FBIZSF>@!rOZ_mX387*`eFG|_ZAjJtKZ3kjm`}dV=PA#4?(U`ln zLZit|UA7o;6XD|J60cTNHJan0z9JDWUM!)n82Z{tUlunsUCsFk#U0U1qH|!ISp-`?GchgNI^Wo^_wZ_?O}peqU5X&UXXYE#@m{{ zB*%e>^-@J48pCH~LK>&CQJZ5xfvZf z*-LYnu*+{2`~t;?BC1h*^yeYiM3f(?|H2Om^e1Bu73xV+(`f(~M!?H3mrF zLI^xD+Hzr|O??N1k80U$#CU0>bTNl5!GvWuaM6s)QAB7PXOHu97*wIItVpfNh(kN#WEMrf8(NwI4wmsP< zgA_A?1Z9z0eGDMXK3Ry@e@Tp_$+E}MU>3$;Or(wBu*QS#?kI=8nF!5sIqD(Pu$tt_ zL4Ay0Y00xG{$o!rAu$-TFjHVVAR;A3-nen`2Cjhoi0^R0sl(CL(Q~52cQ`b`z&KSj zi$>1B(5xyI6!lWX#APsv5Jlwu@xX{071>@4Kt+O-(A`jsSdO?#a@geONH!o6erQQp z85Q)4ppnhWK(%PQ2bCe{Nq_2m#)I)#Wfc^nuEtE?Jt`1@#$->^Y;L+XeBPMx`6NB= zaAL2@mS;l$M^uOGKavX>rLqw{%izNPjB}W%ArCOw6mnaQJv8%b)zC46`%F=1yD`zn zN9sd^{iu#-g#lWOP>iP=4|@7e4LGm(wL8b zHjWq?WBdq+?lXdGmoyR`h@TZDN+a322`}E+lU*ZeJ?bMeqi9<+SbcaDN>MXh9ff+# z+COqMiZKDn3!|x1*GP+ztn&!v8)*#vjux`F1Hx&vn{tO=f%* z+C$@euT53#B?B1Egl+}HaS*eOz~tPa^LKK-2JA@2mt)oarGvsDNpS6@W6(DY1-x_W*i+Xo$!*nY`>&>_Gm(M zk;Tjmjqz*;#*%5{hdik2Dk8%A6HtJO1K*f{R^cg#t6N}%#)BhVpojh$wUz0y-53yr z4vKw7hs>+GRYXV!&G#19#w<1=Zd?u)JQI?&7m>_E25h;c6PR_) z1}Kd0fFOo!2w$MfFA1fq5o7IsnquOB(MFg`6bX_rrcVpU-g_3le5I=m0Qx5e<5#TEg5? 
zEZ`6WctR%WK%(JR5TBk}NQ@Qo7;xnz&12k8Y9d;oOYoS9P)oxRh?YXLsJmvZFlSVmS%*n5g(g1a_0=(nqF5nne*^)4}7?Qfp|OM#pbi54Butjb^+P;ct#M5TMTu zUdEi7!fdb`F&J5u4V;sK-9Fh+$tZg3IT?kSW5}Ax)MB5t9Y_PsyFCZd`|imo_m+in zfwoj@*gNTg)*VZTupJbNlaW2yVh5L{JHbIaSjEN?_%lwabb8<%Os9vDJtSbx0ekJy z3dPy@4e*Y{tj4B;JAs1(TZ2diYYCCqS!^S6+yUY@4AKIQY2W2N^VlhyhgV8XSh?#>fzkh_6O1cj%!2}7q9Zq4HlWHf3 zYB`s#O+v6>`do~i9?w_?Nuk&Xr+Hx9|4rR|R2n>u+s;FZQDON!2*aRvP4nmiO0n=c zo@j=dhc7+Zvq@GEl)h{}M#Er^-)M^Q@h~r17qB!R-jf-kNU9fGlX|5&8Sp|IjLqUx zv4!ngfU$8WVB()x0GUEI0!m^iA5sg^MsN@;HFhD}loX~2S(226C_F(Ag|ikyLD;12 ziRL2oilJbTi_kKIR~js)ck!^u{S`$eV|F32r_xHl>7+dLMN7Z&mm8MYD`&fAxj)(0v9vj)0DLFM)_{CuHP^ z)Js)8(z68Pp>u*r`$27kY!I`A_DjRbVLyxX zER1AQ0OY&9gPxrOsB=)jy*PjljyllHK=x><1ptgp%!dFOz!{{Wnx#ZS%4CF9|Kdsi zKoCGpf59Ol)upXOmRV`b5T`I{S-HQk(LW>yKoZ$niQwFfb!=JEv4|@}+NxQ?v}F#i z>kc(!f^j`u2dK&?O^d^Cgn4;EMxo^ zx-l{_BL{SV4hh_qks%+`v}CkROos3sbECho4*g5f8Y2d`p(DqO`5P;3MPietDy(K; zHdN5ea0mqh=rlB9bA)X|**#(u_F~#FrTcrGkfPc@r0ky>|KUhLvB>^$93|Pm(3iu_ZF%n|vsf}yK6vW3M=jsAjRhwdr^6c}5=JEM?M45|AOy0ZB-nO4VMoc^SVfFL=jgyYPxowUAT}ghQiQ0pww4F_v_R$Z%cn1|Z}iMmONdEJ zdr-Wwp0{bj?WPCK-zr;8wB}v>wYCUp*0fy!NkKGTIr8)C*PB}NmR!*}1Z8FC) zVNU8{L#5l#HnM8XL@UzqK==9{EMk))DQ2i39s6!+^1vzaZa!8 z#q<4g#^2BPA9}B!qkdepmv=toO{?~qH6FG*#A??n^gXPZx6H;;)&I2m+T<6K0WItY z+Iz!GmoKx~+FC97C`h3{e&56JtkwRV2~UqM>x}w!PWXhOxpilh2}*B%d`A6i%8ip_ z>`jtiN?wZE^wa)b!hobre!qKYD}GGU%u{a>&hPh$D{pn0!=u(#Mr_kG@4>gj9a8MRzUGiv>*RKpcHS?U>2?}k@hP~w9>6-d@KQx*%rL->2OU=K{tay->aEi}VrM0znQr-pPK?_8bdTpQX>ksUeO!*uWY+Nm= z_5e1>iV;&@(aN?tliL}!?LvW$!D`x*wigRvBmV!lod9=lh@lkonjMubV7HE(uv36Kj*UH01vXRsvqro z9A33Nz~;9yzhP%of7(^`7L`S9XV#?R|E8g?8qiyIIA#5%tD4*7cE*bP4zvbyxtK zEk93l;k?bg)z9z1SGFw`fKK~2s`pp*h*e8wj4}+MQ~qgxYfaY3h0ScMyMHX|q`&Qq{=Ccgx;t9 zkac$(jsL7;7k}6)d|AYX$1k~B{31E9k}a=dYWNRwG{6%6KUZYp`x3`k{SF~DK@$YW z5muv!3x!LF?N+Xuz#s^Lh&R!XWD5?VKM0)zZvtS*U`NIb;6%|#>=no)Rb~j38zGGM z5y5<9Bg_FHPyo*XBl5Uhic%RRji$&20_BJf0(7A09jYAAVW}_}WsxbmWWY|egWBNx zK_uBozzh^gq<9wqt0XGtDGEJm&{TvKOO2v}f~0X2i%WxqHa~#P&=5XELwP|8F(S+e 
zJc_{>kqfv&z=8rIjqA8v2JUE!VXF(8Adq&nd%feWaOu41mS%W5~OkS3RNyYDiPKOE*Ctt{X5v2f=0}U zfksh85&>VpO*9~VWd~b>K?9A5n=yrHP|uj?Q!}A2V~Q062x7Vb@T&n<9l3P?&XC%LVf#hH=z3#WLc~6rtwLOAY6r~=84IJZt^vr2M+(g`&@lyf z5#WTn1z~<;7(E7_1PF{KVSho26vDm~{l#nWz8JO!#;Fm-)(~LO!S>BuHF6T5D|w2F z6n+&X()%g8!}w)Z1b|vbVv0Wr8R$b7CWIoZf+kD>Y9)#(0_m}40er)A<00ILLW6K; zpuQx;mO~|`7^9#Gm6HNaNHYFRv0iElyv0&P(Uf^ku_Oju#Nep?D7c8z<(iLh8Aj2J z*#OW}voRQy2|zDc5RFko?q;jQenq?48VE)rx{u!szGX}RGXeRJ#W-1wXbkFsO<@^Q z<*qHj_|62DI^GoAwo$MUjY0z8#*DW^k*m3?gP0}8U6}DUCd{OzKxW$> zwmP37KzaJt!iY%kg#;An2b4jvN0)Xr%tA!4=2K7>Muih<|ho8UbaBbu)BZ-21!lgxFuoLx|{Rh}WsR>dQm=VqBLFa`e zbdFPqlnEzN$dN*%&{79Wjjlilu~N{H!qz4Ppd(;Yh(fN&sjBgr=#$k4*g%`9HbE4? zv?dfbB?pnqtR+<+spq42Fw-JUSwQv?K`T*ZT1IFDQ~?2sp^}CMIF<#ua}WwN!^}9y z)}$*s1`M4EP#ic3WDjDK2;Eduk91w6O+npLjj$gIl88o#OJQcxNK@4HNF!JU%#u|^ z>r%`W@h(gWpzK1XR3WabxUe`HXBK;E(V{G@pLbFqH0BUYjx}O2VQbO}8%dxHx-&s? zI^XFWgkI=;Z-^Lb$N29ks_8P%K>x0w1d+~Bt98}nv*NZ5Yz(0 z1$~9i3_30|X5|qwq&{ZGSQ?}SCZ7_21aZL%+1N9oEM= zkgnZWVJYXtkgi3f>XND&v%WzZYzYjZhx+ZY!D(eiu3*a$GNfD;#IW2UuljMu9SrD5 z4PhM&dJ+~axZ2wza2rEIpz=rA%KzYcJwfu~40|LWCe9#Q&%D(x;%JlVvutg$AqZg> zpvj^{r~tv7&q0X#EE*6JOri6p>VAXB}bTotUfQdRL1OqOhHs9NlkwUJ(s`^#(@Y?| z1bGy0Lt!TH-?^HtkPwk%PiBRnu(%aQ6c9q2rksT5A8F83v2*cOXh%<$BSFQcm_kj_JcvZ)-6Q|e!{5&Sq9(dGcYH<$kTdueqLJIrA*lLathenP!f$ak2t02} zrGZG`m`Q1DM<$%T3yDD>MLHeN{;QOnPUC76YtuMC6ueDGVvrc1e}U?cB45)kKqdN@ z@SxxVdS<8*PiP(!mAAb(1gw@JL0-+30WxWh$mTVSC^!Q`qNXExlL6f_7YKf2Kv@(Q zqWc?z7?_jx>9AZvCRD{Bmd%;ydYcXK3jRxMy@bLuwqzaco?+DYqPfAVZwN2HXCoqR zp5T_KK(1XzXqUm)#IsPA2^=QU)aq}*bh%e(9_~?Kxfr~;hBy~)t|^e(E7Yp=n!)&N zs$6=D#xWZ*G6CjXn#bH+dyG5=;27#!%1`n9|2aRZYAVA8_ zq0kcUOcCE4_&h~;KIfop%Q6{?yqubcGP`rp8*?@0cP=V36H6u!c@)KhyzK%IGbL{* zT1$klL!`mD>~$F7#4|LPaGe^R#!8j*5g-P)#T*rxX@vT88r9@Oy9|*;_XaA&52!eh z947Ve!pNioJfPGYK6CSgE9nYofte-03Q&~+muB5W*9<(i>?YWQA$tczGWW z7_C32DYJDz3%!fcG9%%}VhF@=Q9jeWqg5C#UkN4zGvh)^ASB~r-6g2|-=vIqfS)H@ zp%LLyRAVmKj3|YWOtk~0c*>-d%8;U+^WokTbHnCF89F?%2VwHD<#;y0&jm_xQO1;H 
zm$M(!71^HLtDq0)Wgq8<_=l$WEQWYL77Xu4;1M-8#nKi%LfIiT;`mvG`P@ja>?u5k zX&Fw&%Eu6JaXyyzosZEqgLK@I0rK?gF{;A)G6|ClL=!@?wMlp-ZHzLNPR0mX5Ox)Q zGH}@bDzw2|&`hmD8w|2Gyn2W+;IkIicdW(~V)S^T8f6*0IIkM+$GDBo6SRb(I*_a< zc=~M}7W~pDXoV`$e!0|1f?NTQ{`s%Duoth7)k?1@;N?vmzwt1>HyEU|@v3u?;BDzttD87R;@O zMi@W7R1fKHHo>z>>meQJ2@b^kC7NO`8{T<|r(o5IkmoOHTSeF&roO^cEWZw<>J=Rs zg}gq$Voyj2Nw6pLU*jQW9=#;HrW&`6e!s?|Cjgq*iZ`f1$x-CT8#=q_ZMUs&Q5w`S zDqw=|8wDK5+#+ExlLq!NA~hMJ3~$7AefSRIJ*j})j*ajw@Os4Q)?=hl% zler?0q&)_%y5T)4Ggp=>-(y(>x@k`4e?S+E%GQ3MlabO28a`kToBRxy!#7~j3;al% znFY6w_z1!NamDy?6vTT1tS$NpHJK4^{zL)ic&ab}TEmITXVd_!Xh#tC9n>Ws?noEC zLoHtxbYTC5dh^i2X-6SJ;sk;_Zm>JWeDQb9Db!usjNxFd zav*n_F^JbO5V5uy9WooEQwy~=Wm9;!{GATX`;q91B7DT=s z&Ho2A9&Lv}I8=#{YwZw{uKaW;tWai137WOTiAM)oV*K(_2RaNnPW|@>+a)355ExlM z*t3&%$(RwRpF@t~{S)nE)xy+O$Qrz#{F&wf`2_Xh7rJL$q5&ypk}rA)s5h{aDuzyj zh)yVml05kh4fGS-suYMw7X>hN;f?y}9H24lkFaE2$Ua=`TZ&UOHPg+uQQ zmPq1h4%AA|hB7P;b}HKIVHStEhY3m+3+0&!Fp>|e1JHIylF7%Jl@RgK42_y3G*&B+ z2D)4L@pI%!;ZjuxQoxVei_b~?8@ga!E~1c`BG5+Q3o#ssvrP|&%o7~c`#VXb!N<}fQoR5a%z3Z$>mk@H1o% z*1>cUjzq$@d~laUIRB9(z`+*fP~USP`J(9HVJ{ZHr=py3gKlImhIX;iIg*EB9I*tk zGEwqQjHAm~d$u@~NNqq~iE{?oRDKfZjZQ$)LQ`}zAfZ%0jFMbY3e}M4W|mJ0rR47IL1xq3z1bEG^qpGf!%D-dBl}CgWMpWiP9Dr&8cg^qzx0| zOBG465(k&q|53q@2=R6=;wW zh8T$h;c*{TIFt))PbRBE0Sl`{i6@<64bCv{162%`n!Z65QvT(jm=PB)wSz0BasZcO z0ji<}dBNq3a$)!m|88ymy;LH(ra5Ut{WUtl%+djl9f(tE2DNz6SN zRdaKTK{?I--2$c=RC05?1~3UT{hfQJ+a{*NuzDI!$M_BA1&BGY% z4&QMcMG9N}rDbp!Vhgz}p(;!xa*|5mJ1iT+ks(FnII^OD88haHDzwXhoAQr9@t;Nh za$-ObA(67I6Z|ysDqTxa-teCT{=Etf-V9mu8w?M16_&8xKiUbJAJVqrkna9#{(n>}%tL(pyVFrK&qH5;NQ(#H3i0E%{Z0y#tWyn}Pj)>M@iT;hl?EmQz zr+&(2QRFu>!hozn3;D}gktOSuS<=MDT3eXk=>O)b@Ue($pSFmAp&=sNlmJ3g=0vI- zEUHcwmIP1g6H8xgmu$n7gN{e>QR-_WHIfA9FYaz#ck%I*?WPUhUO#=lB#+I$z2%}# z$fSv}k`L~Up3k$n^G%J+=m<6cR$1|~xp!0ED4V2D_tqqSDEpID(CyOx{dGm>HLsU? 
zHsLoHC6SnSPn*Afn=pSMX!Y$ykJZccel+$h?cH{iw`lYEmYDP0>q>Nbh zb&ugWL+;WG$>*n+*BJ2TJUP5JThX>(NmJkNKI9;>`T%Ra~X_r()NtI|)#s+{gAeI=#l6urFTaGBHz zBG$gC%+=l`Nzgt__3%~a=#XzlW)8m+%3P-?7%44JH#Icd@qB)C#~2shx*N3~isf#O z(aOpjzC{N%~(?+-nenQ=7R6(siI0_ zgWl}*DK)$npz}WMZskGViGyFW;?-M@O=**Lwog5zo*}L>we;kP+2L7t>I5Z}TJ+BK zo@$;`F4=i%ErDwCP+{>N9XfQdmjF(tlOk z93h*q#s2pcYU0nAZCxLw{xamg*dq0Dhm%iji&+_OUy`bB*b(J1t|zFCXWkLDC1HAa z#%p`6?)=$liG4oDJif(bcyv6idXsw)RFdi8-R9q7zMc-l-D}hM_@eyu*OJEyZYdeL zdz;25pXxCzHL!pFK-s0V&SRLNS@)Hl%@@R12sN6QDC5`p4-X{RN{=oipF;V@D5c;YWR_e09buLle zxVBufHfWs9*2NuBrd~Z)!syeLQvTc3E2@s>cKFTE^MB8a`D7QKrG2d>*{^FUH0p$%Ot&8UrE0By#2(uTjvtgTgD&j zt22~DyP4ztUrGMDH!D0?MO>+;XmQ>ekTv%8`KZ4vja!+oG14Y%1A-_VhwithpLijI^;VmmpUN*Yb=kxaShX=WQ zu)J|?Mcm39&5<^Bl3SQIT2!{Rtvk2+Cx&o_sJHmt>Z_YKD}IfQ`S6aYNf(Iv!grf{ zirzda03pg2%~_R}N1(iF9%@rF AxAF&eZ;6i0#_#$< zHuE-D=zzQ8h&~O=B`>``)2mr|rtf4=b$`S9B=v>sP~K$2AtbcB>~oBGWs>#D@7bBN zI-+hZJxFtVif$VS+9aJiZc!%LxudMVA^8J)MaWtkbG4qL`V{DWy+Ii?Gzzq&guaHu zR&-qR!$bBRc`fm-BPw8EjMHL+8qR0eH%sdXfw4!SkwMfN zN%KLT2sj}Ei03n?GQBa33kM7y&Ln|w(re&mgODgCg^SVgf*BX z`t%+;cqm-Jf!-Dz!247clnEn1sUqY6$S8;wlSn)RF9AA%3Yg2ejLI0Ej3EU-$$nY=)i55` zEkLpcxJ)i(g{}>0&j5rOG6vH$1A)?e4hVGc^8qZVlSDLu z`Qbg0qfS9_1is-4%+~?<%;7CLfFIkAf&(B4_6QrSUNW65_xSq>(yG(V%zY`78jC?*aVnY76;kf9?068Z)?n1$7WgMzR=m8hSGx zdE3D=U=#xfQDqxa2g4Eb0A;4YtMFw$3W0VJC-E~Ufpfj8c02mfLSYOG>V z7oZ)q4jC!iq!7;-wh0{;gNQSP6Ep*9g>d|)g&E%gB#*IC=c42j(9#DLtWR?pj+;4? 
zETRmU5PFB6a$y9)Kf&{MI`x66BZ=h#ehA&5KFtOH1WCr5XaZl#P^A!E;S|V>>5A`R zQMUn=01p{t+yE|u^csBGLWt>dkoIVv#Q2-qh`Vv~S{-+|_WAJhwoR2xeGvEf1D8(p^XfcgOzMA`+3WGrk-+4Cb2zNWwL^Nf!970LoE%4EQFDI-9t;Gm$J2r=fRbTza<$q(2rXj?z=c@AR| z=gh&ie{2=_D?lwrZ1MFIK?5*N42cGkm|%uP&jIT3v4WoGIB>8HQ9A(01mA&iCam-z zz6emiqF>oiV;lxuP{sq616q_Jxq+jB^x(p8Ogq4bHq=PSL|xkJu&x32H{{c`pF~Weq$E&OD0LFP0&(VY z)G$NaW}#v1ddOic3_1#uk<_RR9R)wAxnW3Vkgzs9owhq@YZ7nGd=P?}^ggp0HV>u2 zK!yPqh>nd>)0OrBWuRHQ7!3}aesx2T`X5Gt&4<#G5RY0;wFOnc=#=XSq85_OIYQ$o z)brPQWUJZ5x?5wt!!P z!k9R8-Pb>cW;JGGf`OMpY-e*AGM(>ijs`YVs2f$lN)^C2W-v|h^$p}gdq`6*42}{= zGF+ID9xTR&hBy|0NHiBq><|?O`xA;#*P!f_kU6wnj1OPxsP2Y*bTUJ3cm|d?rnTxz zSYfDlVq74@Q1&FcfESTMkHI|yqXJbFUzo!;IN*-@B*GmE#&=mz1)Iz8f2@~8LDI00 zGm>SJ6w5t7^di5A;{p~;p6d^paQbeK%NK_!vHEtg(o+spA}3 zZldE!xr)B}GV+3Yj#%o+0Tmquww@yu+=Tcfb9E6R#7&=Mt^#cjvBJSv0Y2CEHi#q7 zn;EBDkfipp=Z}0q-SfiN<*{+G4^NG*5E4_&{SfV2*jg>L!^%93W&eAi?Z-i}gKmj0 zGM_H5bbtNHqgLhp?~hGQOWa#|KfZsv(_b*~?!le**5|z6?E`&dT@TF7w0ZuZ-Rt!8x8WpXIfw8QDFUezJPOs}5T9uwGr)B6Ma( z%h?-6aiZ3z_%sY(1!cVrUscguWPjvWQFhh7OU<^|-kPeGuD+f2l4Vrjm%%NoZ7xmp za85kXxAu;`_QToc0w>li+wgM@DO$l<#L6S;!JOl))#P?CCyYPS%8;0?(-v-NDfQl!jRqUJXIp+!^jwdJEc0uKw9%w00-``RsaBNboO zC4P_H7u0Q|mT=f=*950G-%85UxT&3$ylq7fJ%a@~7mPN(a}>Iq+nUwUzDe$By4JJD zlTCJyjlRaYZ#;ft_R@J~2P2kM_W#o7yScQ>HQ~*d+tn4dOYiL+d0FGP?_77qw%h@^ zuAMLXJJ-eC%J6Swi7Tqy_p-~68q-;ma$%>*il>Gt&+D^|Dracizx_(#99LXxWBtyg zDb75@A{*9%GXkT&3MM`2)PEhm!2R=s&2ghfa&<4--My<>Gi=Oluaw*c%^M3p&&tC8 zKFD99Pbz;`o%w{elHrPKvNtotiVN>fH@sTr)FgbiMAoa{tmx_!`#mlpW`Pa1?-p%X~&&x{Y{HD@=V@})hXR8kbsvvPAPHyloBg2`}haLNRH>G&KQ-|$qbl<2<`fhZyZrJ$i!B;(6=E!Yj z1ypN!PPZ*g+dY=|`ncf+&8pFg8hraz@AZo>ZVAzO5O7N5H{aq{Mm|rD3s^pze|bXD zu)S-pf5^3P%>Hh2R=53K-EpTEx{4zzBA;p1mB`qAQPr)zdaZL}Zi|@R1plOWK?TE- zCI^KraF=fm-(o%eq;dS%&w6OQ_%YEF;3m8a}pt*zXc@c7n<$vUG-K$`+H+b zscwTn+VCQQf`s^-x-y5SbA@1j{i9onV@x}D@p`3iddXj%HRh^k zj(&g6VgI^OF0zG?OEctLZU+!kKd?w8{i?58TfEl)dJXpRC)3nb)=kr&WYc 
zDv!>+w!*BJZ$-Lk6k?U8~Y`1%7`TN++>6;6Go|>!U62~sy<29-Iw&j+{6-$+Mk@wLi_^>GYpF_9yGh)`$RpXNNcUemhlV39MM~;{Nk@Be!aHtS#Iad3g*k zI)J4;;=6Cjqx`0EGnz9-C;gh{q^;L``c>T?pGR_^3jDTZZD40cYZgaTA8Ys~pkF@m zYW=m#`n>Nx*Q{F7o`z-D95>m-ijlk;=6|%~*Gunn^_y8TC*F!z_VXg@r(JjdV$9yL z^>9$`*Zt)=U+gTu?S64z?d{R3IX%_p>&3dfUXKWqYstG~*`nroc~;Ek2|HU0<%V_g z(iAdFBF8?7Go4*q=Q~~HT#B#G^z*T6Y@BBW+S}g6Drv%`UH1+bWDma{&N36s zTOt17$@`3%hpk@(+8gTaF?t`*JMOmdRneWh!qxprxeqWD{$U5R= zIU{xSqY0LYV|+j8XkSp>+FF~eyWG-IUuEWIq>(cz2=iaT~=hnO?Z=}Uqk-co`oja}MG!F_LujVESZ`gM=c9fYy(ZLz} zqRwwi`IIuVd_rf)(yC>1l)oxOJin%OHtx67e$MFtxbFMKHI z-*TiXc0mpa*SgH^!rz<(rFE<&&C?~q4< zeRfQ9-Dw}SoxIQcdj;0!dgbolnzVMu{;?S&&W*0ybTdI~_Sl)mAGdp-nLB<}m+Sib z@)>H&cn{OV*2NxORQOSA@B4*;+}*h+!#+9hIo|oD&V8H9q%~`c{3SL$m`?=ww>=qs z)y?@Dujr`g*KE`LDEqBlnF33Mg*L`n%4W&&8R}u7M?P)}Z{aL<)+LkJvTdlIeo_|W!JWQF)w0_v}!)YEzQ(D zIF`Nd`p<8}Ql;t+RgIk8dho_Imwjci-`kGaHAn6eujoh=5((U0HbS$|u_GdMWTfAn zH48^-_(iPra@;ErIq96H*;rof341=lEn!z;>fgoL%uI8(!XpPt23r!himUTDf&zEi{ zI-7>=)N>0?^In`+7a7p4p*qH(DBIf3XM5eFIQif&oaD3Oj&t`-^3EHvd%nlEqs@xl zTPN<3x14Hotm$RZM8{CY*Q{odaXQM&^{RNEbUai(kNCDRc`SF|b&IL(ZkNlo9xCVt z?ir&Ms>tW|;lZi0iV$_5Ce!8!X^TvwqKxx~JtLN8$mIzkX)1E%=^Sfyt79St#CZzJ! 
zii|ma=F8?SQ=YO;PoMYvhsAYCPid{0QgVR@&VIetx!x@4$N|abDUZdpe=J(uvO!4q zrnAd?sfvc0Is5M~IsCxr_obX`ld9~(M(M6Kkrb0!B*?paJ8DL{S@)&xgGZE&dTs6e z`M4VMbE4l*-c(R5l%IStB`mslcJQS&N-sv)@77~?D318$rMv0$h7E$}&(#(EiSqVW zRE)7%dwi^&bJqO@c`Qax*;}S7S*&*5;qF^1!LP=WUSX-$bRw~jb=;%F`TeAu zRYlhBJ8q^K$sP<#>uTG%%TW8f;kIdNMiWKNr8h}hvA)lGBDQ7cv08cI;DdR}3l-1( zD#@82Ybms;NTau6#@%KOJ+U!DvEKwXXbUKK{Hod-Dew6qVqaJcPy1z=^^xmV-8{Ym z$%^oEew~|_XNQkJJNIz;wJB!zGxsE%|9RqepTLY4H>Y<@Jzcf%c&)C-_eNWRiMt-^ zrWWi2+WAI{lg0+s z1lGDMy=~m$c{+frr%oR@9&%%V?RHLHQ|c3;iU%M7n2j+I&>KLP=l zgFihLJ}QweYxz@y+-TPFzG;0oEUZVAw@-9=MU$Vhm1VYB?Cr5_n>!cI4w`K)i~X0zc`Of)*tVoipMT`q-vQePau!Dp zcVCVzhyudmx|;7s?#6Mg->zINU~=Z1lXgP-(z!rp$(oAmaN z0dK(X*WE4Jg>9ZrJ7xA=oywYR*B3OG~Y#hy>FwHmYWuyVC%KQuOO;#mhVP zmwcOR+Us;i|AoVhcjL5tr$4fN%?)!Gu@1QS;;z&A>MLzpCKHzNBx`0a;Ev%*T(fw( zU)Fn`b9Aavtb^q1i7P^VCZ0Z~`qMDGS#eLJ{fWrk5f}1a@Q)7Q&JRAk^4z<*TDdBp zM-A*)-YISCT*s-+ToNRIH@H;yg4eD-uAgkh%}3vKTG#nkl+IdnSF1<+(9slb4|{Bw zuy~5cJ!Uxmmhx*|B(fROW<$eh+yEt;ES=Q9EvA#dN-n5m<#kB>5RgSYhHRtQ> zD-)IXjoFZSGqJa0o_X$j!7cu`f4#c;d`C)Sg?aPplaqL3OcvJ1G*!v3-q0=NtT71%K$E$bXzUPdk-&9>(obzK%LO&TDdA_;c?yBeG zzOKNoE!F)$dw1OMliPOre&r7B%?*~x-{jZ6eLkW;PHFU_pRZl3_3I_m19JrZg9Dor zZ+fH$^7fvNcUat1xhU!2rJLXT)LIRBli54F0-y7jPdLyu($XzC^oVPctFV*EM)Iij z2Zd_uJ_t1Z}gHU~^9#4!yFiQIhv3HSLtu2Y+Gp^b+5^NtFdg zeX$ZERkM5@U%z_sLvm$s=k?N4Ki0f#40V?iauE{??Yx+}Q)SB19=7}9O`{Kwe7z%= zxBjzZrEvJp#%EpfvoDprp7GH1VQWTWr(w2d<|&VSk@+LTPn-K5%X`5+_Ti9A@@R#P zaT5J{fi3f0uc{RV`p?splXUtW()4S8V#SS{rHL=NTm`P2cE*ocxf@h|hej;Oal3r` z$+!@y#L@C?<1hc1>-6Pyws7x7-BX)hH;*6tGL5%Yx4JPkQK3~_ z75MBgAD0obJh{o`*lN?m{LfZcn3P`PpEDrgY<*cKWs+;+tQ`40TQ=VNIo;mPc6_Df z>eZuXDK9e}^QdIY$!iAB1|IyDFm9B+h5vWmWks9yp>+u+Hzoxf`n7MBO#GdWCH;|m zURWM>Ps`|g%hP|o>R^+Je#hRcC!21b`Mt!WtYnE!s$p1mivCT%bEo?!w~jxsE0HVO zw*T#KwH+It2Kp>ZkSP!2KXz@6T1JPn;+sa9H@x0<_bH#Bbnk9W(|J_?+pX)mx z?ATwuYRrdn{ym*r+!fLkMJH$8k)OU`)7kR&gOOd&zgfTOn{8VteAFw%Fu-TRwDrUw zf}_hjs_nkPP|7*#{19$vJRmDbTf1Z5e zLUG%tGtyi5@9BAzk3HfVw(IOFQ@2r)Z??WL9Q8(dQlp{%&DI{*bwZXwevf?jckSqr 
z)4ln^eja%o!BN$XiB(*A<-KYBeRsa8*F5u^kBohK=_G%|f*%ErE}z4agHNB5UjLw7 z@XP|mFP=mpk~5MwuGsVWwC}kmLi4xm<|M9~vm)ZuIG+of*jw+&%UUSzSY9tE_`V~; zJ*3RrDLP`VyP*Ba;;jPXUWP}Gm-KF^TG=U-J6EwNc(t2v#How()w?Y{#){5W`7y3D z{i}p)v*5gu_CAG^tVbx?I|@0TXt*zXZ(j65hyM6!6U%Nz&RA$_&hs%!U2<~#A*E{r zQ?46@%<>d967zGmS}<85sg>{Rj&{K>6{Ql<5e0QqCm)b-5)WTfskDqQb^7GI2u|pH z`@&sP$&DAXZ`dxWI9|Kbtu|?K#W=~V_s7y_2OAVUbn&=w|Lpb!wLM}#G=|4mZj|Z~ z@KYKgZMa&#^+ua%oePdUg1pSQW8T~?lHLNU4%S5;ZC2(xtvnt0&y|o#ilsB;-Q^q- z4YMO(=})U}tm1}ju=6PzChrv)|8^=y}?;g(qG#aMU~IpsPyt_hC=cUrK$2sGfpNBtNj?}eSsHVR+sCQ zzT{eGhmmrVl=-MpPpXQYg?5K{h!*XSJ!%`8vwX3eK%|6ILBJt>g{u))iifS7ATJil zx@d7RP4t=m#U35M$O9TnrX_G4BIG-#x;AANx#dNsA5U4q_f&&*JXI+~v@T*n%M{(2 zu~LCPqv8$Dc17$Q7UaUmUA7=g#g(@*d*k*Er?adsd^=utVD-_(cklC)%~vknf0L_F zEj1!CCGFnMMLG#}?;>W)waLwl(x> z=iQIHGT~j^BfGqU=*eYrd!MKj#Ai)EE%MGUYH8l9R&WJV1uj&&<}AFwjkDux{4hoN3(GFd@f*2P@}g&>VC=S5Yt}UV z$Qp^{?i8O{%cuCIuX?ECoV{>_g1(G^?)jUMoOoWCub#C^$j^h-B6B}@F0EZGSh3Q^ zWBf@wpG=u5=W(^egDlvuOC(+$3}5`-YRP5gno}zAJ{(Uaeg`u0^@rkE{t1UQ8}6z< z3%h;9cFLB*5p@DHOb+D9Y;+qjE&ut30}Vfur;0`{S#a1Ss3=V!BmV6}-vy5ij2|v@ zoyIrXgCmyAGvN!!@SL*#Q^ytMsckDi9LmY{vdRw)aLnC%b7kgA)$hmW4lEDXzqiDw zDRG{~ub^O2k?N8U(^R-mUHDz9rlwx9jwf4lZ>q3u|MXr>0(m%E@UB9sE z*(Zsu+>7-RbK8C9?_6HPZBV@<`^tXQ_A81?iUHBGv&FoX*1Cxv+;K=*dF!X&aN}jp zyDvUdHVRa7xNa0StB&_kHrhbz0!wMmZLvV5wkcO%46~m+8gCq;-2KWfesk(yZgvs= zEF=~=VFl-Ab;gAZHG@8jm#200V*57mc^I=1(H$q zonnG&tbMw-S81@6+IJVOi?+|ZckgASH0zVu4!x{NMq)h$_oQ3Zs(7BZD%B&3hj9nC z&UKm@lIQsO&3$RlVK-ySUOCGtN{2Wb7a0T{s=j~5S$j_K%A@ADk{p9uOG>@Q6wiwJ zuKd`==R)`Kx8L^b_Uvm=70^Am@!XD?%GSSMh8g%Rh?S`dd@fem;_M)@tuOlB$7dv+G{)0r% znqTK5Ij*)7({feXUe8>}?>6a2vB2YBZGx+2iKNf%e3z>k}r& zyijP-{pmc(&+Yf=tZios0@l7)Nh93Ks6@Gp6}xkGOi-2zdaN2!cT6JNMd*+sFCr~4 zw^cFi5P$H;$891BiWvc2=hPI$XTO;nowt2TQ{S}nF_so#1p>J{iY?Si9jEWMyqEby zpsRhtXzr-jM^}?yYj#>4D2yr<_$1G@>@Hs+wQs7wrj=C7pUiuvdp*O2=HJim?Ag8~ zX=U;H?_*WB7H9G&+GW=-Na6en1AoC)w{b} z?4~L8Z(88I(`-zP=|(5Vca|P40&yQp>ea^1?aVR~8a>Tv_(5Y4nRWUngiI@H+`{7| 
z{o-a-A4vO<*tY*;ORYie`v;fPFV&3ny6Z4oUwOUm%_!c^t{n#rJN2{YsMhb;@IK{A zUDSHDdh-bjwjFK~ikmxdvT5I?QHkDd+tp;tC1>c!WvIT`G|I}Y`2O1TXqBJq-QBNv z2UN_rHk+k0`f%Oi39A%?B2P^_U6ST^FzR9I?hQ)9Rz2x~*u%Ei0QbPo>S#&hs-Z2CV`eN$tdU9@#$bH{3&9ose<+cugs_RdZkqp@u^wrw`H zZ8mA}N_%sDVGfcS}=i7hBDwSXutV)6FD$`z4Rcj5r4 z{E2K5{u~sJwPdsE*47M00o5j+|CXgB{Xb zFuJK0FLVDbG{GVK9(<-@_n`3jLQ$<_6MNVk3j5+?IPY!AKrr<+kC-vhm3w$C2)m?K z&L}UXmOUac#P4|Zs==hffjIENn*A}5fDk`dQECULx*L?hU4-s`M#0=+d0&_lq3!pZ z9?((MeH)+TesjyPXb(g@lHq0b_h=(nZZJYoU&q5U%)@)H$sSKUYepGTBmS4PRAXqV zKcjx&KpC;KS2Ydw%q^O z_*nxsAC0Fw=(Gy#BQp9SyhDYV|NeaRGbEn0qF2w>V3!Eddyl%sY%_PV{!Q_9 zs+Sq>%q{BSoFI?4Am+RKf!ORSeA7g#8R;PhC#bz3zpNULV}T?`1uMOX*?Jlet?^r_ zZpkSzdyt`LR#q*_EZQ93JsAo!Frc`2}_Gu`1%9(`0(BX;1qT~<* zh!~eGO`Z7R$qO=d9*Q(NI2s;80XX;*1HBi5tRvG6#|Vj|Qt50qJTaXF@2C4fKtJ zv5CJgW6Qg%C&Q=D!nlFPLb;xwuR-+2_rlKD6AsDo(P=6HuVLY@U&P!k*Tgs!xjdSs zXGCYmOC7QCNh@5=#^8zQYI&UI@F8awMR%GmRMjW#MH$zb%gjCCQ$+h3Xp?|!FLwo% zl@)y$cqok(oV`ht21h?XryzOq`%1q1J-0! zG$FxqAciqff*A4}Sq#I=q{j=!Arceb7>5PSC4ylj3JuE;-cX2B{)>g4x5FY=CKnss z4KC~p_IZ(tXHR(m>LWA{n9`cl9)7~f#RdXDP`jr=im)^lxEP-erHOD36D<;IrWfj+ zKFAS4WrQVF|G)ob8hp?`nB4VuHHEeT0pWs-!!u@ZCM_7 z7g5?&Qe)rPoswKy-eyZ8kbu3OJ5VFsVoU=;{KU@q-w(fyCS8`Mmcj_s%^v}24gI%a z?(pPP{~Csp>8+9GWGZqb^7MCymopaLYi?zX5R_pyVA`#L=>;IfJ!W$UQhsr9+(^a= zOBPqwmEL1_9!HPN2`OWxTx?vu#M0}2gf*2m`^OK_y3@T#1~9CF)F;p(j!2 zF~NzAfWS_$Cjx1Pheiof^?U;DJ))EHS~Z@ZOp`2Yrp&H!@EZZp;sJl)@Y;%De(z}Q z1ndf0N;DGRSt?hd*B~yMAmw5gLVQgW!S+04!^|YfST&rjltsG4BwDPk#jLBuabrwE zFh{YbU@>kLbQ)@Fw^OebEUOcJ2vkC0utNcuIWT)U>45B7zo?1p(fK`1=$^4aan=tL z)@=sL%Mv-7K$>GD6R~1EfB9@0PkeZ&Sy9O+J|MEg|C)4@sqS7d3LqVaCXD!`-a7Ca z?$o-1;l|*6*2?->woO;}@M&CQ`$MpxP&Xj{n~tY!D|hw8yBe#yde&tr^GIfhBv%MA z7%tJbgGPylkTD0(be)(8)CFtZ*5JMT0vV5%3))0x(w^KsGL*Ts*VFqDJ zFwnWy5DXiZ$#nTRq0l<0UDwJ83Bv0*1Y(U)n{+ieE1<1oxGOF`^ZR5V9awN>Lh97u%r@nQw>xmCp9c^nx@S^kmj|@P--jPsAFtPHAGHFVcmeVSVGYerl-s z2k2Ds(z1|m^VkIPYFvFD3WUs@ydz! 
z-;?{qjC%^?BsKckCOj`EQYf_ptWj8?Gd=%%X_>+;C1xu_w;nJ_wh#_LUtCS94?G{a z9-pp>2$%iFp_nHz?(~%?&`QOSkBd15GML~31$l##5c;Q4DAXo;4NU%O{4ugFK178v zu_j8U2Axxhgy|3UGD;pnF0C>SySc!2=;GN(2j1ZsmWLk<{i{WP<{`Zpi8G+hq%lOXRLort=ciuq`gHypi z_V-#3Ihs8z!sbx8$JP&$SmKbs$pC40mRTsuF701an*PrL>5ZpWDX$6e@*oh`)njTR z{Y`8Yy#GEk$)?fuEt)r#cUSIIR5}PoZ#_XHN|Y*bjU9wD!{)^+@ILlWh+8K=W<7AI zC%e$1|ZEO=wEs4wTFMSG>7<=2NtpKm~d2 zLB5C`=YcMKxV{#|KV)t2E1>HvkyB21ovt`XILfV1(a)6y#$MEVR&JNae47&+ZZ9#JD*G(7AOb{C;e=9T*qOX{dVU2MW(9VQCBk4$rsX z`il9;=!Q;-h|c1HEweb}@2Kv0#ZR8F3le<2gYs8Y`1=;6ZgGO@{{5WH+ft#s^Gd{R z1w*G=VaNOH*G|k-=gV8Hi?)p@Ddk2trpopxSI?RuuqKANhGJObqpM)iE+i5+?5NPF zIWQ>jb+#jpO5tlXfNI9IRQ4(K5AgjprZkjWkC4J7>&pX1&FMw2>JZoS@>ki7i@B7m z>NITyNNhRIV>xkuTiC+27VvhZ7Ofad1*7$AO11SS#hD+T|Am3(4AL=V6j#85qfFgR zLeQuWPKegT$Fcv$vu>jQ%cXv*XF_vAeG!W49|eUB4EU_c|^kYD_GbDX&?mc~54JoUwLViVjlfYDn)DlYNIZfM&A3GvH=napx85EBCM zld-hmhm-@w6iDHMv@b2akn5ZWZ2w(Db9106x5s`{)KcCWH;8za(yrEFeZW5ig;p_a zXwhC|E<0Chj$}%#a-M-zEYvC5Af6u%SV_W=r7X@Ae;v-)s*?^`n;UkFS2T%W>nnR=gPWMK6l@|O*xnPj#Frbbb_>c2Dk{M;?Ri@Hk;RC5qzg$ zI$5p`|0gm_WZld$KZhJ~8a2i&dUFM=TnR7uY?T%}v8s0)g*`AhCYcSB&PZTzzqzqG z*}~aCff|})N#6*zzX>#NNQ#S%GbkISO%48%&&s{sohP_v6@g<8e{CGRkG=}9f4R}a zJo*y4uE?IO4&Nn|yDChEK8zN7&>yy3CuUtN;W(PQj1)FRyvsQ(b2i-D9uzghKtWO$ zsM8_N<)V*2K94#DdD%9zV0blq3298j0tSsEfy~?Ny+nN2FSQDLo@iwR|uLlxEGUznUVD6(*beN^tKfZLh?T zqxjPjU(+^J;O;o<_F~ZF@zCVIN4%_v!vfdOoKXbzM`VJd^pN)8*&T3QM^&LL`>nT4 zK>QWx^cq*F)^u-HHAR5rxYdG;z)kfeeNugH)KJ!MtYkN<>tIA#0~GH8c+E!UmF=E5 za(rI>L875qb7P(HYZ#x70g(ZQE-6yuWGkqC8jkuQ-4irhNQA%3WV|^)G}SyhivbM& z?qGCk>PS};#?0FnvR3{`?tFX-?%djqhA zs0;u5EC{-@Z5MsQ8=H}sQM2o9d}i|YX}ran-BH(6>i0ijKK1)L${KB2Z3oLWxc|im zD{smCyf);=vt2B}+%}~q2up_|SHcB{b#@tuT-;_Eun}Yb=iq;m^XXb7P4(Y!7b1Wc91lMccY_i?(*8Y3%jH@Af}-VPJi z9`J8P6k#9Kk_x*`*wgIf`fknU!9`U4#{ZwoZhCv5?ApEI5o(9pr!SI}>Pl1JhcKgt z33W3H(U{HuLhesk_D=cb1_T$P6P2Bdmn(to!|;6P=>8xA-(NY%;t!VN&lgLF;f70o zi#Wt?;ldP@uN*|^=h@rMK!^^*MD@Xne=o81KMnkhE;k;ObMn^u<3j}bg2*jRUxUSJ zOqo4s7aRu^sI1ku6de2uIC>{Oi1;NIuuK;4``p$FVi*_|AyKsUf@HGj 
z%l0qZI4=2$Uz;4sBG^G}BZ#-IVN%_n3a12)eL~43?Uaid6na6(wtsn`ZHS;NuD854 z%f_`EM_E09ElYrhlGQg`rTQtF5ih0bH#Qlvk28Txdb?5KszuO{9LSJlP}g|HI1{9k z3>Ay6iK+WhA>*asfo)CpVE)U7fL(GC6%&kd2~Ml)A2eh|CJZBnHT#R8J}wzhXAjGV zRPk0~affdK`hA$yBQry`nYQhzhNM~_rZOCKdRUlA!RbcxmDywd7+#y9@S)+;8a(>t zVs7uZnSD zqnYsO5D?EP=GpL;AaTwuxE3EOAb}{2Q4(d5S4%3;50!kdbv0f|xKP%vzZ^W?4%HN_ zDV!fO=ARpn?h~pgK!bRtVxa;oe9q$5{pn&j?)S?OIAYC@n8G2Dhdas_`~*zWWAzV~ zE{5uw5gbSisHu?zi!vNs`olguj<9ThvRd4J+a-HHihRK!mr|&N{QiUm^Ax7lFYQNT zVv8bcyXWzngyFkq;`EP)C23qnN={ooGWiCjxDO*R0oY75fmdxo%>k09YM2Ip-sne2 zbZzMoj4XCIdHQVFIm<5=yFI&ce3kl#w&Ro%88wd{NDap1ChewaRrJAv?H(Mu zqP$m0E@iMMy;2?5GD3{iPUFIgz)=UaxWn)29B!KTv;d-4Qh;m^K;4z0-wIb{2c=; zkHkn_35h}s3_vEl>B*SFtYIrvKq}^`b6vq~`N}jXW;G%ae?LJ&_hCLDSbn*T2*Ut8 zL8lC#o*HI=99ad62N|=3(W83ol-Pp*4{}vg5uxR4m{Qu{`SWNRW(2*@BL&TmT z^(?`k)MUVZg>v4){ad8R5<};5YiE#u!Ah%qY|;eePhA92cc&5~xya zPvz1HD2y?>++|Dc6mKt9)bB~7dBVXPVhGbO^pcWQW}2oTRk~YSP+O+UN8!NQd#(x9 z&#(O;BTYeFfpz2-UoX~@5S9oPrIEa5+FR}~NDAT7NUG}^$SE}bHC>?K@)v%R2;B?6 z>k+ZFt6iDZEWe+*IeE8D?fMvO5eSHnx3#Hpn`S^>kiUPVs~xYyFC?EzM6|gAP3;O4 zp_FiFS5i1bV=2y4+5cLqkMliT;f%BQ31x1l_pp~k?w}%&ukc~7 z|EI-Hvi0(3E5s)Xx;4SwI5WJgKH5zToo+pBuF z8}xZw^k0p+b*TQBy48KZ*u5+1d~a9)EqU4rJAh357pHtap1s~88F!l;LjxY~{OS7_ z<+@bH3VrVqL>7A{y7^J9K|$=N)i~AXR_(uCwE~ksqR-L4-}*DGbO%17h&B7HLxi7} z+`HklYi=3I+ehYf)smih)1Ps6eD&T6@jKMi%V9F>Pxnd>*LYh_7qU4S`QP3&-XD$= zgnsz)k7}4%RXUPAEwJbnHD$ap*G`9BC_U<2yF2Xe{%ygHQF#9o&drHRp6;j+UIcdA zf36Hvs4r4<cxD9pW$ z=Xap<;2E;v`d0mIb-*5W<3K7>vv%vOvS0Z)c03LNpts%5uA)JEPT>^7o)Co5lA? 
z@okr`iKwf&`9W}UVwY@iK)D;yk`Vd%qiq&m=k4%Yc% zjSKTxsqT&Q@i7x9ws-_#tVz}>1}RiB=aR9)VI_JpdADo$i3`O+hK(7Le-JsQWH4RY zcOO4zl1=2H-X!7ka)Y>D^>94@NdU-izTqW(dxplE#|^&|Ve|DirsXNrq*t>D3^qN_ z3A+`hx4N zyL&Jcy27!du=+s2X;LFiBk2eT54)eg0mNwl{3{bSZU77kb^3NH$JjjRs5T+IO6~T$ z5?V|1Z~~$xqyKu3%lQHD7Mh7@F+s-GYj80ebogD4oHw5c336UmAD>W`ZPkXlTTL$J zuz$u)9yr1CWk?=FUdu)6?voL>P>NXggJTE?CPiO}lK@dWDUt)$6J$zlwJ)dW&lfSp z;{vqji9b~pdNLxb5P%P86$e>!*b++T;EV*s5krg$nmEXKV+%cMf+h_yS4fl?hl^c~ zNCzLxTGw_TYU!>Y-pS@9r98K1^Z6}R3!e@Y$PyKsb#r~aKHq_c2m>RLpp%|s zo6F_NA3Uo#EDT6eZ-|T%wR?dCi~DwdVCA_&;T>nMY+1Fk+t^jbcSwU%rN)_nc5-NO zkE(N_-iR9XXUHQ?$oY|MNYm)>28*iJpPl=DYWG9oMauwUFb!1fU@b)P~RZKv}QNFmkHnWkT) z=?OC5_Mi`%)T&YANxM@tO5w>s6^Jq;-zCk`1^@3PWD!a13tP4@2O0IkLeLTHqupC- z#PKocO1vUudek_A*(YQ$>(f+UKRNOaDOAhmd2%?TFjy*U0RdJY8ttUiemBN2>fGEe znM}Cy=Rxc^Yy%@mS~Sa+>OV53w3|JNaYF&y0jP1$X4E}6bh7iHZ}8S)9xUOy;n4wj zBu45}w)0BImxsid;FVC^d;nG!Exps(>1)F~DB)gtuiRAXee;o1a zYM8Qi3&kzv{LvR6SxQPnDSG|USzkO$y12e5s3`Md_~KA+nAt|!h-wWRp>1ocYC~dl z7J3|%1qel1^T6jquXbgWdQ5RfRxE@t$qfi{5typPht!=?pFqKIc~S&#%+|h1S%?j3 zDF)+j+?l?(MSkcGt11QSihC4iK&8wsS!`Lq(B5DFX6yjO0T!Mm-$QN{n& znLegOhYZ2+Pwrz+JvENflD)_iqpF`0mYx)`JzD}3g)Y|9)ACX3v&;x;93GOUk>LJU zXTDHKmPw&7DI1Z<*W8gJ(Zad7X{U(V_1k70S^0QueAxtW_(>Rki`Syx$iDN2Ke z!a+=t_5=|&PvX9lsrKmIOP?x70tw0|ax&eQD;6Uyo~w5VmkpjgTfJS}=Z%BhiqJk9 zP8sqKs1U7Ta)>K<-rA+vQAPC_D=coP`a{m6AFMvPxT2gvZjh#3XxC2PwiV4oT2+3Y zl^^v8j9`7}`bxi@1^z}{6%7L2t(HR-m4b+_>mrPDlaK|*9~aW7K@PKwqvtWIeuC=y z`1}k}1LM};W~sElKOiDlN^?%kpN0&{LHpo&eP%7j6~MO<$GHCjg6S;75Qh<5r_D3L zhiHkrI$x3z)2D0F`&iFC*k&8eECC>tCxqx?l!W2*HMq}KFL!qrpAq5P14dNV2*~Wh zd{lIiqL85yEU@yh4fbw;oF))zZdgcl3;e{M^~MHBm(4OhHebJWYxu3 z1j?xA)~TF1$7Je=z3JMheIW>y@OA+93~l1*nuxnF(znEBja}*~Besveqjm|6x&4og zXrD~9M@e-wF8yp09?Ilcuc1uGU}W|F!i##5(3GOSlj5k@4ro8qB<4<^a&6_a)61Rd5$l^~G|@@;&| z6FQz1{_DuOI7)qkYqxhvF*E6S7TpqqMt&NE2~^kqC>@mOE%-+cwm>)Z4aOCWsPCZ# zS$<6;0r>fYBg7J;JW%Z21&i`-xA9Mj{d7KSt@r)e@~#)DD-1PzhD>3E1PX*)@dG`n z+>jQ6hBJqmfwc+6k%P%a%ph6l{I6Nb|b++A61)>%R^>XdpVtbOa^?tZDe+c~@L)in}Tzr^|Uk3es 
zajMS9P+Q6c9Ze+;5?N}T!O-Fs1nobe?XB3)XLoMakE1v+8M&kgEQ`KFx37$SAoF{4 zw;mO(Z?(E+^Jvzvyn#NG6wt_;s+m0=+E7|e*JDEGd$|41pU_(08jB*IIwqcCafr|T zUOpA7QiCl_mr#7^xgeo+^%9lN@`_np7`8jxt=p5X3${v_qgI^Ra#0G=awiWOM_2cd z+Oyow-yRkxKjZoo;BP-?bi>MD4O>^wv93=0{=F!F-(L3o#sm5dbOZPwRFCFjl1=(C zJ!{CnrZZ{$_*!Ep=OJ^-DsDW+N<3h{k!h&5Rw}4DC}_wXJgO>m-pp9Wf@aiy(HwO& zGCGv!y+*ta`_o6ODX7q1lhtH8=Q%AZCA%_Tl!{FUzrHB*)imo6P33w{(22!l@R0jF zy$b&PBt2)Tg)$1KE*KGQ(c$(gsQC&6?L&7_rnI8WBf$R@%l2PO-fA^1*=e!}T zcRpO_J9@|OUhngPBRQ1ThJDot^JHA}hNHYfprTwbA`7`dD&>BY!CbfkFkK8~??~#5 zM}#S^`X)R~@vDa)5T@S@S7tzSR={ZG1Xe1fxPAr7j}Jt*p~5Y)2Wxn(Fe zbKY`jiCe8Y)=TnrP?(Jo^F&wXY|pMj>^XVj^gyxM$FgtTsUXC z;%l4QKM~1Z1XmAtD$umyVfSTFAz;BRPcFjTlOIH>-S~(WKA@n{7m*05!il>iY&0F6 z*iei7^|r@AEt+*-+*{A48MoP0-Z=JYnK+MnK2U|>L}vfb-b~Pa_<}uu#(}F-nYf;3 zJuiOTg37|(i11|!)rfjNaEEp%*x&^L%}NX#PoCY+t(B5i z=_REoc-=5<%&bNjdT?3XC`@UtQI2n7B{Jj&D%T%Av1vaI@!gWHY}5%Rv19Q!=;*hg z%D>*!nWj9zm9GU%8ZZ(?SOZjl+9!9(Ac3ydPDLEt_uW1cDv+Z2RV?V8ko|etBUzLj zWT_lbCNMH!k5e~P-1PjJgEtR3B9gBS0_F57?lzr4_7;iyn?(&px6M_#1NI9;hzHXp zjLye`ESfIMrZ*{xYbtSny^n)+n1Mn>>k`<+QTe!=kdMF)FDr!t+%d5z$0v>=K2RK6 z_3-4vaDeqj=3B|1RTa%b!9_&RHqNfC9L;TzsIJS($8t^~I&!)+bBxrJ?$1Ti(30M(yiMG5GU+1e%gz$A9;B6eCdPw-XJ`H{@5I2?`AFUIOwi?e3w>Qy z3Iik?d&?q!+)dJ^dHH@(3RD|ool36GlSODv3b$Lq1QDrtebPI*u~a&L~k)=1dgRZBDt%&~m2xDwd_oO6?YQI>Ic$ z$4YNEV?=rqAp=<8dqsy?RqXvt^m1WuOhhKQ+#k)uG)l&2u2x$J!8oIyrB*dJj)bC% z0y}bJ3i<=`?BcXZJC8wxfRS{P4^#w9F%^A0S1y2R zzJK-5_~Gp?ufx3oF_eT-$PKk`k);|XnCV4{K5M4LkTyjexuL8c7QQs83#g6^8!t2N z^BjgXE+~Exsd8vU0ciyCI@CA`Oy37`Ly|>xu8gY1_~@#6b~9+geVrxW0UD3x2vD^E zw*kc|zN%l=-s43lhV*7jJtekDGSYoa{e0VWR;jwwUW7>Yf>1Qi{fdr4+_$>jYE(peuD z_w_k-`kA2AEFKolWhY%=_+#``^MhJcPuiE-am{qLwrQE7)>g7p6YHVBh@ll6GZcOr zrXq^st*}XVp4nmzM5&8~-*)3eAylJq{m3PGww_VR~c51fEskTz1FpaUZ;)>DwWY7!bvx=#_rO{TH7=pR2ksULt zyjwl>{kZbr`&o)hWxI8bqJ zkt=Mh&fht8K2`V9OQ(ORD&^AP_B!O*oRe9^m4$i|0fNp-`;BZ^KFT-w z^;Sp}TFid!HN-&8-MB1(@rQxj$#4L*lBy%r1t_@kVkJl6I}&l^ylfQaH)7qvP34nJp-AKR=xGB&6v z_us|-KX_zN#IMW;+%>NfQ^AQ_HM>T8vyL>ZjAhFn_oCUVU_;amtwTs}c&|q|jFn5% 
zM5mL|=$cXmQ8FM5{a_wUm0d*JokDsx1Z}9$?wfn0z&mi_B$ygz(uQ2xlJ?C`Fr~%@ zx7JE=bs6L+`K(5jnQc7a58Dzq7L1M)nb7Fg6=1RYrs-K8b7tYHkgg2=6z@Q-^v^XV z!PX_F#UhwS4N1UEbYah%i4cfoNS*)+zzr$g*W#&zq@mnm#3x%h@p==;P04K6%FP=0 zZeklI-1+>=9dg?*e>G~eP4=gOqp9#9=*cZ79@CA&qxzsEi3zK6Z%4D~s*_48=nAH0 zsq4V5QAH}km0Lr#xp947Vy&2?F;tPlxBd^#;k6%^)6gsZu;rJN_;1}XD&n0W9AYn>5%4(ETYknWyTIzR|6FT^ znwB(ye^B$DCph+1@~NPjY!qu>{8-rNYBL06qrRhfdqJ!yU z0+131@;o~Ozq=C&R33E8Aspuq#o=R@*yL&!Pi-9()q#1HvQl4STVTwb$^H80Qfok! z97)z=9aDmQ(>t-=R}nz!!2Pg4Ect@pB~{XfdojEYsqC?~z0%FqLm9j(*5Kb;1L40@Y-1G4Rm@N3SD$;9C{ZVOBW+Q~EM?)j9I*nr%b5G<-B z!L*WD*|oqo)JPiR-{A74B&)-;6*EvU^1=+UY`JPg+$K1FQ|<5QF`-ZCHm^VXFWO+| z5%K6tAj7PN^kyq}Dm#WEe#)zEDWZ506;CN!oL25!fErXS9oG}@}xM2w=D{={XeXKiyGRp690II*n>-I5@a-@H78&;(SrA^I<{ ziX#@LM5aq*d2>V>%f;$GyDjqzi>ZdZ+1r2w&JYW(a_@xux9d8n&i#8%Gf@ z0(j!7to1C~3;=iidw)YTi1zPVtXyu;j@mFr&?dHX2-1EfR6aLta0W0-RBW**l$UN& zY*lQL&${L>lU&?PQ+uoYpD0jkNVz2WVpiZi-skos*-g<*y0FgITz04^Ln^BPeXCF) zx11VjN!C0mJEb7{Ag?>DXmuzc(E-g|a#C5z+rkzqbp4Sof#(;)mCElb2;Lx8*G~sm zN=JlC3WQ)b-YI#>~Nbrp2q-*+gu>i0z= z_r)0UBY9U2e-sT6{C9S-dg~fn>&NAMn~-y zPP*?Q<-Z$j0}m58)hBY)UyxUjB8IhWBHia+`i}=X`&e+Af9TCMi?sv+f%tA}0WaS; z3BX$=q2(Evq3Bu*fM(G0MAew)k6mqxkUi>uZD-C4jV7iv>uGN+=B=V`{t>w z3tvPLov`53vF$6Lo0K+UC#(wFpd`qT7*a-{#Q$*pmilDFPQ5_FZ1E5bWpoEUH9{WG z5UUYIOkARl6|3mI$+kp8iUKNa;(uR!nSK3Ajj>GRAT6I5aEfdD4T^2Q?jf=*K@+{-eoonOFW-WOPt2Nz$@ln@@2eEBg!yD^-?0SSqvMvc<`duL!7E<@)_JOgrP4Wt z5n}NxLogx^J~lOCjEr_FMTos6;)+Q2%W^{j9x_~8n&AuZJ_F1muakyqLR@X4mS zbL99+w4;q|Ajp$~%9beEOU^~}{>~P?WoPXE?Zn0*I#E2CabTRCQEt}q2057fYOuEL zyZ_-k$2NNzv6IB#uEdL;Jn5K=BAy(UUUconFI!!up8c^u`4hn`WzLIsonMXy?kaG_ z&g)-|R}r0d^P1%gfr0ovXnEey~AvqsPxxnHENJDWq*}NTxZ)Oa_K{Q0b)& zZDonT^_Ro|KJI_dsKLm)=6%WMCT4WR&1Bd1bpUNuCg=^0EO?XRb8GwJQeg_c7Ro8h zI6q*Oqn$5c5!kQ`S*r@pY+KDH|(@Ie0 zyM-V@c@o+&Gaeic-dI%;;$YcOmQf>Xx}g(N8A21Igq-z%*fO^$SYO+KNal3oSH(jX z-&r$!U&2_mZu6lerSeOTtCcqk5epMplX{57loDG%=x#Z$$#B>eTcdE@tim*)5q}rE z$0q3V#0Kl=^2nENn_O&aShXME|%)(mbo 
zH+h_l{ZgNEALaVp0Gl${27Ch_Sl%8+(R8}{^}nAEXi&;FieIK1M!mEVFdk5o=h3eFL)b}T(WK~{6gFkmNg6-Q_C2^)lKLxmw zVr5@IS<%5vd8eoAp8WZ622=)@dTAFk9K8?*F2V^Yg1=|IyLq|S!));-e~an-!hN0N zLKPUa+osacttI`P1dVf#!2ZjQNUCNb9TkjP7nSHuf%}6$y*sBkG}~23%-uM@Fw|bU zwKcf%g091?8Ycp979&X^ypK!-;+mJb3PVK#_2M)FLfp{S=+}3f0)P(g(IhP}`paRw z-cj*ylJ0jpzY7BUNIp}BF_X$KV`M|}BfgAhPx-{xP=pzetV0e^MQI@D!x=gS ziPnnJ%NB@+N=mx97dp|ko^YGK<7l8S>yO|xsCoVxgT1%L!bY-;4FiT0GOt zM4D&y`EbxJ`CZblqhdhR_isk-t*wwwxOx^`f7xqg>JJe2=-If(c29dGUJU92fuXo6 z^49wtVarqmag9FcJ4tUGKZJ0jN&XUmDF2JF3F*1|Xc-cQkMDl{%&qDC@b#T{{FF|- zUrTj$zCPU>4ZXE@hr!eDqql&b93dQ{@Sn#?k61p+1TvxTo2&cZB|prApgJA9)*o!e zD+wTDqU`5FskdGMj_=p18Ht>hQLbq2T5VPehXx<0ZDbw9WjCI}WDr=ZS3Mw64M%Mq zX7F?+wG+#j-Cp!8vgOWr>7|4vBF19pah*&DRTIBZo_g~gk8L~7ANZ})L8V1pb$7r8 z8ZIWwzs8eZed-I_DExXuxfXpDRpMWAaqB8lDfwnUoo3}A68KA5T;08SS>3!X$Z(In zA~y1z?|h8L=22B8dCuA+zIuW9a?KYT`=b)r(R<$x=Va_XqF8i!JQA2GMi;F3^Isx< z5=izsTWN))O}M`Fh~ya0Dv>zlxM5i^GfX!<`+H;9(nl=W%GWAov8v zCE6$!SWP7|O`=^1KDRz}A`L!b`>_!%!!(vbz_Uj(;F^?0g;VxK454 zkc3y`kIy_F6Z}wJ=&`zl8$@%BkTdAC@{VxEIy%B*srMw79p*LWk(l+3UusE{^H;hO zytu2YZsabie36t(OKm8SIIMw$mx0YeW-RSx>5F@Erc(S6xE)zN(58o&gT|0(h!}lt zN#qnQjj^!`x<3HU!s{T^;$rLE!|u(g9e0%|t#P|We((|QQ{g0^-1zJs4a>QkFXSmw zvK+NM!^rgzk!YigC%TLRPB=mV&gee4HOnUfgp)JRT-E z$ck7A(y1+VXnsCQc%p&mirxz|M4nor}um4&Rm84&x0 z(9ic>z6CipYycvE>Gt0Y^pPXquvG6Pb#=fr`DlcYwjfEWh#+G#W}4%)96M$Ps=_Yv zjGUtyKWDi>IRNf6?WjN^nfF#-I!$GXVV6ABLXBaKGzwIaf9M_?;9yI`J%8-Wk41ls zoSeRGS9tuDIA+=@TXwsksFh3k4joLVMU+e|63e9-OsIZ#S}Wi_Z#+$xggWxaOQ?2^ z6^>J~3U zjxbCFQx@coor?Y^E+&E)0G6e@ZO**}0vh9#tSn7mX2|?9=MhzW0_vl5vxx8`u2iV$ zI4JlKOW3K^uf)oi&=UZ#(JfeAC)Yj0o`lUVhuKC7W@o4sQba*>l?hP}3O6F6RMMfj z@zh7Xw`=*ex6vV%;CD1n8BA@B0;D4>{{E|hNDv>HVOzJ+Lo0KO)WF>1Y$zvK$o;y6 ziMW`!lUYsxC10=%a))90-|3!d=G*RlR(Vg%>m29m-L8=cFkI`28EeH2z5%6fthF!Z z@)Zkm-!@$GVp}u%J9}6q6Defkq}>xT%l5yqJEXjsc-PamED?T31;aGqJRLp<%171)MR6jW-9<&;#=h0x z7ES5)3oHzu-B;<1Rb{3S%9YjdunPy9ozrF!tC{b{URbz*qsu}3toMepRm^U2#LNde7L zTS{Iu{WmPVMC>bRca8&!*3)!R%pg{909yM?{@S-T)H*4{&6c>&j)hfAJvzxq&(16W 
zvCcf+nk`QeSLfW9)%vlVi;eS{-;%59{BPREED*-6`FmF;Zel@bKIHy6NRRYWH&Dzb z38zw2UgX*y9*laXmf%hMHN5X4Pj2oNlFW&(XPp?uaIGRX$p)p6t`^z9(3T9+BruZI zJnPSWBH-}oUE=p)07QO<7R~qL_=^5VlLquJrkWP1B7>TZwuc^x)wj+NtwZ54H`5a=3Pj@c}mWb=4W)E-`yx!W9--*UshO^Y(H z^lIs3`meNPj;B2sD;`(Z(Udo=~G`~S##%cwfSrEN1f8+W%ra0u@1?(XhR zaND@MZQR}6HMl#$-66OK`8emDcV^AZkM8GRuU^&F{oJdo>bjv;lVK$JAD%?2oe6>G zXYxHW=Jc0q5r*I?;j8dbeh`FS@e@<|E}-lKh^onO+DJ5rW?17XNA%blT`i(_1U&lr8;C=@3u5@B5 zk)@a0YYt@&ZDL+kr^UWW{@Q=U0})8C-+LT$kv@r;YOGfN8#${{aBHNFrA%Gd5~uqh z-bqCcRJ<~OwCCX%dyz*}>|OI_9c2aKwqH^hEGB(|HG$3!nuRz_P`?O0V!5jRrGnf% zL_fjR5VXr+N8q>ffh18GhV;WMfZPc}YbO<7qG3g@;YbBS{<`kx0K)mSV;67^k=mpL zDoKqaA8!Gdz?q>lZ)On%&h@)!4Bv}jylXs0tat5XZ!CFfOExPRvrytlup zY-WO_IzxkPsv?!NBa>`LBI1NCwfGiud0~?9W1s0AJo}9~8u`ETo41?-EEv|CoaR#@ z9s{P-!k{D7@kcO*wSEeUznZ|=@3FgmC28r3Es>VdT9%<|PM+(2zYWSj4_iWz>QMVP zmNIqySnW}(5JZ`Ix8tv{wXu3dXJwZYT%9UKs+F@8M-Z&r{VxeGZe>8Ekt+M@#0S(V z>6#LIoW}uLLh=)?&ZI1ivke|~*}dmIBsJ*h>LU{8Xs>p+WQorV(_NvDy*bEV#xsS= zm2^cV^gX`ttu~*wwWcyKe6nhM!{)4~s*)AjBzHqcgImQB>RLK`Q7YH|PDaRgzBy30A8-3(yA zTq+P9aVi$+4hW;Ze#XN%gt@Fgm)i+S-q;zlT^8rrkT-nNcWM>(RpOd}3=(!~MUUT2 zZS;Ml!+rMd(=Gw)hjh};D@Z&;=M9Ja(#o8&OVTf$R--N!Y9&A0RP2!360NS|`5Y;4 zzeclz%PXm&pNVc15EbD7dWadJX$ClEElY+JwYel@qG9IEaj+)uqjk6Ca4S5 z<_8DZWY?8G1A%YOr?j{qJXv@2tz;CPhImKl z$A8*2jm86cZW2z7)l1?)%iC(N0HOw%OHuwl7F5~lpjNv_UKH<pSfXU}HwZWiYaJ_7kD(P>?=;DwZ#7d-=cvcpo_J1BE zVMuy?A|Tw7&KtTt8!yG^J-T{lqU}}d20d)#YOQ&{R`Uf|5Eyu`Fz9m;)()Iu8f3o& zOA2hIH!Q-ruDbs*1d~>h;{5+NRhaU|b$aEfT~BC6 zsi7p-&ozt!3XqE{NijIoW0I2DfPN9cUs^Xs7srp&-O!e>$rINsA8vv*?9`wq^Js#* zak}tig@|58n)h-mzv(L8T!gBdy0h*LZ#OLs#B8Bfn!N2~I-%wj4xofv^A6j)Jby!P z)q;s0waNy>vYpQ;Mp~Eu^h+kEOwg#`!e~P5_w+6LRCN?!BVIeWBmCW z*}Brzvp$9?y&JZ;kY)&PuJN$M8j@GH^glJ79v=ohjpUSR_ zG**qWFk;fpSbKv${zvCB{D)&5k$41+{HHV*C{y;Da+{17of^>6v%lOZxiIklvyUVW z=b^k3iQ3m2aA3;bsKfINXY3?%(j|%Y^1auAG*(~_!MC45{Hw4#0A$Q=*bMK(w7MYXsRpYg!RXu&u%kg||2oD8 z?Nrh&#mBICo_S*Zs<%ihAG3A2`sc<0g#mytaH2@+7}LCiOvPGAXkOejs+uR$YP97m z!(mJUr6;&2pxj|T2hc=()z%h6Y2S0bN8Ldw-RdUY@XL!LoK$D^yYZh-?QStUz}eTi 
z_vop|Rtu8_T8!giTs-6Ap5wW36vKlGj1?sljlXpH z(yml%Pq{44Bd=-JDa7GaNBQKBnQG%BKVx%+z?>6cQG9lhJq>!OyIObJppio9qV&Ix z*(|L;A^1lNEGF%A4z*oKX}_RtRh+B=zI9{`=pJRjKx*ke@nFl?ekru8-7pjQYxtO_ z6Gci#jUxzNT9Fsk8HehEmF{lMb>%doNyfx6^cVWyZ@W~Y*kXst{LU9zGeNe2jsEiU z>dVSNNL!MtiWOH*wM}$_T~l!SNk*GqG0p&HtwiBBo_s9(#6U?LR&N{f{|fOI75pfw zC>XrrJl*Pn-DJ$g9!88qu7Ad%p(n4Vn|p`Qp%wqoF|FZBkOT;XgL^9H{rO+Ol@+kK zrPu3*>t7D@ru=S8<2m9hP$$+FLIfO>f`NfLfYCh^n2BHX7i1d0`o7Ig0d}ULi%%>W z`R-De#;V2{0}6Vy+3tA8HRxGM|G>ro`A904V zXT#`+R;T#U8*h~N$OADU2{HOWrDM|aV)!W7we**8!qz6)Xr;68hTWvZwb)-e42SYz7*8*W||x$EWTX86v1Ul=3@JXOve<)m})sblzo27w1C6M!)X z&8{aQTPA6p3Xn5zk60aReU`|mb;?ttv!?$9_*uR%l&vL+YP>+ImNd9+Vf}(+M@oIE zd3*n%z*F}64HLRd8{vh+*C3*YGzAzHp1GL}7H+L)YMC%j_eF=PxKBN5ICbZ=CNx&A zw8r8I#+?*baT)`w`&VUwtbEnVx2_6ivu_PX^~&J}AZKcbLxURrk zzhLqCK+}Wm&qEFg^v$2^5M>p20?#uNydRguA9ujRZQQA$bOki>xX1i$0o~8(vR`pm z6yy)9*|+laSw+IKt2ZN%aNUqO_aFa|&-p0p&*~1Jw zknJ?X{N5ECpFO}1?TEnD1_; ziXN1{dkc}M@EpvleplhKB(|o*FkUjO0>BSK1W0q#2txv~f|({+W07=<;8od4P|Puv z5!9edN{$%vFcV{HAaj^YnUx~hz3yY>m|yfy#gb*0Z27-yuN3t7H)*o4EfUz(#`~AY zmlOU_IWS>@vcyc60Z$*lNC7*^l8&Ot?yJzX@)m^=z3?(1*>c;Q>*78zXQ&9*QkE*J5!EXL*RrWFz8&AI%WJMc5Pj`0h-ob5=tdt zHTpq(Y^}Z8&CyJFwhcBzrK5h@Hzvv4LD6Sk8By+4t(>(>AJ@Wd;aEVjjZ#p3TdDDtuC}nhU)lhaFD7$uo0e}%s5Qim6;hprTiIA)U%7f7 zF^BmXMVYvnkPmq9oPN3?iWSl%LyV0`nRUF{&IWf>| z9zE^XJH8@TZO*fA#s`)Q3SX(iayreg8TfsNKF^NldD!RQWkE@jCa*g2IrMV|vqVRA zgj1U1!vm@&R4A+seiK`^!?~7rqZ{;ZE|G&-Oz=uj+)R~CGCQwWV-+5bJ)ABSl2$C zT!S^Kq$xcGig${*NpV>YCaoEbOzf&|+(++|P-R?-ClwZsU-QtB;dU6YZW!&Ql0znG zI-3vKuw{1LO3aShd`zrt5RdU=4p~UB_&j$=ixq@9otn8si{Pfb?QVfOk6=l`t+_zk zN3c}5BG~k8bq=K39ZvoM(O%qVF*1#5yK#&P-d|pluyg_sAU%aP+lZu66277X7+t-s z>{^`^rdd~EGu|!9jN;l76+)bgp_V=_l;qn?k`nYE-6xvU^235n;YZa5X#{B0R`I@6@duj zK@_Vw(<}xxUVaCzJN}`8T6RzLvx#syDq&+*j&+wm2odPdFS;_=wDlA9_auO3Cr3cR zw<>Q?+G^XWEoclLg97n21=#aOSBt$3@xStX2}uFON3J}YhU1~L?7BbCP8F)lLs)g<2PU&1Qv$Wy7X5ClUt}<%L`r~s#LBYhC3xe zlHi|(W8~sjNt!dI^R?BhtyBL_$1$fS+#q1g)}c?Ui2t2lat5m&Ta{phX5$DJJA-si 
z*qlb+pUb7y32}d7+BeVgi_eHx@d)^0(v(~VN3O;G&H!cy|Dj&!>i|V+s8v37cCK6b zb!7lGZFzV{0}7!+)t3Y(LL3yQD60R$Eb6}y{ly4$gG0yjTb@Ij@oe!dz11;L%sh)w z$3r9(hPnK~--jOK*yQKZcNeAO>!G&U27*Cf2;U2fWIm+!Py}^;-fZ?G1-dabaZ$azvluHJJejwY`XwHjhxPhe*q=*&-T zY^U?*5_D@1Wuho^v5NW{KD?DCQTWB0#`Yy0Zazs>am^?!ML`{*h+1=XpnG`5N z`SQRk;+{J0x`LW5?Wx3s7LMkAh|M~eJSbD~{j<;f1;z_?k#LDjT*kN-`-sheU21~C1fs#e zX&3Lz*574OeDdR00GTeDSs6FL2g3JSENSvd1ay1`D+0W1<(5)Y?g)KyW6JO?)&-TaQIXeMZK=*phgsOl=XQ( zXdj$fBcUFY(wy_V*~t9-{Jwcv(GHaff|lH>u#EC>`10#B!GM&Y9F|}B^6P_VGGj9E zB@z%?)eGeAEFAEdOh{G!3g^s=Ngf%nFGEGMJvRDH?q94}AmkZljF@ek!gt2okv_W8 zXxad-*{D3c?5i(g5#6U?*cKKeh3esN^r{$RFKeLSWX}d zGCnykG07()?tg~E*%@4-K2fy3EDQr42*Ll0D_LaKYXWndIrPB|weHnzKtvn*mS-Z|6MGSD9LSQ}mHce9Lss{)d)8%?J zMPZh?sRSKjErcCJ^FxmIake8|_=3qZ&O16ama43PC#0rtqn7o`qcF>{sYfsP{ zK%gp9>3kt)oLCNoGWv32#2YidoG~h1tohQGi7H5d{8c6TXO%FbE_8}3(Somc2waXt ztUP{kL6a*Pz{yJi>poOU>I5Dl3W*5TUOpg3mPlbF^)p&iaGax}pfvD1dS(S_x$*I9Zam5 z=`6*@Kh`P_2t|_kKTJjgXlk1uC*{>#SR)}!Vy=NTjXdLGS*XP5fI0JcTzN<k9JYU_U48?5M|hUOX@)jGHufx71FC4`r#Z;dd(*A`VJ(qsb{Y6w zIOUsp?3b}B{w`@WpwEuZ`&1~KeYjUww7>yC8K6RMEy*YkA1{BmTE&&!pJ>ebIPRLo zTO&Yj3Ll7woE5qo{#n0w{0vA~xsPf;c<->7YJvuQ^A`Kn#MQBPL{~*8=72-zvuGs5 zCO(WL=`{dVBRveO1Af!hQGU7rexY4aaz&ZQuodR%>7XP=26|}8vTIUVq5b`#ye1y9 z-EsPV2vixCR-JPsV@n-qLm9h0hwgY3C_u14dsNlmmyWDnv*9@#-)Z2~#%l-v$n;CE zA>n}2VS>LgFrw9(_2a!G0EpC5VQO456)3jRtGHbJ$;E7@x&KYvdiom>Esi!^+q8-) zGAq!aCa@V6W27y?7E~Lzs@mOn)Y4+f35}>zzRy_3#O5tlyWoy)=RKUirLw(36va$uQEb6Fnxr!D5bv(phgO*0VPYJFf{Wx0kcNjw zV;9J2?ul6u!#hRZbp;aN*BMWP${8yHgOf?beHIeuJ^(pdc zyt9&R8RWb28D@96@UdiPv5+2;yf;5CRG3&J)MIsB_oKLB`)?xsfAh$9n+y`g>gys+ z){po++kO9&KRfkzAr$dyLu2ikOhA^UZ>Yf8jE|l>r?aPQhDb0}f3lOYoBZcz5aBO5lOykr5><%7pC)-E)Q!9ii!R^H*f$s?@P*F^|FSo@PLc z6I6CZT#l|vDM?kiTOu%r|S4751TdKc-fM@kz1v@3JZ=l zqcdvWN|(``oH(3zMO5U^pBUOxhP}XQABssLjdoKviE7F0@Czq&sf4A+poo9pP;mm0 zJmz85eB&7mHG1%deme2=a9Chx-~HJGn_reDpVG}ZwwJX}obtbmvzr?Ncwvd~=v3~f zVvufT`B8mpFYkw!3_phIiEl^CPY<;HHhL7~m`rPc!|q2SV>^QhugSTOBE1FP`4?^` 
zp=Io)eaFM4P~3~8j?{%v3sXbJ%+O4^ylsE{{eMrKUxF7F&NUg4q7~ALzl@VNQerGf zV%VBc0#w!U8acW)QOY0!)y9eszdqLpaC`*{H&Qk$^gjXexw2a(j${luKkIkLt)iVj_h zD*}1gaLfB%RTpIOqX^@^>Bnd&b=%O+e*_%&lqbY@eIMpk(EM9=aj~&g6 ztKoqRWHn}%8$^(rtOSz}adc2ds!N*)N_l-50JaQNSXGgl1>NZ|6CxeLkWxc=pU9G+ zhME!{1`k@foUq!3mg*lAe3H>WhMKt{*qUyUSy3%S;%MeSBThhS?(%&ps7lev=x+_< zMYH&aQEK|&)$5K(q?`K=Zl*OF`p)^G1gl2Ckyw89j~8h1Je7k0Bv4_y*q$y-c7A{_ zdYehFUs-uIlqL5oqF%Fet{SNhvN+TNrjV0CTAjNZb3?mWv!drGl37PyxwIUzbln2Y z0R0cIeL=hJFB$|k;_VR@1>zIDTfMl%eqUP%1kBiOrK140AdE=gmhkMsKO~7lGCTnA zG-tQq4HqkVbfIs>^jSd6Ccp$E9Wmkn4gy@GV~VozDm|o$fV)q0*=ygbI^`__#k>p| zO{>y;@7JBbpYJfSVT3YeY0t^}e^7?nE~S2i(ehKN=rj%1Ui0DwNr*0y1$Dr3`;{&e z^KNhgRxWIEB_mM zyeRZ3qQ(%u_c-doFRU>W8bJzM?tLphj>C{wK_=?z*PKo@`NZOgr`}CuLrFNSb#6NW7^;W zYF-A9tVF}RvhWn)^6k;Ki-u0XnEUMjQt7vF5GShyv|Ncf8Y70uj_%7T#K)U8uu3dN zAB*hoi!4KL7JLQz2vhP1QxGvl(DgQ_JkMAH&x0m4?q%gj+K%Gb*W|utyW8_mr~HSP zOzpb+m?rPK2c30f3x;}6K@P#t?|kAwBrrO8j^BOTDfr7i6^GhF&F1vP%Q)w}vPH!y z2UtZf0msqhkhzmuj3KVc(6OpFH((XE27(B6kr5`|xEZSrw&QC(g67Se*%oup%TUZd z?+&ncH>y7)H>;x0@dp18T=b)qHybK%OjHl4c|kUTu-hE|g>Wm{bcRqW&0E?53&Km$ zb?`;xd=+fv=CQ|JVaz*GU50T4ii5pGXgT|{v~%Zc8REZw!G~xo5ZYS*T%~j^B!KH4 zBGM5oP?ep8ic52}3!r<&I#n9>aV!iZ3fd^ILV!gs`^L^4yh}^r8-Sp0-5wwZQcNu* z*l5>emDVa-yS8fONwl_r|A6XN@&C!?z?zYf;fj9@WN0f<4NWVRn+y(dbBaKVtd#I8 zM2P_shqBjcd?>A$_9bA3a=rfiY-J3^z1I`Y_SL7#fmc&1H4<|2AI~QwKTo2AYMjdl z=^$hM06r6J+ij*HJmDFh`en(l?l<4Oxc9UFyNnuv$WT8f>=@hN?2>5BY_n}?&Tl22 zMTapd$Si!Nyf4!F4UvdgIu)@ z5q1Yi<>IhAAo@FH^XOjL(igk2B5lqJLUso(0rRw?H3jso!;szN2EBrc_f197uVJ2} z0xNUy{rUVGtNjt>qLCPSgWin%?r*-C%+`s8PQz%#5%|G~4JsSruvUFcDFXSODG<%r zY@4&G%t#+z*mC~L(k)JseoRaq36`oEa7Z5#X+BVXx~}Y%Q%o77<(R3M?2gT!u8G$x z14*i66V*QVun+q?CYvOLau=-+7Dp2ZSIMrb=vC*WnP9thv*yjlisHoKqT{eakMBeR z6O(K5W)%k7@MAl)$5aWhhw&5m%JDHt`z50b(I`=8zVl=u?443i728S6fHP5AUW$c4 z(|%GvZ`TETQs9xZuO#4EMX==bF=SBd0l|`d;aFVTtpkj9v~XVx7VTEJv1z4OiJGM4 zN&2enYMfa* ztL8u99(8fye-=?o$IR`@$iOd6u~p!$q;WcDME$wKSzS=bw^s$*KZf+K0~XDSlo58# 
z?)rZ0tN5;fGwt+Yr&VHiX$9NlpoE@-Q!`=`hIQ8e`Sa`!miTSi+-d&tan~v)pl*IO|3Q*x7W1Dkhy+h#k0i0h zvM8UvfchQ|7iBcR7WOJW9+1t9hYd#NcPYepo+JZ3Hjeut#7tikhEp2_9q}OTEjI6f z3S-}{;6}K;T_Ix8@A)bvp|rVG59#WGpI-GqirqnB`j*-BVDuOT2HO;EPQQVca9hj8 zXpI8%#tlR-leg>07>dRogBE-7Ft&32ZH%27C?#%kbMQr^X2<=lYR2d`B3Jsj3n8Y(;7W?~Dhoyc;Q}U}Urv6CrAg}!SlW^Kp z15|!^D;lN{spD0IJ*;pcj3NJLDC8*1iVQHmm>iP*sLF{O$N|ItzssD(n;ipypwcQ8 zsZi`il>yay zsdfKm5V&dJkpWF{|vU}5pPK)i|C$g_rkiVMJo05$X>x}tw9*M9WCri z&EPGmlG;4~t}<%$)nSwh$W;fXu1Ac4-}S@L)H|pm4~D59@#|tEw7SEzxsq{EhT=)L zg639Ql-YvyB)}MADMGokK%`5WWrd9N?yDTyzq2lVfkn1yNe=2sU)`)(BZO&ZN`&kJ z{oD&<`@wj1ldr!fMl)*ye^2}}yJtAZkAl9G@6ErF{194I;#en(X>Ty!EUSa(qaJ^` zn$*6o`4k3ykXAh8FmW{{A~UeBn#9Zr9zAaZmwoWN|s{6ecEVL;6HSDSXRY%}iJwS07CILkIVNUhvll@}dSp6D^u| zIveSrO1t71W+D=&L*$7!UY)-50ft#8CpxwFZOStD=G__SRqDgO&E6{- z`|;e0Kj$grm!E_-{?%C#Ve$}ms1zxvzkH^1(fix+)&8g{6g19c99P^Dr!xq?Fo9Bbg$?KX=k_k9b^;IMXA{^x!u#R^yRg5~5R7~Y+i~@MwbCQhgP!qR zfWAH`?)vBVW`i#z1|;j~W%+-E7Yu~#rmugTr7)@;A>Ax24-5a*BIEG0v!tV+w5K?U zX7ZC3>Ml${>c7OJ-nZAPFC%nyhv0soUs=?}<|@ZOR4glhe3v0z0b-)x&5}{*+;53` zxgayCmEDua_Lti-fBPTV;gpcX9s zn|6;0EeTaIn5hLd2liUjTdi%P>^7*OQ_!h8*^}o`5SkYg=^k7Vl<{;!;aHrx4ldEn ziM;%IIsZ%a`o6xoc-6YVOVo&@Sd;#3%=xj8?`%?mcn@AN2>kj#DT#qx7QLnT;_!Bu^K#SObv#pm zX?8dbqxSP}G&Tf35%vZ)0mX>+pOuY7`MkgJZ+Irf0ALdU3eaFQT1$ubA z(^q-=t1#BFC>=aUv$LD;@Gu>1#GPa>m<1BAF~!}eZ9xs6;s*HY$TaUo0&RH%GWXe& zVfu9ma$KgMdf@+Vx;&V8l`<{LEQp6mVLWig|4Sn1SN(pjPrU$v-+&#dz{Sp=KkssZ zM2X_V=^F3G-^b--srv2MJo;;w1qxH8AB}4|dK&SJ{~0_nWcMx}`1NC7^op2zk@E<8 z7{1rYR6!u5L&<7n2JmR;Nhv9{tRNEhBz3IvJ_ybfUSL?Ge{GIh!?w3A2q!Bhv8I0y zt5903xBkLD|0rGf=lv8b%$1FIBc=90uL{J7xXq#sX3k5^gRDq!=|0e$4-DNXz)zWS z`k9-Nm!P}3X5JhS@EfaVX-zzB{RU;3oGOy9+6Cz+%63#kU%c&Zs7W?_1Hd5I2#=86 zapoZ8j4#ooAhh(Q@2z1hAs1DFvV}b~1Lj^M2aJI=6|3cO`1k*)`*TCQ6-w{hKLSf@ zNYS?Fe*FM;HXzYrj5CmGQTI@$bKu_EPU<4fc1~pYR;5V7{&`0VYL(nvMGF^&h{Zof zTysbi2V33R6+b^#jPNbGkllDKoF@WhgZ&|XkO|@B>w(jIMv`uW$ zK-+3wYZJ5CcDH8&{jrn!rcA>CY?j~Oc~i%0{2(KIEKDFTbKlzfq>9c| 
z0vlzyJ6YUqDh8)K%x|RlpaD`JYH>$gapj03`^dfd~?;kT*G{OSv)zq#hCFFkysj!gifx|AGd>}M+;6t zRCJ3-;`V42;wDo;se4}tw-SQ)15^r_zGO7(5R>uLOO{-80!yf&q=TW)73p&T#@Xe5yo1dergp0d+smc|@+gfg#ZC`Sa>CzJ#vMo%DL< zhIn38=AT=CK zd^}Tg*X!ij860ON+9RI3O*!_??x%a~kSmtB{jvG^dZ)1H7d*<+(3xbyh@1FvFW^9F zHFz-cqw{FQ;1l>7mNVA3d(IU;Z1Fkvi4w9ESMW965$o;8Fm=ArKM@BI|4tdQp6d8< z!|-|6`|%x(8-+S0F6zcEF|?j|$Z|B7;H@oX=E!1*Pkm791x9Y4~6M4-Lg4 z!S3Hp9>8aupL2Q$WE%)a5%E8a&0vvPjrXYS`3cogY*8IBAq;<3k8EvFh*3BcS zjpYRlxRsU_{FpgmM*_0BN_55+{D1f-&7=RO ztbm^&Xd^G@nUMw0b4CAvm{3=42>V&70!k<2kB5nnq%vYgT=WZOM!SUp&w_my7DnsG z-9wgbN!OsUTpZ{zA6P;)$=~NQ2B`}AHNmN!Gd3u3YBN$6o?fver-v}vH%*6a=gfpA zK%!~S1z6}NXD$T2EsTPPR}O5@$!c2aRvO~oC~_jx-h3baXm0Plc~WdnPTax*pu*_K z%mXK$j}??Ngb;RA)O4W%4KiKj+-Y2?Ist}4H%VJ|7Y3UAN0-Vo+xA| zXYUyGeE|ekRF%Sr;2K7$(5-RH{0q0uZS_7^uC7?U$9b{zXm zk^%wg^C72Cp4GXxc?vs zQnRX@ULkHe&l!Rg5dnfU$V8!wlSDE+v!h?e0#M&E!J8w0BjQb!#_~HW4(5ycr;={@ z$HQq$vW$Z|5CNUw`^T;BNB|u$Fip?`GJqP03_E-t>|l9*{ik*vf67gDZ}Q;V8bO+4lV2wTX`1kHo*z+R|V~p+RWDN>FF$+=rt5O2vC%P zYlMc@m?*5$`sQk5jewMD#P8LQ3`J0Z;z?<)R>Bvi{~MAemwEfZR(rYCoe{vA-DY2Z zoI=L>t**=_-^N3ZV7TD%^R>Iq#ORt1I&c}(?9E4Kd+)TZ|;=oDV$K5N1sp?iU1;s z_=G6@Z6Gc$t6$azwwz3R4Daji_71-970(Wb5?Ff%STm5hN@@=?mWKx^1ON2Ac&x

    u*`+9lp!!XBppqsyjZ>lB zGINDspkoB-h#^vYJ92!ua8L8m5us(DVK_}swR}^3sj2g3k%Gd6iVv%fgOw#EMN}JC zZ?lX4B({vl@`xlWNN?wPodGeT0YqsHym3pff+!j?rK|e(Ev(dU<{xebbtgHJctVezz5!Y*Xn@*Ualh@m6i_o3AQt?eHAoB_kO-UGyZq!(UWW!;b)HW=3Ejx`^~&g{VVEUN4NZzD!(H8FC3p& zSO4H~UY7vx2!FlYBXA!(?#)OUlm$&Asa6MKDx_q&LD<|#;~a_;!f9u4554)@QafnH%RXf0TB3(|G1hZ#s$djI+l|XU8e! zn#%N9&6E^d;UM=o)7#p}PVMEYo=(|QQ$s@7A$J2~0~XT;hHwPBzo&;O&f6eMB zIZ{gMG?gmjBX7*tyf>9v@?fmsIbM3$8IGTP1Q2~EKKQ`LF{wq2<%I7BpQp1k?1MD8 z$DL~g^@$iEH2rDpL1u9-IeX>I+JhpZ#E)~5V45urf#|vA{9Qs^G59T;mFtQfB~)do zMM=YsgIR(gvou)Y6Rl9YYxcRWTt-FQvk%2T2IvO|N)sNOvU@u`s*}&|)R!ABi zm(ovntF2ikHp(C(pWk26y2>f-@YOd;xta{ou%1~JojOnZ0$TOaDdb3wDCD+vgHMIZ z5w#_nXI2j52l-2V>1jqt_I~+dcA~aoy?<5;b8b|1N$tM9ef4DgKyF_C7%0hTqTA8T zn|^3o^+Csw#?z#LvxFwExcQ$aoCx{t8YkX3YVtD%AFn`!xev-{^vZ!WV?swuJvFvN zKN$~0RO^DgDef}Vh{e7R3#1H~)O`b{nxog-Q~GFIhq`>TIv~w$Oo+Bcnf+YX+(apT z!fO&cZ>ynmAiZx_KnSIzQ_gzE*{kLak{2IbHYIm4VFXf^s&f$!E!e!n8$$ z5`jA2d1ue`<&%b>LoGGf$X8cgD;7dQdIiIX)|;Am@d?d^wCJ3bqMxer8eE8$iG%oK zRyqzpyo&zmx&ZyHJjFAhrjrM<#b0By(X{9yx6N{*udSvbcAWAiuhB7~$1@EmvfOw4 z2al%VsvQ0xZ?g!jUMY$jo8h#zi|d$yN+CuTrjfezS?esi>e&tXXC&+U8BuO`f*ktE zvPT~-nDXM_d7%-vlmf~a>ooQuY7#Qs0?uLX;u;VaHGxEiSp<~QwpEUZ5Fz~xj}V$z z3Y5;-x*cdodsxuu(QyTpgDCCuhW*`zjl>}{40A?7Dqy^#2|_Il>}o8^U>7lUxTNq% zX`Q|3kZCe&uqEr5Ug-!M_jP7_0E_GwxTMK=aH1O3-C&wx1kEvo=vm{PHqr}mCS>px zwk~sTDBz%AEOCw@Dj1U%?TH)lX8!(X<;QuIlOzb$@Vy?7|fm_G83;GPY9 z*C8Uq8~SdbL2GSAuzieyI7G$SkcJuwagmqWF$bhPq?mG6l9|5=jGZS$<|=sab()_T z7*m2)u~QtWDoFjAz<2SeGi|1MTF6-REwnHTiEUt5|EIO+>Lu1>N&;D7BeM!4)DixX z#2ZU_`|%xjU}hlSEejnzu2tV{x5|I$6Pa^9Qvxf<8J^R10>@Dl@(%)53FXuq_;_5Ao}h!N zGtT5M%4z^6(>EAV;#Tn1%{RN_IQ!kw+G7LzSaAZfL`Mh3GRz$l6?UI7_dKnn-*IE$ zaoOWP92`qnIF|-%3Z!z>hfgwFQ9}cGCw~A(9-)Yqs;l84h#qAy_9v4=+8;R)l?5|w zkg^8lg1ZZsO$~`S36izJj+*_M0_uo9k!Qt|NXNtlB({WQz}EF!#w5f*dWQ_;q&Ae} zS3-Q1$wjkFR`&sfr6K@3Zd@_T`Y#uneovaY?|G#RS~(T>EFn@EW(Cx(D` zHh0VQzXER}ABPQjIy>uN1^AAUv}3})Nn=>DCQ1wPj5u9b6sAn^!P8-MMxm_5HU9!v zCeayOCUf;@HUm>}4NwW;z09%j_hNLDwVt!4q*irujaCA|9LSLC3Tn!J 
zYk`hsa(Dp-80GQGgdNBXfbc0NQt^OF!!<>jh14bJ#fEwH%m`Ve7#hg^xX4I$Y=T>7 zlmQZA-_&{FWD~I<23E?V0)7;NbM`c6AQ|Pe6H{L$FduiHkZ|iSLsxAyovK$u?4Mz_ zai^ta-UtVTLqD6ZhxOxoG+}Twuh(YK?iO~KN~Ywk>uy<2S|(#2V672s72*S7Zv9?6 zBPaB%h-w#Lcs8U0-m@`xXL6nx^-S;L@rYiSE01>gI;=d}Z8D8bE;+Ae8#$2xQA)kr zzwQ3J0gkxnaBti}VUv=vq^^4lN^M!zF)=9!l_3(+O|Il$$jbKRpI)7iEK#2I?0%g4 zj;rSsQt?vK{UZK9jGc8<9NpIDL$Kh%9g+~-8g~y6+%-ry7Tnz`xLa^{4estP!5R&2 z!Gi^t>G$6I&3xaiHEYekU0rKcovJ!#KhNI#caFqYOMb)vc}Ykn_Vrx~OGA4#?Aj+L zF*Gy;mbyq>6HO6FBuu{y1HMKul^@$OLCYjKu_na>G|IL=hb5?J--TFjm8U~p5~dyV zlG_~K%;E{o-+Bxxy4vk;5Myx-O~f-l>|D+}Mks!93O8C>UF01D?)w70WBDUSo_mn~ zwA{y<`EEV|HziY^v*zT5xAVF1KQP6JRQi#tN%SmG&Eyx+D)cu$ZRDDuj9ZkZ`iI@@ z>=Q+6^|4uKKp6@QcTzY$uwgVL!HM{~B;AjXf?amanV=Gz35QYv#FO3zL+d+K#A#z~ z`zdkft!)ySH7#dTs`eq)pQZ(!A~RwFbgw|)*R_#>bm&2DyXvpQYK$Zb^>Oz`YWB>`pa;fRSYP+be-Z@*}wHHB-8J*So~f8e3H_aF$@Qvv3e5 zQ`+zH&l@4uL?6zRmd|a~l$>q4tSO@W3{&plGbVy8Ill|=(Y1lZ^%z*}nra=#! zlJ%U*1w_0|K4h(yn*OGizg>O6b8zQx<`p^|fmy3z0vDucp(O{k@u_7W*kZ_r%cEeP z-&3Yy$=6fnl55PAQM>yiXlpP_LM}CPIacn6p;?r&DcjekBoJA#^@BfIEr$)A!>amK1>9RdD^n~2 ziqf0&%w@&RxS(IYPN-9k=%C67{)*G#N|hEGO+Ja0tI2}OHQ5z9wyk;ESgaUpyKx(z zh4v|@Z}-i=AVb?qhKHSk8nGp8uIAs>fQ6$Kgare=>f)e$p8EH%QSCed@BpH=4-aBb zx_^rrXk!FUkfu@;c^NLr$t9HjTpbNQs~%eUiZWgTOpT@-O9LOhIbT9OYA^tfuD6vZ zP_4?X_`=@Uawz|dNFiL!(@u|j^;^f^&rMDnagiKE%Bq!#oATq%NR%~5lkYb=kc&&j z4TXEA&}Z@u**%2%krtK|6l)`J@NR4d5eawicT-#2JF_(-37{zIYT|moi=hNtpvi|J z1p8Saa;8f38Yw$5JM4=t+X`!~emgRvNj!Y>I>fwLFFx>VGmgLnhW5nF>%Q(9l=^H! 
zva^(-EfUDNUuRFRdj!%LLeMNG;B*@>)J{)Q(z^S}mAcDFoX{Lh5^t1~L@R00Z3v0Y zVZ2|H3K#>eERU((h*pvy>s=CoZX)$KKfWQeQIN9UhZ@ujSe7&dq~nqbQ%Cu}pPA9` zl(6_hdWe20?i||LZC$cibIlAY<#gTIL$r5gWqs4-CSllwfqr@&%}kC2)G!I5$*j=b zLO@Q_6GGl7R=nYSUxTE1Fcb@V;QM)ISuA!}-y+GAfpYv3rNHra6Gc5Wah>oxj$cJU zVnU*hYLvfLjHc}Qp3q(AnPTla@{denFgH^X5$T=miz2}(-?92 zOqeU(3{Ll&-p?EjyOfbDvK~;I79B*@+fg0z-F35>=j*nMM4_Pg!T$YeiA+5!hysWQY>I~2=XZvoWjhqg(WBFP= z_o~oN)MY%^wTy}M<%I1n*Y&X$J&`Nl zCbyF1MSLD9cPY@N|FaWzmEgn;+NIHK{sQ$GM-`wgqsNg1-LTy3{AgkqU6VE+FYa4Q zM$aK$l5D>=PpQlFh!ZTUu-~2MK{AKnI~@ET$F7w85BJ|s3)f?N4ow$m9Kg4_7Uih1 zrRe#;5HJBREnbJE*7mk}icc%-&abZfX0{!X9$fhXfI{D2af1FXYE>&&B(;*6e#T0! zE9#JXS3~1mMPh=+S=lB0Xp2WV#cDhbhy4G0E*hI~;H!flwf!y5tGINH$Y)}8K`c=6 zFj_cb_)rYOF#pd)WIa23{RB72qh2R&Ykm*g@X|>ADp^wvD3&S*Vls~$kt%$@MCNsk zrFtgy0mtx0wd~q57EI7Xn&gowses+kh4FK5{WMi4dj|UMm z$%H{VA4gU{76FdY6G5s=O11+}rV)s+jqL9PmdX6NLol9xvrc(QR#GyN{1X?9=gY&t zASM~gAXoHzX^W1KsP^USJyB>_-_<$}7!+CC%ex^3K5&Oc z?;Z*%FLRO{-_T<9^hT0f&h|f(K{ezxYQhXej*)ZUYSYiLlzwa%NCN2+9!h2EUp8lu zaS3^RP{zj44Cy*}=@(ZR%lQ=_0-jqEm?Y42!MV3G zbM_uOrE}F*UA31-ayva+w$o1Wy+J{_N%p>w8$peno-%UvTW{yVaph)W?ou@U9(g*w z2l0LOJBmT$(R+*vBIHX@8XM=vHJMOP`GxwvEm6_jO;){w%OU03~;# zSiEl_=GmNiWwH9vmYoGk^!~d84q0}d8OgP-Mf!Vg8mUHjD$cij+YVRn%Uy%0deid= zj!UVtXCcEqblit!h53n_&5#s?bl8&HCvXtD4EgeWug2WGjJXsZ0I2Y`lKep2s6VVM zK5bu^XtPCMV@S!(+4p-mv9d$VN~z^o331Ci^qg}U_+eQrcQ-di>v zG%~a|bf};iDJi4XhHt!E6&)`;JbDKoi4sVxX=>wlzI;F`DJJLFBEXv!vZV3MPBOY2 zRz@PGv`$Atr>xTr0gyVCQJWB2&5&u z7ZaV;PK^0gTp39i`?`f3g)P=Z{nb#D z=InbWybP*c7a*fp8vZ`D0gjQ!rj_I+h?Q!P(H;ZM*p{3X4jbcaq7f^A=d7G^8L;gt zTkVy08Hr^OYienqPMM}-9A>WPYnSH1+)5r!Vr1db zxD<9N3`efQkVt{6#4m0c!qtri%-Px{g^vmG=5gd49tj5v(6qKm)kLt1cId*)N>7hL zgY2~IU$lnrMj}F&#jDa1G}juY9jV$SZ63B)_ZGjUv&SdcsVK<2e#tcywgAi$CKK=E zBn~ulNa4x88XoIsKYNRdZ19tZ^d3=vx>Dly7x#2?|B_tmo^gMZVy%J7o#kn+xY- zePXgKy(G2dK;CT<)OkPJ1|*1r*Y<{!atvyPa_UEyJ}g){i!AL~PpSAxa5hE=rY%9R zcE>5dh*Eu%uZ2JQ^Ql*qsC;;DZ-3w&apw2;P4H68_;7P@WBX-O>%L9+}+GDbOg3tgZRuCE8~h!c5> 
z<77qzD(k!bfw52Y8r8g&(?ALVQ+os}K}!vuk33daaEu6cq!y_$srqc<{oC76mfu66 z`x{WXUJNHndD2c7;SX|ncqrxJtOE>~*UHgNxAV_^8oRG#TSpSHrhd)9b!_&7HrSF1 z`^}7XNg-qu7({R%lYr`Pa_WBDicEcuYmNmElPl(V#s1*mj=I0C6 z?KQT@rY~Wqu0ztEqm<5>94210(*f0P7k9Naui);YBK&z^ov()30Tw@M%~W^7@oWU&C*~F z-m-4&EHkHgn{)*)Zf4W5er~G_|M)bNIQG)tgT~u?@3AB$G7ogT{=bdq4Z82YkQK|l zK{Qg}$k|g?qFEc}iHqMwaKt8Jk)QD?GvK8mrLSEVcOHOueds~nzJJDCwqE_P?l)e; zgO_min^`(Lo&F7{_Lr3?^iwnH{%eT0VT}#6B8;9YdP1#tp#_=oTztE)h(&>TSE15y z?Bh@xK3@y?y;%ns<%>#Feny%OG2e*_c7~(B&Q3$Mazw0QVk{iHXGr2%oMP9;zZ(>{=`FAwDVC~I!ZJL5b;Ee`0-hmk zygElBu>69>#WSO)#>?4cbAn6wP48eIcVcx6JHo|#8mdcarWe0YN7F$2t-gIQp7PwJ z{4S3Qu(U+?&zo<_$*XR&knIxm7NN>T0Ge#U1{ZgsoT)~*NddEAd~;vPHXf3uC8F`Ko{hIfpSKf6 zG0J~XfiQSQtlveZTS{;qMpjo1n)S~fS!FaX8R0Vi%ZbifsY!5jM;e2VxB5AFJN|#H zXd58iYsy=&B!1ZBclpnEZo^4Kp&xl7vBGspE`_{u1wp9MOlAmD)d9MPGt!1qsN9mw zGDB$mvTlMOgRmi=#`14R!I;p92m#mu{b5bS#i&o@7z94Ys6p=$L1ep}mX>cmLL{Sx zUB5PcHZ$EBHfPy*Pw3B+LU}$}ps4J;?+^UPi;iQbtnuE%jy?3IH?>RuIbw)-3-_Q=0!*56w$hP z!Ns5=TuIS{^a#4`wKHOH^S--3FNpJ3)2`k5^^N@Z>r$bS`OWRgyO zM5y$=d14eyEzE$y1VA|xD~hE_tMxoSMji9P!kZ*mr^0v3QJMPT=v2W`=gb!8Go5Sn z^3ei!mBx}?0x#(wiJX*B%OIw<-)Qa9nK%z7@lFHjVL_h4%dxvrqkw7b__QcW_eRNR z9P?}TY0@L{3JRQ3mbals=Bbv4$(*SKW?zwiJW!eXRV!QN0x3s?cYYJGvnRnz*+aY-p}BqEjNGS`)ny_J2(&eG}rsG5B97)upBr1=F;kvJq| zYMV5|9m9RG2z)twk5j%QZyF+`&h=qK#pCRzi0DlK{?ZCr2hqoTHTE6X?hf1_hBLUIW`-P};0NY1N+9XSZ@|Xm%PYA0P&4a~ zV&#gg^)NLGE|t1SEPe_M(#25?lv1S}$+1ng?_7vwt{It^(@T9bNVgIsBs4sR@sz3Q^D+Xz+)!;)_XMR5@)a>E~G@bKB7Px zV5ogM+%-XG@Ka7;UO|mSKC7n;;|#*9Sbq2wCYxJkroijw)%@SdcKk)kYXn3`k*JFj zlz&KzZS(0#$s1aRG4c5j;}Lufs~ZnXh@+l*?6oZw<1()vWosCh{wkl*@?Ub9j-*%R z^IDI)yiQjx{x+DaL>c)algkT0FiRw4Lb1~4+$bidIxuKMNmBFH8~>i6i6pmp8~-H~qOw&K@4x;IO+ON! 
z;981|+a^kNz-1Y5S1uODvW(30EHBj#PP16_;vzFjBR&CZO z$0Q@wv%l=%XL13%T%&JJKY8#Y6toctklcB*!C%GH$0bUZEpRJBn|OppOmbdrj3uw( zX+E&F1oN-G(G~cdQ-A|hAv=xV1y!ChAZt2UKS|hp4AHdZB#9c|*$JM-5EA%|n#34b zOtbURuy^)ZB2qUcS!x-r#V>JnJF1M3DdgK}FA9^)XqNRy{2mR@QB0TL_o%Y!p141- zSF<~>C*A%CFG6IU-VnW@$!$}}{G~ACx^qTnNJZ3G^@us`Z0GF(;uTAS{z)0dd^?Xw z6zTr2l+l57LbRskUB-*Q{T_@AbLDHHm!8lG3QIPT?z5zm(Kh(2=T1v z*kNALLmnW0qOS~+o-Rd0O@eV^qJr+d2=R_@SoSZMHBxZYWJ@$wa?Z(E_!P*arH@m& z8qi^{x`CqpVoAzBuR1c-TuzUpeF+UF1!X21IXX`tjY}?L3JRj6g!`#KH&G1z5l_7l zS$Qk2i?^%Te{+Ts;!8Ds|M>e;1J;it_n%Ymfj|B@`TV$TO|bsW9TQld>KcB;i!kA_ zEJhTeCw2xJ8kC98qE2*EY^*+8;H^pE|GSz6zc>|2OA^1NQ-uU3cWiL zE_^L{{3b*bxcOjD^oFqc^tSA&<`0+D->B$s7awu?z${S%-3eh?KhZe~=0%+dkeW#% z?>vyAy6w{_bW7w3zuyvS3T9^3lWIqN{Va{Fn?}Qd)SCKH(u@FMjD6l|wqJZ==YS3$ zRmO7df-XindXeQfzZXZ1@|drNT-993NL}$_y)3h3w5xewF}H2)Z@B&}1-Xy~m>hvd z$xH-(PK7=_WlTt9?FIACW+mifSv}}Ab zdEmJZtk5Mf@ZU;9>m)eQJ@*`e|dVmpddS2rMyc6YMW=WAP=V z4xdiMM`e1i?Nel#p&b9ao@ZJ}Z21HDf7bJitaKV~EMklJsNG(gwcRJ)9g!9_jz-Y~ z!?WKxkL{PilH;V0@Ky{aaogv}i+@9mZ#n2Fd^ZbkO3*5Jf*M`_OzXD*&-XWmFXs!u z%aO=BmIt$W087Z>39>8R^@sA|p)05119G?SHH z&Tpz-4!aZ*5xt(yV}O^xBLCD+ZvyuYcMIp+!a(GD2KC)E_~|;1GWl!w%j`?LPnK__ zY4UGoku){U2WF9Hx0eTzM^=#+o)^Z&n7s0Jq=3Eq6cHqCJ*$y|2g9eyUk{JiCKa9c z=fKku<;yJP%k3ZEC!Uwv7YhF&0>8GK8J$0@uRE?D(qDEbUz&xJcHXPre+R}|U%gxW z1i3QhsqXs(R@yM^Vo~4N$365sHCd_?3(|lCSOP?tJm{K8 z;#^Ckr*7w)B|JcaghQ6@WXuwZg}<;a%JJ4Y{#V>bj5reW=B}O+9`c$+8?#ta4OeSk z|NfzcSzIa1Wp>Hmo3~HoZu6_comt_hF!v3a7Agig2Cc#) z+pQpbE7xpq%Q!dXG$2`*JEr|BlFrXtI(ywiaM$X$C1JFW^f?6J;)N}R%7+=P+FPE& zDZoh%HDn-Irs?i)R`2$pM?aDs4Pciu{~jDZso*OtY@3D9wxmS6c1(AKq|Rr5nJXO~ z^El!w20S}*lQL#>KZJ~v7Wn)yj4h9Z#T);WHt>AW@jy%CfyJGbMj4V9oetDonS^z;5{Tq?>h6u2V zAMf2^%_9hD7l^!2QfY?2@;JWfY<KoH6Yd|L1KREc=7d;DkcZzBe|?#yxhnz zMng^UyGPsVp_L@u?J&omy>ydrzt0^8sjj-v5{oN#4)E2W`dEeDwlTwc8?KIi+j_bV zC$nh062bSw<2h2BC2ZAvNqpIPoP7gre?*4brs-fNGTgL%adljm!niNw*=fA{ySz+g zXaCqX=Vca##8*mQE@y02dO7E5^!|@}}l&R_;x+Vd;BIoe0au`)SCAvR%XrAJk@n*M6Nbz|iyKU)65m9CY&pO_u0^8q}L$HcMHt)e;-Rx=J8rKLPkI 
zoQ_>QF|>{T-W0tDr>F-qwve1i-@vI5Xz;UVqzaAQ*HlL0wzO(C5u;NuCUV7@OQSjU zXb->i_-9#exLmJ#A#NH2XUe<55lYwLbNYqrH*CJ!^JGD!j^q#aCw`dA;;fV?sfp87 zvNyr>9@u0y-0GhhZm%}0-Vi=wBr*UQ0;cq6YDw}CQ0y*-|ELA({vTzdG;i7SwJDZ0 zCO4jl)M|ziG6PIqBFo9YRH+wHs!xoJ(x>X@!5X(;B>LBF!orfVyq=51R%|A@Tf9Th zhIY-oOW@P6xkt3=8Ukn<{C)M=WE_69&Fuz=G9qgtPTJb9^hqVq($LXpEhqyqqDogr z+?P2?G9Wjg&pPv;&U_a5lE4G$6vufl_cmi2bHiGevP0y~dexhvzPy)RG;Yhie)BWP z%}|+jzW8>W9B=e_Z`}7Nl=TGiLn)Xu%`9TbYnW$#(-7Om(qKb#tJiLwDKD-1sd^qs8(u}H1Dhj8G!~G>mD8J zmUR^3MQGeudV@M+B@WRqj*_dPaIzWF|q{=VR9)!BEn{luUh`@Nn-MbeHJZKN@G|m5QoT&|taF=u6zfr( zDVyY5LY8f<;s803CDjk)Lo05>-0iLtVXkl<(#z%>dStv*ZGqAT2Y20rg2-d*)w8RV zy944Kn?qvH@?Iyh31lfML67E|v++)e%n$nJ$1Q?`qn$o>M8{41WnXN`e0_J$JEJYbDTWMvorYv>a;0se^lz%+%s zJ&Sfgi2|Z-+l-lCn;DvZOS5}r2K95m(@cJivKKns-;`DrY07KkLjOCAOPa;VP(vCR z+yJYJb8Iz-XArB({`k_Nhy8Rpez;zROO8c$bdgK9D2ScbT()agR6d7qxX^Uo6I{Y+ zWKA-sxzh3EZ>kR|Z~mcO($cIF_E}Dw0xybcFYttURy-L_Wkmb68sZ_cyIm?lSz02e zQ-hK>HK9boLwECQDpzhP`1y4m7X=?C!1)$MdhQdtRTY!`o1$Z79#2iE1IOwOynNya z6&;1L%91jhH?N$a@4`4U4OO$SyRdgGMa~<91^GGo%iTM0ehMjxaHlYF9JI88kawbW zz=C&vi%runi*p2Ezrek3i%LBT4w4t@`3Fn0QQ-{ik`i}#YscqKtRgr1m3RVCSQptF z_x4y$hs@e*!{pTIa$6ZFtwUBFt3r)FA2|&%0k_g)3{&6UtGfF>%dU+tga>chwn-uS zu6c_bjP&#GI?W;t(KlNWhxnD!f-wUT)g1};6>yxaAA%Ut8&=P}CDk*ew!^~6G8dP% zOUOwJ;+J4e;?^#yXz@;JsD}V~#RkH|4z0e(1mR!WB{uOUUJ$h2`cRw??;ecB<0vv# zETUQi+KplMRR1Z9X=9(jMk!dbF_ROmXc`X}u0AzwD$m)=Kw~C_#@L|uu2?S5x(M=> zbF@rT<$Z&Z_X*vVC88(X>W=qrXogA~r@eO!Ytn8g*n?rJFfw zX%dI&?M2~rCrA;AOed~Iw`BGff3TYMFfZvxRPfjQbKQJi(ZoSq&jq-Q>0J6q@8X_Z zHDlRpJ;J*v7Q(<%L%Okbk?xb^9Cy(>Q;BU!s$CUoZkoOh*2((Ip3k+*w~uY$yT*qt z#+YTCcDH>Rr|mO}L{?xEggL6!hy%SnV)oqHVDc1qV(OG|n;;0ep^N_!!Pj?frDqbK zHl^RE_2eibe`AS7;(lOUdC8~hx7FmVZGX!`q<>r`93O+TBxk)poa(DB6pXq)KGjwZS|Jq21~P1 zH2|_P{6R$GHW(M0;tM|m-cv{4js=-3NB}RZWsr)R?cW;6G9j*gi=T^>k_>Mc#xT$l z$v&X$%GD(eVWho^Ga8^Ar018_W++0Rl=UgQ#!PZ51C^OR`_tl+NbW(h68Uy-pSrW= z`KEU+`jts%SI+7O8WX+WkU}R^!oqW>YpRvgr>9lrrp%wy;U~d-gom=#kOeA431D^K zk-`n^nJbv+`z>(*2DNa7?b<^{w>F-5rxaTMH%Kx7I?6#3<6Ub|XKhSDVIiNJ0xLt_ 
zmr;T9_&V?J_C(lvH7UyJt%iVE(f>SHq_0LYoUveMULE6Z;*4AP((C#}=&8wB!L?}S zg(WvJ!ejpRDtHv=6UdY;+Kd=k1c-v!&4_v2@F>;M?7kB593=g$4q57$Z=7Ts_dtV= z({{})*_8;G-mwYh_bn3BoA-qB2sqMOp*Nr;%rSI}Ypj=ZA`I4GhG0;6Rie4%tKcS= zgcbVclGzjC9jJ{V?uk-Rf($1kBo&Ca@Dbsxe|Bxy&%;GalUtgIelhZ+6__ANd=68z zHqX)z2)I47Adm!f`Ib~7SEBQXJWLljo!vu=d6FqP)Pp%++4v-g)Iw=x7f&R6={@h2w{5<`Vyvu97bWb=GL1V+KD)JRGTE9h0Tfnbr$ z>;7|s$$GAdxV;aI4O0_X7pw(d6!-5<(B>fbVQVwk;$J;WCjsri)h>>7OT8z-7t~rD^<$ z3P_&^=#suTAALyKO`6m zl&zxSvodFeQu0JP*0(8ov4KB`sv$jAKG5oP2cHvb#WN$E=G0?QuF~D9$AFtnBybh^ zNAS0dytlB(B6yWWth|>Zxh;4k+C9crXnb%}=&*P0Mmc zW=r?Ww3DeaX609AJ5qbZuPWzDwM`?nNu!pjN2P^7XDNefT+Issq9ZiSc;gCYGFr(P zq#O2zsX2k*-w|U&6K0;ip)YF!X~iDlE*wu*oG)PA3iaC$Lw;?b+NWw3Xq41M*|c`$ z-W`Z;rZ@?W>~s{FZGC=YTAM!^(}I8y_+DJo^J_${B2>R7(fztDkvn*~nkQyPy-I3Z zjbqL^aM>RgXZDzVB8f`dO&oCK33VpPjHEPR`{9-SDYfkG9kY2cxf`?lx@B|1hd{KOjmN_-cgth_%PQb z2GRZB*tAB%-?-)0rVZ|#?H)FaM5K#x={576y!;fx<^&w`s(afGU1OEPia1BX9u|4P z6T6KOJSSq;&qpPWqThr+zr;LQ5L|^Vuawq^sHBsMQPLj;?HR_kZknSWCp}_ttYl21 z!@fiR66XaobQO{Av*F5p$&$LbA8Yu9H$>w$h)WDFY5Jd5@e2idO8FLfM z?uAG&AD(sO@EZlmr{~K)upKME4A9uaFb8X?pGxi%yL^hhswkKD zss6gKj2C0o8|a<7uPto<$unDDu>nmi_pCKYNMlVT0#D?gW}BGOxDyLlXO$iZhd)1@ zAxH~_M5|o*|0NjG9r-e?Pb5XY8gy6@5Uk6!^#<|tCtQb=^vpVD*#oB!rm9^IOZJWl z9kUEnCVF)ljLDQFraj_`wbc}^n>ptgN4Wf+#Ryf?C01^0`YH-9MR^&OCY4ow zqOzbFdLTB-Mx9^s`pT>1RsGhz&b~xAItu`}hnJ zBvGiY^?l^Q7R|OqXnXvqOwLWNBS1_eVQxg97vLQ9!Cv4P`L;iQyDm46Sm^XVEA!?} zo$up0RD)0E-k#V3YnG5UTLsmCZg?4CB~fZP`V%tP6Hw>-hnc2We1-- zrSP*sTd*yvISHfIoRb@76j&VFb%>Un^h-#i(p-3hsW)fgY^F%Jhbqr{;_q+}B0NX& zfhu0>?!5%G&Xz*&z_7j*9m=Vmn(lbBD9_4dGDzQ6>L9|P=9a>*tgLpf;AO5bxvY!) z>09XB2L0O4`Ty~ibA(>m!NiOC?ukE7L+@(e+^0M2s!^XcUGsGz6FR7u7RV|yyAS!) 
zfMT>|zWXq#ayL?JsRf9t>9rYl$QGJ0cjrFbr+3oy&lSuwY zrVEM)yCNMc@%`NrTCDZe8V#mHIiU%T=278a;) z>b{N&evDeAE6jt)57>fBoF-*>>AEV^k_gNkStQEJG!*vEnX#ihuKvN_)dZk0Dhz&@DmN@-uFZtsl3)UmKjFv| znkLB~ofaTHv~x*E@T8*O`aA-*b6BgL+l7pfuO47)O#~YaO zW9TZzIyGaZVK~Dm%0UT*PTWY%iX>g0+8SVzbY&7wwtQc(vsX1P7e*Xosaa%MQpsSg zAM4`AAzs6{(a&&#f|`YxH5tjAJ(qE1GbTP?rv}vq){&ldE_(lTKj}j-lU~206M%`r zXDYnL#McM#)Di}CUbV?ugiJRy;XubhTG2oNca@3fV7`dob<1*CL|h1#j-Cl1y9c2N4!5 zWAtD$ZekXBov7(=e{ZVB4h(Y_A9pC?)%hUTKmT#kCc{bYI@HyebHg{0foFS8Ht;0)ydS$kJyLxOHXXt7EG|HjM5jZ=@dY4PWuK+nzi=d73^IqE4ZvYfuB*ZrcK*}qHL z*>UgPg|bTfQC|Y-%^U8|<$vcKrP&9r%B5tnkB*pzkJQ16d$z2yZU)X zY%&v0L+GIMWDHniQhBzv6fH`kB~Iu-td)P=4%KMDBw{W7b4>mOG8RK%Y+w&BaSnkP zV)gW!IwrP*Sw`*B-3e!zgo)~JNW@5lB&M0f2`+T}7G+E*otw_DTSx9Hr>CG(K{;4f zh`^)~2SL6-evb+BpQUQBu6)RB@8WRmAf5VKMs%?yIW98bQ@+gg>}vdN)d=B_SzWq6 zpW1e=dFdw4)cHZGN=CO^2&{>J(;QTa+DSbPtYfAWFCJg9+9BA|2#k9nsjxI!&JCJ1 z&%N2b7zHeo!R|18?jSs_jtB%*_hhpZ0cAa(_pO<`xaQGpJrvs4OAG{kL)Uq^H%#`QOMpnip9^J>Yr~3a01N@MnlL3e{;|tQD zk_V?^SWw(a;1T;be#0K#60}|A{RpARZQmrdw2R5;k#GtiqEph-nve3cetcvDzNx6Y zC~V|-%ZzHn;Spt-!$A7tIu5QxeD4Wmbxt4h{8fH2VER{~(&nW9OAkct3TwQ%wC`E3 zTO2>$9xEEO^gmI{6ZcTl4;s6?M6FoJQR91A#?>rukd4jMZ#9HcO^{5s!wUQhf6mMC@duI|AsIXE+i45O2iPY6I)zFHiYWZ`H` zhI#6m_=V)Aa=gn<2}>485?>)=H8JwP$G!_%n_w9YPGmugFC$<8$YK04%>`&T6?SEh zYWpsbH!M?tCimKV*~SZZz;8OW@9hpz z!`cpA6d;KTlASglBT*q|`1Jv?)>udH9oOP9!Fw!jSa)}-odq^JRi0z}>^qLP_O|<^ zyMnCy8#0}<*81oGmGAN#(V;mid6>$+*>^^h!hzo9+qnAPTk4*!WP|N&R{lkZM*#S^ zE$EbxKRbjdw>OH}30SCdSMgwKcVm>N`@jG2+QHe(C?vG`F+63d)U+ zIC1g~C}t@sv%~-J?xbz}A#Xqo7#to7(cLbG-Y)vHt=an zg9G93TCN}qDqTZCYTpLw-ec6USjUv9s5(1-57gorzPwwJf>xc2&SFmmwV0G%jE#=x?mD4>H{f`6%+tPq>V!WM{{e+k>K#}z- zn!87>>sWqTs7I>>B6MA(m({oPCI3(bzcl17HRMZnv^BT}v@+tZ47Vi8O)}mwUUYJb zD9yJpXf4${zTyjiz(s|9FNMFe_?4C^>&tv|C~2fu|1a$li~EPs6i&X;#eU==Z6%L5 zP)52=q-Sor9iq%6 z8jro%Jmg{nQO7elb5WBPj3FICwRO4@hR)2bjQ5%*oWq7Co+IOzwoqM>a)Z}BbFccY z`$SgCbC?1Cz+jP$p|TP`se-jg=2f(vtYq{)Q{*x1Q;Fg?a_6R4*PiJD(l!+CU-#wU z>;FSi%!+zOQhg^Y>BlSibG7{BJ}ZM%>P^;ls(Q*kkH^suXGGZR4K37 
zaxw^ZU42N^HzKJd9rOoUSWr1zzqIXiN&kv~t!G}&L9g4%uHoA+wa=H7r#Wdm(pJ3F zLxgQJ9{=(^tE8L+kYBTeak=K+tw!7k=BlASCC#z$og&UNyj?{U9v82=)2b=A26Z}B zZdrY5-3W|Ap4*@#gnmRVt@%2QYxU<3j}yyhCu}O0yaL4rWGb3pN`@4L5ToIG<_k+_Av>b^qJC+ z1R8q96|e=#(SOM@^zlNr_f-d!sfq@bN%wfI1P63Wh=D4>JSA&E{-L1Mzity>{$+RG zi`{t_J_?n)r3fmT8~&UqiOJy=`8~S7K2InGpLFP<2t#|1c;g8Wp&>ks09+>=$_Uo_VzRWawBHuvy z9kRA3Am0hg8;(c(K|Wh~*Jts$USrc>eGtt*iK%~_ z8-=e5Q1pKw*~_q8QcN^G<#-6AcDvF4;_9vAq6)jVQJ5LJyF*gx?ot$_yKCs~klqYP zC=C(<5|RP}LpK9bN_PniO1E@#c%Jus@Ao_B-~G?r_rB*|`&!pp>$-N1fsZ>GR8=^$ z=+SkpI%CmJJ+(0Q5o%#8bssDF)Aii>zS)?|;FlTWluq)sfetferHZ?Gj|>SNe`%N{ z=i}ZMYSp#Z8oHssxjFi8iOPR^Kf;^XR>Vtq(T{yMaM%pTd~ai$^A=00{ej%?)9$Bh z&mIPz1o46mk5R8f;ZK1;sF8~M2Xb=6uwl#(R~W9SLttIHQ=hQp^?w;rv-ou{(>VJb zj+b=;^C}r?cYd$)&9g)#RKGs&Dto7d?*g@$4fwskOX8v}+Dc24r|Bp#zXk~>D8=W> z#7Y+v;!tuDq}EwBEdBwXsw3_PO-*RT8(91Br3A{ItHt*}Z~|!cVC|m0xMb;j(p`n( zW$u*>KTD+opYn>1k3KRZqHEN>j32{W{)ZIpYHWs)#(Zs2?tNiQJ#f&pml&P!G!8rz zJ)L573O+!8^BKhH8P?qc>Fw^KRXGXBfyJW9gMt|024Gl|@_H=Cbd?xv{b zdR&Ems^I}%YVBKX+==MHJR{?O65<=ylr}&5UH70E^nm(h@x0!gSlgVuN*n)dy|zPL z&NI~aYwTI*6I!fHks2MCi|U{333PYK^jCXPure-@`2=|?^RE@uE;uuz1Pz7WZHSlQ zjEL~@+a&r$-x|r>Ka1yhQZvT9lAin-AM8;OeyR$5A+ngle^LK#qPcwvP~Wnt>b*Ue|>(I%|!_#cP>O;nez zr-i>;?|tQ($-$`cV?f@Prcut1D;B2+JsaRfURSSE(5|io{3eS2Pnml#jyYa*+U+%I8+l+Zqm{6PI4}}YkVV|Pc4vW!zYNK$s+s@|9x#iJ0)k; zjfwb_YE+P{nZ9CO^6QMdQ?St*v1Lb&+m++VpSU3kS(|y!(X2>QVVGN+fXkOU(e#j( zlkvrHtf?`lfeabRV7*BM(Pp6ddtd>krM{^>Y?)fhfzvyOQvPt?_LQseD)RSU6wQn% zNngNR_9~$fu+OzQ)%09qstFAu|3p1B(j%4A^DlUEB^H*-_{kQZAq;_6MvSoVjT%mIDM%B!|k)<|zWI`zI+6R8UWSn7s-fK8%wp95z;4(;d2x}b*) zuCwl3qmuNfK8t?3O-0YXd0L#AO}2W+j@)-k;9ctfoAuso;_BeEg=qv_wxx3Wmj2(g zH*OzE^g$%MMK@=0D0;`vSyZs{1X(%b!|!%4uq+^JQ6pLWnKa3oY#zduFPWUN8%d4_ zj7qgezs&WHI%4+}Bw|t2mlYgxJ|-i-QsD1#`TCT@ItB5sj4(h&xzLQ~m8VO9PrpQEOQA?9`X8`qwH{mv)9YjdyMC2c;DFrV#3J8u|v07owb z{Uc-qQyUIkzeOHK{w=tU-aTF^96@arb9o-1zv^~a>uBvXr4evmy#GHiAKCW@!~gHg zuk2*!iuumhLS{d9k=X#cUV~_>*x4on9nXLNh&qu_Sh^^(~77{IT9m8{oihqk^Hxh%U5e;2_d&{UmFyZ#b3Vz?CIROKYhx#E~Rw0%Is>K 
z_@zE3r6%h9m3L)AEVBd=-xT=(B&htv<*6|+O?%$Q(_ zipPg@i^sniFW(cilShZ1Zq^n5lYMx4cHac2*gqeTeYl@{yc+1bi{9IKvLX#O&3n9w zwtUNCRvor?E^s_{cQy66Ni6Hubp2N*^f$mB0JCmbRnIfqvVW7yTB5=8FL&kffApp; z4ZDXkoLdu*7_4VpQ&waK60LM!?KbV*RnF5h$;-@qx3ye*J|Vam6LlB5 zKR5Q`P{C4mx2~*us8h;kW$j%DF8&dE{@mHC$sb=f4?voT~J`qc$vw1*WnuGjW9%e7JTWy9|BHf-#V10 zhu#jmuYGx>W1v^2&&r@K5aZ^gg71IrbtF-Hg<{Qm^5RyrAV&z`oR#Oz|NpdTcYgL6 zxW6d52>hG2Y!kK*I3`7_tghbi0uPD|CR^vx^uq+adY414E0WcC56;oIBNnmpT?;i| zf-4BJ8_}@_Zo7aTpG1obf(MC^nuW``4>O4jIfnPAbC1Us;}PT-vl0*X60@h-v7h%Z zMP(%#S4*Pld9lSAJO15Zxt24~wY{+mNfPU{t^IM&<57JL!8&}`l`)KoMa9=~{jGaB zU~hS|y$RYSuHiw^X^&Yz^R8~qziKaq@t{O3p)mA4Zd&(68b6Z;6YEj=iDWID& z2qYQAoYAOwxJw%)`IhbJ#^rB=z9w~k^CKcHc>7mgER~I1>!HVOYSc~}xb^(zLtgi1 z4X|{)9T0Jrr3;EYLkVDEI;jR0NJZm^qwU$+bT!inzJ33)_5ANQ-G?|Xr`qoKG9g)h zjjm|%)$H%K>uU%jP&rrC>0(1yFXQ#M}Fy@mT+&IkvZr?;s9!v*} zxOZ<|Y4p?Uzvl->WwYNzYv|8fN=ajId^=b}+t_V{T5HG>J>ucY!_A-!Dft;1J6k=V zOpZiHRk7CKZZ`i!?86j=SD6!I&sPK=Q2XrH6au1fC%3u+wAaXvKF0+I(ppVpWfIyE z+;4_A?c&ww?AK0RXN=qjVR!VCjW0_~Ra;;DZVXQui+I|#kl~nw z!ryN1{JRK=lPc-q{h*y?_5DvPIWj^#P5PIx9zcW zefad+4YvV-d^lDGzG)FWNN;iCl2-RH@lV@qsY!+W?5s^$zI*lc720WB{}MFs%3s_2 zn!3F(AQ%=nD3Za|7JwIz`##`en~aMme%T^|t@cg)@rxyIccJycRZm8L%nIDGZG?iV z8fph;oRn zsDgAV*gGLA8}EKZHgeo1+=1tlg6YRpGgmv_NcLa#Zv+Bh>w7zUKMe|#+c zZaQ82q12WG7x1(klHJj^Yp3AA@puw2-Ro#8^lPQe$6ZP5R^Lj!t1Yat7a|lj{9|`X zC4b{nWkNkqQJ=6CKqO8o5-eIX!HB#2%V6H$Nu=`K@7MS$Z*cMqogPUQn0QEV)XM(l zlH={lC#39|`L%peXmZ$@YA0!me6e;#JB?93tEkdKl?AkiOFIya2=ngOj;b@aev@wg zhkb)9wZ3lYu3sLpV)XN{pbrrd~F;$$i(ju(~w6$P_;Rt-I5PM7=sv=jX} zU8>s)rkMfo6vb(sh;%+UJmb}@Sdy`4_Njc$9bKnQtD6Lk2(V+hb80GNOt!-jU(=bNf{MSZRli>-tB!cyl4PvQ zSt#xdm@;nug08&r6Tl~zeHra?t?nnI;V$2HLR9bkLQT6Hr{O}5`&ecCJ}ZO9k>S?$%NqaQQ19@~k>2t5t$$J^z;x_fy6%x< z(J3TeiF5nQ2T5dDm1`9yM6RU^lU^T zEAl$uPvT07bEl+ML8SpxNDzJ@$@MR9`Uf~dVvM6B`b2L9*TR@)XWRGtN^i!PXZmOw zv&^o`K8d1wi5ud$=++2b~(xHtKU4A?fZ0_2T4bGZPi4^;s1Z%8!u} z8a>wQmw%4wm&NW*GyH71UkjpwL1-KdQuZhoz7!yPfGVQ#wyi3l5=+^Ip!U{~fL1Y%Ablg`xPF68D^E35$Y8!+1CN>-A6SHkR7xgdsQf 
z=*zVhsr6}+Yllv$o%hw{^Yi)Yt3*JxaG+q@^MzPzbSZ;xe|#GqFN!VTFu@LB z53)=ik-bQNH>>7!RN#EMAi0D3z@SO1JhnRC?KB^%oArD0^F_>RqG>5R_TC^cSG~xf zGc_T(TqmH%*K(M?WoE_Wq}FkF81?=(B4eHA!!E6huEGuCBtg{M`DF2_^Wl3TeJC%f zh{V@tZR?1X!l|r{cMDFI0#VWv+V9sJTKV9y@8Bkp+#}caUawsZUgIy6P*{;U`Oaw`OoVhfr(zGl{i|sCdPzg}_Z<>y>LTx#Ihk)?bioqjH}?P7$nG@GZL z;xZ0&!E(X5-sVTIz(!V9b;l+oCOu-Y$M`c)V)g5Tna%}cI{&TPgce#l@SS1cfZ8ud z#xF9HZ8M(LDSH#+mjMYG*4>s;Qz8B2orn3#8y*Q?OHFZUYxMGX2=?{HPzdpWNoV3QL8 z@5hSB`P&rt^Ygw^a*w$50NSD)Q#3u;An)WKoP%Er{gf_4G>ec-B z#nu)9KO-y?(TE<+)a?*~60P}|hoW3B=(Uhvsv3?2{%($AUYRY(nb}0wktoE6ZH+m_ zEWk3vPA;*%UwZmaMS7tUetGvlw1s6NL@P+{v{LxG1f2KWCCC2EGmvzE&cxjL>=VuC zW4W%oR^Y6gcmdOU3P8xvFIEaEz8;xOy>!5FRC1tcRHYzK(&gaPN#aEQ4P>4w(HLp0 z@V>RadW#3Am?)bB?=tvvPu)0lO?*8LKbkGaIpAnLxjS0-lp-MZWyduBzpS z$8~?Kf#k@FM!SUWSJUdpS7D;lszE%pZJbfI4C?!3Qg+1GK7+aGwixq$Hu7~-<3+pO z4&))*G%viDd%d!aSHHmwSiB_=U0~UnS!)76w*{Jn!;JFj1 zA$@8mIG$Z{AtnpB43%XJD;Ia1)7NiV)Q8Hi5F%-<<$&0VBbB_H0Ql9Hqmni;y`|O9tH#;uA zmyY+zEBo^O%#c!~X-P&Szt|3O-Wqzf@tA%GFY5d~buj{UzHl%7b_9s2b=zq?)-+Np zn#>RyCjb)|>FpyWcpcNshWwL@XPtFhLJi}372p4^+j!AWnWcPPP?s?m$sUo;S?Kyw z(lh*G)c3r$aXT_?#^#er5ue-G%64jq*MOr#(B;_ksN_?N=Zg~^vfK!#dn+TgUp*sh z!e0F?!EU7~3>I^P4T7Q0>k?{~(?Gzlt#*At8~3(YeegN{xVT!kgqo|1y3QZj+%r1{jiAO#hbJ)!4V{{Y zX*&}ybsYkIiv&B9gO{rhTFh}ZlX(+{brbH}KH@d0+=0_YO`XzxO9IAo&L2Hzn|`FY z5BvslN~UXfI_vEAjw2-;+$v0mU4(UH_N5p|FP0q31GCeE(!M7y?83stf7N)! 
zfAX21_j#ycpW8=KYO$jcW47;#D4sJ9QX_wJWUs z4A>f@xnF%ed*{8k71?C*z-S~e58vRCp$D!3-cO_zb6ldpU{&q{GI9F{N(Zj{*^`;m zDT#P!`FU@;&hf(5qlLpNrd9y`S*aoCeR|ZC#@#gR%tkmUA7lRU4x?2auPuY0=0@j@ zt)nE0%L(r65Z8j+mi>s`>bcH<9?Plq3ChM2Uj9hf=x3}mf3tlseCP#Uqr|Y)NO2DB@nETwg+s)JxegNl7xMP~MwCL3<OHB z8j@IS==~t{ZtSqXN35=N^x)&b>-gV#>X7dKS2cCN`i56RJ_vS4^r&xPsCYFJIj@Wi8F>KWE7Y za-nudPFcclZRkFCom^yJ-w-&=))Kqi0ER|(MZRnx*?obtXNgylJR$Km@ta)x{)&Bt zYUHh%3xM1bI}lQ<@sRTKCnAvdE^?9+r}t#&CvP5C$3xi7nOA+-ueX%qGB0O{D_lRN z#o7nlDW3gsIE!*NdY4_=e!2vORjw;qNT=^-pvNVKV8Wn&39vE%LY?xq2Biv1Ma zxufJdVnr?5TOg3X=L79Trcul#0v57xG(athcBqm_URzY!#)&p&=ccbA{%~ZU_sI#u zY7Q+>cSQP&f#_bN*HW6eh}CFUC!=0ie)!5Dc1dyZ%f)J)&B1%pKTNS^pAWs3QzI=T z!q}al_ahQ^K+ZkeWWX?MF^~jP6V%Sria}v!bfwvG0;J<4#_8k)&<~ve3@C9m70uY#) z3wMyV8WmP+A{YG>F8!e3YtC;TSCtiWZ_A*MVV+lqTJg30SOxR>i9Ug#!!B(i*1Dy7|uEu@hl6D1kySy$2 zybQcNkT&3&lm-^UH7YwIRc0bsSr(iHF@y{-sN@?IZIAmz{8>xXc%Iw0E!Hewf4&~K z>XqQYd^(gHv`MsSO2qcOhT{bSd!Gpk-m|A}`=S2~L@vm*bzE0k42du?xJD(v>PMGR z!E3&xn=rI3e4o7)wSTZdbkOtdE(!|>U~#KRl0JD?H36zPQvd~x*eMOa;bP724tZbS z?`20!Q{g}LG@)C{09HVYgHBqLF<IhJ!e zqd<%BE&U?zb?ujeDk!(&Lya(QtlyW0qDIT1P&%6V4Am{4nrq4a*RNpgSFSl{#>6Nj zdvx@iZmMUod7yIPn8q72UUkoaj;pWBR3?K-E6l{2q{rLr*6LmuBuVver8Efe+(l}hHrlLd$&7vjZO)!t9gA5{(8#*0@&9+i3l=Ba`-e4H{hY4m9iEo}q#H39TVF8*e;@L0{i|Coy1g~_lu zBWJ11zF_2n%P!dPPC}2bag#dP$PkDHd05C`m*E$xtVV?SQ39RROgZpSWa!GjgRTnt z=Su7d*03_?1C0czXs-3}$QgZZJ02u%o}91}anp5U?Ygs; z7adWr!;OHd)L1K`gw1^!r*HGai~ZnyHiUlb#zEGcg2@c?7mRPUY6&;P2hF|1fE2K& zo;kRWf35k~16~@+hX6Q5wUN%0Je*kAG)k3^>fwbw!Ry77*GI{IuIWsZ^vZ~5+ZMR{ ztIE1}#Fi@1}_S^q)8A_w#fSkmC*HvZju=x+Q*+J(;)g!Irq5RVvp zAraSuTzvNFWfDq9XhO%V(fiLrLof=hP%!9qoEXkw@_eXx`ZFO5UIfm!78mHSRMVicP zu%Ob1%XnDw9xqB>SRI_;@_Z|88bepfL?g~mCc@mVRqXa*$RMH_qILWmT!?q1=2<^rLrWtxROkd5C1kgktG7=ql-YT``N$;M}wA->A; z`J3#h-SdbXy?Kr@Ijfz`^-C+V4Wi==Yz1;664;6R>v#1s8fW$8WzXF!k($0B(QdHa z-((b*wXPPe{Nn*_QGILfekB_tjwBR)BQ2<=S@^eWh~%Tq+BClj0gF)zoD#rEB&k5ek$`H%Lzzc{=+?sq1nM@MARgTjlLbcH{|W0b*>{PlbM%0;9>$O zf)Dy~OUTA^pVx8idCJ{Qv+c2o9S>P&oF6Q4b`OXl8jI2FTUfhi_UO#!&=D(E@yNwJ 
zx`y=mbTo6UDhhfJ`?1qMC3;F$1F7j&YcP}1)I5Z5k*c`~D&4N3%a<|=Z;z>7ysPQ0 z>HLXRADyW6tFTf5&1f(Ae71gsm{pV>#TARgpCT|fygPBYnI95N4Fh)c(Z@zT6)A^&@l(UC(P2L&S=p1cG?k9)wU z(Rt6rDtTAUv1IzY(=O-)4S139RnA}}CvkPhidGOePF!f;<#`XdisiaWt}?H7tD`Tt z)C~N2vT+Xq!5%1n^m!wc`~ zj3UHrJf`E>uv#DzPQ|PJbTxce0wajFVRtF56!%mIz)&hzD;2*2g~Rav_7lu(*HfoPxCmKe7iPIY4IoHLFauTFt^5 zcYVva?@sC^I8H@5-xcLrit;t2$YX5IQn0y0xd(;42KL1xGnt`h zix6g5(1b9fb;*Juu9UQ?$33NCYgl|J)WJ4E$rDMRNm?v=XeD&;y2N2SchUKXQvgQ( zEo+~B19-mn_9eT^a?@m;HsPRDUz>l)Eh@>&9uKnaGB}q0?@GhZ;eWe^8fEwQ-m^x8%NtvR z*)@xyG*o=%%9VM@hGb#=!gbGxzis=%&+fyB?od}|Ua!5MYhMx2nPB`)|A=^Tg1Crt z0Q|6}Sn?3O#Id0lwza%n{?_+rpiYb6>*7PccJ)87i4a&tH6saG{hN zBxe0G(!27e;gVt|j$qDt1-+_Y+2Mm!HCzEI?`~Pccp4_^LCyX`aN{qY69y*cZwQ8; z$A5$Afe_ga0Wk`e2TSv0t*6?DKmUNq*huySaF7)kX`}n(GKBPQ59O-J&Kfz{h{T8s zdkLw&3qbOAK`cLjg5eU9MxsWtZ}Zhjq(;mqYqfu>R%V!U=*)iY!JrZIAl1U@ys*-k z`em!}$te9?j4{+H)t7|!Y6CuWb8(P{w5Ej$!s?M+O)SlUPaZgnDDx(jM1X0~lYtxY zQZ0hB`#<63bZ{?HV5{r7GO>LD_CAf9xJD>wj-M$&*xDy4^-3=61wABnlpwa}iz~fU8^Ee)vD5T8;IaOgXd!UD*k5mD**vim^Ro8oQ z(%CGi@D9`#%Oi)-wX6D??`(OOi-=rM+S5Z21m^zGhhsrVMtdn1Jup#y)oJdJNO{k7 z*%!^ddGxSV24{&*Uu{1BgI1yTx9Kt5(V^CEM{r(>;H^lz4Dn?BH(Qf|u17D`LGLuAX;`Gwuqw*B_{NDLz?_*L%;lOig?*6w379=F~Esf#0 zQpMAm4H$=GQ_N1+0;Im=s9c;FjfZcMsFbkb*ZI{LiCnO*kj=YGi-d*}&j$hAaBi60 zi=`6?vxsw|nOS;YPQe5(%~h``A*TVcC5s2~f3RbSs}d8H*xWNYbBoApG-Fbcr%hs^ zuo^-a#J(R}vc9y8P~pshC`Ohho~<$;{A4u2K#fN3OHToM?pT#3{#=G@A4HQjB>KQ} zBZ}#)aYn2ad`fV&{C3vw$H`X6H_+@4GlgP72#6*o10RT=q|+b6e1i4M?LP5*s{n&o zF{N0CBgAGHT~(e>V+j2{ziv9}mf#jbHh7aZSf(N6I$Ev_ejh!S17>iZ(Nv?AsyUwL z_*5i7o(16YHvd9!v#B@<^~RL^V$}L-q=HX-wPZg^6#}BZN|C=HeEeZQ3LE&uk(VL^ zAsI)EzIz>usIr4A95a>|*Gu8Nh8n4Yb4jEgzAv$f#lBd{)z^E@9$y?jS7bICEN9YV z5)NLHNtwl#%h@wWD5P>hAPEiY{{e|SRr5H%0W3@Mz285ODS@M~1t?aX97sq2glcy2 zJhP3+-1;-K>+EPKejdWx6ZG2F2I4a2^M}9LXhDV0Ggq5v+mZ;=p=Td-RvmW8ex>(Y za<}z8O1kGzEoiYd8yUJutBQwiBEn}+)`-oKWOymK#U$G8tfR+Q8}l^y4=}f)x8#Wf z7zrKzm2>jPVGI5&C?K@f1y1ED%tgkC%0$+-et;bHPbBewuplz=8 
zi#N+c53Ce}AI*{oe(q-dvc!^d_6gSMQDW4#NcyiA$0f9HgD8z36;A$xujR9xXe-PiBhS96UwBsciB^jc~b-7g5A&74i!gV|QNa_u1ZX4M7Az&E~Npp7KB?Cxt zxe-^>esMVI$*;7SRV{tm9GqSp1;p?;r0sasLicD^FT5tj7S0i_l)so9hTWQmNjgl| zhD?=4bWYq@$A7u!s%d=&6o;KIs6dRXOpZ8Ahj7~X|6vYi4WW;A*?r*IG(TS>v`IS1 z5r5=Ks%#((u_SwGC7s`DLky2XX5?A8ZMjE0XPofQ@S_$SJ5x?6LsCPUzOx{g{1_A8mqe;Sx+`_Az(^?-va&tp zlg7K@L3k9;h-t`{+3w<|=zpjUXDp@~P|dE9lGY-~I_;RxJZVG25zHH#0$;aBsS=w?@@4~ybe~@B zSv?k{j;Kw^9BA`^P{;3YBzLr%6UQ7sB{F>i6KsTf-Tf&jyb)Vw@E*p1jAZTb1jOXd z4-5X@AY(eRtORS5@P?SQfcN4C@3F8em9c9;ZJ6_jU?Y55W(9+-h z`(&}%L^566IiK=520_f3XNJ}ptIr99I)qTw-;s%x;z8Ghh>!O%H{ZWahc9gFIXds7 z?^Vi%5yi}NkAg9BH9&CsA0Ca#e~1hh_}D?A@$Zys!ezbdXe8xUtw{*RI(t()M4HZ)z9W4*8~iZ$EoWinWw2iGdght%+wcLif+hhb7vhu0(bz0?hsxn4>p0(emRb>q5uV$r6LP+?9#|dBC~n*Y&C#~A2g|h9g^ZL(J`Lo7U#IlwI=5qy zc=Lj(m&Nf3)V_FLbfHZ&kB>Kqt}zA92>}hZL__69HOQGoyJ6MQ-U)GlJ5!?ul<3Uf zpZ1BLd{(p_Y=R5!I48Msxmj>&XT!=WLZaft{u)XUwFbu)S8_TLqKbAKsZyT=8TC4| za=^o+sc8HIn{X7MiVf-LYp~eth0HLztqjfsg11AW#QwDMt?q(f!7lhr${eI$dyOIH zRl7-V9Cd`slUxwSL}I`?VS6k47M>xo7(N_!L$JRZY^Xr`U5zCEZm)X~^ZinKI)%WN zu_~gJ$1-#o{R-S%zVb|rkBFwNqwTwUi)f{UA>IDgxm6k-gG}D5)4++V-W4T2%kWiR z&Tk8U!CaD_gLs`g_lP-m21^sATJNNw=I@hH-X(-64B5eCpRs{r!e>PLVYY92$ij2e z4G`?z@8zhne2cg(NL{mf=tca02hPNViRI#C;IL=LcK1Z$J|C{|M>T*eTEcUm6R2ik zzE?@iHu%A0iftGqP;N!yPfc#pI#;-`Pm>bD5`$#x`CA0JjW|kglD%dA-u0+eJUbRm z;hs5ErTCzgZ;}r5J>X7xEqH#$`gte5Xn(v^_UM4I_*{SEj^xf$DNj%F)szOZLjAaK z6h}QI)tdeK(8tzrmy<+u?G^#sTdmhj9>#u+AysTP=$-^1yaiC^c6h=ARbD|iI&-ym z@v45o1G6bAC9u<&Y~X?QnaUh=u92|cZLbWdQQ=Qv7Ygu~17H%anuy|xSJ0quaM%;| z9Ve%y7TG81f&BTrvaf6qgWfqgY`S+5a>~te?1byT!O+3zJ^f)~Xo`3bRdybUHy}w& zJDXw4fFG1;d zzIH$kkOv!z3BL3(kaJb^MLEKLk_mSQ+1^8{qard9t8JXfU#hc=v2U7=I&BqLRqs-Y zQB?^{gP7sD30PO9hr3IMRiTsnJ+Q8K2CHXJ^|5p&eO(hYTM5deDYFnArsB(iLu|2S z+#g)FO%PzWDr|}$)nKU(3tiWLNipsTa(4psqvGqXPj}!yYeJ13SUh-t`Y}w{>yD;7uaz_Z@v{05z-})2 zK>ctIwSn}wstnSXX~Jguhk#xxvE0*5cPE#0IC{V4xozsm7=d#znTaeV*h z=m@_|Xy0qA2LMVhY&9w%cRWg%OlB^@>&`5w-9$AMSEh2oTaoBnS?3ZBFi`0iv}S=& 
z@ejRQIL_fiLG3X6Q{PqSY!@Ksii--%Mk0vw`%;j#CyL7>|A)vC2R1w*2Gs`rV46X6 z#1`NE6I>b@y1CjnZhW=Q?P&3YnlUL~`}?g0TXt2Ced~H#K~aIL4yg@ILNt(dPwlgE zbK+f1s(4a`6NnJ{e3M(w=$N0XH&1;ZY(Z0#Fl*LG-bStt*a1CPC|NL`j`j| z92wa~Szz2mxdGcN!GTE+X>1&z32ZZWgHM$ppHS)bnQDVIYJt4nj@?ptrPIt@u3s72JNph{l_z=#~UQuUQB#5A|~2JVMiJ6f9}G9cKZpYRX{ z#x0QzzaA%t4h*Nrn>SrRFXpUFA8Iomq8tA=!9ZpV(9dSLS3?;7B9(6wK0h6q!dwID zG1-SX^P{qkbhJQ2jl?HjiuI@cW8*(yRo1Zy@{@S)Q%&s_jLO@M%+*rX-3b86vUF#x zjy?fX*zFm zf=Mw(^C(_J!8yzN3@ouOda%;u#&4e+SUlQE=Ume$;__edc(MMD1dqi_=-V;5BKPT) z!hQ{^jW-=D8mIQ28!$99rM1kXTpZwIbj!_ghL~ZASu()n>FUe-p5~`fZc9%9p73i^ zYuJlem7fbw-yr6B=lyzj1Ta)iOf2Op^A?%vxjSpnS>&5j5KfHO9{8edm}m}z<~c*? zhf7=D4b!*tD6qNOat+s=rQg~4vZVhCR=~jh(7twg;&jXW|y3? zzvMv4w&RbqxHkER#{$zf?hg|1!n^~85;xF#qx@EXgeVA{JkTU?OM|I81d9ZlYP9b% zer78$p7gg_zg$jMxIMbsBsoX;#tDoBl(Dm+jVhqVeS~Hs$yydr$vh6;V20(srP)3Y z1iCSUnFi9n6!OxF0jqMss^j$6o3*@w_|6F#JHJzZah;7th-$CND0+AifuaIvGABGaqLqv&mX zQQ4Yk$|ro%Z}8%pzbSPUyylWlh70vh%zKTQ>`0^CaH6snpw@0K5(buJpI?EGmd01u zn25?7J2{iXzPc@k;O5HSU~^GiDs{^Kr9EE4n=sH{RC&8+?N(D@zkgi?XBNa$1cBVJ z9JhA}bFvK-H+w!OAnLMzevwl9j4_<%PKH11MKdZ+zR=Zfxpa^*cCjWiVJ6!Vc0_@X z7pzz*y6=4s*n&49ldRF9=bq1BmP;;vE#4z0q=m7dj=k*L3~Gc(8&>Pk4DMDG6Cxep z3LXTN)SaBD_e_XD0zG$4cO|)}a9lGvZ>8j6w9wpfTnLc^(PCBA)3zFqVpf@>Swu*q z{wIR{yvMBSKyXfS-x|(dXbq-Z{1178T`ekfHxqs3T^Up-5zhkp4|n21{9o>5GK{xl zUswLP8{POym+!!q_d-Z*>a?+P0QFzCd>+Yxu7J zZ>&-#KCW4@HbcH;B*r`@jH`#PEIepi2HL*SxoJG>9%?1pu94ZCUA7?6}gZWci2o`Vq z3@3w(t&Q+tg2WzNwEli7d9^NlpaMA8C+^K3y9i+TQ2`HwQq~C1zdVyCVKyU-wE?fY zg4CNxa@d~puCcTJ*JoZ2b?j^;Y(|MV44~lrv5u3c8`IIJ96mV^L>G~4^HkcmFmvHtrxARXrW zS@*4(uAHSNBdSh+05|B`c6&^?@#qhCgd}=B5Bw}4{@{Gya%D;4g`l}VvW&cv=I`!K z(<2^xuxmGaO`(F%xm!1t$)|`pOGKC)(>v09ZDMy1YI1 zKRnC)0e7GwVy@Ac8Kw$agZ#zop1auavZe8Y{aMS#{y;%h$Xj!Hk^#|0aa$3mkC5-i zAtg}HM^Jxc9Ma3GIAq^6|w>DJ=)JPx5f7ef@>cA*{9;)sUX9sfO{Q~847R}cgUo73uz z{rM-ivDX#jFk6{i@2CexchPlRL&>SCN9r`(0h=o{!P(^&neDfGPqJHW<2`M!=ggDNT^+7-yTdh7$+ zL5yjPU`9&Av2pAL6fqJ1Rve_ycq5oon#0g?`C{Anc^x^knEksw`$Vz87g^5c$o&%{ 
z8$uB%t6udbI!c~o(KU*!sQPayDCBERz@5(;;vePltoE86R0OECNJgWCxklBe5ab-~ zW{0UFFkvC@tiC}>92O^?E6HF#D8)wR!{+HmVQHNgB=6Xp(`gR;Q{3?*Li=Mb&{eMT zWh30hjy*>li7oPf-~e&h1UW+#w1)l^7fS`qFL9>hfBqGHSrRW-nHt1x$81CCww~Bl zC~kF#7MAcR{RUv{Qv1ta!YlWgjrZ=^52+sFp(%9cW1KT^d+j@}^Pr5&hHzAiKHC#B zV}M)r?vcEs#$toOolCRdZM=}tlO2?krAX{?3s7-=l(H4Qtw$Yl`}CEe0ZgJRhnm+$ z-aWC3dq#oU)QhgZ1kS0xLZk2ciH}3sg0-1|FBNVZy$?8<9p=dX3XGVPFiICSuc>$A ze|B$m?~6+%Mw}_Q>cfNLb5VV5ckOO$L^&*k zcra@TJ!OGwqVIP_I&C=V(x|vEsXy_?gVrpKP&CYctF(B1-}E#LVRWVWnKTLonmEGO21p^|Zst=HKN1 zKYYD)SXAxXH4GEL&>`I|jiPjk3erk9BM3-3iXbJM83ZMylu%Nnr5mXk!l0#71`&}S zQkr*j-}f)x=lQ<(KjvU_?7c7coO_+?Tx(rbmv0{s$I$U3FY$}w4kpH5(_Sd)rg4h_ zoL*7|C9$DqBwv&#&i?c<^7YYskwyF?A z?kgsAt2+a7nB>_<%1Zl7cMyD$73lm#<4VUFkeYFZma)I_tojzEk_5{L8A?<`!uWj2 zBst{A`2VJ60FayMyb5wNxf*+XC6-^Q(r&=>?mmLb%y1n{pJ0yO7iZ?VDpy^j|2^4s z;PAFP7JT#c+cC>eu%g{U%?{DMvzLVEVRk-WKWUc<5)aHjCs;h(p7^uYnZBNYVmlg( zk`sTPZNs?5ZA#kHorz78R^aZj%aGX@F8^8a6RX?{=$*9)`Qc-ACutAE@-ngJ>3FSl zX1$Y*%!$swF}2L@`@tvgY7W$%qYu2>J#TZjrVtmOvJl%ST0rF=L4|u@uzi8le2oMN zw6KKdbq&Xgk1`!2B3O6WKL3DJoiKXMK$dbrnDFO2Og^Gl~_!G8&)bWzWQ44_w)AbKHNci3<2u{s@{zAT|qnjunZ6 z)kLkr?V<`*$W((b@yC(W+T**e4kp3-km|2K=n~Q7Bk;q zh`3VHoDgC;Pj8bloJ^F=N#*x;SpD=6^^hJNu>HWImQ?2bWfSSo-TK%H36!apCZse7 zz7CyF3D199`!~5RA>{ni;(D^P&chl%!SCLg5J2^7SyVJnua_=`@*fe5m(h)+et$kl z#TW7p;yS3fT8d_9>>m;Agef@l3Y=WvthY$(Bn&e_RzX$^z(@^biNq{*A#}kn_owqwhpf7grpG;v4q?}7ojvutqXUHQ3a1N& ze0WYVOnUVtO!Nw;N~f-L(*atG*y<`auhQu-Fn!o@^AJgeMfzDfx!YdryRSBQ-;P2!wk90SH0mnLFt2eEx64>P< z$o1NPk~n`<8D#u)opAJ&T&r@fb=^2b%ZyW`sN(-snzc^^49V1hmDbI@d_?APba=S^mVUhTjkOP?uYqa*F|dfQ2>B_y6FqWW zp^+qDsuDePvFKxGr{I0|f-lMM`z=Iw)&x5pV{LgzM?O#Ge{uV!FHK)h&E}{h`WD=e zu6xpb8(C2`7y7cLoLB63N_D`AU)|7$i*)TIAgo&)l)p zBWvU6p7|jF@7e)P1vJZ7iu9$QGy7kXBSnye7bRePdX)%;%S9`sDX75^1$RsmD0Ps} z+17gFfx}u-?+=p_CN2pEh%(3|ju~dvfoQ-0+~n^%b{rw4r)fw|d>7cAPzuH;U_WN0 zY!LI8+s`GY(}3&o;;!)?sqe-RGfPOmI}eh=1JW2q*9R2it6a{i%do-pQ6Xs`cP90t zE8#(VVuV+W;;+ajM(pHBhnGbfqlR7C**5l{C+AEXf1}cpeXU;Q6m9$y%{XxXzr+sAkL-a-DhSjHU;?YM!*A6QIt%jsb9-2%M5G{)%L71?^i 
z*y$5k&jy}ir`m%@%{tOTot}l2c*wtRjTz*kiRTTAg};A&OI1r=BOBa7Jl-BHA%KDh zIFw--NY!bQ@d`K8L*b(P!>ZEc_t1Ms3Sn{XjMVsP0~meJ7eCX_!Ip$WfLlRVdV$Qm zS1b;H%#qUm^9Hi4?n%vpcx}|9r<0f8=UgqRBwqD#D3r@6dJ0nLIGwwF4XE$w29g8r z%Op&u)}ycCn#gWa2CWdY6lHkuqH!QQVYNb?Scc9EN;3_s$&xI?dQ*$K#d4=qkr_(q z$H9$~G6N??<*NewmU`X*bX8+RBx}D}M@f5LB;&Cfj2o=FqJ2<<1&XvlfBf4j(_7LD zna(}hpePh^y{y?>y#3<+qdV7F-EU^yewdMZnQ%PMhBgokhCFy@^rb$J1Db#|xxIVj zgu5|;z6aDhQiwQ0`g}^vtYjgoxjCbx)2_@M^7dN4E}hMQ#oT{Z=xzJUX?im5HPoVXRSeZLHkpkS*HxoA z;9=JHv3a9a7#1xiun(Qazc*&AkydR z@?Em~8F&g+Eo}fFRL1Fd$bQPkuZn~+%_s1LgMd_nj~80ZYtfGe91KdM()>HgKUQkZH) zh%^x+iuJw_-Q>L&hkv>fBk4-lxnhPh)_aJNJ6;qa;JH?PBaQt!OzHMov{G3VN# zm6tk_0q{#2Kh>R!&Vx0S!IPzR9Rkmy8B3WQtAc7V76=nO7sThiy$O8F%10uv&7$s+ z!n?L(toJrav>U@0F41Ayf_$zd7(9lwuE|8mnwz~__{{nAkiD%p3_%P7t3*4DZPfrQb;I~-4Ch>oby zDK&s-KT-F&pC|zb3s8u}VHmQ@+prLsTpR;obnzM2c3ZmHX5yEggXxdBu0MluN|`k< zLbK|oC*}PSNc04hwBZ?Tz!FE*+HP#ULU1DzHaV2z6iRV6R5#F5#sO84U@yp%xGBzq zh>i|fQeNDnAmwTf*%VH)uu((*5n$}m|A4}%^o(mj%(H~|$dw-w?Vu2+s08wiQeGB# zCHDMmfV_6@4JDt7FYzQ&xbxrj++9Os?H+6_+gs`ZbUpk)cl6d+HH~l~_Uk-pwE9YB zTblP=lTx~R*4QwWcu{S!2t_oug;XepD4kQ9Q1{9tE94DXt8gKHsRBEc$W-&0rpNMG zzBv_VT;=ifkl8inUkdp$E}A`dhitIKBuf3bi!DCbl>Y0V=SA*{P-EV)i$9_uv&o2=UvW2Os9O4hh587sh_F1 zK+trPD$VgW&jaR19?~ZouyNnqZIkn2aXKf!C@)wq{DAo)Gtw8(!ms*mii+g?Ju+w@b3!E@Ced)#2;Bx8)9Rn(~yHjyZ@% z`-mkoi27$+-4lVcvy2GEshAY--ZoSb|LfUPKAMM*X)jkv1^jUce%k7Xb8r|8yL3d_ zbzt)Hl!L_iCDosA_~%krNI$yl?}z=u0u#a^K5>2`)WLk-PlbxTVWrQk92Y5i&yL^U z?6~<5ziLT$k)8E|_}%gE!X==7O#w8#ll} zGL4a{N95fM!ULywGH?BLDzUtzN-4x^#$*S}w6C!ZalJh<4~GI$ICIEfpTY6ab8#P^PPsUkCt1S4tN819X2Pq2g_Lz)r4t60 zuV0QH$jb&u#(kSoUrlZ+QcVAvD#ZPTeIo|$8FA&24N)0Qf~hgyZNL7r!8hXyY$BP7 z3ep5N1uOf~OI;|=WCbA}0J(wY^%JOO)?@F5^Z!BL^L8*K8_m#xO%{Gu^{z<7PgRCG zZG)M3(HbMDZ8H&4qfCXKuCS08v=>7#;nrl(+hM<$?^p5bRC-{b0O5@->Az<~?o<7V z*w_4FYYJBr+G?-D)$3ldS(Bg0Dy%&8`%nAZpymD`Sl3n>a@Blv72L`d3C!p;6B%aN z)34g}{Hq5xZ>02k{>l!$klhC&fcw1m}AmMe)S zY+Owv?CB5ZY<>=sTuY6g{yNmz2LD$l{GUen%%ELu*5it9n_W+zC->>6wEcs1^w~iY 
zF!^cS=r-)tA$zx`I=zWz7X<{7HW6Ykk8<`{2|^E?0WdMt;L!k>&SQA81sOgnJh#i#N3ySz8%_#PnH^O>=gsoR zNCD@o*k2R+v6Z>o-mwD|+dk1Mr(ou&vL(uQr3pH0{GVWvVDp>EzcJ1+RH0Wh#M4~S zF-p5BQOCMDqSB^uJ3ED*Qd!HJ-o$S$20O9&!_?^3R|R~d>twOEHA9UHE4zh-^AspM zj7I)YMg?004^9BLoATExhNQmE{VG=C{l2?X2HxPq7)<<=CqbNx_v7#1|3-FUD43UJ z$RieZ8Y~Rc8}rCw*1?ptWv)L9SAIR*352_apBNbza zM-u1b%g@Sq{lH^g`stDjTXIp=w=vUVA-XAo70tjW^5lqS#3calfH_e84lV=;^n8SO zY_D3VHM69a*zGjR-`AFb6*a7Ms?9nX^f~g}_=RTOZrs8o7MD&s8&dpQ(vuQp=?-B% zAzdF~J=@oN%Z=%(uY8RTT(&MhJEp}24nyZjGl8~!*$jAU3t*vyj3sLb&pJwt=fI)KMu8K}q$65PjYScn!=eA!d2nU@~ z0yaj7W#$P2B4`8=JnWAfKHGg`d%8o#=zEY$$$tp=DIPhQPh5g4>-S47T!!doUrtbL zqEY<}Kky)u6Yg9pztPn(OmU&`7bT8e^(KXTW9TG>zzp$7ttmNUr;0G7_BS&|q z;Ar|kF!7sa`}{dfWd9FLl==rI4lPmc=MS;|SGh3$kt!ove$mw!{V`)Q5j~+gmbGQ? z2sM0c+pUSse|_wy2t5r}gpQUUA3Y{HJ$XzLYjQ+e(2MusKi4#3sPgPz5KWan{>1dkPb?8oOefBVK@IseG2~@4bn;4eO*-U& zhyrPloPO$6SGA13kpIz5&QNs*PI4U$Ek!?3Iazs3qTF{!dTTwVY(DMJ(rWU5K%$`w z_Hi%C#t^H#mC}{{b$?$JQmNfrG4Ov^2SsSz^T=>=b=hueMb@@4T4mbvC&atMspx?S zhx}`vUe)B#281AxsC-AF@->F)|LO|QZ{WO47`PZYUWFveUbg6q3ixK%Q?|h@Wud3{ z@t9d$YX9G;P~qgc;VSg#^&lDELkC@DNVLMUdD_qC$LFnDL1Tlc`P3)8H+wqB_aj># zwO35``XwcKlC;N0`#0>YDo{aF9Qk4J-2N9itxDd6%34XzCY4tT%<|xyNFYj#Pt9_Z z9egWrx8-(;T<@f82K;as=@BR8QBS*+T{jxA<1s)BQng;e7Kt_B1b&dw1y>fn0+ z=h?e4QbdQMkC?TTs}r2@@gep@GNl8x4K|tTHQ!&8kjISlKzfvYQcZrLQ{p^@7si#2 zdw%yH7wJ#b}Umk0}pIgbK2@(l2b!BuDxh!A5hPs-L>V2dUG9D9Un(Kn zwafLK1_AJ-C`E5PcsqUp9IDrS5MWOyyoX+FyJfB09v>2-{Uuo#ix2~r7p;Nv>-5(a zI#;Ln=A?QeGB>#Pr;>+1Rp^3>_+=^a)JTDAPB~3HJo`3AzxgQf$2lo{`KCXxPT9;C zS$kM9a>^w}#q&-)OVsxbJq#cEX{&ql4ZQh7fg6s>5&^dU8}fngamYWAk9N=eERKGF-&GFBj5*0-P0GRxPf zm3!l6J-WSq5y@Ti`8CF-w78PJw~tncA^^VLC? 
zJ^!!z?zKp~c!f@0pAU5yr;jXn#2ys-hWwex>eaun-=m}xZ8O$kqp6qVjEf>u_yz`Y z<>zU~4{@&?)#; zGnMqzBAdyny9Y8%Zw0~ecdd{5_+zXRkDUeAXsXlWg}UB3Is`xbLk;*eNuNuwd11IQ}gLFL&AhT6G%QhoouHC29$Bk%1<=V zi<8%8W}_C$F8u%pvyJ@YAYh%43>m5>J+0!W8*h=;eEsr$X}SxA*>FM;FhMTQtcY&1 z{ZfHTbLPhiy)pS8IB{R-^$sUi;jI*+$3+pTn80Hv=%n<|JD@fG8VMcfh;6$(WhV_l zoMB=nG-=#PnN0N=wV$^NZ`YcbP~V^8!RV z`*dZBAKf~GfDigis7pPIEKUvSo_vRU((nB%TVJWYnIRz5_h}a>8J=48KAiY0pux83sM_P+EkBGGs8MFF;Ugv|=WBy3; z>FZfuJ5fdIcAu=H3Z+L&Y{(g5IaF!<)>-gy^}XBg!qruF%LP!Ea6zeAqu6n5m6{!D z2Jv(+d2}`?DcHGy@1V5xIe-h2t9gCtgU$(G!71fGI@aNP?@x}c0k&(q;{lyp33>J6 zuYY$X{V=qG`IgLOtJ|uKv2Fb@^?;g-12uIaD;f8&;n1U5+_IT|6qd2G(lCWe>??W< zgY&RRxZ@Q4d4EX+Vp&A_nCd5wf+{TI%vAa&qP1f7aCq{4 zu7x9Kq8wUYb3n}j5aX;uUxefx7|gVWK^xRChKeo7lbsw#9>!I0AYg@cJqz(+kwnYl z3NO;53qfraZ6)2Uw*oKJ4atfRD;~l=jAN8S&q{b++>h)ZCl{AQa>@8wCbi4-G;R|8 z(MP~>#?@Y#3C`NX%f*N?8)|yK&>uh>SYBuX0B8D__7^NOOOa69$h+~QVZ6A~S zDw}sOUwwgAy#&?wp`6^@SFt(cs58He(#)=LHct5wqI&Y*we7qErEFx=Mta^987$o& z_sw4{zZ7Y(5vlAIdpD^jg?_7!ku~N`*(&TY&$tg$mf$I^VgQH1y5)+bs;D_i>x^-U zUyovYEY)<2@2Mc1urRx)TSmABm>Udry8G*aJpg>qt~|2Uy_t2xvUtsAI!B|ldTr}% zk?!V}r#2zP4Fb{i#E_OY^*9(B4v*fez%o#9L=u4%PR9qN99tolOfbT_n6$?>y4a${ zVBp zKO*J%H4{5kJl(Lj2key!k7jPVKREkL4b)r@%O4t}3_OBTB<1JsFVh;Xi>NWL7-Gd$} z4pW~WoMli$-Y0qyNI}@X$J4F{cF9AtCX917*{xT%#Mm5nThLDHH1O z4;ed5$Ul1cEvWfZqV=fo4Zi*BKY?P4qgCsQ*_M@E>&za?>X#y;E!M+_T|c29ULosv zh&A?;)~Yc%C}o|CIW(kOE`$)LEmNhPeLciZR4$DLnWSdvMuYolifgc*Hab6}@q>OD zPF0t=T%2Q&v_M689LdC<7^Xe@$CGaJ)Wza!nvf!k++1W0fU6<8cS^~02JDAV)jCJp zW2wr!AUCgbGa`t0tw~5?tkH%@_|Zc#5-)QVx2UK98rB!fa;U_S5yK{Ap{7lfbvbpv zB&cQBCdm+ZvRUEug-@i6ecLOIWnsCdl*OVS5rwUYt@e8M`u#S74OOGjcGdul0B9!tFbN!V#W)y1tOTaI_^MEqg8DW_pL@NjJ2~RF5N?oH-g#Ly$~=wqfo| z>rS0C^Ly=QFY-5Vt!ozs{A&FfE9$%5hE>AyStyxZ&~vlr`*euHp_ z^|aG7$_G+fGNJGPM2Mt^rrh&y>x7I($8e+LQ}GSIYDVq!U~y)JL`)blCS~d`g$0s0 z=%N{lr}!eX-<$jX7?{nw0;2Bh4;bHjkdI3FrlMqyGa5kaUL>rL=Kyu-$w7GuBl`Y$ z=2x`+UeTRpZwCb9HPM3S0tJ|fnLBi1ki4VK-Hnzv^UIipQHP$Ux<%}2B6-te1W7bj z^IkmbA~U?y4+E`Yp&|3EgS4Kuoy&#nM- 
zi|>5&QW*#)qyF+T4TYdC_Q=)n>Tr8|g0Q$nB}Qzfv6j*VVR&6ij$yjKJNK<&V~)O6y`0)Gba-sB!x z5Z!*g!^zL`ZPqhGk&Ao+R4C6Q_F|h`4~ZE@Xn*$<5E3JXiK?7e3!l0TTt}(TrCZYA z0JLsc1+M)NJXipq^NYO!p=pJ(*S*7H7oa`mO@Ii=vlDG!ZoP?G%VoDZ7rksf$ z=w}KnnI_yh4-$+15b7>IDxqCl@-8t3R#z=+>o@ges^=<+%r;iAkVm!x(-wNn4eiK9Tr3tbsfI z&=r_K>Z~Uzn0aD_^GZoF3g~bJA$0fi#=F0gE7wX8KIveG(fvjY})!v-g>UqAvZMm23IYu@5)d(SHY*|~ty3wiyV@n&+x52XPHA87xnSE0$cERuF; zncOtN5&9$pQ|XymP1n(tk%utMqEe8C_+PjEbAaR)+bCb8-CP5H!-_GM#ZBpfkMOz} z+gKSYoOqtes`v)Ohd!Ap-y{lU?%;~ctkIqoh=JHtre0ApSfD3LFBH2dhF!UIIY7dQ z>-@=fKRel-I6ZRDr)N-+gY3=P%N=v2gttE9w!FB_su$`pRvF(zMJuoPNFwfi4i{mX zo5)-757anl3Q$NfRLYL@HFTDsb+>-md)N3WEcs~c1V>@UiK#>WWNhbS4q9(RGhoyu z)$lCCdzWTyqRd5%x~qlpk?F+f=UA@aK8ttpLFObWFcuI!wWyPBk4 zeUy8-?2A_>yibAF;tm^zII%bXDS#{H!1+@9*LA9f&QKh)W`I&j3VoyKHxR_i8AMEv zECJIizrt=fF4arVh#qQ#Dq+~z_a~EzVbd`Em4t4l(P9VmnD`xW8y3PW zYG8U(1t?OeYdrQY&^NbYt zpR9kqN_ZVbqgf4&PZGnqTZ`^c=&cF+bhbrk&c(O~;xwMCH+4N%wK)`=FMi8>k1`N( z2Y>5`u49{)WB>XvD%1YU5#qSi@$87(5YR1{UHQQPd< zpsUbYQkd5Dlp$9+Vdt6F%hs468{8KDV7Uyu;bwYUZQY%}xV~)cO zkOjDy{bA`a8dhxrYkP0cq%295CRzf#(WyBnY~}N>JYq00TV15Sefh0A43riDQnt;U z?9!xqA^}H|Q+=M~^)`G@keL&EK70oT>z$&ZR>?3>z*8Gv+x?!RaZKr|;=oWPx?7Om z3Y_?9YGNN}cprs9mqwg!op=9!mTGAC_Yf|QuEV?rrmDL*yjmEUejDcDH^17)h%4G>ajU9uzWaV zn=J4i!Ak9Or8-HC1%5S=h6SCJ@D~BF?s^@)dTj=q?biLa5=@r5g`~Tv?(!Z{eaH7L zQR#O+h;{vE&bb&-(AzmJv;D|gBePvUcj$@=#zq$|ic>{WUWcKZ^o z?v|?}HJf0hOhtbE<7>)z4sVD{EKzAY)(njd_PC5^?=j)gyZfN-)(qwQgi4QH@S=u& z-)nUGFH6=ts2z+iVR|Bjjwu`O8Zy{DD7h@Xwp$ zw&JJ&A>_XqfLHoL|HS}&hWx_-fKFHJULMV?6t&0L#*6sBltr0}f>CdLJa~o+8Xf+YJ(j!#~GYHG{BZeow$O8Qi5Qc)6isGI>B4X9RXH>{9lBWJMnwt9W%NO#UVT zc6{lOuk=LID>?*U6a+^{Tt^7Kv``$I!B3!eWc1pt5?!2c%&uwoMTM5Uz6icVZ+ zogn`YCk*RXnCamiuQk;>3Pb8usnCzxhJ7cuXpvg{{rxjZnWkjQKO{iewNzh{F)raIR`P3u(m&`bttf!s*U-Kj2?*Y=lMu63N{E~Cc;%a?I>V>__9*0Wh z&nD=nj$DdSyI{7^#B?383wMP2%x`8>bW?O4lkweR>xK)WWVhJ;R0W|{NV0o0jBaGZ z#x%MRX6v3hgc}toD+Of*N5Pwaq>6BdOP%8$yV>?%dUWZ#qb!Z2$k)s6y6$3u+17Db z?KYaxq6{i57V@h79*N7IEUO`s+Y~r!6K}g-3~h{(;0tNH9RpJ}gBpJF&Ig@_HXIPg 
zE;p=#v1obXr0xKxo%M4QX?%1p#$5?7UaKxztwm;8Iq1$14YSuqg4umHt14Lhd`FPi zNb@wyzcI_^s1x^RRNi$6S_2;ZWS5?N*t%byPh|L>6G8l>0yKBB#5Ay>%i>Na8Mt(f}sL z5-jrF#QC1zjZA(dic_!gl}z7S*k$;=CVCk=FYr&m)%U*wu7JN1u2upZT1cbuOZHO7 zCsAO&mHX=C%L`QarJH1595gkvg4ik^P=Kl9R{Ab174)khyj7=bZt!`vZRY1NXbl9a^*#wIv_*)xWBc<|UBhp$<$ z^{9!=FJ7h@GD5d)Mmk5VhftNKU*aLqDoaqJ$Pv<kevA8GK<(D3sC}(jNX%rmLJjm5418&Z!QlxFclQ z7A9@yF#ygBWVPO38YT8vS9N~$6Motw8ykcoFF59@w;i@(^Vhna(-t(*zUQx5@DwC{ z+Dx}Xu=)d6N}zt}0#R_gsv8wq>{rLfu?l!loajr1`MN+#?csfbLRd-f)@!xcxV{%a zFH>c#B7!WY04?SZ^@2&)#Zd4Sq1(?Jnz65hdb?gk{{GI4?x{swVPz_?36joEdRA?N zu%mK&XOC2TZ)`4}cK^b9eJ!*2W3rA9K?UZG$ysQILu~vW|BFRldBXF8In~yRBV0lW%=R000mMUxLKj=nZc(A10L26b+E zEx6;Wm9V@>2GnPg5|2xh;wK|&KOiY(9}3kq;W4`*R2-0a5lB&M&pk6Qct)xI+pF?s zHa8Y(NTab*2m`nwJ9SUHA}#XTI+NDxG+$u^DBS;zaw&cdV^}>>^P*W4!fI!V?=55J zjUYc9fMZZFBh5*LP4#4y`th2K{K{|$6*oN!TC0Rh(smLU(b*M1b)oaz+t~LX#gf9T z$u%ve3iF~gC>h5sXRaX2zS8>D3XMRB{rSsht?Rn_+A}E0ludOSv=^e^H?lE~Nz%F(P7uZi|x(lYu-Tb(rHUo{2UnpZ%vb*JYJV87geB!DUy*pd}5)sim z;p(#NK-m58jUfv6$xoTE>cWP?wgd zYsFJ!$RJXC`nHbdl(`1{y^)SJ*wi!H#JPg zpId`WKPmSAYUWKzIC*9d8^6^ZN0a8P!$!mGV&cNRMKb$o8I1OQwx<)`-!if_lfNho zn<@0a8-9i;|l!6RYrTD=79$7gjgNzS$ZNw|E;Jfj{lvaO^b&N5DzI|LdLAT}nFj62Jjg39Uql&*D_{QBg6ONu_l+XzqZXD4N>bkh z&>TtPVnTN1zwcjK>xnX5^aq_Yd~09%2!~!Dn;A_}%YzjAPX+Z^r;BORe)LLYgO{&;z`zfM(>|38y1=Z-JTwz(wCjWmchV~6i0CG z(E3Lu=A^_Oyp7R(8XjUsU*W+g+V}cSNs4&v{fWhGj}BYff8I6iR{m6^sad}DBiRVt zK7)At$cR^JSkKZAZTad4ddZFWSH@FEhMaNVJ~ydXdksNnHR-P{X#Q=Z0aHNif+)nV zu>e8nyrmlLwd3wyIV-$w zMm5jHimuP~%Hw6Zz#(7PRmd}#WY;GkV^oQ107HD8`8)V5(n$X8v7w&UOk3svr{cPf z+9Aof?NW=oaZTN~MB-VelP@fR4e`4Wdr;#n+^86W0pAS|80;Q7343+%E=Ne z-LSh~qsdZ@W0g}WDr%Fpswwif*o6r3e#b(f$l(#Ta0hGmN;txsZd<(msOV~vjP|rz zG?>TTSB$;fbec^Lf^*-1;Wd%J-8ZpUhyls#o*3_l!h4@T3gRbvN!I06WZ9cVJzvh? z8*Cc>@kowy&s;1#cS-KWXdxNL(#ZB1ADnw6b;CF=8ZxDXQ*xy&?cGw?r!7v!!24?) 
zWA3%@P82yWTZ~f8*)flkesqM6kd*8|Q(GPN1jw3}6JEZu(}nOvkjAL zi(?TVhncTvQY4a67q<@ZLtMUMBYdAPOz51t(CK|6?atJ{Juwc8Oc7=ahKac_y~Lv# zXV`Qeg93oBlCrl*&K)wc9QH$+&C7vr6iV@l zmjc_&UrR6V7#h@ft2owt;F!Hzqv*l(=ILI|v_s|)HK}AS?&^i&y}Cz^^J5U0K^#$I z6+22tvRuMj;C7t&F9NAh*VZLe^~QC{?tnuwMZ%wI1hvgZC=X+~mBve=S~@X5T?`nk z#Ru@zuilBkomL^pN{t$+5;vcUtKo<5@*KX`$ZH5xx$r>KX050*=ZWks2&1)qGz ztCeW)5Td@XZVWw4kUU)!K7Q2B+vYSR=Q{ufuRskM=*PJyav_1R7z<4^e-&P2sQv&M zf~@6xuH@Yk>mHVmkjG`I-6MbrKk?LHQ%2OEtv-YJNvWZbr)iZQSdO0N_p;v9#4<;n z!DQ>~I=b-8w&UFyfay0F#g)T!`zGj^^SLIl|9BA%>=&mlm?cy0R0|N^8AF+ffjoY6 z7AXGuV7V?XIi&9XLsQ8Xw)A=r) z%q>L}F~aMX>Wv*@AQSK|_kwVaUwQA1qxZAn-%N4zKn+@6JlecKJY?Yk%x76%=&y7+ zTgVYlCKp|=-^9>;(Xbm}RzV>b0N2HN4)Vy*dCfu^cEs;c9ytsRx>BdO&$l3kTrpD2 z4Eiz}@2rjzy(dL|x8JCG63x?&%J8=yb#>yST|KEJE0TA=#5-33FzV>)tTS@7xeUI! z3aKOi5nwVfgxQpgOEN{(U5-d}0`rxyiZJNZy;r`vsGUb_y4c>gL^rbml2hKej+Ojv z&jW*Uby*V+=%RrD;b(oQIlmlnC`bhH=7@)98HtE1R3`9A8D*0tA8PJ2KaQ_;dLj34 zOA^V`tJ&KRrxy~5ycO~5=`lD zwZX8i@&0$>1lpimbYlt`QBsVD1^5^1`Kg9%Mq|ZTwb3{0sA5e39u|r8gHAbpyzbD~ z(M9o~T)*!NDDtGfsa}OO#w|2fKr;jq8Sgc~4hqi_6%_7`#y)WO0Js^@YZyOAXyNZO z$VM;jG9T*^*E@sWp#-zqFXovokll|{(%El`1Pj+U;zQVT<7hUpZ(wJnaEo{y`BPiYhfYk1 zSa;pY_LEHO?SPqI@XR3mMIFiOZhOv`xNj;KLlsF|r?xR01VAqj91r!HU%J*HmsLJspMnp*DJ7NcAaZ>t4o%~{ske1%^$k4R$=b|10wAs&#yj;8YOjJqjA$VnM!mwwnQeBQN?&M@zBcjfq{X=DN^(M7h>Lo<8Q5y zf4N;{hu8YkG*{qU!BqhBCk&uW>t)*&PB@MV_RB;tJPdw*90SSE^lszZE4W){A#+YuK`NHxBTh$B9gT9uam{<1>KA*E#H_|mCB zkOlF!7Ez2BvD+%TrTP#umMw07(b!{&!4&z#vzr+xAHSTdGT9cqY>{_hS*JVs+yENz zqa|k{;?jhL^=08@(!9v80n3TN!%QsOO0$RbiqUcV9}IJf=OwWQkt&=k4&84@*Q2@S zQ4}d$#q`3Il!9Y180P&dHynxlV}!Go-m0XJLf`BW-NQ~+-9rZLaiN@Ow&e?NgN^bs zduK(<1sehztshy(+p$tZXnqZ8-g_@aNF#jXn=Q=82**BdX$vJ!;|GHN%}|$Dy_>oi z6@e+iltdBjQg%%0-bAvGn0*qn?KzBx(Mx-aqm0C)hYD#at6tC0AKSFvvQBi#Rw;MP z@`jcF3ML~lamX<4r}(e-nc=xwyU(oqHCG<1$0s=~GI09QGq~9ntpGgr;iy5Wb3`N- zSn@*;DZu(pNaBdlsadYvEzC_#&JhmRy4XBC@ZcwoprWr~(4$|N?SQN?53|2J=)#bM zteHe4-D(BRip>iAmua%XIZLfh`gIMSamCe4^YNVku?tn6kk0RrM3AT+)=L11>g{Gc 
zR*G*GTxRI}q+R&{FKyWt@UF3CPwc}lIBUC|#s&GoVA!TIjP#WD>hvP>RxqS>CFPO4 z#)+M@A@ZF-g4DGs(pih9e4AHMY{R5)NiPuUBIA7oI$db5#s z-Ld4XFIp19)1fd+fwy}t^n=#urG*aR23!;qh-H1$dc}BKh)fbd&MDxj8n_B6yj?oM z_*JJ2G%A;W8onj_S1fzro$@Zyyzu;ZhxTw2EfxKjDoKfOV1!xTBmM;L z7v2|8o(r;7^qkRZW_B)qO@Rvd*9{V#-i+_-tQuZtuLuuJjW81>+n<4EbBJL4ot%B> z*~P%hCAP*|AAg#i!a<#zkjj*Jx^YzWehXgpp4H$as_jK$2zF;OzowLlYr&pP@%3>blbaZ|+~wW^Hqya%x0c#EAXHP8Lcd|)l(3Qh5TXX}gLWB!e! zn+N}LD0+PBKNWnMJ{w{DcA8nOWg=ux>YVpzt$qr!9^SXM6Mj*NePHmw{$w*SLp8Hyk?w#GF$e{-PT0_u=W5Yp<}zFLxxUVi=k4 zbZ&t!!em%JCZCGbz?t|%bm?L6&V%(piI=ZZP zf%bE0tJ_2aV2oVedt$$;60bOUpZ`PHTQJ4-Zrk3$H4q>;!QI{6-KBAN4{ljOf=h6B zcZc8-+}+*XU0?Rz|8w4Z>Yl3m0lKPJKV4(Z`OGc5=do4X#jG`6?{92HR8ZVjb_X5DVs8cv@>)@V!ro zO&rRVF*BLnw|B1Vi);_u+$Bs`kU$-5+R)=ql%iVVPP!?GN{}ROR-xH0oS!~=Uogr< zTKvkaUph`hTbxeW7Q8$ED?N@Juq>W7Ee@$;RBdD^ph@J=Ku!1=xeLq`+FH@|0~Rmx z#I*e`@Iq8i5F_wcVH38U>jmzGT*-|8StN{LUDN2_bo`fx3WRMTqKi&}_G5>dl+~WT zP2VShb{;uRNBfMGc}aFK0zcM^p7QnE-OqZ<_nx-r5b{AtBLX0jg+hSpvH;@Zi@iZ) zr5jqL*(<`z;ir#lI#*?@f2EBKJvMK@5`s;{k#xl;F!5}oCr9a}akqFHKrDV*D3lU$ zIEB(9P&BW!)?{K$e!1%Y*zO~=-L(g0x<+EytD7>S3_J;eAIe>@dPxmSL$u#a6@Kpe zVB_n+QbrIxEQRs)?f`4M|E5%CE}5)C!u;?!scHA|B74AsS&${6bJwJYtI#_?XlDO( zhhhBzjyx}_?`fdDkalRh_<6i^a4QUVssEKM#4m5{&zt*3!u1YCmF@Sz<2z%( zK9sm3nsGG;-a)8k1r)>ae2cI5Z^22{uzy1=d!bL!8QNCxIBY-cJ>_ zjBGvu|8VmEpBPK=j;&|UO)!lZ@4I;?fgcYhmA+-J_d<);BJ6Lt4Zol$?jl*5_1wS0 zxY72uZ>C&TdKcDEGzx#>)!kQhMY;43?DRRVD~jQ9FZ*O?{RyT518#ujNHhU}82bV4 zowE<)g~+h}V7SzYv!ZWpLoqi3m!<y|sUmj1xyDXGPxLjqzNJ%>5IlBAqNE8QzzApU|rPL zrF)l2XuHZ7#!fh!jV1`wG3&}c+%X8upMJfxY=ezl50LqYVaTtekQrq+Bx<= z;iPo_5aZ-;;Qtjl!naAsYlr>_i4o0_q?(=?pe10<>v>X&=fTv`uHU-BF&UNFVlO?c zU}zAz#K=ZZj#ZXHz~9}%5KgHx(y7n-yzJL= zePE89lyjjq*32zYAk*rg<(Ht<1K{I73{mqm!TJscN|)&%&}A&U15N{}py>`dr#Z)z zJmjr;mg<971f32PXakNW5G)ej2dEeG;$!F8u4Vds0Tq?B84dU;Q?H7G+?_NN>_Qtu@(#aMRE69nhR* zv}c z0vOYyBn!Jqz8qk`p=X`+7%xy&t(PFw)<`9xvMlYSeWy9;8A)n=NpDPczC1v^kXmsS z{blOD4-Dj0X-MY2b}cl2pX&#kEv_s8^J+8l zr9nA~!TS&T=mVAu@#t7jDHs;bJ^>uBeVBLbRQ}Xx=ODniswt(c^@0}~Lj0Q#N|$Ys 
zQ0F0cpdmfC&^{)Z979_1fztk11vvxBQh`-|?5XmQ^bN0d*e$>Ognv!)`VegY@i=s2 zJUIUU&_T_eAT#o68uP`Ko%_cg38mX!Vu6};v#f5dHp&XmOdi-2@i zLXYQeH|Oq0DdU$OhCQXMT?_lw<=9oDS16*q>O^N{vIvZ8V_LbbW=cFikaOhi%0 zdU}Fu(t(sw;NR}gC-&VO&)g{bv@o3|RzJP(L4{P;;QyEV^SIXMAu%G56kum$D1fU) zzMuD6c0+#I*09=M$j^c$zkQN%CX`adrec7F&4|Qb+5CAOYZRJhtN<1pqL*dDXb!t} z(aimY9MnH7IQ7k8tS8a}&Y~E$PVZw7ogkIaxrpJyQ+SpC;r#6KsESS%OgfQKt6Gq+ zLS$(nyO_ZvR4Q#tBh}KP2Ea9|hY|iDs}Fu#fEy%7!t;84Fh#J{Mb%;SOT9#XF@7B^ z64Wg_h4`2K6S4<9rgH!`Y0Sy4!-24^IT@*^qo+orBD3@CN=YmW3sq+l{*KWYA*6jA z>83}|mjdZ0K7Cgj_#y%htzuX0CUh>y&ygUE`pTECU;u(b4IIwQ%%t_{@q;?T9YeZs zx(@+2Htt(hBAF#IW#`8w3ia=$0QMK;_c70nk}a9Ce>p)>%0@nDBQCtp59eht|IkMM zV+rs?YU{Kce^$mBKCSju2$+Wh`;Q?&I~_a`e*}&Im>T-z#aD)$gu>9}`-^&w#gzw{ zGgA;+J{C5O-vkY-WCeQNpa9$u-s`jVhq0?Z&9oGTxhTH@7oB=cCA(|C3MNgV>t~}P zm5C!0;%~@YKY#fS+9XPz-zQ3LHbka28&dlpba-9k>$Cse5DNUaBNRkb+xw}%2IYc^ z>^-OAp`MnrYPwM_vj4?xxsz$h3zztdN?GcLx8Y)$F+{fb>Nl2%k0qz9NKELD(qNAM z>f1g{MROZ_Q{+2I7Zx-pQw!YAG2YXB;luyp0r-~%z&mokvpuC5um|Tx#!c<$3;wVr zoktpc;0nxIKF!rJpx2S{p!%L77>h1)9xZt^7>Lmo&n0%)1W$ph{f8D%xCmP%MIJx4 zV1#O)Kc#M;Lpy)gO1X!?+v!xA1i8oZjq#h*m&%x$bcheM@MuaMn-X&{Y#%g6!s4AD z3L^<9+jHZkDlu{u4iVz}KuBWqC>$w}R;p?6*q*kHYV%oh%`*l5}3a+)ki z@9H1^U+4S%WCr+a4qinTMW0M+PqJc(yg&OQC@2i`F|U^0rF~0ukL}hA9Bl*tAIttW z?ShbLKBWI}LzvgJWPerLB`S`=VFD~**S3CI5FbF84_jD8(WA-#Zwt!=qLULz6EJIc z-nAVH)wK97#^0QMcnCngx7-dj;GA490*2(~ z2~rd&9`qS$bpF?I{)?9O#tBuYWgO=JG05*Oyt!4ts}kHhY;6_U`)MSs3LXY$W#hMq zW$JT>>(Mn4bCXXKLVqMA@qY)V)W7}5+nMG}1r)XSX5jh)J&=>@IP5GpW0}AwAm2FGQ zoNl-}z?xT4BEN1!*wqT%Clhju)=I5ye2Vcu+9ml0f%or>64{Jl>C&uX)Hngl+XS3q z_zRN)|253t0p6Vu7T!%GzQLk17B4%jfa6OP>8@|%KA-#VDhNccH38jUZP5~BF;(=w z=hL~|i%_4fk>p?Ev;)8K3Ez$NQp1sRPVxf7c%P+%L5lqmZ%~7VdvM_In%9tgZo>pH z+JA3%@Tyscm?{kgLTUF4P04|D zlC3Gv@!?Oc+H~y1g5(L(+)1;T3_Z>0rU;O6sLPW{YJz6&SYn$E&#;!B<^rWp?Q;T6uaTlbBiKz&cQILa@0>or1*24>w9js+SR+E=c-p<9^);GL8)Wr-Rljq_4EcDT~wg+u(aKx%vNNC z&WmMvJnK$EMJjG!5%>2FIg5Lp2~Vya(LQ z=gAvY#f#{_jlb{i)#(@^xKZ~MSBcc4GO}Nx%{ATQA`G#sfW+(O-d=TLc<7DoN&gB< 
z^*C*#fm-To$;$Et49u>LFL3Ehc77~%PDLj=0vwUyIfy1{i%Wi!siL%JW6B2VVfO<~ z{Q>3Q5zrc#Di&jmIh&dnX?xT&ddNqB1Ts^Vr7}l)GKq7THZJPHzEJ`tY`TE{4t1#Q z-;*}Xc$mMq6d5|YK(5+9z|rp{39+X#cG-~S%qe!cmg!DlFrQ$-*sI~StNHZCjVzA{ zffFhoDO?3Z;sszj1D|D?$lbcmEYoF$F_=XTr)()9&d6Qn(SVS(A&|{OP=l!<@d|SN z39KAQF{R_}o2+>rPv+JZKLYDuuaN8Kyt^U{v-CdB6r<>cq!z+fk0a^ppR>*_BQ3Bi zJ(6hEQCZ%PU{;GWJM_#ZSN#NeIOd`VytAo=YXNe)M4(jgn;n=3)zuV$8)mP+|)k}u*3rhLjl?v z2BjgsN7Ip62jSWoblhPG$HLvJ6l-f-P`VBl5_dTo&MALYt5{Mir=~d@6+YVEP->i1 z5k8BtC6a#Ds<-UeBP?XS5s7iJrIa?>>WSOrpAW`w!wVdHF+rJTwtKB8w$>!tp0VYN zHx2noD>g~KOm9T7EiUeFd!SX1)7?mV>j#M!p?j2H^KZ|Y9mz(x3ZE~uNFf$SSSAK~ z?EKyGh73D#gx__u5NdC)%`Y?oe9uH67YQqqa+9^4e?I8BdJAw7|i%*nUQV1Zwq5e+g}-qDq+-Sq|0rP zh65kyNx09;V2aI6kV(jU6|BfhOi|f?r`|jC0n?s#cfQ7IQvP|_!mk}z zO-Yy9J1{;9Fp~4ukFlKH8a_s%JhslE=lsLJXmtX^JZqG(PFU1nU9DL+cUrn+F=3?b zSf`%eokDqPV|A)g$v4@>1GlC+D$H_JL!j$*T9^R{MwFGL>Hh%RcglbwPl&fw#aCyA zAvf%Al<2N`vJOvwVvYx(wyjk??2fcN=&;sr46^7$yqB^XN)@;U+SM0GhswP>hc2~@ zo(w18nc*;u8{tM)>#DI91wZQ-7=@o|8{s5XG|$t0 z8-R?4IjU{pFey($5})YpkVIz^3QvDrvO zvee&fYPx3MZfCUQ>H|)M$g$K^Xb?qlo0lM;&Zp4zRdlN>L^QTX7G$#Oy~mnD_H|Vf zf6?t>XTByl+5`eNHFuMJ`PpQGgHjskHVg(O+{x}&r&xpvijG36AQfz41TXI2#hwUZ zb#!|sBS zw)Ug6Feb;ugw|9evw65GML?-kJ8O4!>XYj{T6>9e##9E}-p?$hUb0fa5>C}I^xHuo zE=!i6^1W|&R$`V@E)b3;Pbp(Q5O|u;5Vys(;r}!VI zRFoUWx~%}yW|hPi!?_>YQZfBfUa4Qk?!?*rBLbjsqr?oXuWK9R$`@&3;@6_nf_{?v zl^aJ#-%Y6N)DqY{TK$?8`-t~JxR2|5`%KegeYT*ie-2YukPuQ3T2Bh%#$4N44vfrw zD7Z>)#u=#A&@VP-p!jN^PfDpRL&PpvQ;2g8BN_=LYjrV~Ia(Vxmi$e?M#Zd4{1j14 zUsvFHb^UXwQ;zMbg12bG59gK@1H6h%$BscapR|YsNf*vW#|GXn(Ybv2vPxae1r_Af z5`(J+S`>PxH% z#+VHt9B-2hb-#6iX~&wQ95mlFT*Qkdxu~fS5!P|32j1?k@M&v#Rjzy;Q#6E6eFdAV z0Qp%Acv`bl4b(y?*O;kq+K4H{!>p>VX_Dpo&TWpYPc57XWRbmI?=OdT4X8dZXC_x< zUTpjVNRs2BX8eC_*WO~WkF0}cRFazC-<1GYhPKIl=^y*|SR30NY*$XXTWOBqYq5iot5^9MHGkh?;?CJ=KhfYb#OvAe2uNG zUmgS*;J7fvBToZDeKfcg%P*r&*%M&@n#_OCVth~Brzft=m5gWyFJ|ImchKO6Wb_%Z z@-UhzgGS+nE(jwqlAO;sEErdiRtftO2z%@8s%4XsHR;mS$7>1;1xXN5fu!acdU1O6 z%UPGowM(PPQ1&i?d>Tu3-bvNe{su8^$yDU#tgcHh9y!zY7SDIMk9+jXjI&%KzLd(e 
z6GA$^-bGR<*;!Hz+u8IEJ=7#t1|tY?#oMew>|-8)E!#~dflP#`=DNGOG2Nj zTJGngK(=UpItzuZ%z0dVo_e(SkrdqmtYm;|vPxALq?cLRdEX|iM4pnoeg0!&SH3KDZ4EAX9wIefr{8Z6pnXc|444vtBonI}7Yr_5 zOvpu*&VyMqr{aue@Qbiw%po0_?3nuBP z(jsfMgN9_mJsfGtH>DU|UW;)KIeg5Ybi)Vk%t*fp19<^RK zf-7DW&+6YKMN1s*tsp+%hM}7hS~rhqEMh^KFo!zl59pi=vQ7pN`U!ys{A4YrN{wPL zgV8+F3LA`KEL-p>az-A}phEsT0`eZyc#zTh_h6O4PwfML;-8B} zAcOUR-J_%&k*~&gcE)+)z2s1Rwe6d zeRflp-eS*cKc!6_g%enp{e1X&?mF_(um!8ls{;$HFuyR`;;iNYcD94H#@r!0E!;G1 zB(L1GJ~!Gm+7fw)p$X+mh8GfC=nd~~h`9{)E>E6>wghUiiz&aW7d2MCYF}@Z7@u_A-M@l(5y^aE&iynCR;6C zugQEU+jTdNcB*kE=+%ZgfgG|2DpR~3vO0Be;bf>W;}N8JXj^YZxg=}ShfTu?#T5iH z!lnS;K*!&!Z`cb(a>dY47NOkzdn7~lkQm{D53p3qZ*B@0aVw*f7lqLZ>#$Sq|29^w ziO5zFoU&b`;v~ZfwZ(pJlHmwjqCb+mKSw+k9yjj|AI0hFVJw;CWe)N@sZLLNyMh*~ z+xdLkK3<1-oTW?q0a%%-wwiM1i`{}BIqU&aGt%cpMX7J4+yCMlT7dU3MZ7qNA{-|x zQkvjPAl}MyKCy(Y9=>A1k+zS_Xbi_Od76{gWLcA|!!a0QrAFp9-e2bX%X>=5NPtUL z4I5+gJkgSK_3hWXO~=ts68}>Adb;rdC!<>Ri^<_MH~_SYr#Lu6$J`q`CieUAq&^)$ zAN9$i02^GM>_*zf#Q&)xsQOC6C1^=Q5z)o05RXBIN^n-i@}yN+M~nXXf%6OYd_@q6pbtP&u7t{K&}$KA8!p=BQXOz8QcC4 z2q1x9sQP-eek1$iGf|LZSyGqAUGA|!ukS8tzs1tzG;5H{TVwE5fggr$S5oB~nFW)c z5nloYA=#uAd2ko&)bhfit+H`ztyliWQTfEIqL2~Fp|Y_I6&yB)V{AUhuU;#<@&dzK z#8I}uZV614;9ID)>X1ph8~GIFvMJ>#+Q=~ZNnW~XF~83=ZWKzSLG^nH+##$$j;KYS zx}CU@7CB7(Du{L@#Djr@bxS5U*A&Bl$5Aqlmsz&eHN2cLB0>rtMMVq#%>v&ifvutC z58+9bcFH9}a`^}Bg-G|Qw2Naaa9WktG%l^iD5Ib)qfLu^6=++F#8uOgp;jn zvrRp<;Xz;LQrSkNhk8*-`^@fvvsXjOHWBm9$b3BxW8nbQ*=5VMLeJ}_%rj>3^OewG zC!xgZT&{vx&cyE~LwKNW)X7EnC~R$Kh7MYM%x}h?9~&+66E|pN?pm4q3_DtceiR`b z{PKk>kQJ3O0p!J0upo5ZTJ;)qgOCbuP_5lsj^s&r(wsh*P%AcuW7$4?Y!&@LRcnZV zsiDtdEyz>mZ*rr1yCuknV78WkAO;l(?FL~0_!*d$IMQJ!AqmMO(djc^5)#!>;m3jR zx7IV9Zd%WKY=$rz&wYv3h z-w@3NAyy)tA?B7MRtj{Sl?I*Eym$_(x{kMBE>_VI7g|z8BwEMHnXEw@H>pI{a=AX1 zLXs!PwEg<>@>CUI6T%tdVZnWdtSj*sj&PjGL8g!kV=l19ffb z_!ay$i3a08f?n=@m%$enSkHzvnIDy2p$8&wVRNgRmfsV)CT}zCFEW&T>`}0<%5WQr zLYd8;q}0Ipk(Xa3>QyOv%Mf9rK_6FH9Uhw;r>nt(V~LrJWc|3?>emHuhZ*14Vl1}$ zdYGR~@_fK0E#sbJ2_75WEJuR00$pZXL7t^{IZ7v%DIO;&NY_Nee?8xGo2s>);3(^e 
zoz&+y=4r4TFkbx>tNcU+Yl3%R_fH@xn3u0NTK&M#@+4;dSO0( ztBj3yKd!0(^d>*L-|D~>3&^z}DsalSt}LE|m^jo!X(aTjV2%lmIo6wyN#ae>y29NI zw}kC?=tyb+qB+N~90X_7x4FEgGS!_Vy#JQ&A@p##QrOalYb~e~wH8CjqCVn?CIkv* zvpG$FsIk+yioX@GU;HQ4wb}au9t`MI7_&8$%clpJfz9Ds6oX>3>NvDg$;c-x|Bf<_wTt$9db`XA@}jb-)jC-K;LC21|aFo&F!ShLJ?7?qQaHsh-<4I<%D!5)UIMt^p7( zphKcjM{Q6454Fy$FzBJO4cxnihdSo#fqb6SV`0dSPX02(v-rEkxwhyuXU+M2tV%wL zpT^8)4wgJnDPk|0YsNV0wC5&~12%`up3~()cDM>@sdp-MUDyf35~tE!d4_3>V(sR{ zxmP5-dilR}8jvjyyp^T=${a-!=t&;uJ!0hM$pd5R*prz2WR>jnd#pJ7UrL?k7yb}) zUjEIM2gcXC$vT(u%I5M4)ECJgCKY-4_(1}u(#;Wr&!t@Ix{wmN7|w{E;Ag-Z2=E#3tC^CG@+f<^5r;O3JQ z2OUGc?Am2}()^^O$qi=>SBFzXU@RS5F|PM81&a61BiC{tyY9eOROMz67->_2M(cjX zH^#`9V3ZAr@wmXcO==lW^M0lW736qoa~ZnX__VOpDfHb*^tlcXWmqPEbqNv82#>_h ze;*-gI~@{{kq%FWEe)$O^5l?-FhA*?(uN-4-HSp?3CyqE4f9aB9`!}h141_Byhhd0 z06Jsig5p+k3im;hvg8a_CFtm^1=}Ofz5x2m7zE_eH9xkoP=-AeXzR*L4^QM4TWXXP znv%G*6-@ESJ`mlL=ofLrX!=-01c~!JvyU`I9y((Zal?L!3V7Tn=UU};l)@mhlLigLfu_vXfUg2y{B7|P18j|M@ESVCM>$^DSYZ&y!i z*qbKOLx0t1NTa0Vst3r4he!ye0jlS~?Ni#O^tcg{PSPCpi#|BxYr43Zc3c=UDJn|{ zITb2$EiI;G-p~lp&M}i^R~q3njH|mpn@t^?3JDIx=XywIwuVbZ8zb({uU>tqT0Lq~ zCcC4jH!!rQu+>s#`fr8$?=Qy!uTPa7Hs5cVHQqWj%({^orcd6z)XFyt0S%jh2sSSA z-AeuUGyDGaxF0^(qvk`~D?g>$f#>7-j#p$OmmgP+z|(E!`-8^2yGO^nKf!#W%nzM4 z{p;7Ji(5JYEqAx1%!q{A zXBqzI^S9eOmlxRNa*6eKVDkMf49I@k(aN;Y>!=USee8H}1HKf$X96UhmFLeA2OSR_ z0*5dBKXk6Qt^JRF`1o8JxB65D&NqfH0H>j7 z(^bI`-#H({Nr(I zyx#~h))D|rW;p49ycL1ea_kh)P-hM|PO)c62rYGou*3y(1AB_ItkQ4Vf>5Jr77U6t zCRf$%c%Y<@Ncbylyx*W`-LBnWFMThDzAwUf@WKh12`El7tAc)S5*(&=oR88iqv7nk z1?M2(&^KaJI-X;1tM>oV|2hsgSE6C@=My)g+;2{1;7?m^q9(58+fS#x{Z+JP8HaQB zFKh^=(|%vI)v|;;t_e{8lpvMr(yM-c-)EpxBlU$IuT~7k-L{4OdgsybtsqdAN1FbQ z=iLPoy%Q)*$Wy4VZNc>-xg8dKq94#8VYNw+XAlymJS8;G&mI_6X!{!K18?R~MIqLoT?m`EUvaSSaAb^gOD{4+VZt6(~G7 zR-GGhvJ$Tl>6(~8JjPEtm*ZfShW7F%?*kaeUVOODQ`&yEURC)KDZjTr-|+A5JYGrWffsY`h_3 zFb4e1&vaB9jdmleTo#KT;Q8Va`j|cBSN5mnoP{3KTxSY{`&$*ELJz!dBCA9gp>8hf zS2y;lSxu&;^wYBAg)cKAf>D%dVaeVu_)o848cS93JC1^_iDT3N7fB1Y-0Y?yvEeu! 
zrf3HXJStm2^S4;>>Gxoq+n?rA^IqkT*l*x_J#*=EUx)3xN;F+HyLS&y(KCxR~p_-bSmx~T_7@H7+@ImJvH9T z0ouA$#l&oJJ>TXxH$se5Se-UX^<2hA3A#zz(4os*?G2*h>rzvyY8LSZ@`viKH{Oo^?EVd**yT_Em^8qTxI-3`HHPtx|l0#%0ci`2QO%HSCn8B zJN4DFSsA`I&=8tj(_7+w4ZVQhgbb~GNrpTq_|P&QcJV?=$jdt1k>Qe#Z&uHt*!Jve zoWfDkwhZmyt~rSlEdgX-{bw|B$L{Pq@t`H#4ET2L`5xqPEBodxbW%b_p&DRg{W{3I zF%B>%%SZzD3vho7JRJX&81c6!Fi=+Se`$H{&LZht_qYHzeGu;e+G@G3#$30a)JkO0 zXWxF}a}2bEIIG#}!k@XC5fyzu`F%HV2QQ|TyWW9+S7+f?RtKX1SR}H}Fm+6#T;tl= zb2a{%4H=s{X^cA*>4ej`;00nDi(_r(g)XZu3VS0!HU<1b@_+h^HHD_(5R}-;QF%YU z^VJ(ipN(fx+om$EXGmk4_7SM$(zSJAU?Jg%0=BJHxiFHg{7$%V34x!=;>lk*c$NMei(k_aVjC4;RM(5H7LHEo1?XifvfDQU0KhBRo{DA+BMjl5_gHc!S z+LGIkd3}U-i$rj*CgoucIs&!6o5jFR1mC^|FJ@CaONt^4w4Y|oCE6EZ{3CGnx9qZh ziO3k-cYU8n3pDeP94oO&=WpA-hI{2BY-1ROgd2hO=pGE;V6@CrBTl-FKj|VbIDP41 z_^N4+Tk$kd)Xx;yO$`hz)@P%ewAK2-P~z;@ZW(g}3X}82BS^^W;Wk)sHWm-_mCne# zNchl}`<1nyXNJm*5w}HKD=5ma+f7ckZAgOooeOSows6&?MYMZvuVN6c_7_xn_%2Lp zTyqz66qn#-f9mQbo!BKR>#&jTpX<IaG! zQCG2R7Bd)A+1thrISEf|$Ia0C@y8vj;yI(@@zcCF-o)<2a7*3>o>xky)F?+IpT{7W zo4#=q`;vR(eiE>$Yt^p zigdQWsXs*+4OHFrF$&vD{WiFlMI#_;V%=;!eBd)#m$armDK?yQkUX-pZ;&9fv?v5! 
zTFpV$wu+OOMlpCH96`lTz!!t8u&SZ?7 zfM+t9+|na!R0vVNHT!EsM!Jw>`umJphKxt_c^tb-Q4*O%cM@$FM3sXJ&n$_pp z5R^3XKdu)~gmao5$4@r9i38IWLnB=`Kg^%eeXOK%63zloudw$+uz60cPUI}PbiRZ{ zg^#}2%0m-K+kwW0V8RQj;z=5FI@3d9vqk&U|`D_jx5CM{AJ!g(pJs3!Q3yENnXHcIliC0^^jf7GOA*%a^)eLM7{dN z*|_*i_aO7TbeDUWBttPe%t@5_&I(#2wAzg0grz5rd7ZJYV*a#t0!S;LN;gnjEye0 zA2AHswbjUSeq>sY<1@XP{dsjR6#>W*)LqG(8#JEEwG>r7?KNrpv#>W{jaYT-;iJsE zhk=P8vS7Wq_`M<#j|w-J-Elm$(3DhV_DWNVKUr`2;!JT_p26?I$KoMi9JLmf5n2+Z zJl*Fbu)#)?ZLN-U)btWIdw)mojYO2ZQYw>p-*RI*Tr(Hgi@#{!y4=rK>A`Gxl6irF z+8U6?>F*h+yLkRZWy#|H?&?pwwlT29LqK}p(n@rI*4|8d@NxW#JXl^V1~sEX8fOgA zO4-8YUgpmC^&~IQ)5}8hB%7r!)kUR^R+?_!h%Y_i>8@;d1$B5tC?WU%ef&u}8U&1M zrt3q#jv@WeMIm}Op4Gy^4jz=Hftu8*e3tbGL$5(S@srm7I(eY_hMd1nUe&)ZbABP2 zL1;9;<>TPR&&5JQB&L>^i7u=97H+ss@!urHHa+I{d3U9Xdw2X|_?h9sW zYbi!2iQxSeU_zE;zB_0u8a!|hmac~6|C?8X`puKIKSiBMtGk~bC&j}Gr*sLBee52& znxNsPwW2+`l9uXOpsxBmrTs;8-ao(5#?FSPJ12QOs;(~LkFRB4UKMTK98+7z63fZU z14m_4RR629ARS@9UHsxUeIgANJl+>=bb~QBOIa>4)GpE!GG~=U4JvqTOg=Gpecc{# zmX|*SjU$sns0gFncAjlbiGx(|oPeFa6(&Iup#WlHSa7th$Iv%n{L_^f`)i7nou1V} zMw}K0&O@sfF-U-YH>uB}6cCey{yucPBp5cz1&zeGoGx1&yjk@4amP8{u1KRi6- zxe{AOg&geYtko24iK4?)v~3xks+Q0l6i-AM3I*RQiC+=HWJJ95*pXluQ4?^OxV9(f4yL(KCQ+(nf zx-jD>8F?-Q{dHB`TsrKTFx%90{uSr>Zc)ABP8J?{=H_MYc0JlQG*;+J3Jia})g0&H zZ@H=eDCJ@`W=LP;V-VMW3+4VfTaaUjCL*8toX7>ta$=UXg>5+n45%f<>AL0eD?ft28<`si_~f@H1seG< z?u=LseAd%xaRi!hWWbqx^~O&)YYA>Nm8Q8??jiJQQJc+nv-T!_P3z{=6W9^4HSxPQ zqNN+jo{7Lc`tj>xacyyltr9efBBj5o%xnVw2g^>&$PiZADouwXbpAisCs!9qi1|lX zIGW>`OqR{~N;y*!na*T+u{Q+gPx3-L-p6%ihu8fzqMDA>GjDvGuUNF|(`Ls%r!MtZ=1335-)q zsy>-5`A_(55#>{AYrj5^vL&^p_GOAzOURAlCPy7)h>Dr>0T1`59I_xjFg z62*^B8(ECE?edK0h4k;91IwDi&5W(gLxv|BH|c}vM7*hNs~zaON# z7cmm2$Fb%_<{P6@1qgv&}8rfI3EyJP_+QM?RG!d9V@R-E= zFfa)fL3o~j(4sxpb*ZgaG+*|fdVE4<;!XC7gS;>>Z^nPK=%^O^@lo(OII}3qoQAbP zT=9RV%z7_iU*Viq{Sf|&<39%`?P~&8Ft(#NkxJt41C9KteUz;ErelCdy1S9Za9nub zLG&Uy9Y!+Zw@BTagfNQS7F1)9breN=HIEx1?cMdH3NGZG_1s&Eu znVRV=Zlc7=iBcHE$-P$WTceh5h3x5zX42x`OHo%(>h>XO$+`ZFZ6gdn|1LnULSL=9=;y*ZtLpnSKn%-zC|={Zzs 
zwE$|f982c+x0Yuc6_$Q8)Pnn|f4vD*T$`(uU3J^-M4DJyI`lrE1YoJ~VU$W}$B}!d zP@^;M@?SU4u;L>j=f#Ay0-LIxw(eY+N>*nKOmN(l0-FbjnE;|JtW4Q#r|OVFTmQpT ztzCIDC>a&1UPIksn=qN2sbAS?9U#x;mt0d%j<%pL7>Ba3Ba7CJB>5vD66jo~96!8*nqT z^*-u2oLetnh5?%POsq}M>}^Vxim*S6j%}lrV{N0DcFh5`D(`Zc=<$=BV)+*kdMMwd zp{a9_soz$IsN~a>k%;OUSDJO!b=7cGBT|sR2Yq*n&J(;k^tC@*YNo?(MN?s~?ILX* zij$lDz)S__38xpC+2=y2&zpI+=wC_R?DbR^IWuZ=;{YaU6lN!NAMHuLX2`7@=^*WPzW z>uYvs`z~zH<;T(1#vGMiqGriLi;BtOx^U1&Nw*z+qxVtLwlDF$Y=R#4Y)WJasUnXy z9Gi@+f#(5WUy>{Ja!3XD{LW~aZ7N6tQRD3m{}^MO)I`MAl;9=bQ2?o_$$3gHIv%@r z67c}#_hjqp@5zdj*7uc!G9tCRh+n+bi-)yx_g&Egb8$nK*mp;1yIZ|rhkCwBbc(P%M959Z58Zkje)(BCy;h^WwySr6v#vNA&bQ%z&Q_H_8 z1sFU*B!PweVc1?(p&E&jl~Kns_?l&cxR*o2(i&8fY}|+&lpKbYM~boaMYik4=sx-0 zai(fa9FCRt3r^L?-2In_yD5nQ%<$B>%RG%!?KXDZ=*t=oYGSal$VH|solRZe|X#Vey=$_-dql#Z$5VU(shl9 z?or(MmGrV@&`jUX^Bg>lSf!?ltpYxN1*_-_y?+H)*(2@n-y2j5e)ySpFU)jRB?i&7 zsjCTbt`MZtd+$iwRj=p21x=#6hau$-4Cd!OPNA7D#!+AJP$l#f{f;K&LOo z+Dx@vXq-S3ID0ByoR1xm^Xy|`a*8cvUt46W(!Kq1P2%9#+_I0d5yg9%p&{xYS}ipYtId zh0PDZze`9x%^A?lZhTBI7hZ8?_tbJNTl3YlQMI5IQ-eN6#1k@I{LUDc@kv!=V7_tR zo0Y;zYm2UT8)ttS5*9+R#5zMptfQOxNm-`YHFQMML_bUla{>E)k)iSQm9nXK>2c)o zX<)uCOP~~k-Q&9Hycpjy`e33PS0K=>1Ay$!CjZmse2RzlA-dU}_O_?Z{|K_b&YE-l zxTolj&`FluWx~UFbvp1G1h@F8C-OPe7d0>=B&90QcjD-3x_ zoetCcbn{x~RE_;0D_;(f)YS9R6A-=!r*>D`GO*$*YefHG)ONIy+~Phcks_{PTSYOu z8NL`b^0XJjqzgM3q)9$7ONekCYKY&M@|rxI`x7AjG1q7L%**L)AjHdQW;pqc$JyaA zkDgsGajkm8ignO}iTh7dYc@b#BzBJ3LJg(rQ7S2rR2`K0@p-kpMA4@r)qwY^`o2hs z*zlCYZ*(G=TJ+X=-E-zFANLg8`#;e1D$n(4!n`V$R)$hknI_{I6q?T6r!&lO*X={k zC)JDU;8L*}%Vt(pJy22-$e$sGP23(+CfCb6z50y~>56XD)#*Lme1ZLs^zX}U2df2t zQ6ZwHo&~0cXJclriO{iydiQ}i1Z8h-+Q6f8t>Sbe3Z!jSoVFNEm33O?CYOKJ(oNiC zDHAUJ4pkKPv{pGr3Z4uhhzK(FYZLM{`m}AfV0Cfym=O^S)#O4`+W~EpI_~)EXrWO{ z;?E1Pife+EPpPV#K-Q0;mOn)l+i6a3S%t|fRm4NDnGQ2lA64}CY7fJV+Ltq;oU?|6 zAdAYUipy1WlSVRs&uZJ+>7dl_$|sUC613D8;e+XIYRbfZm6+4Z=_A9bU9l^vTC%mf zk&~nUw45gkVWP(UFMreQWeLWvgu(^vYPV2pgTFuI0MRrAE34Qipv4oZQZcs|+Ylak z7WU;?im=0UYoi$}otv|i2Na%bx|ie}j%#w8o4etx|J2Ycb0#)VX|9O^?l+s!fkiK0 
zc`Jvs8~G8xgfo>`<;o@2_$~a(uEQJ7Q_S?4CA?{m}L)dBKhXUD;C!VT2(!G zi-6cH$q8n|DVK@YQCTYr$KbNA8*x5gg%nP$B|k=li{BROMn!e?AWw6MS?0FXHQh=b zrd|F_A4B?-?Y)ml<0~VI+*N=^U?AOHG(HviinoJAc zD}%P&Uwj^(CQSJzOKKN3Ph~~2H|d%P0>EL%!Q6WI8VN`7GK0U=Wi^#Vl^N?7u6qSm zrn@1MnDc+$rC)vO$+zNa*MM%C91#ss0@areF4-Gk*8#5h*CzKA(M1XG>%6el4~Gqr zvDJ7yjos+SduK7w!7Mi+(bMC*rI#DTwV_|U8uE~__vT$6#&(VaOS?nmR^lTkJSlSN z_GsXfgu-h?yCWj;F?Av>7d#E%wL16oZa68_2S19jFoU8t*ZIA2`u4rc-Qj_{4RiID zBBbD+8s_YP-8HoiS<;Zm!j9=E-CnTP+<7;~lwfER8sY!VprkhCgvoNm7pI9+xu5gY!RUO& zZhv*1_Dc8>0C-766SB{PU2;>?7*P)OHE=ih9FUt0{n!l5z5O~5O@9p|H8}Bg3sjIG zu!ccBWFu+9d??B=+-dQAD8B(l8(dx8k0R*vsI-0nDph^D* zfeOOS<#+*7RB4$0W}ywYxC(eRb_)Zw#kA@(=2{A~T;CX}R7xypQ-UZOR-8a)%(~%? zqDC7ycRchSn3fWQNHa(I&CAobn$q!)xvoAk`etS`0wCH`7vd8tD5O+k0Q|a|uqOP+ zOl5?b3W@T;78>5Jd8|H}E5}@@i~C+=IinvI6`rV`HAI9@k)d=!zT4SVM!`B~cEK*o zp)^jWcO5-7EH6!D_TBw9*@l1nE4|LYk!WU{qLvXO2DU+QYl)S7;4nhGpKbOk*0(ET zMMRThP}xtOTeO$_jy-^_EcE`7k5-}S@aGA2Ui#2{I6+6M59>svUD>N*Hm;c?P& zbE9R(*e=BS0pKV<*i<#b!1r{!9WF2H0p@p1B~DheT^ zA1xVj+7;)$W92$X2c<*KG=nW8@nQ~C-4EPqzk60@kZI&UPSO;&=^c>0=*v7mNc&O} z9`=b%x;;x#5d&=IjZU-~*enL7llc|K$v}e<#tsqGr>zUVY8B{$->xJ*4<+Y5m$U$V zJtk8Iau;1|zo%|$n}r`%nC$jWrZko-qbbPkgb#dMWPfyi4r@xR(HzRU*BrW$Z6DNd zavhjMVzR6Le$F@=s?f85HRwJo<;d-8hi;Mm;&ONFW(GjmIb;oKu;@qrs%I;YmIB97 z6S5?#Q5MHN<=$o%4#UXQSZ5;C8H+%m+<1#7HA(UGWJK1LFihmb7^FucPsf9=&931Tn%+eSCopmVDq%j)!W z2}lyi!N6?i#5AM`=n0RGF6%m|tc_aAu~EVcku+A`FUm)mS?CzPyLJ!8ienv zzyK!3IAkGg_0;aO6tWauEQy7>~8V@MToc{%Z9j@fF57n;Iw$0EXk#b zRJnKdUwZQ3S~-mts!NOuSyDmXzFS9IAE~2w#gVnOyFaH7>h^=#u7Lg2zk*E}Wiq9| zwIWDM?jIgy!hAGATEiKaM@f!yeGlqQczoX(=&Fa^kvYA4&@=tVS=r1ogwAs41!Hw4 zZqKjqV&nN7MwOv{&jE1W??jDJd2iqFS=-x2F= zhrSQ{{XGd=5~F@dLIR{Fhhk{UPLU|pA3nE9Xh@=84%qU$asg*UDEe(-*B#n1XBTH> z)}kAi^>vfm38V!Z2r?X7HwAC&w($Jj`ysn$Z7cRr#L7&^ASQK*8iA2OSf>M7T!x6> z%w)Of@>S7oA;!dLMGL7)q(icF7DJ9f$3lKdj~8&I%rXuq05dk0gl1?t!(+}UkSKgY zVg#2)knXk~WX~1yP0)zBy^IsAWj*>${7UAK)V<1cG4wjBuIZ&nikXpi3SStcV2=X6 zHe0V0w%GOgKMLtMaX_m+sofD_1b6$m5*054NTG*d@B-+IQ_RiSW)TbixcIktuD|Ze 
zxEXv)mdOmDOBI`&P1%a~8sh5@pX07d*@I43w|239&5TRA6*VAleVL|^RqtD$A2AcT zpdLuVPFoh9W)E^e-kd9)II?|t3+U*mdxQ}lt<#ec&Gb{s`>BWBRcco*I0K^D=s%?Y z%*4*{7Cm^Ce&|cq?_?Qy{}?5CP_TC1tH~*U#_j>A2{rg^PqNGeXVtgv3|>vQ)Vqtq zq+4qGCEtMLG-r(ZT$d z*eE(+x9U0YYV4cg@Ac$hrW&9C?_F$zt z+XxLV&Qi$ljYF)^qB0T=WKOu129WfF*eN)?LQw~ zxqQZ}kM=iQPRg3WH$$)NG=j>Ug=;Eg7qh`uBq)vYT^Gv!ZvM|c-Valj0^85GfXI4N z70wo$1-5;!7_x{cQmeucN7Mq<=iWSlW?(Qin=KW$fmLBkWk#pBW7cDQ1JayLbdxqm zD_bUJ0}fY=)T$C*#S%L^A))WB;jPR}6w*LHv@0ga_!rN0M}9ZF?{xq4;RnQ3jZUy4 zR~<{GE)_^DjEQv(ik9Hj>OI^8WRi@u_v#CJ$){dfdYFuXjhW`?x~urN8{NImm)-xR zB@_Q)Hp|ZmFPxJY5BSAmJaElSTr=e`cpv`F9dAM3FN-weZ@xzYgQ1$}9#}dXD8^KX z$&WARggU#0H&Os>$RPD!_GTGQW0mzGZ&XuaHLRa&gW>gY3y&Jn$SQ65V!`Rl9V5Kq zRX3bF2GxmgJ*9*XcGFvG+4befx(SXG(_$o0h+t2_Y2^|q&M=i}M3N}qv0IQ*J;3tz z7m!v%mXrio+p)d*ep~=tr}bY@CIYl5!)wr8d-jnST{FF{KbIE5{G{ zvg<+pjo4U?jZ4A)dY0vd7>jZhG-2R>z-51`#U9_y<9V4@`?TD2fP^Ge$jdEP)_?V& zJ_d;4Ek=H3l}vUO8?5HfJB{Kms1tylxdtm>gh&7Tp~>#@vOQ7dxe>*gB3Odnv9m1S zgcAd8`WdKP{TQOU+Nqdu&_JPSd8ExpW2(Q&dtFujcU0az!_5>zV++sZCq~??jvrfS;Z#To5&8-e%3Np?n?nzLNs?x0rJl7 z&c4oKiGFh<`s!tvpz z?(U~4vndDgT6d11*gTKdyj(ONu{~tw-gPU7Rz_&yGC#lf&87e7cd=v(Xx^1A+`%0P zICWt8YF7&cgbwlA_%3LQiiHYc0xZ6y`4iHNGx;Fsdg!v5_W#zHtJ|K$LiOdcLTc@% z`x%YLkrP$il5OG4c?>y)8iQC!whXwyw-iiD$m%hQN@cXnM~wRHbt$pj(=BNf6n@EizB5ZmU-#H z>Ee8IL)=^B{dFnU-g)N!&s(M1NFUa$!+u+tWiohDWJNvyS4nZlLDb8IeTZN+;afjl zd3Cjp&*5J!?xQH|0&D<%s9!qrKut8o6_Z4SsC~4<<+qzur$9%fg0`-Cuq*PyCeQij zNt7EZqJ(3WCDR02Y^dvgNaBEJioTWQKb=G+i1s{ooZvC&`>IJlbUbI3_n9#jg8h~8 zlvi|4+szD}nAay72NN{?nknVyj!_QW{RUb&B2V^*xXbm+vLsQ{z%EI;CuT9?=A?Kg<{M{r9#oYi!_4%uh6 z98#YNRMI4u%;tE}mV;io0nEqv1gvU~6wcv`gZ=>IqvzwO%9+9TfS~=0&~ptYD^W;O zkv#lmm(ju-Ko`YsLB*Z~$L6x5a?e|w;}7t4<8HUIm+sSZhO4A*75%vCm`LiBsd;+> zYq9G{$)%&C4+##5YViFMV<4Ib0E;#5VUeLeND7V$ea3rKXE<* z<#*beQovio`4YBamoM8brSM5W)^TrEkj=^_qag=i@fQDffzmAdVcp)OBs_~<)~3cH z&OvtPCKH*>_q&e8nELf~JVG_Vd`MDG$qCh#l?|M+Yy3ZGf@ixT)vG=rI13Wh-b(Bf zCV0f;(0YssRiOw9Nv4v`lmZ!Fv<}h+stUJz`9C2d3Tc)6t45&hsn4P?z{d%G?~CRQ+rE 
zj5Ty);Uw$D&&~)lK>uEF2VXQ>(oEnT*%a@CgKoeM){>?Cy^kfv)_<>S9hHv5|Ch3R zv}dJv6Itw>UxU)>0fxY`Tl(8^iNDts%{AYb#p`Q?N4!lySN(Z_uja)(=}lNM!HIA zI71`E?l|(x-Qn}s83o6*XBYBdqS#G=)$zErCA$E9=DWgQ59*hMf zNIlG+C1G!H7rwCS#ZBen+P6J8mPa)F8m92pN1gu&pAq(E_L1)4@6ztw3CYKap<2SN z?&q{yR#)Qtc~E}j-Q7JM(hCTePw1`?L4I_%fhe$5?(Hb`;qc-uV{2rbzsx7&^ zX!X)#^_}%`ZNO`q@cTVE;O(XR?Xnh-9L4%}>-uyauufddfa`L3eUwS^@&pFFyiEx$ zPM*e%>3GYOV-l|O*TVht-nl!NVl)BqF0Z_4zlJw^hNf^EB5K-B8ckxp`#o>fT~1M} z2nYQAesbCUe)%totTEmG_z`fl-134x_jZ#-PTB$SN!$s?4^@YQc)f{h)sh29eJ3W~ zzuY`l$K%v`_Ev(Gyg)A;M?OicOU!G(q;so1~L3 zcs1Ng-@Lt!!qfg>G;syY-ruhMBg%zsRfRbPEp3~@Id)nP>A`lF>cMwPWG~0-oi1E` zoF2r;QEDOG0Q4Zey>Q!|*Mb3xgUxl9$1vSy?8EA*8D9SH8_ynx+ofrD=QdL{teR`J zPPvPoOltNuCt34lwW$2cv3;ktM&+ljE$%&Q-7dq28XHLM6+Sm8CzB zG5$n)hF{yJS5H$eYDAopO-RVa|B(c~WQE0kF$@x{=IVPJ;p!+9fEFrmfsx^3>^Uaq)=qh_~s1`AOzS}jGaqnOXnxc^9; z;|wpS1rwTiQ7hH@9KChK=7GGqh4-^ zdeqhE^n;92giw&lM(4dc8R;H>FqO%m%dO5jUF~>qnRak z1LI|=GBn7z=8J8zM>y(wmFihK&1DVnV9_%QZa+Q(|3v{J#?tNCI2OwWQoeXV36jEWh+utim+Sx#YFzSZ%O z;7-!U)oQxggnyFOatt0&&pKQuSjd9g|JFl^dq@-{_sMiAH>ZpGipQ8;lvGn_Q+)`{ zf;$2zd2!3qL`l>U{O#*ma;AsRTz_^^v6Z>pxbElMgkdvE z{Y4dcER#1xbY6wO&E~^ARtfT`7Ql-1FVJi!JbP-w2!97?^5vlsW|=PPPi3J8-RwTa zYT4Tv8w^T#Zv&s4pTtc4r?p)DI7 zFdVyTQtStKB{KVb&eHT*THa||8XWG7J-ivNlN9>9La7l!<&W5wp2S0zaz0msCk zHG{(N6wyG`%vpM;^bOWz1G*1*R}oFNrEU+F+#9BaXC=qhyn+J&QclP{xx&iN1;rZmo+R}2T?|gsAj2laaCt7D=yTjfw;?fHR_eJI;>^q*Clzw7 zeh&#|q-onmNb-BsDwfYiK2+7aP5%2}L;=9KjdYM=!fwi6!VxZeiae7Z#mAI=R7xf@ z48g#Ryt_Ou&p~=Ce-9{9D!h^iR)qIcvT^(+3aVYgv1s7rf6Z{04--rTYhqJTa1Gj) zmXpjDnrKuT^)F_M8vUsB?7D+G7?U^|{??G2_;P2E1=yB8$ehFK)Iz+o_fkBc?g0^< zdj{27W>%qT;!w~$PlU;_m1Bkcew#e-kwD!>r+e zaZ^gvH~hzGdQD4o-pX_qryrKTV)HrVr?0i#dhWVep3tW5#mdTJb_373A;Zf}mlEVE zfivi^5`jF08E^A8dbR^E!J+fA9QB6FcA%h1sVfWuPTST(k;SDJLLxToYj5w#Z>)rn z9~A0Ni+SXU+pGNt+hag4$kJ;Q+gZ@h3d50>i(b62&cR;nCGC(&(JAbH6A10r;J&AlAHD0Ug8aDF}DY zX5?i0P(ho|bcYIbG{WAu?&WxUW2U2AW8e#@I%odFKl(A$+ei6PQr*x&xyAMu*)d8C zujMkamv@TPD+4cjwy+Wn{m?`(UqrwBJ?tX6Wn 
zSFu-yS}GqqLG!EQPC!s6@8J)!`6ZgHV@9hQcgSZzlvCm*tL}i?H6z#yLyodA2g2rN z+A*Y01|l;d_WBWEf$5JH(4V6HmrRe3BumVz82jxs1D97t@6=WD$Nea;6Hfc*+tv*+|1;bZsfUY?z4c?WS$Nfj zAM^K)&Up*>liJ+I3WK%Hy?qyOes1#LasQO_Dj3?SHR-_3d5i@p8a|uNc~B4cMo)1k$Hi4QOlNX>CPX;B zAF-n?`k0P}q(kVqDA(U_=TIZ4jBJWRda*IAo{Mf7>>X8`Ot7})Gc$QO9DUk%yp5Sr!ZJUg!H7pB;J!Ul&2 zIkFkd(!_g&H`ZR1A-^S+7(IVxGk6=vf|g`Aero$ud`->b-fa}3umsL$6}I?>PS4iU zt5sc8$(Vb7*Ze%WKGsdx!O;@J>}0+)wOmlDH&zn`o(L)SjA6WI!-z45#)yIsH8XLu zs#@Gy_0v=;3>vaa?mcbUp5rq&VE)x-y}D1c3vD)zocP4qW(prHOAnHK@O5v#!DVWt=G|U)UvJXZ{S0UF!1HVZ6iK_p--Z{F5 zCSC~|0I;VSxAOu2?jA+s!u6FL>})S@0HN)e_k2fURmb?KT7p;3ssxqE^N>-%zaMJI zfOOKEWNux)aA{bN4^>Nqi!Ld2ws$2)pdY8S_iN;sR-BjEbEbY7}v`0EnCY%ns>4^~>^N>+$7FV1htx-BJ}$ zZn;s9YHWGI*Lx)Ls1qm{L8+zPqWwZr2`PTcB=yx=ip-F(>!<5|ULodsxF5VtC7zpQ zYB}mx%*NHlep#X6@~y0A^RRhOr^n1;x3oy04mX_?Lqr<_BYc9R0ByjC5D#raq%iU1eWe_NDlhM;o}A#Sck z9i&PI?eomza|ZkLH8r^=0p9-0oU1xOEV|jSro0n9Em^(i%9CGgWOdDGT}?9(fGTiN^G4g(ga$1#LRubBdH69%cVe>b)9jzoKWdltW?k^t}PFcRT3<=p>hh_AX zh-2048@t)J_3+DIR$Jsc#{hM3{y}|2#TG-U{`oUCAU%+kxz{1}Skg|3sRDrfhS2DY z7~S*x&w0g5^*h5?`rW1_LoN6j2JC9V6wzNxG|+~kdjxxLiFQShICDfbuaN~Fn4u%} zpgd{% zN-bSWs*x&pVyAN9dTIicK|uTaFb9KV;Y2lX${N&cC!zDH2P*)$I zWV>#;wc&V4#1o#gKxnV#fr?;l)-MpkH2m(x)8h~v@(@Eq^`e<+0N*Q&T!di^*k!R` z<((2L8py+q8!`VNWhMtb0fbu1r*r!w%5=ng21cC_4gGw>!(JDrg z<#FV*uW%1Hr&UHX4;}D(&LWsR5;Dkj&9HJJ!h*togaZDPOXuMs-^5x}L62g&=1f&pAQS`%#xmHV!>nX~CbPF6+zI#iG$Ne$ zXErO$7~a0BWX$;K{v_4b?Dzbmv0a^Q|Hpg>F4e4z0*9?0^==x^N`>(((q#gF z_FI3sII$TXtz2h5!8VRtPUrX8l3Vo}0HM@{F8BfU`1o%FE| zz%Ho9^rDDU@9?qa+TO3K4s{~fYeqFx5SlP#p{=0!58$FB+McP>e-Q9zo^Kl~f_Mp| z8^Oz|jyp(~zS?-G0MF^D9)V%qD2O2)f#NL3y+E`CD;hl#JycAd0G8p;)UwB?DzhE> zUU|p9ejN;X^zUNP%|Q(xl2%PuoE1(~m?fnf3Ss4F+$RFPwP5!O2k>Fan^7ct3}0cL zQ6E*8C;>+Y*gCunumh%@$_}T$GQH!IGWP#U_2!QB7`Smpe?YRnMW)6}3NV8$pZSu+ zu1}o|2byDpolx%xGiCMsSjK!mBjU{yGD&OPB)?E#0sqkwn65sGB<5&bi>ORRs`jmK zHywMcp)vY{OUJ55=EzyQCVRr_Ks^B#w=5rW2OD5%V?OW)Pozu1D0O7uNU?Sp{ko@7 z(5R6r3XK>hjGysqtE4G8mH3CaK{f}<>c$u>5=FRUYL?X@{iHcDMxJ|8lqXJhQDP^* 
z`13*L4M<0jS-lZn4ZG{&ue~fBuD00Hr{3(Q4jXoLJ-yi5*Xb}1174w=5f>u*M zs0f9vvcR5o>h<&l-LRq)3=bf!#5Pqi_?YWUggH{-)ND6PiWwwU~&4(w^8cp@ zwMLxWRj@1!AZgS-8dIh1hC~Hd`kNR&Unq#IM2gg|CUcb-cf2E4=2H9;t|}|jM!lk1 zWHaB;C9TfLCNWXWt^4p-PJnTsVC&OPMIUpo+@-N4F^yC#btk9Fi_Nsx@!R?{ao#)t zxb>cdf62jBk+BJ28;T+p&iI*TTM#4iLIzz1qC&tE_oW^NxoMz6dbA}oQFi?h*_fO< zuou2n1a9dJsq`bxXJ-ey#V^agG^F1TzAj{Z4a48tvHk?Zg&VSA=W}Bvz?r^}C<&K> z_OyyP84BGTOfz{dA-}vFva&17rUeWJ2>w+@oo9pHV}OO{$FV$s7fhgSQg>646ZF=C z966Q3?7rgVt4BB?e4x&Jybk&u)ol8^|5#Nh{s>l~EAA;!7wZcIZAgE)V+_{#v(l$X z{!7y5is<<4!^`In0gAmfJ{1y9;U~JQFTu{CPUNUDw3y(jLV%*|SZ~X~hP{a5bTlvdo z>t`|NQ~@3fobIvt1fH=S_i~PDc7+v_-`m9x=0O>!l*SmA&4Ut*-BmIU z8OC6#HTi?Ev7k=E?M9$Up@V6fdac&M>S8^GY4C!@x|dw%X#>Y!m#6W>nZWg)F4B<# z07kSGZ;4eeWdJ-HI{Z(~IBVtKb-+~=YY)(px3UY*ys2sGug>q)12unw0%I@fny|TF zDf4-aDn6urfFW&yFaJ}}`>E&Q4n-*Or8Baa@rga(><4sDRw3ROnnr2Otq`2 zcFcV7y=*>_4mQI$=uD@M5OHqNG;$q2vHUA1H_o0^wYlV6IwC~^(^c=s?-6{ft~`LH z7ZQy5ZUNNTUTjvQYf%krVSoCEMCP9h^yPKj7=(K%NTztTjP^(wFHXPYN4mAwnQ$?r zt$1KB_H3u`LG5XZyKLrU7rlfXh!z2;%2b2NeR6P=h%M%k`tDxC(KxxtTRlIRLnJ~N zHGpf|Kdtk1sd|;M>YBjEeU0Kh_2Yo+adScj7WAhN>mAScX|DZFW-OA}Jq=q`n)eL$ z1Ta4!C*AKKDp=Iu;3K_l$DDI+w7yY5Ka@EjUlLOM{|mIbuLL<7b> z@JPHfc{aQ(KJ-SwiY=^h;L3&DbtVer_klcqgQdHfY=e6_O}EdIAWC;Ufa93`Yz*_) z&*`I=jUABTaDor#3; z2eQh7NO z_kt37i+mPb7XYZSZJ>-0!GtTV@Y^t~zvyHu1Cx>Tu%4MRv-#PYt4<_hc&XwyH3K9+ z0(we6g}mQd)Q=Bi;Mgwton$g|5aqLST40$o34-X06FO`;sNM_$*1W@lSy(jv@u#9OV4W!0nn8~Bf zW5sA%i}?5@5WG)h;1)*f&J?`VUm9!-KUzB)@@a zqWBVi$%cQ=^oNLG&>&p`mYLZ3TwKHwQ+#`9qQZG2^Oh z1Na*cRo&29_R6B$UJuQeKhR6elq!2Yl)ha)my~Ei2O6x+j$6CR0b8~GD1sX4ttM0l z;cOb~AvvZK*u|*Rt+ATyjj)r0UjzU?*7cEC-lTR@^zb|TjMr0CATV@l`QVk`10K6ch5*-?Y+pZZfnVgK6y;~e*{y|6@uxEJvEkf*;PCmrEIHw zsNY3QvvcdTh`!V(vwdfPabsy0^%?yO2Q@^(SH8&I%)2t3ZTICBv ztf2YJr1zNds%C1oXLz4kP7=-)?nNM5f|Ov&RembiffY6k5tgq~3RMb?OH&U9TvKW_ z8FaHFt{Y(>1_wjBsbLG2WlD>+Mj-%Ppv=(M4Xj(?A ztB}DQ$OUW>VBx@EybXale|sygef+dCaLv43sV8jAB6JF>dPj|Tv~G8cNBwV0gUAo@yK{1xd>S% zEDq2GfBS9n=;caN*>It#n+W&rKj5Y3XEeEM9(YO00N!{cA(Wgvi(C 
zw6cFRz$o7lP~8ow4tgcwfuGj=J8Xov&=aH+k&3ju=_=r7tkP10WlT*P(u?PhAR|ER zK&NwS#0u|$=U-tzp&bBJ>czEChi^FBOyR$^{L>r}0-rs_4S)u|D{@3ab4`{x3Q|9d z+t&Xmk`0FPTI-foD(Q48*|S-X8Zs6oS4h*{BbDmzfctj@H&cXf`6vaMuH_DyUsR$( zNMr;JwKi+K`sl)k=*DD~3kIn*5iBil z(beA;{hY4fa0S4DygzBEx+MS0?ai4s>hbnEzH-9e@8AdO^)Mly1;WWXXj^W}J2=mF zxY4Zx2IKQ_@(g8g1QUYX(hQ*-7G*#ZnsPq~Fj(M`+N&ji^-V%2Vf@x3-(*LF=N2Bg z*YcR(!2SgLlX@u`peq?@j2BfXeLArPvkA0NMSBr1IO@%BS3l`{<{)g8C8344} zv$AUhz1uT^SjLUTUwFcGozM6`U)zz${C{|TZZR9rVT=^KWESfbf4}eeUIs(a2anVb zNiw_k_fSs1Yr|qfQab)cy+{;ZJD;#>b+Pk`qs*5Phjyfn@e6I=;s0jzDyWgYseQLP zYq(_&$be7#W@YH#dM4?|1m>X#EDs)Wh!qWqpRD)WE*&>T3rE?O6&eqDN*Vkyc)jOX zN9gx#Khk0lUO(sC7aANwa!X;)2OhynjLHC)2O)6Z4E^|2v_DwxfXhPyE`CR8TgAn# z*at)>&v_-fK)+LI;0>L-JQMmEjF8O5x)}Ka17N3mp_)P%N4-K?IcO6^^0?Snoa&~Y zXy&8>9Q!NuRcSf*MXifGbQuP;cM`WC9QpGoA9+uKF_3NR( zbZg%Sd?YrjjuwNbhN^nqHNjP*NkQ$rd=kUURQz7!9mYl=G#EjGJlbVjtn?1a_f<&x?y#<8c5nW=I4|)fkcbj3P;pL zIH>n!@JUnEUhRK1;g=8WC%;{S-H>R0(fy;>{7qNxyPv*!4kr`B*BE-30>^`&=RZEY zD%3ye0~G+2lDafsA}qpriVcU6Qn(zk%Zv&~uS8d+&KY3GS$w3X7Ya{yOI*zWl}TuJ zAt3)rOhdQpXa~xfoYB-(7Mm@MpnhuZ1e;R#m5-W~+{U%a7jgG{*U0Or(f1AJurKA@ zG&R&`HXD}Wfp((mt@b2?XX(iJgo7p_JI=<6^xhNS8nMMnV51S>13Og1&+(A8CAFz> zb+;I_i~OBXRI4OE+M`n#GyW0;$f%rUwNa$*ytc#k4eKXJZkjNaSJ=43 zm(xT28*4itE-%|h3nSi|`3*)Gb5sJWZPH^s>6 zkB%hicXTmrUtKLq@2Z@{m;VaG(iN;h>!EOmk&*M7c6oj_JxpnT!~(~@ zEC^-$SHPDcG^{NfuUIUrp8g8Qr=_XB&;k_Sk~TisrOv6D#v7=~)0Jf++V~T(Di0 zH9g`5qiJ4N?(QPQl2leG>0V%Gs_?D|*`R-LRD1mPm{DhFG${>$5j^;3Ui+7>_Q?Mn z=!SOKbD|3%+%6ULx{CDdH8ALf9%eA5-Qw(y$*SidJQbo83sfIP9eU)djKSf~u@;rF zk#3qKWbx?m)32miW_l}chvoeIHK?H-dy+K9c*+UhPs_;WHBghW{<91sp3%vIo(=lkA&7-%NE%|QA-orTzYkP=s91Z24-brB3`CRs zEUhe~as6FBmy;z&h)W&b-XS!?v$?#V$px^8g>|(1_+dT#_?-E8aDRv(IsyJK0}r|P zrOq-7Y!wQ9bYsjL6Ll-3wy3E{w^!v)>e!zkyGj$n&PI!;z)}Oet`_+J2oTsf(UnUi z^6!x!HZZ0fr9~6bIN|00Hx&v?$H zM_+QH5-*nc`WXwxVb)70=2@HPFv}m!EQZQ6I>d{npx`2#J)tiY)U^f`DkVxyC@b%Y zd{r7vMrZfex#W`0LBoZ4Wd`X#_PULrmEfoU<9Qy`^ED|Vx({@9=SYX8;`#wEzQ(BW z)Q%vJJ}Hl=qfVQ=0?AMhjB$uc&Lv2-a 
zt+;v@4X5a6j>*PT3jGR-C4O}6lf5SRYBpw?C(#q!e~Q9;q|VHHM5g_?Mw&19B(?T0T4&a*SLl(I6e>%mxR# zU~Cn9{c!6IP%Tf~^kaZ|YH0D>|8Y_(iaMe@l9t&?tRXH+^gX3QHLOEV#H=#L2Hre| z_XE*D(Z65b16j=J_Ln0}WrghD379nkoxKm_FEk6Ew8yQ#Ueb^aZ(iNu+=;|>IC{>R z!QI;QLug=Q+ThPd&UeL)6|d8<>q=%_LSQ}OpcfntfLHc_1mtWCfWsOHQ~x?Ibf zVHW;@U1yWtT&EHI@rP0HSfPT@mu^2}X7j$kqIvZcJ*%wghu{5Pgi1H6v)k%@dLjLJ zCLO*|5^gi8FO{$PtfWoJCvYDA+!e;4^6_iCgEZJE!v*^t@vzPx(;&!>k9}KFl@{NX!;43RW%m3 z&4{n<;FK?C`EjeAa*xgUh&-tv-x9;dg+4u?h|ZVZW2V9@pFqF)x0-+~Vf+PtIlAY6 zG4|f!Y=>{#xI}{3#Hg**9<}$XQhU^iFMB3mF)+=_ z@#b)Rx{y+b-+1QDL_dvcj7!B_Z>&E+DrB9b2JO^sCQ^MQ0o=A+oUWwsUP>oK;M)PR zo4pYDW&k30gDirTx-2FT-M+6w`cf#SZegHZWPxcQQI6>mzfdDRE$7$%5uOp5j|WFX zBwW6I1SRSvZnp^bJbnx*5?9$;iOL}$F_0)IobGNZU884uGJ=<1c|V7ZysajdD&r4X z+Eb@p3KMqEwb%4KED`G2iTK4h$cs!j+kkP|OAYL|kTSt3=7GACdu8C)e0NS*6Y#pI zbS2V-_fs=7U;avRmV@t>QeWfW;(NGkfh|J(zQT&BrfS?gHRQNS$0p zWn4m3Yv?*#Vt41q*I0W>T1<2ES#(HAoagV@HOy$-ux~O3>j!yxkE~r)J~)qR+VO}v zy!&3E4)0qUqWZ=e<$aIyC&Iv&0uLTLfX&Xz=`Yhhwmc>D8C3x#I@QDmtIdsl{qYNU zo;%718xmbW$p})cEIOjVTD#>rtdUQAIrqKYaernK$4lXaGJK6%LE)7&wN8KMuOS&k z*M?Nm@OhP)OGIZ9q0)~vM(JqyITn;_hC5-QIPr7NmyaZ17n^g=}%7( z!j5S#uYEHQ0S_`dvTW@?d2!D2&9vm)16=>wyQk?Z#@bi`&TbkmKs#pJ$%>i?06S4H zDZW3dwrNt%A*o|9A^4#gTnqPv{rBHE(@?lQ3@2H^jttz63};L^=qT>!>Q_e<+997H-ddm3zD3_7 zp%zgJdv`PZ^Sf}6x)L;oKuoU!b^|k}ULVK*RiUIRY#6+fQdx@g1$z^IG*4pgtCpoG zFP-%mi)fz~pvTL=MA4i}(i$2Z1-it()wI3kau~k1O1$8e9Cjof6MD{kgzMn5|G;lli-J15;#M3Jv&fQWV$#tD-sQ2Yjv@RWuqYrW6~9Gb4D? 
zE#6{O4JoW%Na8R1Tu*jk?N{wLvaG!o!n(3Q5YiYj`UG`k05*hwT;J7J@;!6U0Ctfk zQjQ{zCPvqqHrj39Hie3wh)gB#ALN&Hz!*`t6hj6nq)`3NHQg~e`f~AXu&i#u!flv z9qU_K5mhwu@9NHBbVhx$AhoVHC>|4#T#2#z)ZvFc_`HkPTvQ*U$FH0ZumTIlTAGI2 z{j&a{#>4+9$D8TW#=nF(vImwE0~54*fubf~pDanTJ8WH}ct(`))n5|=EIz9lLpUJJ zz`ifKghp8ZA;y2JEDP$zt2uWEVB=n1sKV%>OwXm+8|(IyUQm*0q911L4HBM2WSxy> zk7n&inuU$x(*yPHWImfm6|B zaN_ZD;PN!nsSt;Mwl{3I>u#HxX|z;APQuf7$8I`X?D20MlMK|ZH@a{298N8)pnvG3 zfnM!J&C?wTKuJ9xiuDRMDUu)ALB^r;*@Qz(+5i|g40GHM*U0Tl5>i@xizOH*yTA_f zkfVEoph=fH>+{>}ZY}Hn3*Ay)>hO7B&h1`o;ti=`Q8;M=(Amq;T&tH@1ua-uP<*#A z=0}EPo*T+I-v(ENo5An_lg}B)Q(kx((Xx$k*=}6uId&?$EK>Qe_r@PXvEl!77U0+-C&95#$b{C+|sfD{OshQI-Yiudz z?cPIsghhW}6^+I4AJFT2o2_8~D=p;T|9*3EV+=JH0U_ij;FoR5ze&4gzy3szs{F-h zdHWU)qYAXKN3S%$3>eYP;0@8%*V6qDeD+1^U-)b^%^Bk!O`B5W`tVKtV)&Ee=uAG} z!zj6sp@EH>@)QMIet@)Bisq&QO)_D2@Fz2av zyQ85DjnnWR-~P>72VSv`2!a8LWM$LQ*HmI`Ynu2O&^HMM5!@CrFYKy*(b$(rUElu; z%WgW{{~AcPS5v8J6>xna465l~g>Pl)A>X30eShgrf+ou8bc4S>Rddovk1TC`@|v-H z+VTTf9(68(`-CPNr{#=66@UUU#yFeH4y8B4@$bX$4&;S{1FeeItHO_aPs*eOot@n1 zuP)?v)~a^hG-;j?fvxAHAgGXe-mdf7T21mrLr=6%lH9)-*zAMuP;EEoB?Wzn!sP(2 z#8JQX;%;xJRvx@~ESlzpkNx=z93?rHBRkz^N;uHhQ3e`3|6pLlnf^v@PnivPF~aIZ zzllWY`5`T!$Ab`uL*jya;t3t{Yx7#gDc2P+ZYg!B<@(Rf{GCPl6ecOHVO@BQ?cE(#NS*sXA zrMlJ4dj!TC->=IgE`5ynDGQf8mji1)5h~af%PjtIX1y3X?$)6LbXVzEgo>bsH3u}p} z=bM$`)Hgj1dIuc0c!|P$z4Q1l)4EQ`^1sCBGO%YQUDU;#*G6%-`CueEhaKH*NtCZC z_3=sgLyljWm>-q9@rpJy)1s~Fjq8Yw(kkZxp7aQ$!HS3z9?m6J7gGFrsOVm6Chh@S z%!5wKr9yQ~fdyGmVeEYcjW@v=qyU+LxMoza6P)n(^FO)P?`y&M|HVAjq4%$7E$w?~J5!@T1mE=sEVSGGmuP+N zY3;$Lj-OJf}NiMm0FvdPBAk--UcS5m(U6Mufe*jO$V^2Xt>sAl_@&n z-2lp@lmHc01$OEPCIxZw;j)K)uE*!1olI@hH)|pZXRkF4u|IOL6l$*Jp+&&o^%iXH z7R&p#%FYh>Xw#B9k?C(FA&Eo^ywEGXG$yz9cD^Pgi_&bv(qyPow^1B(uK9MEG@eIA z@K%P=G&TX=1&iEXVR=3M?g>c;emjwj0ImJukPtl=yrEI{Vc|$&hiB~DD;isT zZgjuQ^{`i}?&phovd)h;l$gNh8Ic76Uph8>*GYwHAZpx*HY(Z-+Fkh!<=#(90-RV& zOQGbBoo`1mD*3*Oq}f#j2ikz-%{zr2nLAvE`;4k!!ObSE^wB=jL_FJuns3=iPMm_e z2lU8gsXQ&hXt5RVQ&-vaP!X{)ET_#xVtX}mvc=xa!dxbFtF8KG)G+)4Kj%hTf`e=P 
zK1?&&K)@c`Js7(-T_$iZ3X)5W&DINM?G90PH`?oIj;a_BzWxj@bxo?rc9>JZ{wvqD zMeu$d#h;M9v9>}n7Ntq%*33{1z2#C!`m2u1-wU<-UODgMX$q_c!z#0*3?wh}9eMb> z53n@^IvJ|0l{%c^t;DT@e!mU>)Yf#V0Wku3)y%}>WA$K4bLSaVZLwj*TF94sbu|X* z`Fw&=iA<+lefjct5v#|KxeSEqM2FunF9&6`-m+Ko2Hhma?h6G~9#!A%BVUp+!*(Vb zDgr|H@;FUXbb$tIpuT(ceB1Tki1*QQ1~CYzbdVuwqLd4qi)@V&Hv^&0_632?eb3Sz z$^qnLe?~-~kv;S?Ojx6@O3b>iK8+04|1A0>h_{}oe=DOk+4W=ww}+zs8;ZCB52h2O zAowVaS>z#la8VZfg1gk6@1;hMl>o4g!l@gVVB@TdH6Kpg87lIcq+){5(D?#x#sc*1 z=BJl923Eq8D&i6tPM8T+K$nh5fh>zFLZ}PFG@H!)Qe{KW0YJuNHn5Jn{#)rj0}+T@ z#+xZeJeki?{TIcSx*c(vp`@ADGmui1rn4_JMu2DpZ$_n-q8hg4qR>&t4(xb;OgafB zsIX^rZ*YvuZXh$k+-w=o~&lB^V+YAWZDvo6y6G6A1h@`kLFqh63*l8m8 zCVEETztvV8R71P(!@g+hyO36%ZG6zHb8V~(-D4i?>E=?H`P5&VIsvl>x{+V@dJ0oX zbinN(@$Xp_KGfxS>F-*6d{LoNOkS-JW7pF<@cOJuG@li^^>A_9c# zhfo)zKNWIB+xb#!dn$b&y;rIvnzJHd(cuDBVsVRtj7vOT>F_#mGjYQf9(%)Y7Kr-< z(de?q87xE~7l{o?<^Vb{V2x|dhLd2fS%%lW7`L`zmmB(2t0Zl&WV1XF4*k@KF_N*f ze-2NBcGxZC;*gjo;na@MIt0)csLN{&6-2yEi&ysHBC7Zvk4>FtQ`R%{$=Od@MhSVp zyGNiZU;owj0RiJ+C^gV>$|B?&YH&oy?E#+87MjjM!lU@9Z&*|MnKl973~gD()>$x z@uCJpgV`On77h+NJs-f&?R_RDB?LTBd{_NQfI7Cxxm}gAvB$QGj>D7c6ySfQP?MC# zU{;OCj6eFSD7M6c)|KaQJ_G;j5Js}*EUIf|T_C$wME3u}=BoUDrvJQQ^Z*_efo7~76N$Br#YwIE!W z)DZYiQhFBR_8(9W%0(urG!>Ghp-PZv5q`I@3xW4xSX5ZO4(=e%nHRr|_@V`98|2*= zUo4-t&E_xDSDF3T!M!jhxrT@T`EhjvXj8ebcGCq*RbJ%&h2wF5lXW|i=+}>2F`cg} zdV7pD@-4O_V^GcCP?tMpbO`cfBU*JRYyG5#EAw&W&(7~eA@q8P`i9IuI3C_TgE@5e zzb8&^ZN%V*sE6V~O!K?-^ru82o>=$Ou{%+IxQi8On7;{+8F7P<&2Y(0+i@b{E|5rw zI}>5oZ!irY=1;t&M$Kzg+RwHUl zgM3CV2?4Bf3cW@>IN+ts1S)w(UH<04#S{`w`vuO!cE_83wXez#%+H&wft?a+9hi!m zcN&9_Xt%MK8s0~m9S*(4lBVm;x{O1nTjYZ{8MRpL9<3nw!Tc-dMMs^WSu&Cn})V1A_D5h1sMv1kt%$t#9HaS2i^Q}Vh5L6 z$pfz%Vj-N30&x5BMb<(EPkf0MCo|f@)DQFAR)L;1=L&Sf1juwOEn~M@WG1A6>!^6s zp4UHMggoal+uvxlEBHLOFwEGgTUpNH)XY{Xz>Ib>335#!cFnqv88dLqp<+NUNL7cz z#aEnPLa&{MHMIon!L>dk^ytB~Z&nSal`hWvZYG#w4_e++$}T!EV1>ouE~b|cRP*qv zQVdRGQZTyUt!Fa6r64Q0OE$!>MMuw1^DTry`^QjKJk`wu%69sKmX7Sx6*X?}Y6)HL zm{PL5`8ltj#}86H;L>W~u@mn3l?lg{AW?n}#tf`_X-a96J(sq>!`4DfCE{pBng=0; 
zJCCdeCv!VHiEahHD-5#_3|~EdFt^^|WrM`{d-aMs(vFQJgP^r~0xkHDBw0_fU4-@M6!W2bc9C(r6~mI7 zw?tIDez%zZy|Vy^-X_s#C8Ul=|F1y-^W~?FkITZd-r%HEOe;JS04RUcBxQ4@T>p-o z>kY>TnqU#W3x0ta(3>xjUmjenQFq*+;jgG)-Jl~0xTR1zOY*wf74&xumwD7wkDx!D zPd5q19{nUtuyQ1>3I>{=1key$B{{vQbOK7&{@gKBwRp>olo?t@2;RqPpzJj%NDUYuhp-~&v)VgE?Xf`-tW2!lPgk3vs z?jG6PWCVs9{PJt7$cKvdxnyFMIu3VSk9bkiN8(}5*yz-F@f=_o`PIQCClR2gO(w&!<(=igq5E1gvZvBF4^1`diGcZP>rsD;$c2 zn4T$6p7&B`;XYTa{v+EMDn$c!f<4CanDMWf8xR4S*{&ln8VEcpps&{>!&T=wNl5__ zXjFq$p`~3i1$|T@H!s9T&-YfAf!ZW};7tCCxHE%srA3F@-1MR2C?6WG^i8S>yzriA zRlhXj1A-zD1}t<}mkW9?*>=h41aE;@z*Nuugln6q?RET6(KE`TcuU-=u zB49v|80XHRbdM_|!ik}sV4&+07^4}GxMG6J(^_X!(&5YjF!mZw*R&S$bHm9$pmBkiACOeBJ0#g5X)QF_$bPIup!Q9;^xg9Qr+O&$2|r z{M0R^nf+Vo9;lCiN~UP$8$9USkATKGYroI&nF-P%M^hgP!GZMbLq&Y$)?pv{j?k32 z=y6GF_yMz)vW^I8$?39ZudK@VI3OPK{9Nf0VM`cY*%&N)Mzx^Bb zUG%-BT~a>ovNoV=1M=c(`4pEP39kMX)cc>Y-wR|sO=x*&Pa|zn@B=IJ}7@ z5s2bKqdz)1B^t%=8C^?hfsB*zs-&LdPx|C0`w9q$ibdRstj2;LdYFcJmw;3a()68H z^k*a%zPudTWO>K$Un%%OhL31}DEtX!VrsrCZ zO;Vo?TllSEtTO^iQeAQCdC38STk1dd=yj%Z&5FSwRUj00Y$l%Ir^x*1OzHF!6nISL z_FC)etrYleT)ASOrkvxN$p;<`Om!39B%a8fB!b#0MnPrk$~BU`IY5^2PJ68A>3pf5 z-?d-28q+TZ_s`<#96GnI$mQbbBOn-OqvtH+qBtif)cdkCe7{zKSi@@L*fmk3JO9?6>r*# zwb~gR+cX#67D>s|b&=znhB+eMibkR;T{!{3DxUBadiVYG!FO#u^9yqS7*Bw6bx=DL zII-Pue~QhY9j@{xC=siX#GB1GgTVfop`M;{;5$z<|&}%sTnLWJo&1lsWao_Z+?FF z)$?!Lr_;N@%8V#F(iSA&H{cz-4Uxh&T_gv_B>p4hz-@d#7{%=1 zSPtKtbx}2>IGAsD%Rp3XJG?WsHO|&E4g){*Ln=9CVzsgT8*%#RM*nFDjeiF)ZPA1! 
zP_fxAei5+VQ`ViK3ObF(u2J@cl!TWR7JcBL#WV0XWZ7iQ8CTFh8#y4XIC{s|$_E&3 ze3`XoL-uh~py$U^ctr;;Zzub#%Tf9HngE-^M)*m#>_4GqM^C=S3eW$6G^ci@azl+7b2QsY~hJvc+73Qv?FYu9!_ z8y5>>(564;a~WiB8ppz=Ws2+RFh^iwj#tl(2UESJupbVNGQeFVT6070eRMWMyxweb|=CsT0>xrsuA~?k~{c9}d7RVWz+nWQ;*=gs#pOU$u ze_`gGkG+SPB)k^osNh~YZ8_pwPT%>)gzH{Az91&1Of5Mc`wuswcelF(|DJJ;#A|Bi z#aa}vyFs3;qiAGy)Zd3Ogjg2~JGrIP8f5nx#$2MLu2$t+Hp8N| zPpxXi`aVx>z^$vDU}PdMPFxw+7jXkMOksg+Wz=M-Mjb?lI?f+@VAW7V%dQAm^85F= zyF-$j84>yllfp+pCE&|ZhS4Q`MB|$aBm4*NQo@w}OBp+LH@m(j(*zqW4;Vg)SwhNf4Q|C`}k0P8=lPL9i z*=O2-bQ=;k@A`fqW@jE{gh*=cB+6r}*D^a!)<{6j4pe%5wg8okwQu68ocR5K{JU5p&b0+^Jrp%Y1W5 z#^V4}39sty%@+_@LjS^jy}M@_?T#+CR1R#v8{NDj)TB;OiwjqL{eGBXPCDkN~ zk4bk5ilIl_#a2B@j7S~T^DG`C=YNWtSj#^Xn*cp9#^m<=n_b9QWx~Y!zK}%92bg%T z0FT&~9uaBT(%2QP>K#GY)}RgFd0^uH{=>?gxa>9VLGdJ*fKEGU`}~-Y@24jRxIa%K zyCEf&EK^6FL@i$M(@dheSUjuLNeW%|B8k-+&!>T7$b9ae`tt23#sl1kw9Z+~9Zt)I z<%Hfg7+P<>N84$$vi|r#C_WBxl#t5d;gpz03`p+!lhTTKi9 zTX9Xqk3zTlwil=uWA{qEYtW5`V>ryifU}X_EGxGCZSXLotH}fzE~RDkU6! zO@6)s0Y4mUI4ART${!ClE?ePPAbPwTjr`frcoWTKP2NtG61<$R@qY)OSybU(#L%fGwD{tuA}@S&re$RzytOyczU4lThrGyA`oiD`Zl z6Bj!3_dzgV3t3tTu{$GoE+eL6)x6NpXuckBNk^S+q6J)5eEd`*WhPQYQ+JxqSv&ss zH8a4ODTTGR*IZzwELkN0$3b;*^T7DbkCK0>%-S(OLh{FuA;Pvn_ckG0xVMpGS|*n- zs3ehdAnEHTK`0Ts=~TWCzjk4AMiA*&CS@U8ULdOYo@zU>4LS}x=SyeNh4d+NV z?h|E$k{k9k-KzTTh}IQ*RECJ?8W6PdUCyjw53NpIwXz+G+E9J8N19UHsx>Y|)L=GD z$-ZUGrF={L0Ga)c7tQ2`I^Oc6TaN-Syqlfv>jTI})@fYkS#asRA)G2u4O?q5QR1e6 zVB&haF*2IF5E5t^z8$Q-)DBpo_YCR6d}UER*94#NwQuB|y}VfJv3T6r*IgPa!QO3k zW+_LijGpN5xj7Mv{w>{JT!>q7FpE_f3%{dmS0t#Y;^gvb=b&-HZxM=iw&lyxggplq zTG&J(T6I$IH1&Y|OzoamI*?htQLB2lKdEY|f)e(;Q2#V(2^`+YGzj}c1_td-ksPaFO_Ftx7(}%6D<(ryq6FSA%ZYcpfjArm=%J^(ca>JINv>q=6t&;NkM2|j zN7-(}SgN+cO$SmzI)EUPO32S@^78>aJC{tMytiMBgP1s~D!Bk&(0-7ka9UG6U*&+d z>slav9ykgg)p+|6D`n7nlR=o-ZrKN{A^=tRIO2dlo2RHD=gUdqv~TQ<5ZFVNzg+y- zFy5-AG(jAR-P=RZwH}3eLmgzfkI){Xni&6>c26v&N%Td6lF8ntOLQ z3@FXT4=|g|A2Z9DRd>CQm}vb>vkA|Pl2lotSKNB^QMq@x~R%z)@sE8FbEzC-th 
zeOSeB-}1^p3lZmTy4L3b3zK*5z3*|3S3|@tz})tB`%)w!zEK>9yW?M?(ccB~C&%680YvL&4Z}Qj z1mZ9^R+jD@kTc7U!cDOVZi@ikqN3kW$}PX%NOgFvNtmqdj`L;ori^H42%d5d?2K)Y zdRn-LMr75HyqXsCH@w$BiqP-oJ|fPGW@x7f>^mF#;jyS_=kx4Lyd@TgV}!7bz zUhj3%Lw5Pw^zez_^?G<`e;iJ}`NbWIr_wKDTV;xC%+mZpYIQ#a>*>)$n~mh74{f4% z+*=Xe1Qv~)?2%Y5NTV%nSQ-maDwb)k&G;0q_Y{$a<`WbDsByo=U9mfX+Qi*=(X)QA2Pg9Q9Uqh|aTv@MF$Mje0z7Nt%B7ZE30f#bk=%_8@MPmdn}%<@T+gl&%7 zJo-C3OD~Kky%IZPsa!m8iqxFEzMu^`me&s&(5e2KUGR?&X128iZKTTNi8x(Ha?zX&m zqpCW>7KfBqlHn^(#-!H1K&HF6o>9yQ0k(1wMxEyh)+}3(QdW79#8kFpq3H(tJ4sdg z2uqKN`Ln7c{I882-~+cBbkSiXq=uVIjAtr>63$;RN%0%Kd=bWxp7Y`7*n1Zz=4|uj zPYPHXPl)Az%wJxNjff{?qSc3?rI4C*GsE+u>BWij^aq0iCqk$fKi7`bAB7tdFOt$| zV6NtIoty`TS}=?kZOsnwC-JCU`)udZ`4HLwJ+uJSX_eZ~jEf8r=rncY{F{DfIma5U zeRz8!#Cz?b+t}uLIi>5~=PHnDM&y?JNCIY(3!kU362sUdkve+aF|#ZdAUAP)>(mI? zw^<=Tyfej~r;}r<=NKQ|Am0RE%p0U#PbB0DI59i?igqtYFPTI=cT@j4FTME;ar|dn z=b*KmA)UMiR|V06j~z@6(d3CJbY%xvd;lj-h?l}`#RiZ;Rl^!J1|bq=(Pv3vCZng^q`s$xA3rdo@Zp}+=pMe(7L-eE`Zg)@>cb(0=jo@n zkg7X1dJIPe*bbwj@EA?gNA$rL{7U;40%uH^O{sX`LwoIlA~itH^{=Vd30QInX;i?= zvYCu0b76aFT?yv{;prq5w83dm$sn!)@7Pe}$K4J#E$R-O!j?t>d8nu@?4&yb2Z<<>|oaeTk=JUcESX-~7xiZz#!4?%-b2)NW`Oaiy56?HM4@b-|LeFKRA;5E3D{_x%jB;-zDgQuPVZ!4Q75qpI(;FKMa_l3j!6IZ;P z`7i$lM|^A@0!x~?J_dj&KNJR&bT*820J=J-&HGwHWqif8OVKpDcQI$47P)HaF{fje_L&jb>Fx7`wUh zoky);a7)?}^l=+1!REgF;%7BcELHd)anRyrL?URwLD3q&W^TM2o508oxv++ggd~=N z+?yNdsNu{G>_(PERVD9HZrps5zrQ}W3!KH`Kb@xmyUNo#54U}qqI5lfrcKdmu-}zv zL*=}(oFq1=`*vwj zwiKT~adgKk8}mV7=4+$(WT>p%=B&+CR6MVF{4GK-V;|V)Zs(WtcKZ+4kKGn9@ic|bAcHa6hI|}On~-N={RCv z;RT^Zk|EA3+lkV~0pkVQD={ISH7!Odp^WT>8TaugqqJh|*g(;1ViHwT6E~E)#Xk2s zMSu4;Wg;G)F2dz2tG>@5cq(J>etLT_#GjY**Zv9lb}F3_@ynXYCjf z>8fT-0zk{aL%G}DqbGHp{txCdquuZ^C%B5iQ;u%4)kk_O&aTZNAxN!1WCFEHx=u2; zfdbFiOOZy*3@|j95Tcc!!;2_N`j3)P^iYG(76lXC)lSl_ZNpzwU`y^gl3o4 z>~?M|4Ev>1x|L>TVO+z*6ich}cIECa{AY#a03N}~UH3~bqADN2dJZSibW>?VeN+B( z1J5=4P~##<7Vry2e?}Q@%==JJkpD^d%X8<3|L6ywaF9++zc#=D1f2 zzr((f(&@a*xb8|B8?S)(x*mDRG=E8Ps?84U$I# zd3bzXi7R&SsjQ$jUkLG%l~%a&8_XbuC`zlZAz!4@h$L_bsyfav 
z#smep(~j%vQB#URB<(2lA*lLgeQa*k(*)GzXHTFrk94L*F{H+XNzZN5HG{14grGM! zgo_w1&TBqvdCpuCeR`x9{m|CWQWa5WF~e?QHvXnkWyB<=3436_=s)=a3|&wyYO-^f z0W}W$#5j>EP+hg=Er(}#vnnaT8~yR#pskZE93962dU=*KMyci{7a%TqY!49ni1F} z4g?k>ev`A8LB`%NE*0%NYw(iSkI)uBihq&hDUQnfbp^lM8l}Wd*my{68FOG%xxLb4 z{7C#OKGLqLN0^#Z&ye}ANQL1E{wqx|xi2f)JwA_I-1TmB}XUEw~R_?2&NU_Z3RN6uR6_b;V7EEe^boM zU;-C6B+pwCO)T*q@;TI4Z|m`=vBk1n)U$8`-N2&cn5O7xZBmr5q?>9lXs+%3?)LsW z67qZ;F-daiQE+{wdd{pf&*qCtI<`#rAguKu=0&uxV+VO+Q&q7EkC8H_nv}w3@eD@( zJ-5ypHT?(k498HlN!qHEUTFB9sK<4Hn}5N?Fl3MH;#f-H?30{s#@6qs1i*b~Iaoeg12H`Wds4r@47W zQ|4sT@-#+n%Cwh?3ci&0LpP?!GBU22hOSGNt6+Y6=DNF)Th-DfW8Bldm`g^?u74nf zkbAmMYVoC46=)?JqzB!$h6udzj8+?!%4z5+ks%qz|VcN{@?tz zhsU3qI{ezp1D1V$@}B;|`yDMU))t_eq?y2{TbR{Istt>3Mj%IxrnMlUV~?`k21LDf z+-b8>&}@AStw;ET&rAZD_EJV8;5s(ONQPIl>BsQk+YC%`sKkJ7FK#lFXpWkh%D2+~ z3p*;OKlW!=5V++Eh7HrhxL>uCWey(NV_W{hk+lQ0h)QVq_JS7m7rw4u9jl#f%e|qZ zf5Gd7VA}#lPo0nSVgUZ59JHS)|u`XbGCe}oycb(_d*AZ)8jbINUwtFDb)q((hR zj=_^}(m*WRxeacR&tDnOL)kJsie2srh75S+dI|q5gTHVmxQTg?h}ljhmgFSQ$WdHw zl4|_fB4}s0XQZoZ;J(xAR>(EZM#Q}zZB|MC=LmAM!hG65Sw48PS2nIxhP;aaq>O-Yt5%LmFHM3M|Q=Gx)p{GL;E+v z;XEH#t($HeMGCB~uRS4B%%xkjKMqc6>JKp6u$r_fYjCMB4_06wo}Z^-TuvkZBcw|T z*jP`i%7>o>?}dMA35r$0o`H?9g&d;u&MWFznEGyf|&v zm~7rF3j%h}PJ3lBlbkwwWikVZ#Xn=in*zYxEnZJfmun0vG3q*JL3q9<3H3|QQ+1uQ zM;QATfzb&v9YJuC;p(;T@|^L87^$?yYXJz9X@N}=eyDzIDCQvGbfmQ zMr~kPIp^fdULy%8M&Jm8-o1&g)Wi%hOQhQ$9$ar zHW{YpfFyRsbo-G$`cK$l9S5K;>_WGa{f>5S8h_XRK0d!2)21GV+B$tad-UUyXm>An zu(0=})BPLwQ8c`M7gGuf96s`pxWo3$W3jAw0Ylt1`;Tp~8bK&cjD-E!XmsS}#9BTx zctX(77Td>NH-afPzXQpr^ILHi#7mJQfwzMAbDOi&1oK!tUN$`%0ioA_<>I}J`Li@`ONL-0Q~fFOI5}LsNj$ta=_)URchAR zaxWBG^PgynT!`Fi^aL8yo63cKicHl>`8CyNhzMxHCmm>yv#b1)!`>%bl}kNE-m z1lspjoCscHZ4zCs-Ak{?|2QK?vuGRNaFJi_@fdLzV&|Juwm=FquuZelp0|g#KBHAbjGni$f%>nV_$^}ef;koeMk4MUO@1^YfF?a8CBg1!?`bIBJ#5_S@&vk70%v>) z|JIL7I|FbgdER5zZI>W&l?;Fkb59q_@BZ{C+|TNl!@{C16SwsBUgXKnX11r3Ge)x;JWX5&9mo=IDqA@-RaH9zriajV{UH_0jZ*w1G0ysRTW2&rh%N%ID~|)l={LQ`c-3AR?GntBBvT1;hJ|xm+c< 
zocC{>+G;Y2$y}M-e+-GrIh?}P!-$LnJ}}9b+;Cc2=3~GA+S<-@qFH)dn5}Z|IZbIU z6%b)p>*^v95F|Bm8{Zp~LsIoNxaT2&J+Xnl^+g&@cxHd5nw?Z|?=itDs}iFboNU=v zFz9Dxnz+YAHK`ANr=0)!uN#Z9^f?k@qfbU0DIt&EEEdq*Z#BvKjo?v*uU&Qrh|P4o z`nV>rMnvP%NYr%X+A8VP0}`q7r*?DAmDne?2_l;4^i?yIP5Ro`bHTG*jNn(=<9S_V z64GKc(5`$9F4J(tV@JdMScyK)cBzRbsEy&OU{8_d03CVAKRG-l*P%ahbf!gJthHy8 zL~lT#-B#WLi(5?PoD(16acnSDg)He&`*KoN*dVBO|KSH*BQNS`R9SvPZ9`RkzJ$r) zRyXK$TF>=MbLXI7*GnSugFiU!U$J=3GhmXUPq)1smd&VPBdPKzqZ!W+=4nvX)HhIp z;-}o7e{4QXlEp30*a`=nzsLlQ&VO(@q0>j~iAt`MjAl&rnCu9_15)`PS4y`lZ=vw> zSrwk1t*%OuP0xoDb2^nODFl4MleqfyoxqHl;Q<51OuiFnN#Ok2%MVk%*{XJqaXu-N zHDj1hrkw|TO;vF^E!M&vqVe2KME~1M`O+<`Yc=A)nqPvgA>lK$v4czP9Y6|SjU-W{WOKN@_=(qXf zXl`}KE=rU|U-hgWYdj4ipdULbN^uUt9r1{M=~+!z41G6|Hg{&mt{_ltvW?`zqz7Yj zri_=8ZK8cxiZJFxN$fl~<19yCH%4~7Y~GRA|H?svH>_xKhP&a!C!p0+hcQV=W5^%V zEW^z#*#=zTx9^Lw0uv&Q0^S5?C1Nq!nqrDQmAO4>Da%K_@=I>Ybn%+krHdCAX5rP@ zQC-$dS+TseYy2tlg8gP>;LBKmi{hEZ-?UNN%v$YH^s?G?3)fa&=z_=2^T_sj`7b=C zN0cb|bGPTB$$E}aX1DUq97f5EVqbW zXK?2MZZRs92696n!6DpAwW8@AQ)(i8<8Sb)SEh2LS$$2nql+g2!>jhjw20YPcnMoi zL_#d8{a^l>!}Zg=;ktXccnsFxjlLnz)|Dri0NP6pzw6rc6$>$2)I86_`-LMd1Ch1!4#8iplq>AKwHU&Mk33ffEE%_jh@|&skf4#w_n+UTOPp0 zKtp;MtDTGC#(TL>j+qE)OK-U%vj7>632G*26%@NgQ4r7|%27#1zBy4otn7ui0V(dvnls$iShvx_9Yz7-A zA`Ht3sP5_Uw{%jcV%gQ8=zq5>J#!dCr)F{IHfkZIS2td@*(!e<%|N--hrsLPXV652 zs4K`iXcWW@_5SsWnqri;`8|3>vjD}S!4D{xPJ^`WE+3X-4UfG!6f^jI?~joZ?SqjZ zt&BphZmX+#d&TZ7`T(UvBNG*v${X_(nAC}Rg#g`g2QGDHZD0s_=z{KX3h9kdQaY`Bv?mDmZVywCQgVe3eNdVB?*~xUb zqa%pG>cVsnL&Dfe@w}!Ck>6tU{sdCkF*G(%Y1WHd5F`0;e8Y@{_ z5Ui{GDx$ew;tK>6+@mCWMdWQ*Z;zTqtZBuX>Mp;q88KDDL8v-^=W%a=#LZbJ`%Q1bx{2MVbt=&uA@|>bRTMVPBs1ifgEf9kGY-D}PN9CM7{ zd~=9{OIj^KZ;o!-cEJ6rTKIOgn{vLZbz9cT4y{r4-JyY}v7M>iU;5nr1voHkU8+#y zCKxF1ir3*X&LLz-gcZxa=Ybu0xkEYqW0O7Tu!3C9AN658jPpc*M~m9GN6gT|{|yNJ zdvWSpy1c?!_KhTDnT(DH-DXz>C;amD3CDp8Amz#TiAP=R1lFJrK56di)W~qX&LNCOUl_id1oFEpb>L7+}0a0;v1H4l#Yv z3f8G^9fcM4FWR1H6f5JA>_o>M+zT?7e^frSIE>|tnW(}@Vxg!Ia?gi*L5Hv 
zZk2ua8=d%DHXB-*11g(o5`>|&=o5l&aDuDY&&3LQb~}Q0WKVcpd(PeMHlt2lrt)V~>IuAy$gHtAOriL;>0Ic#xEF;fCNKCh|@ zwa&}Q6gLL(`w1J#4Xea3J?BMB7Qe&p;bIp%XnB)$Po)Yw+40Rm;;T^Nn@7CUdQ$7m z1PVC_Vd!c2G@65tQqA%EQ&pGTlO$wa!bC~fkyY%o(f)h+cdj)aBgBK5K@ zKS)66*L!ezaLz6L&G)}g>saPjYFyn?0s{nf*j;j0cW%tj-XUpe@er-2=-lolXK+t{ zp|kxzGP2~>Bh6mlHx3=5d^s#@WJ z0sVLM5?TnmE@RE=sJa@lDLda#SiT+t`crmPzh$)MNC{HGNJ6sXBoR(CE?&9KKcp>Z z`X@T?&zT^T-`h*Tgv2ee{R?qWIoQB;3B92MB3(>?AvFvw zs$gqD;`v-zEd97a*8Pjk&y7}AHJF^`uw7s^EgX;vHipnQ=E>ORG-WZv5gbH+r@n+V-q*S~7g`P>cE6 z(jn2P2f}$FT9(d9J>_03HW0CU=Zkf?ut88cIzsuaMpqnDXA0I#UM=_&zucK0I)ET_ zzT0!_ACTYqiXamdB<)+OFX%L9*1=Yshchzz8C!c%s5@^K+ipF$aAE(LhymqTfSP3I z=-kmt_#)ROZEtZ~+s2)(Q~;%7=}*8~Ugj`9!mA(E2j1wdIEsBx#hzb3pJ|R79KA>9 zA;;VJ33o0s4|UiPb)mDQ6*bvz_)$YD*3bytsRrxn2OL(MI^2F3mE>Cx78SC`E>yRTA?mD!= zYo(^!eaAvGxVrH_bgOUiF&5NNfg`rD`nvCQ)0I)3Wigy5UO?0~u1_f!MyFQR7t|kq z%~d&RVrkX$3jo!P>?6$S@JM#Z_)Z&s+ATJ(s$p2OlnZtjY{e7N_yPY>`#cvc^Nn?n zl}z3L-laL{{iYei938i#8Y5EquMs`^TnyPxJU3G;iy&nA^IEQgXvFQ>$rlLvg1XJ| zzr`!h#n0#uD-~R=z;(sS)J{-ZTTDjgEuNT}eJ4=%KU(dCE0W9#o;u$fq|*viJS=V* zNZ??{dP z5$SP9l7iRa-m@US^_Xx+X@uy0Q|{~<<{V|>etEnD3@pgV^)_R8y3rvp?_WUeXi=u^ zk7B=Xxx)#CDJ%-M27+vtZ4d)HyH0HlN6_oZ-#du%{`m<`*F}{dpgE)JWU6yHXjmmN z7pS-8HlV2OD*4P!dz|p5pq9Y)Y_^kN6#aXPGXaKX7B^1~0uO@ryi`%wBt){#X?CO8 zA~Tr===z4jv&hHi_K7V*N1)Z^Iy2%DMty~CbK}kmzLPLCO2*esxeH*S}H9edxK%I7!}10vrgw-Br5 zn_=XBkP1qMW%5d?qPXU2kbq;lU`>3p$pmhy0lFsjmz*QYkKxX6e@$mm0$sz^DQ2hV zZD#A8_F$(+k%x3>y`rT2CDF{&2<6gaIk`oqAQJm!xsoQ4p}wD~Wmh^q^Q8yr?TU~tbQu3*&zaL*0IhtQV0vU6L@fd`2qwLG#!8v_>?Ccv0x*{*jYxxfVywOTVIVX&ZkCQ41e>8SL2AkycVVV*P^#kOW-a zZ#to^*Z-?n1JG*SkCZ+MaoD)PYt3=-#9jW^cdwFce>`rwb)|VIT9#Bl47hcbehvW} ztWT%gi_e5H_o(WwD~fh8Irj_y)XvUo`1+>aLp2T390CAlt%l|nw}%lcZiSWA<*8o$Lb2vw91TPg2DB*V49_J_cA}k z@$JfYoIU4Mx3qO#V}X&b+z$u{s9uUNb53IaLYVm1RSYs2hX~?Cu&ZS(atK2q?Cff9 ztzGTcO;;s~4NxQDJ;ym#CWiiIV(hZWryvRc)Jq0J zpw$}r(^VJ!gjOX=W9_&0h~HdOtGaQ0vbuiY(MJMhi^oiS4gKi&H$o}b!lk*XHTfr0 zGfl~Z^rZm}e23-`WH55So4RJIyek~6%RJ0ARDZ>EFp#;Gbx#`%VpG-SY6SFP(A$_SDb!yo)yA8D`nUn$ARvgxHZ4 
zddpC#$&?ay<)5$tpNJj)M@dm|zzocoE0R1+Sz7g2-zga;F-QRMnox;KBSHsHz&tY< zm(E6YJ0SfgNP5>~`GK6&K#Mo((`zQW^K!wyzEFv6aVu zc9&J~wYbf6*D6dgIRXBwgou|*?<@b4bY*K6m3?qcln{6d8oc=mg~CoRS6oaYzhlZc zgkj*cOv1fEU2tv3j|rBLN7^njQ^gjZ*JWx{a`>C5)%k0gi3gJDUiua0R%KC@_AeXM z18<{Dy8tjw^mRdp-x#>9s9ep_81gWtlepOVH>v-`Tt`n6oX|ME^h*r7uS9RfpogP6 zqe?u_-oaOW`o05%it2k9CRYU*JfB2%i@b>=oHc!Zv;buHKZY?pJdLVGg8i9a~A z(?N=^AwFW48bez(!{|*>WI{nxw^sw%|HxRthh6D_^02pWcZvwmOkUR|={zNo)1}dT z@LpC_rPS+G$9%JU$X(545)c%mOx}~g%rUDL+lTuehQ(7&$;yJRnEweQH~^v~_d&M3 zXQA4wG@YjK7k{ZBoe$P9csJPTj?}=|SGWH9C552yj3p!^vX|P4yAsey&{)D?0|s!r zRI`O{zq_*~f zC`DHBX?^fr2o>yAjAoDcx}GiFrcFYP^#st)x>pyM$jS`v_!;A zgfjn~Km#|fla_w?8yiY#>oBGt_-yTtsx4mXXq6b$1I4uftWCmD+QH8_F@MSl7u3Q? zr=R{L=LJM2x|>2z{D)nt4J7}aM$myWKLInr|1b|GF-|5C?31fBeM z1XiS^UEMmaf;;kmNh7#z!^zmajeGNJ$l=o$PfNmG4=0%e8uKb`C<8%xdeS$1Sg&#r zk9KrkssgODi1=R=ghj4@60mcjQG7^fx{u5{f{a)pU!~ucGNk^uyC8O>ANR{)uKfQa zis053fmG4*21_VU9>!#|_#JBuh9-A>A#QJ|2YzY_V?ZcN$ z$q^>2R7BqVvPGIWHp zszSs6&{`BlKLEz!z?4xQQMq8h>{P06ZROPb&I*0!|39k%3hn=}8sOCJujC=laIk9E z?e$~uSNFRiC=koDK|sY!N8-Y))^msV}g{3I7X?K7Lrk+Tb)UF&)g#7mAmJU zMwUI?N{+KhFI9gi)Tq)vnfZyY)J%LHjvT8(#BVO0jg^zMV-`17Y)M%}RU`1im$J6C z$O3>rWl#>s4P_FZpL5twssCV*me3S^j@#o)t*zpbQ30^CB9w+rT-YEc7_wMSbQ9E; zZfXn-n`zf>7#LLNDG25N;eg8x9D$2h{)Jo)T|MRVh^c!ERo?EvP0q=+?jM@+QW#!K~?xS?xj~eXAhc7{TQxqs=}$24saO`Qp9COqfeBJbuMbAEglnRt z7wml-U&w%-A3b?7AJ`$>M?%Ptf|`eVJ!Nh^B0i*$j>+ndPFzHZg^bs}#nO|Jpbs39 zv0CpQ=0P9p7U;!STh*D7ud#mPX`IS&CGjey2){P6@_#vu&2@o}D~Tb%T2#hVio@63 zvwYQ&sMk-R++d}^o&w^Z)3yr@gBb!%PHUmvFG2%6dd7QaUCNtsqler#g;}acD*;FQ z{XG}mVb+fV^7U<%R0Qh6WjAizz{5Mj=lHpXorWZh8}MZQ)0Ld0um;SOiL1{Z<8nG> zsD8{Hf)i29p{ANbG?5|z$5!G6FQ_1v7fs9;IXZ(;>(VOx>c%#)iF9YqAW%cFdfKW=m6%Uboc4^YGQvA&Gt^4V*QkL_agGyPFyv~)Ix zecj2mI`MDm-pMKeS!rm(Nl87Nl+TU%wte6g`ynIWG!oy}e1u9Wh$k;=cRmg5wE!TAOkqwnXI9IuY? 
zluM;h+LS(BlXcJfoPuB-9>$q_-#yD?O1>s1{E6|(Ad@$D=RweE^yyD)fyZeJC{(9y zP=13YB}X#ZNeqYUop*jyn4r}jQhvkNm=S*G=7Qe8Kr3-} z$wOux{2w7g7TsnR<)o`QcaGP(z3i5KUSp)~)888&YDWI-%HSlhnhno7u@*U$bGK+5 zqEN(~Y?_s}7+W?Snp%$%V1(&6vA1<(QjiT`(b@zssFWmu>IiKmA3==iBpfY|31_p< zdarg)K+lu$_w2^v>B~~sMW*>Dao5P_ICS!(U4Cg$8a2)vslv>~-**dIKeLwSzP;+E zjJ~u5BLF@U77z=b3+i>NX2vfOCfq|9-wQja7Tg{PPP;HtR!s&164p8}BO#Z=OlF|CYqz2Tq|{{zfB@O=H+T`K3L1vjxc6mu<3Q`;e%K<< z&95e#O8d)S3-1%zJTrb_^9^!@x7nb_S{;6QBuc63o1A#e=9=qcy@}t$tqRNN6s!$B zWv5eIE2Efu|IwXPIds@VD9CFE>0m>D8oi&_$p(4f>+x?qM=;OWs-)VXr|A!n9^<|S zJ3wqvTvoK?ty0BV#_ z_pNuORpB|afO$R~OS?J=1WJ~71plG=nHD~KjeU?Q+0BthT}FmkFl7jW!Kf?xDv1vtT*yJ*kB z+;MJJxkZo!H|L(PF-=k0mbJ&_`&+AKs!WOQFqeskI?}@q>zFWZpB!nMoJ=MaoWB&4 z?OBstdY3Fb%5ON^e{2NQP^*ZJqr74Yh2~<yq%)E0Q9;Re}zLyLS1L zdU2V+BhVRqLL~hBO%DDz=|`>Q(dY6N<>L7G5Me2oO_&Io>MW^#6COrFt$cmyCXMvA z9{0ga6WEjaU(H8p({Y5kPTWpqOZqc21^no!gt6r)kl&_~4+&)5fD2XSDRQggaP7!v z(|NHT*$>6gRvy1wp^=FEH+_F-gA_+3)rF`fU51l_nnqM%Rms*;?zWcgP;RC{`IA+! zd-~sk5$}uct>gv}R;Hb_7Pa-$mRRU8moQei94izTlAlg+h6SDFb<!2dY2g?3Kp^ugq*jTi#!E=dB2o{$TO^t675 zJa`!7f}76B&%j3BRG%+Ejl0fA)oEWnSf$dnVfDG^?VK;EISYb7SVy4V?~2b-+Gfmi z@ZQNLmx}|iP!bK(S0!updb=7ejSh}8R%ZB?pXspiRVYB>ZubT*w2dl*nZ+gz{FJC7 z5n-KKvJyKB!Tvk@V9N5?(e*>;pHTNh0rEH#f4PlvCpc_DW@4fK8M@ZqhQ8*fa}VLs z`?pfc0sJ;X>RcbnjxN8uJ8~bf#fGZOtU~9G=DZ)54Zg6n^q}D0oO)GOc>1deL5vh~ zsYM)Ug8*Jd7wdo8DDw-f94*b|KCt^(w4l0D*RF_L zk`RPRr7XTr=3MCIIn^mvb)@~dx)Z@~pfd7L%mGt9ZnT^PM;N<=4QKzf8sSAx<8!}K zfJ~}^Q&k6-B52SvWoD^VK8%ZzdXefb;7`ah<%MCpTtDib;6D@- zATmAql(Db3%tgG2dn!9^VD_=iA*X|7zPjF2{x!~_3{*}yGGF>$p7hVmMbI^GLH9w~ zqMU1r0Hb?OAggBI_j#M|wm!exNs{OK!;a`(QqtOgvwCDCxa`=c{#fq$UgkRc`#8yb z$(}OwIO#8M%i%NYSJ2SWLN%Mj=@WI*=pKl>^C{eJ$GI_xfm1Sitemg>Q{>I3h8+;5k?^mA||fc{vYIjRsI0 zV6>SE%ZQD4h6hl?5UohRs*bzPyWi|T-cI3qR}Bg_VA!a$C=PBwWSB>luC*+OzXXou zZmKF}#bh&LShwNVkcy?eR8d=9t?F_7EVrwTL}kM@`Zzz-=P}4j8S;8`B;N4x@BDQ< zJ}sNaoWYe#SZ3A%JcZ`ZzM<^C)ly(JRlQQUcI!C5#C~@D!)*I#!4+$wOO5LlpL5c|RW&dab`duCs$Bf{3MFun@v#ut&cx3bLc>)f=1 
z@cok3^igIq`aCizJwLctK1~7U?xtU1$+*AB@^%p5A9KUS!Iw&_NgJJ3Wy^H8%nunR z3cH+=q>!}VQ`U#DDHhT(8l_K~)p*GCSS0M?g%$B=d3&bLFYMLWdFZxU(lS=lFcJh^ zw?VodhNM(|aVn2(TyaUR>zN46Vg={Whqo7ao(_BJTllqB)7tR7Q$GL}-I75Mh@Q@o zQ2o~{hfA2^oFfn5l{b)zRW19&wlk?GSnAifS#YJt8-6|8l?uw!whpV-Xv-V6 zDq|)b7PRiBUH7ruT5g`xs+=ufofJP`Ayy48jkmebw5`pBuCfa(i~hm+VtjkFqE<0l zX?S!c51KIP-Et>0bOuP1+@A7| zkh^e4NH82$<=jn|QY-C&+2=TA%cMN$z{ea~0;ea<QD}%`yRdjtMn8WC=(Bepcs=u{Xt_92RNu44pFuDujTBacx}G z+K^1inc;__wG|pKE3QAoKPqnPDy0(uP*quqP=lc*$63M34pqGfOCHZa4VnnbM5pbp zkx`lgqc)bYx`WcdGCz!5oMX&F?5b?k0|dLtEiCQ&N0EjRbK{P{aonywOkWn9EeDCZ zWZ)Pje()b5?=V?(cDGabO-`FHB35{H_NPnA`EGqfcg$8c`xm^9wrWw55GZ*%lp(x> zCcgjX`N>%{v@z*+TJb^&D4SeiBT$x9qXTBq77>FRxc=c?uNKO?Ult{dj?7qNLvf!{ z0#nVmxnfb)(&c>XWb9M)Yces2YYS={{6c&QkmN^XI8psIXdS zgZUK6JMu$iJBbg+8)57^X@uH_DKmK?BVRcwPr=`9Yxp=9YZJ{AAQ+f)7BKvEDTx^~ zXb($NVUl^H)7p{>qqn#@;*(>v*QT1a&Gs{o+v0#hr3rX6^@V;L&?M64OsvwNNEP^5 zvJEDDoEvsB9U46nOF^iMB^yrP^3;x5-U-Gf~h0s%lYG;DH zK@wNzJ`Al=5DUydlRck~l2+5QIwtkZ_)m>-j2;VO$Qs9YkT%p$+OTPG#2omi$Ta^M z$O)<(fumr+-~jNJnakAx4uZKwwHh}$e34<3B0}(3fB#6zg2dk3_hWpQ zq8>o7Ys##5h)iOppywbgb$_QSxVGMy3%I|O&tG+!)Gq%;YAb0sLR((O{F5|37W?a& z^gkSAkC**@y;^ts+K-%~v;H&}7ME<7zLEj=KGvj=W^dY**;(vXH|v9u`iKBt$bRpR zq86<6uz~WM??M`g*<(!H{*Yem(s)cVDH&hOztcX6U&Q6Y0@N%vK76QAs{`8Xyd3A% zs67(VP`qb42gRai3f4F~A8c{La%0^w|4#kh<`eIIXp2n>o%kbT5x*{7FwZv|8!XB3 z9K13$o@U7tZxBj|nS4C>Wf1L<Gv-fUt0r!)QC`Kw1(ILHZDr~6X!Snq z`K!1XWR4#@hCY;-yu#*usJ{Ws2m98CoVsciYKdpO(jOs7#odduXWykre`5aj~#$`YyuQ*9qRa9-DB*SP66Fty-Hhu)oZq`@Q zona$lwd67W^O;op7seTdof@EkY5*9>_JI63<{JJpSi-0!!k6Yif@y2<%v4HvW;yc+ z6^E#Si3L=h2ev-PNEBUiFaq^inr^5NwiQhMzzK~>1recuW6$sYR4{ntoA|6;F;+21 z371_=rtyele+pH-?6OK<-pdz5&CWC0Axn_v^Eb@HB0bXXgWI+@cW6)XRwIb&PDwu? 
zd1VWh;zumK^ne=*ZHWM+XueCPN@67Mij~HIUWw=i2yU90ihQmBo)Uy#3WQ&nfv0gG zvEiHo_6ZOktq^`fCVO2QFXVmN-z8HxSrsRIg-mt4-mSe3A19>v0CyYgRL*L{pC9yI zUN(Uz;q#&E!H$34>h$$a;q`w1c{}6j{IXve5CGabZu-Z`p3iMxZePzkUY_)xPlaDo z!5OP-k49V<4ZAOQ_eNefm*9bg4Qem%!w4!b=SHCv#)=c+ha=!zIHKY*VqnJO=2>s< z^=0%m^ZZxM+?~;o#;38J*BxLtAom4$W`=or@Ok<9lJa^@leV@?Cj7X0io{p5>mj7j>pMUTy@S8yy$PEN(jP$aI1e^7-e;_U~<9 z<}%a*_UB*MChuSGm|xF(FI}|w9dcCu0=BhU{4M*Z7_ZQ;U2s|D)WD}ItR?jQoLQaV zf&S?b!^j?c&!bsoOZIY(ELw-L=@_vMou|n8)^(xnZ3l+aY~Pm=EzI&*72jKAiiz#6ob2cbAlE{CU-tOMu^Lte`seYK=DddBK*4?`iJ-+ehx> zGV;Vcc>D|3!b;0rP|I-PB^s5~JV8TQ}K-LllY7X&@CxQ`%y==EsM*9Ggf4JR^wW*DG@b0-bTX1^AT;c zZ1%16PCm)99t$H*a|atMBpA+l_3T7dqJZvPhE{7@@DU?c_LB{vgd&$%bw2l3>)iXp zNv|5=nn$YQ+kAIgJ$?5#o4-e=Zyn9kxLfJ&kM?5c9tv>UeTi%HVP}9Mz$b@#OjOo8 zcX!?BvkApaa@Dv8OIAwytZ_=Ts|xugl_2I{#&!9y-M5=F_EP(`Cb_|v1LM1McnQn} z&uiQsnlhqwcdmO;zBVYXmI4KK^t;@Gx{R2&z8J@^?GsbU)~W@iSB!#~FU?Yo%9~9u zcXr#*sY+Jxf;YjFqnDyDKv7hcW%ZRDF8YY};E*@3+r8G+x6pMui0p5 zZn0vnuNC`pzFmAa*!2y43y*!m6zqRU;IueuA;W4;Uzf8h296DVSF0*bEW4-<0dp*iqf)#S!#KyJ&Uy7;7N@LEG5Bu$j~)rC`%L-OScrHV z6mk!P!Z#g}Cs!R4yZXY2jHk&uKA`do?<5lCo|!GWTM83MdgEkI#(i~mhEL52UtuWx zQ0+Y?dgEgBlk9#=2iRpo`>axAb%ARrM#FtJ%S3NF$296A3TygNhnraI;VjLcDz0CR z>a)=_C)VGYI2@Ul9Sf1!PhvB6%d2q%aBNF65{~s4IP??m=_{0>E3n>!A!~*mhsMt< z!-6{(Ro{tEH`zMWY5g~YZRRc-Q0~tKvGM071y(wK6sN7MzycmH5-3;vt{MQXHI*&R5M-L{(EN5jG5f#Ic3JD?T92HK>Lf%lMd+RN4T3t?Q3|SWzqBivn!Wv zYGZO0QOF;HBIx|@3PZUXx7JVRP2OK|;N{#P4)8a8^d~FhFMP0PObVb?DiIfCxpoWi z_+_PTono+@w_?DH3=A1-)dcd4KJo`@%X&T293Gw*5n<~_7r(%77AmrdMYoZoAa1nr0ln^HPKwhL zz3xp{z{zvZ$^)-*cLVXYPEX9~)RbVEk|P5ZS1Sbren=Mucs;k6n|w0xerI1-yZbt1 zQ{@WLE#2}ij2v8&@d$k?U+7sp#iN+UI%&Wl(UZ+s{3+eTuyfHaO3U*M1$rVSbvLzP zjXoGR*LXRPALScO&O0EFYln&twzAIhCK-ZwXfk>-mq?_9$(PCk&0O zcA!%hD{HKM^F{8lukYLa7kFoykGLvbM7NAJ@EXhY zCtYNzFD74tWH6+BICI7+L_+6~DALohsaNe#B~-t{E<5*>TUc+hiUzlfq_!{ukRn9G zgz5zB8cQ(P*1jGkZN(MHzyEP|-zViG^HU%4oQWb7i7HVeC)bVsCq*^}STMOes_@Z~ zJtfN%x>~&m4oogn9kT+Os)7j?I5;l>RL+Qj7E$qniGcGddfgA+-y^rmgFiSlogV}! 
z!P$o;HgKt?iuZ50)NPrz{B?ad%s%bJd7VwQOTw46{Dtq~HJ~Ks|#%JqAfB{52 zRc)DDDixd&t*$Z<)WIT^Qzdcwg$&zUX-EFnJK(nT>!8T!P+0Ppc6ZMkhjS)l*>Vga zp|%e2Litd*QdOy_g2g3YqT_(e58Wi z7sBrqI>shWhd@>X@o*jsUdb3%fcAx#&z%S|kNK$f5OD>)iNEeSsdYN>lIpfY5N&g& zpKh)3hOR_@A5|39TO&+de{bxw+OkA6exebFr3B*}(-c+>k=oeW%&a$z0&_Apt@#lXgadclAPq0ZiQqrLy6)KIgCr;ip4l+wtjTEzVP?bSTF8xZGU})hLyye1t zBOc7}?G8djJ{jk*0gP=daYf9o)d#7 zGCz7hV#br*k4>Wu;NGsaA|~Q|y<$etxzDZASl+Sug(MM$; z8-O9pBFE*)e4rnfem^NbnEs;rr1L=NIr_HVv&PJV?r3%%U;svwZvsoOg=O)NWCr+g zCDu0z6Ss1z;0UVgjqdhu;3S3|-VdsM&c-MW`Wni)%_Q5*^&{YMQJSIXOIewPIF|lq z*bQ?KAlC3>FWxREtU&%!Xi;!Q_sX%AjCt=%n_Z5?{u%B$sD04^rkvTuEHDNfq)D znxx^DuXCD`Gu#wld6>2d^)|QUdYz`cXzL1wV+tVjwseR5j1|&vZ@~5kldxZe=%D(L zyKmB{MU^i}XbsX1;2Vf!pCXPmS8W2oIfm zQH-En0zyDNv5w(_nO<9{?e#9U8p9}%ziih7kKngZlZ&T9neJQY)5)|4vC6~N16`6T zPxdo#>b?%ML>)b$*{nFYO*qO&qh^;xz(iIWnyKT$6pskXG$9(wcHg4;CGVrOtVtid zKt|+wvwEedh{d;Z3tFwgq@wUhDTyK%8h*Lh5zOL%W+alE_pz@$qY`!SloU?S2CC6o z$QHr9&**7Ht^J!>vzOJ$BN_X4EoEdqEeOE-?38CII>oCHp;}5snbUkJ>r^E1j783y zuzXXibus0SL|#1l1RUZSnxylZ_+%^@#b!}Ypry!eO53NYuthX-Z-nVs_~X!jU)o?( z8lw7E;Lg@|la3d=|c%{UHfA$cyKi<$P+@)Ib@l!(V`t zDQ@)K|EQ5+HIncIg@iD8i0IV|Ghl&r=>BC?R(kn^#7UuWY-Z7Hk0U*5h=R83p>vRHAz>c91@otV(iRYzI@jpXl4h-k-ZE z=Y2j5V)O9uw@5U%kmH9+hD`klf%H(%WET;XEgg~x784aSRR84~CA;3d%O z`s-S~Lt9I{wEX%X2>7&M*0sP`9y6)iFSYCmC#snVe=l&qh@_%+yn(m(`-twn8s1p~$;G zZqu?*O!eP?HEBzmDA$0{baB_s0<~6$D^pX%UcsN*ib5rG3za$H4R6~35*VLmC8u!i zwo>(?ehr1lcTgPE_r~xiw?66g-&NgVN55Aoba9dPqBYiVJj#)!=#GB1U+pZ zG7D4IZui@5&(d)K4uV3Briq=u;tv;3FP;ug1osa;NIe$x(BWpvVnoEQ>kY}%RNR5g z=UC*&F~xQqvCQke3z|WS4|Uf}$8Ga5`B*G22o8GaW?|}Whhr0UT|p-RW|Vdafpy=$ z{~)6Q;YoNNL=Zn1F~%d4N-$TD6y=MmA=@9Iib@ zi3&YKxTJ9Rx3BP0*F>$_G8tVNdamq%!mdc8+SJ*Gsf^=Sa7Rg{&Q?hChNvLbC8Xx(+>M+g1SSvNJ?cB`*!&T5+w@jxbjct8fP|U16 zAL~4V9Qfma9BaV+lZb=P&%ve>SKGVY7T0*I${2Z@$R}7+T9-2^njh_02cVjG$8?*` zwJ89lMyT$pI#M-e$1g%XrN_MJ*5KC3p>VsQZ#@d{g^1mS>Q`T1tHQFZt>&l2s+jcM62YJ{jbBEH-c%AP2v~lR8!GAS!4~s!)D}r|vFvC$0x)IYE9YL4LKiVQH6Jy2n$b)}>d3*KaV5vmV+I 
z*Ve!v5s^eM6;Gpj8r@_*t@+;>%0yqEZ0~#5axU+BN2LMa+VOc~Cn7_5wnEKk!V?fq z9baB(bvJwZHuH8u2j{LNgVEz{pP!>Wy!phY2gt@1Ubpzl5v~v5pE6!DUZ-uduAP%23WP{IRHa{z z7As%WrI#u1Hlu~s(acF+yj~L>m;i*119z!3*zYk!PiC$^hlI8AHDNrH_^uU!cWl(# z9rv!fVA{=s$_2UJBZWF^W;gzo#;*six+I!z&olfPJ~}%5!ni+H`VsE>4-X$(9J`_? z0I`E0M#*<^CChUi(I4en`TBFZ`<6m(%}TYoeKb@@&nISYA5M~;*wI#7`~iiloaL6n zWs|U?^A?)VPs>TfhIK|BWF8EXR1z$oLD!(~DhdP`(q7_3-5g~IJ=|E@-bM^DdJcp08n77phllMqP z0 zJWh+OE?7Q4P*4EnKamCeqBa+?<*dU7Avwh0F$=fiG&~chI%kAJ+yg*ujV~hGex`)8 zfb!#kc`;Q|5L;S9C;L;abBzamc;oq|c#RY5rEtM>-}1w#V!i?w08aUARAJj!RC)fX zXWaM+nlT(MtU3m(or;a}JVXh0uQHjRn(Qdp{w{~MAFFumD@?qyoT?c*{Gq^&iTE3W zLX>4E^ghL%uN%VFMPgR)G8(G>t0$D)avDh1zh5O(#rY`~w>rj9+QKed#4R3<@?si- zw>a+$$%=3MQlB<}fT7fFjivi6Pf=Wd-G>qiYw}fI|3b^Y-%6#(>Wb%2J^LpJhm`f| zj*=@Lu9(^!Nh#k~0%D>wL*IYFP?0bS`%%637n{8u6_bDuY)Uuk@=p&U`uSRBp`XLG zXNg&nEnm-ugSmR+-q~x@2#w&RZgY4};cYXG)7{)}WW#j8gS@8dQ>h?iFdnh|gQ940 zuqZ4(d$4in@7;v)IT{H7tPuV8A#au|CykO&_L&zk2gLCK&vIekp=XQm2l>Ta7pm0h z4)O z;q(bA-est?&mmzD4kx=~+6sF047pHtPOHznN}WtXXseHrq=B#1R4Rf z6msKBG25>KZ(i0^zukl@ZmHwQ&dUKnVdgThezb~3%QH+l8`;shf&$zqygWY#AJ+2@ zr#xQ=yJ;vllWU3ZC4|T6W~?TWY)y{9DAP0Va9`od7tW3SAG*HrzmEobwrQHAu^O|n z-PpFB#1vOU>gfol0d-+w5#ddK-XIb*g zuP0&GP47YfJsXKdqy(dqqZ7W6Ge>W&1Bh*fObf9PGtawhs1a+MCiGcz!&C@RNTia& zrM=xk0|I%SR%EiRf6{j3tYB>3_rYJ|oVA&sy=-a;zTa|Q3t@6giQVrP1sF*8Uw?h_ zaTj>1U79h_*?n51Cd4uA$oZoZcIRMKOX8(SSx(;|d(6UGWUrGTebN)b#g5%J2mH%V zi842t`B;yZxsU=?92!U_d;;^BtK)o>u~hl5eAxL^*jR&uN-5!RG`t6$IMudU^>4b7 z`e_u*vKvBeN~jNJGzScF+v+x**shi9K?d$xzwhez+4lM^++(Fh_LCv?lWCtbj<%oY zY--Ta-}6%_cfLR>tb3v&jb=mO0Ag%RXjC}bFpS(AWYK=>Czh8oMMYmMADbW2qMB3g zn>V1Iw3D2i9J48$E>sIwebIo2>6OnY%^>Sm!pIex+XLOgQ(x#+V^UG z`Yl{F!rfJjVuMp;%{L0)#J+C**3D>tYI>|BL(BzsN(jx=+vxjO?PD@|K!vBx(1^Zg zWA+;7?I{Q&Df_YvAH(yg`^)w=weZ?;lUW-0tp(2XTo$!kkHh(3cL&NN`WeHVb5*|1 z7kahJ6+tHny)twn`7UVm3v3eHS{G7$#e8VRfjKa}S?s;YX@@{#0^n4~&sW zqT0mz-a87vPLx++-JHZH@4usap+LeuiznGV7fq`n;@4xd&ZYdto<|8~@lvIqnj!3! 
z@@d*EhwtUU;rZ+b`5kMV9?m3NFZB+Rv{Q9!nMSDAm=yJti zr70vr-ljueJcxN0HYSJ~H%M1k6pmLoRB9z-T$@OCzZ?yi`2|mj+hOru zigk$#DU=c6$ja+q;5hvKlmOSQ+)~$3XYx~5liuK!TI_y7W^9%G7xIkXj8+il`!g7Q zd13aam1N^g)%t+E?LIGTFeoR%UDMT}}67hsLI4x)UXkx9`v1fcaIo-uh=acV=gSfX1=4 zHanF2YQeMOA=0?PfdNwc^k!oDo>W?Su7HD=^_oU^2U%0010;qIn` z;MysF858#4m?VUey-0wPGX%WVOb!p{QrgBh{D*1P`ELAXcYoXW-EXiB-Z--2I+plI z9B$_7L!&*v{`oBi|@nE>$LbNJPs$i;pc;#BmbH}(E~yYXqqVoUAjje z%PI}PK-{kV75cxs+(||4?k4@wJ%sp|K@7G*+cxlePntnm{l}SLL=YHx{CxEOq)b5P z7+5t(qzuM(i%*+8%5=$Cgf5S z^z~|g{0a8Q-Wa_&O5yh7Qo=E-6ABLufcu)ca0Oa~he_SV*-w6;AZ6`~ZyD4MSzAmm zMIVWIz+N9^H(dM1(()!m&DRQj&goL{<$?OVfYvZ?7M`zZ%?;sSz0~s~2LJl+)DRq< z6s*5<<&NI%X4HhXPi^zniPpS4s=l_B54+e_2WNM-k=%DFyg1P(V}c#!du7*MfChO3 zJQyzms~FUJ^(*`Z%SMOmO->b6P}WdbhGN-E-|ha_;7>Saa%}LMF=iwj>-YHRcr!t3 z%-f%)kUXvx#KlY8Id{CsFZ%>wu#VJL8@M`AviS!V?#ed4VM16fbg;Q6&uka43C{2M#fs4kr{qlYf1Hm_Agc4Ze0Xe|?8`$M$YKtgN zK}eady+CNBN(_5i4p)FY(KTq9I$QJ<%vt&nO@S;cfP~9*NVmpkT~~3pSYlKf;Y^a? z0=GJ=TM2`aehaIQp*yCH6ofjN9#R>lFYP)&DSGKrG?XPxm&P&5NjdTM@-4u5=gCO! 
zfQJ^JVaQ=^Pg1^5iC_N-50K&7pWG{lUu2gcsUx%Ps6w9lHSCDLyVsvl7qs!M#Lgh* z1?8zg!cps@GD<(8T>RYnJi>ZpC3+J_P7Mjvy2*MB{eJDEG;q#u&&Ism+DctVFZq~- zh}}Ni6O44WvB^W?3Hb`SA(~4-S)4~VAa|41*pq*6!zKq6Lg1bp1H796gnL_hw|{t~ zu@tnvVI8Ybx|gn-M?CY;6ysO+V!5nrTZ3b%5V1WoWO{tV+SG7Z|m-KbgP?nhxq8z++4Pw8VefD)d&fb*-xiZ*OCT=T-0f(!hD?{_B-#$9s!B zNIs=AS&!PW+kt#=20+({9$22afAaehsMQLMIquOw0#E+CuHB)!3t`r2FfS4Q8aa9; zORH>;#JjQ~tD&o3n)Hqz*6W!T3`C=&L{lUg^lopb)Ru=2Z*_KvLP%@5VzG|nu49Kp zU{NFo_*EZBe>@XV&9R6u5hncu{uL>{HLqDdKB$wY^xftc0Vr8d%PcYLinUDcX>q@4 zemjyrj4K?Mou?YYx?x%?{?;9hI`kf@RoEpPze@II+`g-Kvx=7n5mRhfbDmDEP;=Qk zNvulmXMEO3$hpqr0XtC~H@)q(tQ_I5Fz+#Bc5*r}Iwt5$3huA8V7{!O!o|g93?BYo zrqHQ3ZVY^ZfF(7jMBR^oXS5HNN9(dM?QG&&heF$q{rjsfb?tL$UOq2E8XJPus&AZg z%!rptiAyl?lpjM+&phm^=V&W74&LuDpR$|$;X*XK<4&w&BA#N?Fw7f-Chn)1F0PeW z>+-O-z_LRt!9TgOf&*Q>uj-=U^pObZ@m}K_JJH?yz$#ypl)8c3YGm{2GEG)#+WA^7 zaJgM@rR!SA1|P$bYgmgRS%*X4SVV887;GQD#wVe*B2sqNgc9BK+uC}Lk8DO}j7=)P z?C0{YnQW7Pgl#-1Z`31ree;*o@F{DpPGcX}gLoLCsXRf|o@Os!-4XrhTp{}B ze}U<$vr50mhIn(g_;oxP&=gJ7#~e%)5RlodVcGR%O|PXQA&9g+eaz00Bz)_OJ4tJZ zFb!Uq)h>`=!_T<0iN?rTF*U`aS~X*0Tbscp1%6um;i}7mT_gx>G~>MT*ctImkWGP( zamJMP@oPYg9>!dLXE@W{IZLF#q8L~;voQ*ETwdKwhvQq?`pDS+?POGL^BdiKl{;{WkNL z5S8(4%X~4FiC+4UJK>^|Uze=#?3yc)Ex%sy`hpd+X>1B~HleFA`hNLi4;U6V;aZI^z6PHJQgYD$aF ziT6N}<+)O}(pIw$#rI}=hN^}r?*&TjkCYCdgFoKw?e_%lkn7$^)s{zL7aP#6Cov~4 zpotqc?&ma=2O(R@zXTUP-V3`KaWgicRo;(+k8Rs>^&h+h=V@N=8&{#!Z_mN%fI`9I z_qVs_Tj$p)+vqUcS1Z}to?w=ekp!$)d4)}>Js1bpz1y3Endj}64bm`BtN=^Kex z10znV_j2SFwrq4Gmdo~z2cr}?UB4H-W$`WrTKv*t$RQt~|O zMZ%0uLVD`1gG8+F6+}N%-&HsW0gI?if?T`ETM8AVX}-*xCxPmPQ{p`HqTn*rlZ6b7 zn(Jncko9(-*O|J3lzo`xvia~M!VJB;^;>m*QbA~oLfcE4As|nYG>{XFY61;Pj^$7| zN%a;qF{hp~^(T){x3+bPyZY70F-9rIHF0Zy@$*Rmg_95C<_jFpe9~4RV71EJsl?>h zKQ?Zy(L~(_W|dji%trJn^2f+WTsXm z1>SAUUfHwEO|NVQ9yb8QE}LG{JO`HAW?-k3A{|0%N#N6}r6 z5|iplcj>2Vi;DhAPe$$^wT6Ka}BX8dQWi`iMHrY}*Sn2@B7grYDECx7$}qMyN@S%<4fGRclAg z9-Tt13mnoL`K@2@LNQ2Qk8jESiVHlGP7Y`rkK2Qa6^Z zZzyZtO@M;VXviHRx%JPUHhK%9lr*AUe}s?rR!1nTe=19HlxMk9f5ijYex7YmAlLfQ#}b) 
zSY|5UQ#6BfisF?SV?iaRM{L|_i_M?=`v^HJfF|2sb7&fg{UVRV>QQoP*JCg*xkPW+ zm;@Kzq0GqIf2>GT)c_a$c}H|_=#3&|Qq0P!7vqfK(bFae+VbUpoXGzu;D!tiW5F8i zRz-C)0>kb+Y8U65=rS5^AuysuX8mnRwH^kDJFn$aleenFgW)^mDo>$YD*J3!KrK!^ zRlnRL*?+9a@uQ52;l~!|d-(u5tK@hH0dQ3#HU6ZlyOS zyh5!3F%B`~*<+;s^v6$cRSPi-J7qJWbvtr=EY*`dk5BlvSPzuVGwstgAMP)?^mkTC zZ0ikjHlBR_pqz^*?^om9vfn#4?lcmk@lWvUOxarf;qNC`UEYr``#?GT1uwT88-_RZ zLr;%5J$=iWpEcWi=7rydjpFCx9@dlN$KC#s=$a?UdX)Cs(me)wg9+a?=e*;btl{t4 zY5v2SSHt@^=KAi#66>T3hMEf$7=BgiT6d`0{Z* zL^DL|(>7o@q-5<}47kjQzWb!f>aN{-yNK26Cz;QJv&0<^RM2?BM<_RqRn;wTTe?D8 zl+W{n4s8}hqG3~D#%O;UTSp%ocwB??-Igcil!Hb#l-kxkRM-n)H^|A++$n_nNo!ub z8$vJ~j~FJYD8wTHv-{RAEk{zeMaTY?}1!(ChG?OxMbGopR%1 zzCtHKspBK@)`u#X288MMz-oERHk+!50b~@hSf&2mI)~5+6v1J7-E&OLDK_t4w;3$- z0ylldk;dT!;#J}-8}hCiq;_U47bA2rOO8wtoOy2(x5_80!swu|T$8zq#mMF6;{Woj zQ!=G-^R*SfZ*eX^g-W^|bk4ZG;wLtcMG&1k!G3A@gY&w;hs)QhD%n8%lT_zv&+xHN zpEONtq(aAzCp4ex9YU86qDw2T>if|vi?`zz^mYjqa6ffC@t)HryM*_1_8ETT*pr#f z5vgi+5t4lJs~H^3-5xQv(*Dz2W6xvX`{_fFzdiM#{K`mBudUUwXBafcpm`LliTVb7 z3T`CtuGed7e&Sd%)5Nn|7QC9xy=jNW)^2VODXoct;GDx81kuv zR4jc6knAb2FK0cJJ2l|SH&4&-CN0IZW_9n!;bUiX#+nmF^H6Hns&xN4h(GF9&C$VZ zOW3BcOEA7T30-ex@Z-AmmgPF>XoEPL;#^92U!^iBK>Is7O zwk;C84B@gCce0{lMPPQCQE$W*7diejR0At8fI|FX;>g=gi({$ykbo{htr`CIB-#mo z4@;v%chaG>DZu885NoKqKpReJ8N=R#pIJN zl#8u08b-7z=v;bxeH*Tb+BSZC@u*-=PWqZr+Ho;UC-}WsG%{%8xm?q_4LZb~<2;$c zd~sm*xQ3&@ci04=np*u4?Phy)o;IXI{Xj9~7tj+J5*%+vwt85_zx6;ShG}V3Kwqe} zOa5H&?xO+K{_uh|QJ!1fhg>-K=ya_Q1>B0pVn z%Xh>q7s9wLyea76lZs@!e-W@*&_5vDo_rmF(uu$8I?_*hAiEh`p)|kO%j-sDUeVFq zzfT7g+-{nR{Xys9QdRYby2gKFi^(4gzdlr}XYGp~AOl$&m4BTc5WVb{5kr3f>U=X* zYAt-G<^zN;j|bFv180l5=X$GwNDAGAjm9g${FPGC&oL^7{o7#5g?y@#C9c2IV|B00 zQq5FEL)7bxIMESA{$Je7ZnsO{`5HOA{d=hlc=75^*r?_u zp~SxchPqZ5xOR5+?W*C?q<5op9)31WE78s9UYTX-AAIH? 
z)5IIqxzKAjZ7Ky&?v_{G|NZSedkS<9ZkINS?rz$EbDF%W&8?7(r($ z;tac%l$4Z}kk<>tJ@OYkL*k{L->B*8+WKBDS^1Q4{llUhXvZ9y^TirKlz%%urJe3q z=WiD>iAo(VN+Q`7iha6*mMGmW-BHFpeLmPE=QJ8h)P0=qh#R@XO4!(qva#jz)G_Ep z$WZ?4VZdQCtb2~Kxrf&-re{o_X8TK;8%-V(Tl8QUyzH>Pa>!xqIYy;6QJx|UEra3P zuqH=d9cI93bO+A@HKP&Wb2_8VejP5)#_maO^^!}BTZl_|#wA(J`~4tTR2vZ#{Q#qc zi(5w)2;Wi~`W03CU*~L0aDr$Ed`IW3^~E1Xj@Fv4E0g(?2k%A=Sm!qHy$y;CQ&M_* z<;LuKyMVUX7bhZRgth>-BCZ%+JrSRrB?9%6w_6(N>esIsiHF5N+2c=#t`=XMn1{+} zWU>uX{5-P76B`>Et{>)b?CdogCDIC>t(4N3Z14yy+~m!x1dBz_0euad$(=3sJZGH{ zwlaVCE9s;fQR`AhUwY8*1L`Oi(ZiZXiV3uRs-BWqVkEN3mE4i&*E7d zNDV0p1c>6;3QJ9(9OAlPXyQF{z?^n3UAIv)l!_Q-pLQ~*P&rz?Itv6N*k^EcyIHX# zR4@-16l(PKWu=>i`6eT0sonU(+%hTqCK4K(C$0w!bVCG|=-M3a`GUa5)eiW0YR6WR z^rvp`?BNsJ^Us3Q6S941&?@uTW44hKqo=eJ51Cg+_+l?eP~Hq>Td0 zcT`<>8FeD9P_W3#tRBeZukaOt-q{9oMFRBa77OQgiop|W?c(+^5^U)wf-LPkR_UwL z*kcX;iJKp%HFQfjftI-OI6Z;++9|x9=O$ni`44~04{W2-Tyhy_FJ(oU9og2SZ(+?8Fz(ckw65@L*QRa8O7jg1tZ z`D%so_+{-Wf^zNX;(WA?(U)Fj`HXO_bKRuYjW#|nb$VGN2=03$`OCjZcQMpoQA3yv zvi{Y$GEd`r+}ACO2ApHwSRYB_8J>MR>V(#HPe}ntGVE~YEuCxk2JmXMst1N4V0n&? z*R{Y~ZOLd7dqQlN{tO{!z@tx$#@9*kg_yMW=RO)+V&~rK%{h5?S++&7mG?)RvYM&~ zkL^~si%^R7vLKSgEvmh5j@5BCgaxHITKCbVWIQq1RHSAOZHh&&iv2OHO(D~}{-M8u)Ofj1`~C|Y(^9X|#8xP&b&eFM8eV}kQv zHIL@Wz_;^D0PQ3mt>MYC*x~-0oiag+(Qe3+6h}7{3*-7Y3&pCow(?K3|A-chw(_lg zj5N*StN4ZGm=H+>>jkEkfItk(1XhwDKnyDmxDA8ue6EkbOB`r?Y(+!BZkOh)o+WO_ zhMZ}!58&K6v`z~$li5`FGe%>S-E|NUG;3x?qhu((luO?ZLsx5>zCAfy?{IC&)toE@ z)$t?zW%!L@cJqfuxI=yg{}&54pUy7x;<{9ks{c6(#r}M{Ig*!C+Y!NuvNgM^2&I~% zZ02@+Z77t^M9Dk_R44NW2^Ay`cN@-t^u@VdHX|N~UxuEv^5qVMhGb~LpJim}5!S1p z)ZF-WPWn!EZBI$ zZ0X$APfo~tDMbG^52y?2>9uy=Q7j-p4u+2GLjTMp(KErd@lt7~EScQOfupHsF8MrR z{H0ExsLtFj)0NJti6AUMZvf47@w|nPk4_~TXMNSg){N)c2Hfu z&2wpV4cp2bEDs{;+cNF%;0Ad#^`04+*L2m~b#K7EZGHb<+Dt|ppcC=FrRs_m;PSYZ zsSxww?6K608jDm;#kbImWaeQrl%_JnbIdMm+@P5>_HqOWUR{5bc4jIa&)fvHTm_lH zus3n8|Hj8^?&RYUR*kQdegdzCJ|O7-a6pHg)#zJAJtRYyP|y=fAm<5Wb!J<0xp;%$ z&)r%>rP}L*beGW`)AwJD^O^~Ue@d?UXXj(@yUyIMz(O`k8D!eXE26MZJKD!H)=8Lg zSYC1+h! 
zU%%3mZT1-jLD#KEW)nQuB?}dPUR%((R+_sWA?N_o-aa5Q|NRA!;eCmi8Uf_cQ_a|! zA3XZ_6tZ5%z^|uma=g2LL+Q9w8lE-C6>%rF`qNos{v8Z%i{Cmrg~_xaQH#LI6lY?= zEotUYQye2YbUgj&v^sCiv|JL;<`+1wdnnIQ*yyVqJBIAFk(-UBJOWcI&{^EaWOj@E zhVp=5QV!t?d@jsj@+9Y3Luv|^bgV`~gyTQ*%^E2k$MVR(KVG0U6WviTA#abhq@14Z z_BebNkN5GSTy`P3M$xoD2WE3JXMxsZGlAFc_dM@?sFEtk$0tcTv(T6JARBrl@CL{U z37L9U_Ih61^CH&BcdokoZ|*S?Mm8VhwQ@kF9%7viPnz5$jHCIK(Wt3$p}9MdHJ;JO zGF+qvrWPfv%IYY?5*H&Bwc!6EyRFEy=ElEh+$^ z$?943Gqo>#{-7_tN%S5|p+4xwHG(c`lk^h96+>LWYE|~Ay^Ma$*|j@8SMvu8Tf^L& z>6`DB?qzw-zdW~Au>|o;lt<-3lzTUOCzF*Rk3x9Rn1)n< zc-o6NvH0^~l{l!-o4pDBFBmWbe;?B2Z!3$n(|s%0$&~(Vq7>PfSh!G~{$9nRD{%-b zuzkf{TFt>CqVRC9A!BMk3Ar2WVKfvYEV01YRaqC;!klcEeF|ul%!%xTZy-t^M5#R) zi;3vVr@&qQVdA0x5w-rbX}I13xsh{7H0 zG1J1`G-O}7$8%(q@gP3_eYcuyg^J*`g-R?l=vQ#|c_93KmUdlNF^tF}H4H-ji%QuO z-Z;2*#RY>)TktJxhA6lJS&93OC4cW|+z@^$VGr}g)I^Pa*A|!lQnO<2dvuR1`W9-c zq|hb;)NSePb?r4rs6DVQ@T?r6s!8)`bUVF`;mXN!X>g6NV-L;E^8o8lum)QO5}WSyC0F zib{HynU0G!N8Bs(_;o5p%KK<{fX;&5CN7JzFmay+x7;S|5iKrQ@isG^CP6^zxp$Zp z($DrHJN7lsVDn`YY^KLUPR^;Z^*N?_V#-Xq=KZ?{gx! 
zcBm`$#Jvry9TvY?%?md;75gRr#4j^wyNc#=T1 zrx6~mX#v{vqub|B@jn0DFNEB1CYcymIMZk-$jt>e($`vAmH#OdKTXWdle=5$540f` z4nEA1uj>**<`ZBz$G%{AE~Ikb(r47EyfCcr!{83|2P(#?%6D++hhI|R2FdJxRPx1f z5$rbI9;i<3U?5KZW*0Wh7CmxDaj_0s3QVg_F3g^p4JFQb{=4K)kJQ#aJ+xt5wAFa$ z)5V;k_GEhU51B8gTHY4nc*p^(7RZ$={kewr+~Qv{4bs%l;Uho zd3Ov65nXRpy~aLMt<^8D=>};2n=ZS&dZJhb>diSr;pd8t7(>nVG2?w8S1367?lu>AwlA~LMET`0^ z;jF!+MH3JU{yvk?a*AfDMirE4+8Hq$=Oy;Dy&{5R=^^~1HFCHlVIn3|C6 zVY(6A9Uy` zFo{*0b?HRSi|@}}^p^%Gu1rf?pQosokH4B1Ekk8$-3|lOE0RqMJ&Q?vJF*JveiSdp zfHK_;Cw|pE+1@Ll#PrUyHSCP1w$;#5dpp`_EWzPTGE;4rdIn)q~GT2 zM}02r9QG4i(;f#KtiYVD@@i9IJG$IvOl)j$WXo0bnAvgMXG3n zud0|-Ot;e7>72D6i7>rNxpdgC+#RXLU2RMI)>a~UJq)$pOMM@&aQz16QZN0eA6xnl zq32oSBhXsxliC9w=8E^GWcn;~R>j_@aHNsIQAg*VSY9Wsi>>3V=uq7-tH=JvUYx@3Iwy9+|K7TTLD_NpA_LuE%4hiXwSobjK8^cWbaJL0GVCS_EF6u~ z?)oii=*KMLjxllBsX1JDBf0Uh1_goAG_y)m!TqB-mbHj(KNDq{MsF_mRA7G8mA~5Y zvQCQOcIP1ExxX8~9Iv35NNbJJ-a9=+fk~k`$*VFJG}B4ZAui>F~Uxr4Dk-rw|RxL!sKc^{jXql&Qb#Jz`94h7a&u)+f?lszB{VMOmArU9O;22IKl&vBny{rmTJewY!_@FSs(y65FEvG-)aqObb3s;Ct1E-!tU+8Ia{l0qEjZwLd z?H`lWdmQ1~-B(6Ee&E8i%U!Zyfw=&wJWiF|t_3Q6vUX%Z5)mG5pVUzj>`wov1Ri%k z9I+0kM)Vg4uYzpAJuzjQ9Z42Q_m?5y66P=jw?4t3rqhY2l!i*_>}=Y#J%gw57vp!e zS;dNa0=l{-Y4a8ENTza|cl3u$Qq8O)Pw#&Ns+3HSq{zzE#pxUHrgr~+1H1_Z?SNPG z>9@PxBsp1LKL=^l=?#$Y5SYAGykuy532|WSP^ zE}{EF_#I;0N5Ly>^#|q0g~MwB3fz?0UaGyf4@el@W}2M0$mN%nE-r!mVdyJWatu)=+GRz0ML@zJZZm zbUbwEfFdF3>Lf=yx?%uZF@(KgHicneXKSN##l^Yi4fv6tcw@W$YQmzBx?%RVI|9I7 zfIv0dZ$c*@+1xtg1RO{Q3A7*A1n&{(&u2{0LQ{4YxWBw5LMlLZxzXwC=_V*shK&p$TAb7Gl)$82#8 zW|5F!`G4=Lq2Hbcei36=z_YZPnN@LRQr^oUSXrq}9@tOQZ`Qi^Y&$b4pQhlWeBDMC z`y##>6b6BOYK1slA52G1=M9_UpfnO5-CmsVZ^*+gmG)^u=STWQ8=8&k)!mhAK>(PhdA--}?9p6?onY&Quhf5pN{#i=x=?YK7iq1#Ol_ zXUL!Y=R%&7LtZlz!eUQP;~;p!PQ8_b5czwq1Ro)cR5!qGqBRhxRSO&Yp7#Bw79kh` z$^cDK<|A^MD>pjIFA$0OV}F3}N&-1(L!LA@ogEj6T4y~&Kov6(!KT5^Q6N3i9}8(3 zTGDfo&d(UqhlSy^!g=lTw;^5mTqR@`Slr z)*O=O2+e&fi{*aM!z7A(8sbHk?m?>K6rGO^?3c zKa>%70lOO@?3Vx!Pb{L#^^YXU)QB}U2}!G$#sRoXf*NcMr^@nuZhOklk2oMgLzYHD(I;!gG^ 
ziV(1suowrih5MFt_k0!uAwI*VE|7R_egOFx7Z)bK1g?z)`UZbp-tf$(M0Weh!&#gB zJa9J2sqhWU?xEeNb>#d}l+8?-YXarBp_%ZGQFOA`b!HRV+br|vK-(s^R)|wzhFfk$ z@EA8>vz`#NyDJnWYv(q?Ob#t&3b&C%1=wKrYQUyeF%2DmZ!|N;bH<(S*@lkmtFR@U z@a)@u#rTInE-g-EqQYV6+v5BCtv@lq#)96inAXJv-qe}ImgXw;;m_bcZzkjPw;0jX z&h5N=f0sJ)MryOs_ddjpFpa}eH48%|JE?8y`%G$dNsMP%8F^VVpW3gpl&t=7&Z*ZMrT``AhfZ)* z{Mad3#BBWh`K9<3y+2W(ZzA%xzF`^c^r0y-a(pqUQX$RE>~--OiCSgAYsy)*xr>y& z+T6Q?X5!Qeqljo*6#V&%{Div@b>aJP(=^i2F-;^6xfx2yZm$U!(W=YeSy!@L*jNpF zOQJnhepCaIiVM|UoyW+(0mwk9qM#7Zlrhx5q#uDhy?_7t+OzQ;68^y|8jqFKOpfz* zb1-l8wQl$c9Ar?myOMlp46PM{9h)*x=jj@@o^uGz7Vuw_%Zy5G*zh0k+vKqlAL;_? zFHv>(ZNl*GZ%`cR3Guk?;1EjA)iwi{wQJdY$HCP{XloMWP(Ko+!=8ZB3SsR`BKB*F zSE9UgUL>24O$uU9C}Ep*@CIRH9UOoNG7w8PKKkSKye z1ULl{Vt-Zn!kUOR`kVSWbCFMDepB8>U~uT3DJnSuQ*byoJ>XlQG6moXiEhqdeN$J%(OY8HFPebN+>W>hR z?c~GZ_LnqVmssuuLe1Aj8r||pg%(#(^NgW&Sg`x@6--d^2~TzBowy4rGKu!bYmX%A z;&`+dn+aIe z3{lU^)6!^sBp>Z=fuZTi>C}*S#Koc!;fd^=f1$e|aR`LiZ|a#gu<_2t8lI{_VhoXl z--^??2y^$}uVj85f`=jF$NI0~YBl~Pu+JhJY)-V?XZ&WF8C@#@PNVft9U&ec)V3W@ zv6$myhB}>!13v}qZ#jVoVhj5v<8~XB^VsJ^zZ{p^&j&D*%MeLf~*xh$md-ZJDyXkOGTnj|6 zLoKl-AQnu_HO_1WeEnCau@UrBXMUs|n%TL0&bxDbQ*|C-Q`UxZ5OQ5V?{ggaww*C= zt>M3%-jVHn#$ovH1sgk9=-ZZAx!XWgtTOFlrmEkSC#k6`WuXLd`a@<>+Rq>zUg@On z8pUtQnBkYTHU_TP#|@&Wcb&g`;H5T}51s7{;`hU`_cE~>*9FtJYUE>gE6Y>(*lvmG#s;OR3_tXbQsr*o08jH)oxS3k^LOUiusHc0gsBUS_t&qmp6f zS^RTY^56>vE`}&;Au-cbAy&oZvOyEpkE1Cww)R$oPS=r~@A2s|hchM<>{^_LiE6xy zdgPv?KpAa%5=m^=#nkFRE5fa%xKk07V4RF(*R;tzI8mbh=1s_Q-F0VTss@b(H@SP0 z1328klBW%3kVpVw`N+#8;xzuQ7>*vZX0*U5@}27i$~+p3x`6Gwffs}4PGU^*ZY@^jP*~ZS z0*kj;^i+@F=)Y*#&tv)?@9${4;iM!nH#%WrZ8RuZQF|8_7wxDvs&(y3J(#*t6Tg2N zLd(iyx*GiW6F`+o%-+YGU2iWGT!5-Y9+>>hHf_#1rM|qlECri05s7jPf5t;_IxKga z1ZwIYbSaf8_Q}dOR+5Vq%0j&F&C(3+1^oGJ(qR}V7?-liO*ERsnjLzeA`0c-1Y^tK zQSs%$Qq7=y)TE~kdnjNn>#Sgr!m4qN4<9H)$t5)o(_l!(EVX?B5&BH2ahz~TrJW;Z z&od3HTDRD{Ge10&SB50)Ku5&OL-~Istpx~LBT>`H@wiMbAPr8x(h-I=s|n^~075rx zX3Zz1gmMEEd9Hz7U~9L|5J}8nZ5#mAdl*W2PAq2p{MauzMk5w8Db1oP;25%JF2Y3{ 
z&nov1mjgB3dZxE~X4R_ECHuZv=zF{pkuEm%zb5&}G$d7Ygz(?=L8A8{Beljd>)EC_ zyC5BMmfRHqKVIy)CoS6B&Tge@0AfuIJE8fvu#^w9m?qsQ2i}rMoBU5nhSpHY)E`$& z27WODpG@TuM^G(E|z@6|!NdDE809rigGE7pQ+t;S5c+=#%2htr$iD#{lhK-=p1@%p%7Zqme>BedL4YQBmraI450>L^=~@|I-fbxo+}Wekbft( zBN-LMlQ_fW=F1U^D@I=b%eL&X^o}ZI-5qb6cy`#QM^9bCbNsn2F^GR6$TL-#WcA>t zF6zp$$?*4f-ofXy2_xO8@QVr2;ORsq_}Hgk_MKj!lg)pglO?5sM-lPeM+`p zs;hfzbh|&wo602EsMQzKxTF8K%?m`NeM6Dg3n-u8_eT|mgTJUcyLv4bj?;grWP^^+ZKN(y4Mp)*AuWTY!ho;odMXL` zR_Q{`IFtR_g9aY`ED>_P{!dS(+Hg2Q_^6rJ4Z>gVT~}`Ze;u`iqVj7~dUutpuR_mF zc4yoj{6WXB8)U9ia<=@!Qjy<;V-Y~O57a5vamVoPRVYZk? zOsD?V;8=ObkeRI;x;odB*xILy?N(1aWN7r>a?i@HY)-4lzm1K*y_O^|rg-8of*uN! zWY~8bpILfGGgcq>c-*%)ZMHb(NKp$xhY~eITRg+NvPaCPqVNzJmg$LlRX9+Vl=6DXQKd;={Pek>2~_PmWK!8g^FGnMDGIrJ&- z?FeLJRcLO>*3i<=gcqOjCIHbQ{BZ%}HQyWyzh=G!-Y2|YZx~j;XuC``1ZVX76%X`5 z=o?gbx=NeB)KAa&(&I8~p1_JSh?OI7HE~qR-{`rTJg^ntO0iMb#320ZcgL4*Jfwwl zZhyt1Z9t!ylh*g0^Fmj~B_~T0{RMp|oN&_!t|Sxcv2I(LCFmM^feu*g1fU&b}(_?KBl5jec)6%Y|8JZWagSKD@n8c z3|e=da!*(3S|&h#TL)2ck1Q7F-L#I#z)`EhbEK$+PQIAIIm0s!uAgKe_oMB*2rbmc$ik75U^~((jKRMPFlmoU`JbfqVgew>mn}v$rA>IM0&FH4L z7yst@d)0@2#|fNm3`Q@>Jp{kriBh_R+Sy-{V`M`UApS}uhf@5K^S-V(CAfL|lFeJU z-f-D#?)-MS?+drA({NVFukUqsN0-P)>HB?_J)ZeV+X&IQ_S_nyWp8{&C(5QMC*e7m z9D!cQk6)hx1evY8qQv427ivLFy{Yz#&G}Y@7y)6F8|WbZyO(n;jQoOWI17yzr8bu@!w3Md1Aj;(&%YGTQ40HCgD|#!;l<) z6}Q4}93Ck-#Q^xA!*Q*lH}8D)SZ@iCK(;!B@f79VAJC9VJ_c!)D6azmORNY zo|tMmG!A?3P9&~5teK2>rXGckI@edFud_!bOocVl@Xq{%9i%5$Y&GQ5g_d<2<|X%T8&{8v*Tm z(I4h|SG54{TFdHiq6Lg(0;Tm7HoB6>#1BQbU2>`kq_m?i*TV=>iHgXQ6>Xs{k%_y@ zH)?z%B8+xOf+?G(yUgzZxy9^4L7`)mlX!}yla1N(5q7vtzpr*gYfSmwo?Sk_Qig8e zL1el?HEHe|Tk?NynOOYQj1IohsRKwRDu3!kBC3U&mg$;dMY|{u{&7!c-$@>;hOH_Y zz32+TJoGJr6ACDWubM%v-G8P+D>?kKKPIDLr@}Fzl!f!EZ(yKvDEKvhL-8(yo)&R9 zxf9`*3V-mGfNr+^XGn1=r+J~H$R-epeBJ1I#3TyN7*(9*yKk=+^usrvtzHhk zk{)Kjm5WW*PnPPPB_ajfUxc2Z=DHRqQKt?_jJEdCZtWj3|8KMi>nl zYC=hVt$1|v-x6lUj{a(L_#{S_NdyHc%t56TuOmd|nL+46XBhyH6?`%-#yU`LPQh<# znZ3JthB|gbC};K~t5Jewh-TXT_m?x*V)D?X)@E(KWGkq8S5p-Sqowr5PrSBh#Ddq~ 
z;XB{ZKXKrnUgas~%C?78GdHE4dUjWvz{M_8S}y$J#(Fx!u-W+OI>jt1Y-@A0>1Yfh zjkY^MEPb{*$gjarzJaCMn4v()soPpB+b}cUU zo%?)cZ4JwFcN>DHT}j;jhV`uIB>){&P8nHr@L?*FNTG{Sw0`t;)I(0YV=^USSsgQq z2U!OQhfWL*dRUUOnmk9;jT>)ZUNqx+IxYor&Rk@%AROA}lQzHis1{fo#1pR3VHa2i zLCLsjR))w3h-rhqu%l&vY3KK7BXqJL!tf{oEKh zL|2AhQNxV6K(CJ|1r$#t8tgTln}RdoG_%Y9a+{OHx;d)Yvu_&G6hqAa6tZb_4@e#J z=8Bks6JS~*p8?vtUlMh8qJ62^MjM4jsfU9l|Ey~t3-cULp)aXPo(@MX;Fl@!A`KI^ zty_m5*k~Ll4t#UGlTPOm|L*Rs3742!nQa989kpovJ0>HGT1cK7*q!TrcS*-JYm|(h zeka8Gu4J}Rlw;Z~q&GD<>CGQXMS?E<{)tE!ZAqYP|{u1-Xmc59&H zJSvPIh&;eRnC7C+6>-S5INj4pdy76R&{vuW`B)(B^4T@NoE_#0I87~1|O&XM(ngB4*KCRgQa~ZTJ|iq zBbJuO-9%ELy)O@Blv}nD^A1S&hP3UQ-3e=5`VN}0-IQ|0|s}w}xJVrnS z29_NKa7F&&*rTsTDEjt{Nu=P|-i5wF@jD>0f7z7t7e|6);rQND50>INWMqF(wECUk zkXgM^8Q7cWHx?y(^iY+Rq}OrZ2`Lq1B!G97uUXS%%n#;bgJ$e*G%D^|Kg&{!}Y78oKsHVxq zu^#nbc|>#RJ7W8Kb5SoL>>)*Xn%K~GmU`FLADkmP1IoudJz#o$waRvOELrC*H`h)AqI>j3{ zk%O7{Ebly7Pd|cjIq+)-2#~oe)aso8Chz7V2}O1elcRx91TU+e&QilV#88cO?bF|i zF_03Oz#0;!GI^EiV?IkN-miBg1aC+lk%Lh$54}8m2p=w~l`cXwrc^*OZSUJiyV7_C zsm`oCba6QBbP9zCa*hlm%F4n~KZZmhfwhjIu{(Sgfz})6Y&}d95pZt+P77-x^c8m zHDJ1u+PBrmzo`^)c=CRu(SW${->ttVsjNi8nkqt0w*n-;YI~Q{K$j#|xbjti!dGUc zdHhSt)FDSNZTc|D8yIU(sM{1A(YQGy-w^#@V%4=WRVx-403moKZ&L)VlzZxsL!*lq z&(Z_=Ibe8FyLF{^<>F&l_6_+E<17ir;LExD>gX;H-e}FoO`Zyn+&VRP+6f#btLyez z#(j2RPonZE|3GqEZfE6&z^?S0ZbDw-9&K2K>T7K{nF!$p&Pq_>DDwWA{r8CmY@w%D z#25A1p1?WQDQ-2>7trAC7P&F{zcoGH-x&c0I1=PE=q?uD)<|uZ&81ok6Dto&McAJ3 z>7f4SU}@f#bY6lmQ!7s8YsH))Xas0Q4p>vOfi-><&Drq$5BG9%P(=t>{`GNil|B0j zI}ITa2YX4MoD3s6<60(wOUoWQ{-gitCL$E!GI0d=K9rS>C$3Db~>9 zm!?NP{hqtb)o|BK?e=n*Lojv7YUe%i$qVbM@Hr3E+?X8E3jiVIvpZx+OIltDx+#!Y z(P) zn~Oza?q$AW=8Cv*j25pxs8i)mz#4Qd4xUStDvkOdQthmQx19aQI+%_3YHN*U#aOWp z`3Ri8L2klsE!RNj51t$_&2!@sGeOII8K-KjVTsGT13%_L+c(mO)!U!Bny_2<2uP>s z?mOr21=@Hduk;4_WS(lVWSga^p6+|iP+Yw2X$nG4i*KR29|0#TadR( z^!^xTD&tq4_*$b)QuEeOwoEx%PQccrcGh@QI+(t$ty`xHqP@m?S(kt?n~&D)CIkim z=`@$N^niudrBnN6(JM2+(;bL4;r;~e3>15m!<0k*39YM1+avTix_Xd^Nn>nj1rM|{ 
z30(AXoShm{DXWm-i_x_i2f_7uzFg3He`>9kTfuVwn76ENz>}aUo@mihfXR9y+G-YT zVDTP9oPme2?Q1YyX4m@F!dXm5e_c44Ag>(&M9FvDDfq>y0ZzbYK8ajXsQVbVU+}xs z#}-T>U(sElWnLBKE4*cT7C6!{)l#!54!ato1DE9xW#pQ_$_|%g77|N`n=>V%=z6>{ z%-Fm#+2|Bz??F>!vw$Y;7%5bSv9VtZH@^y$HJSdY`kf18UYQwakVTvWmzA;&P3>%C=M8B+{AEhRS8TyT-@Qd-w&P`kfS z(hw{8uFSa43VK_=x^3+1lWVU}TYR*MwsEP10&zMwZGn!;KtZMzl*<|UgQ0DK_RRtL z%N&Cl@0Nd`pZ+=ABZ`WJOn>q_%p?B`jq8J1MtlVD-RT^@JFjI_(lgu&w*D_^({Gdq zkpGpP{~H^(rh%W3p|@}v^gSeAjS$U$&o0YW!7cWz>C?rI@o%@B>p%ctdSsgu?&CQL z@T}H_Fv{}AjLlzQmhujCC?SsCob$I03MD2&f9ly_6Ky!q5?8Vr8<#_(RWrv4)j~vJ3Ql6&C2kpyzEd z6ZO|v`T+6JaLNhk##JtYGw#p?4Ss*^U{Eh+@vG{MYX^&q({%#n8 zj*cJ$*V=?I_djqURJ=PJXuS5@^?zyn0U#E~z2>h4KsW$p`PTYe(|+69+RblmR@`!u zS|GZIEWA8!oe3V%(ZqstguKMpVM(Q`?Ph~}hI6LGSnPg;0LaU?dS$Ce3r;6@O|p>= z0KKyoc|ioFNMr8f5d3&1in?D)Z=3pwa}qvRKr~K0UdP9Py7W5;5QjpCjIi%FMa=76 zKc1pbz^-wTuk&Bpa9x|Mfr$N=2l;;-+f^YE0qa}8q&eI}d1w4E z&K+x1QGu{s1goa7=V4qz@VHSJ!;S97a-8fPd7QQBpir<-q>2Q_sQv4nZa)?%Uj1w+-B{rp%@{l8-p-b zs*MtLHQ=#*i<=BZM4xq-gN#nL6v}CD;w9Kg zbU_C9iefV^Ks1*5*KVziasJZbPs*V$DzCA`WQ%VgO&?WrIM~XE9GkPDK8LT0PqLRA z9S&7TPhxxJ_S}(apns7%UfMCCU^x|U9tC^0O7O%9uXF8UIuP&oHL@NPzdT_n6I2~17=DS zK-K9Xe?e4kw-u#3m?4~eHxJhUP^*;JyCrdV&T5mrhUP-J|C#OPIQOfYKM?-J%f$(n zm&ev}ulR=lAF>80gr6%>5oTTtCKf31|4~c)X=_i(y5oJb7tx|gRwfVV6-YHL8ed_W z1BrZi;SW1Bd99_FC(^xIS*JSr&djSaV&1;s(ST63{&|ILP_?{R<;2A~LWqVEhwn*! 
zLM8-46ETdU#dY%>1!@Vvo^akz=Xc7xTDoOfj@+I7f@`2py9O~(ax6r;R9XH@Gur3B zVAgd>bqRk)(x~k#^lnj-PJ6+y;>6a25`QqItonc8@aIbibb~ZMXRo8Dsd3q2Fw)5v znRoj5zhwSTnBY}anp~Cc&lOV&?k|=f_7*}loWoxbyL|V*Xdd%kX)jQ<(f=#AD9i`C zyZjF!UhVYtz-?eFi#Edca^*bsIoN1o!EV3&$e8iuc6)`&Cj3g>3Gh8)dQZ>wzwo}s zuaQ?5FN>G2FZloefMU9~TV3^t^V!KKF@Q^wv)c2dc5Sn_Vll&89pAni)?v;SXD@aqyxTY7qKXrKMn`fe$I=+1@CdWYlCkvtQe-~7_h8V zShrF16Jir2ZZpLW!~A9|SS52?3c55G^K4)Wq#xGH75aLn8~#s{NM_wY)~>L=F=-_O zc>zpy=EPbhXee5+c0Re*bIYVk0Zt>2b5_QqtLMs9TRQ^?^mXX_oj46oP#xs4yAEwV z78e)poBtrmy^^&py-?;S`BwW?k9zo38Pp&}7e`l3c^ci}sr>B9&qfaRu=!_~Ky)%R z!kOnhfr8lNXsN2mj?A$|X&ALV&icWO#BW?Lekos6;dSM|mcDNdn08MDb61l4My=h) zEhDFU6rc=rD>J+FGfcudM95JH4wuj>CF@;8oBv@MkSi)9%*7+b5mAD&TNhhOTpmDz z!a}O`#w_pU`Jn&1@ZoHeF9Hem1!tkCpD}jWGO>xS6mI=6HQCCNulgXT9MU9>^7<3F`}pTRj;i<-Y8z^@P(IAAvO+(!iQy7F%}pH9s!Ukf{J(#|Pvuh3Wb z=cYz*R=fGX)!ZX>lub81_;NOVIQuh$mrbVxw@){dT)$0lx7D*{a;B9^7Le)+%h9jUrhgEXt^!WlKEUZ$|ejU}_;W z`ZUT_Tj-k6r05kGfvS6ek%U4fA@WwJ|Jr-8F6QAfdM<&3{+{>8DL#g&`$3q{->*B| zdd6fI?jKRhjE8rq#iCt*$#;GR_x3BVFsH{MXrs5iMfCN^9grcDNh?IC>V0|mbI zZjEc(y>BEY7y4eTSno|P(%6Gs3Y6&AP$;f2#U71s3~G z!Hu`TpC}m2kHwKXgiGsJzf!js@5uvikd~xm!w&}q)cRa(UYXNbfY>9so08Fc8oX46 zoZF5B<$D=bb{Yh@dz-j@Ejnwk&U`e`KdRm(z`fPWLL;Q$y1O;Pk2UQdI! z1yKKje-}$uvV9WbpM()?qM{cf&rz_=27m;R_avsxsPJV8bi7RdJF`lcAbI>oX4Qt8 zx8dscCdMe+Vjw554&-q5rElyjf!fBX^W`p=9*jVq1E!H1nNqR;4C%1=7Zt91S~=z1 zqrNGy_wiB{d|6<=D3W!+*O|5GkA99yTl+7HlAUGM^t4c$p4KEK2{@*V9jG-G~uGd&+Vf3SjL%3x_^K_8F?z{m+v3*85 zv^;XvM2Hfv;SG<1&xepofOjyGz(x{#s5_carO? z55^fbv7edbiFg+SIO`<6ZmHFnVhrpv}f?SOY(Uu^a0!#O5=U! zo0^7z6v*GKi3O&TJjPz{Hu!q$564=(w~}5Lro0^k6OV2=eeddJ-$7=+hzc!rd%S{t z+|kpV?G($m``bFcBz>2G_3CY$(!Q^;A6!C0+^;?TU??u^tG-N(A@z)m#y2K$$tS?~ z4xsMSPJAG>q0hsgXOe=MU7#SM0Kgk#4*ITia4r^1f(fVe@85k-;K#1P$6w$hN>`~! 
zd3{Q3=BHg;NFB)}XaSe-?Fo3F2mm0Q;`;Rpkz6#*Swa~}skw_#& z=IRFlQAJ{#?(X!ZgRkouxa#$7LEQWtLCxmO=$7WNeY_S4MT|rG*%g(e#n=1@79O~4 z2Quf6dO6tC=kYK7n*xCsnC?L4_`$mQ*7a-^&~dxeG3nvZ`+Pug%$u4cr*kE}w(juB zwKe2Nf4d&#X(veat2dGPhU(nQrS2OOHlN(yoF}?H9B-Zc;kq5cGt*Vb$L*y4tCvxS zapa1Z%iP-b@EQ`IwVZl7L{9FXwaxUkh4Bwg^|_YAGLdr!VBTM{TnB~M*G~rxuCTc= z8xKGKrVU?gei3Tvj4T9s*O})x0VC0R#Mr%-C^`U42cm`LLhm)9^L? zkv*r=LJTCG|5zCR9=Z(U?eQEjD578%{ea28jaqX0H^~UQbm_qspRb;CPBu~f_m34PjZjKiY}vWGAKS8K?bqJ;qsH9C0$%872^OM)@FNDC@Pq?aOb^&rt0={9-#>5^i*$GR0OinUF+ou}9CEs%9|eqk*>u>4HsF6l_6}>8AsVR8>lVHXh*=)~ zRC=Z2)uOiRGPw-;zQXNzJFp-OZcEsCGsbm6vI0aGk&)!et)r}W+|z>#Sj1%fXH%5O z2p|u&z0&Hj-VIV9W<|;dnPX5L>WlvFeMIx1l8jHSI_<%CN%-}Ibx_Ls9fnpnE2A2N z>BhPll~-DW34k^61|hFFZiZHMMwGR;nQ!sF_+>Nk9l>B57ofP?_E#evTm0KP99c?M zeFS6}UZA@juN=F12uL5~60>}hviynbpo)+tL)+UoKHT(!N(Pn;%pWtIS^CaD7`hWf zx#-6<4LC?x z6Dk{9I2DRuOdsZW>y3LZwrxr!u8U1X31*@98=exbzpXax(R7>!76L$&|1^shCW)zd zxr$GX{EaL-uR*JMC2~9E+yhFlVb+n{we|0ao3NDCZ@+1Glsf9RBU*v_W_UU?bubH2#_<}fRwMt5k z;K9AA3PWbKrfgm&u*fF|R+L2#W^I$8eZPP@xhr-v;K8=X6z9q-_xX6&sK~HF5kkgB z<)?WmT%`t^VM|6HW>|mda&RC{)a0o zN24^xGpsk?pWXLO5DO1gCDc~xc7fT97M>5w3`Ivhh~}qJdUQhHL~gV7zXERoo@T)! 
zbf#4^y~pf4-t$qC@kq4}Ty#EhhPd+chBCFaLLvgHy%A!ic+MkTsY8mLT9U6delP5& zgi5o(?@*W-CN%WP-gHN_hC=vCfF_?C>!5VZ1*eJED;FAfm|(OctV%DXoS1h0MBAnf z-UWqEORHi#l;ZdiS2@EKVvW8NRTZibZA*w;4H-Q2%tA$&B{M<|B9WDKuCk zB6>gAYAAdz@{q&S(6h3d(W52#UC@^T%_*u=!(o`*jp_C@RrliuzAC^+hI$m=Lwvo8 zmZyOuSlt0WvDdCEVR=Ev8D3y-;sSHYhq`iYKG?s0aO@f*3yBVvjY-L~1T48eEvgEr zsw1Z_L8uo?jI}VB>F|B2&VA|6s|U=B{)`{uC4QAu zx=jBc^4!#fRCX5~>JC(8IlTlcCKQ3CMQU&+WOQFV@0$A7qc7G%#PZ=rP-YZS5j^;q zNCdb=QH9xlZKW)Q3WGwN-*dblmK_n-;fEaU&bA?Ysd(1L!a2MpeB1-a2g6N__b93)Ne0r_x|K@ClOdJ9`VH3%jzBhc@Y(d{?^r+B1U3y0!UzOfH1?JM!og$^(^U?-e*O>-+!7#8)|g& z67@G$inddHI__)pPIOos(ghe;nI#qd*ZTv?56n&$-jP6d*{E|DpFdZ>(}e0&$>JE( z2dwTF?X2P&|L`#ugg~xc1>3hBjVdCWF>D05 ztz41K5XT_j`(>Je!;fk87eEpe&EFYm1aF=hU4k1hZ6OT_f&9uV##HZP zh*bFWiPWI7AOcB_h+zk}C6(SOb$+xVk-AFIYXxFLLoVhDi4COwBp~1wyq38HniVNX zkK;-hF)Vm0-tY}Y;jQ}b=>AKRhHZ$Gp-pq8b8s;}m5toZe%b%`xJoK7`){KNsk-)> z4~iUM<`ZemJdo9kOa&s)5?@Dly7Q~N2ETtG(|L(@rxAp$pOPj9O+@OqI@0#lw{}}7 zS)ax!q2}Y0g{cxSPLYoM4~G`jKI z`mhowhDk}Pzaq52SA{jJi zft1s{Jad)29k=@^x$xi&n*w!Hh{_{AKm2CcOSGpYf0K#7Z%_7=<1cvVZx)Rnzix#e z(DFYW@fvcCQlZyoA_4FgkZUzqk}Hkyt|hbRdV@ePQ(c09Z++R9Bw=c7t1+9 zNl7Fuwu2TsN0xDyRukcXKjj56ai2|h*g&xwSz@a?aHn^{gU+=DKZIsR4&zNI9jKDs zr%DS7b_0kOY#;HPD>Cf(@Hdtm!?m#(>x(G}s^lhgZo~7N^T{L^sNi3JW=ZvhWQA_5 z@mUB|vbM9gw2=?X&_?6KSFlx7j*8P{&|`~9J~W~>{Y-qqc0F+i*7rkEtrgYAD&;b3 z1w7^HTZ&1_0=!8B#rZwxk&E_Ba@6+XbTk*}5v;9gnj51y_^pNq6WKLqe@-60m~W7L z=~4%0cwO#$AZuGC8N)|uz3dXixp2jk1&*48!~;Pu8NyBi!dRLJfJ;%E4}aNtTihW* z;j<{YnFzcKeHe*ZF0EGH)I7@l-%K*BxK?UI#Yo{5t+yVYM$HJ}F1xeXT1FMV@0ZRC zYVGl3=N_pLv=BkR*7CIVg1?5iMC>$O9B1pqOf!7p;}i?7qRuh>?i_>Aq}~9J@GZiNaY|x0gG<&r z1s?J3jFMv7fxJC7>$ZW+^tY}z&&>Z0;y-5yhvluLR07GHLM3?y+sPp(xhs=q{3MbI zcai8qRI42!&c4|k9#D+`B!rP-sYbs_K_mmLtF}_9?YDeSdP-K@JA-}0Osb^cMcLwv*-UP1t3;X;GdutaicQ z-7xZTxAE-@NE&WP;LBL`ixM)8rr%NvZ6q=B`KE@|>0?#Z>`}A_#8KAx&sjn< zv(8Qv&}QE8`;EiR61}^Ea7M|a7w=#T&&kCtp-gg8(&$tDA z2Vvy8xD4i| zeX44Rv1dl_!9X2qizHD%G`;!Ete<6Um=4x2N^rP{4F`GiQ}yKF*w=^xA_vIh(6f zx$wYX)SK&4SD{g)v}_uu-M#6Os-WB 
z1ZYz8AM4lTal|yALat`|i$@zeJg}T_S*k<4*BTr#sVVc|2o`t(uOY33L^82Bd!#?G z|BZY;51*#?-QMU-pnWSB7lWUc^}FfT;Ce|A^Pr}&ME83Y(nqdT(ituj9K4?v%8M6< zvP`A9S^Jhh7QY2wXY`_pIRs81H0^4rNn3Ktcz6j00@7{Gs~|% z(-{oI2wPGZPRnlvCjOvRN}$w%Jcz42;~-4x@<@G2B8?D5$Gkt1ob4Pl)$09b?^3j# z|J`qH9K`)Ep3#`%SP*SB3K#$9iBh;5PYr=L9&dpfEd)~8JIoU4WC5wG`4*&z9mL2- z3>n*Rk-8?SMTv^Wbzw*oyIK{ALYh0%ckc2l097+O)3SXz_vroZm-}gFC3GjJYc@o zOp9tF%D?5QfYqdS8GOyPBnRqnaQ|Sq5CDCxAPlx(4^^h7j`*GDzc|soWQG?bZl|Y$ z^@3TJZ#Jjt1uF~n+ai=mC#{OKrm75NDteXidpRg0G+=S85OgH>Q($~}=3ldk)>%k_ z@*k+um!`ToK6;}r44jbmNKyi7If zz`v~1YZI3WR^_5Q(`Ab%^+OdeKYY>c#zJLx?9>7#Y+j(bf&wh9W1j%m4*(tZopP+EB z5JN9YMgGs)kSbQe%MAAXEj}p9BC$@f33`h~QrDj{#AAVty)Kx9B_mg_6GH|{{%bRI z6x_?zDvYUvMi?}}xhjE5Sw8E~u`4|GOnM!Fbhz!l99dyAx(2nL@-QT!a-_0LLsh=z z9Et~yAp3NOHrzOLsuRNaZSUK>)W#-eTv_Kr!PNxye^ z)Ar^8+d>1I#sxkRh`$2|>x4 zxFchRKJ^N=O1~ooXUD(H*S5o4XItF29~5hlwI{rd_2x>Cm4O@{7?VUy&-~pqoPjT= zM>g)3dA&Dj;))dF6nTkk0MDNpd@Owjc?yT*)7cTimPWJu)AHy~2}rWGH-w&%%VK{Y z#(@Vct0Nf>YhJyan(XiXOC@-|V*(S~AnRp@WH|3EShL(ElcRhQcnzHl9rxpmm~Sa( zCw%s`g8$qp>QPRnW0%@sjYkeUauSonsWc`Ro z_$Gp)z}G)nM(yr1#BCf~ZWR$oRV5QRYrMQzY4GB#^)&#O!mqt+03+RNb zt@34WcTF5WyAOw^-zVcUqFu4J9}(3vGj-Q5kjDCr1^##J1zg%m&;St>rMg!53wXEk zkg57zJA@afv*bcG8J%zjV5x+`^~MIuRv3kHEEF4z*4fQ>cQSt+&~^JgNgx1YN&K7d z%)Rr&&0c@8$a6oCn3k8T#yHt^HAU(DdAI!5I`Qj>SnlmgwDGTo! 
z0B-jLSFtz;BNbrB{>&^TJqRC}CnOO$BBq#C9+vnA<0~VMjymn#-O$8JDzHB`>mwjE z5iz>{8=&?a{o^|nrjsM57-Ov*bZEM4S`pZvBQL8MzmsPA**K(yaWML45_n<1AE1E3 zBie^Hd$Tv7AeehZoHOc#vX|tG{Gx`*k726aSUSf}3?M;)o>%mWPzVJth0fBcL7KQX z`RwU_0K;BEcU%aV&uX+>`?}k`>7tTM~)#L-;8o1qkMu9 z?-KYhCzx+V!NYu&Qy83Yz?mu@W-j)}0i>Yd`LJ`bX#VhWZiO85oIk2HhT=(wt+#xp zB08!P`;)co19DC>*T0k%Poe1R2p8s+>CWq%Jk=mWvkb947#SAGk_=Q536Pf9{G&}| znl)?XIrlp)TDXQ~jT*K0V7 zw+tDb+0a;bDr~x@Q5MPOVmdl!a@|a~gA@$K*XXMzniq=cj6{#RAuP(_@sQwE zCNV)LFKc!H&tCkrtk~``2?4282$F<-uPz&?oYC&J)Ch?7&x#IriI_8`7UDj;{6dQH8R9ov*`S zw6W_~mP1UiaBDzKfc_D;pCt|;^!9V4f}YLm>E*&%(y5xfC)73U)J@rDe_)N%H3DmH zn>6gxi<`>JO|%J|ZwVAxlAlkuUK-<;bF$m`w_-4_J35h``Ie_ZgVo^7+9jUaMV%~ZxRq(p zmg9||C=uyf(d-r;LyJA(z7iy~s_N}^NU(Mcq(}}G=11R&lcJTM>}^|v z-If1bhO%`E}f8Fi6zkt)@Jyn1=rQ-m9U&dKpe`4lC`QrYc zEa(#yH#^cQ0q8Yy*J}7*U0QT&Pck_u?10?(KA_>eXB`OuRQwi4t{u;~zpGrLD*882 z79CIHT&6u|bu@6*e3DpxdapsGDuCMSG`@1WQtqr`C+l!=W_9wuXi4#i5{QrGQ*MCa z2i!TTjX2{UUS`*v3R5eBy`y8yp2(jXW0+_TTEyxyM+cLfm@ChlvO7;(?sIx+`$H{| zQ5XVSCzhZbM(s+b9S@sXazRA;oiGn)g&88B+uE`_AMa7#+945d|L6%Eo8=i?k%KVt zMflaB8Gf92B%3c*Py6=b{Y+{=p3${CALyBz_a?C~qd|-ap+2C~q^C_T-3*hY;S*ve z1Oc;V|F4%6T{woPzMr_fpIqmAFrKA>lmLJYH6+c_xk5Ek1ov1 z(9K^yUmpJ1>?&SvN$2COZ3*=1iOKA0@6&W~GK~~hT1110XG+J~P*dCIrH_8~{clvq z5%eT^Eye!l1I$}2Dbng!EaH*vVRtZy!&qVtJpif7k z7`nqae0)o6D){m1-_dj8?WmDfFp1MEKP2lce}u=laxfFs?|TEp6(}`8JP72H4eYVh zyUq$og-_o1@hLtSSU?uu9le$7H4>mcafD-1m3Ico8FN<`m~2)k2UgUk;p-2FlSnxC zB<;f*dIpz2IYI**Ec^!hIG)(4&LbJCH4a8Eh^pXBs+?)Zf8Lb`_GvvJ)t`K3yrL9y z6uhca9}$oN9mSk?bMwAuzFTob>*UPnV%QGhxa#RYd*pKV|VJ=mk4Q4g&Jah3l& zq4qc0%OI$Osa#p@o~x?l`|Jv%5bGTu1@wSAYv?wW)~au@IV6BkpDKooKoOo`W&|<@ z8xJl5Eq8AQB_M5IPav3(dlO0h-}fOD(2j4W6isYLTfm^M4Ly!A_+L3@#HdNwSB6hc z!dhgb1Mn}6%kGwQXwL8m9~v3Fk9AfVlKC6V1j&kJt)It}78Om4VDr7doh>eC63&Zd zM6cq;9NW>be1c2q!<`L}fg3)lx#ua8IsJPbJ-gT-46w;5yJRbwm}i=PPoZmhF#LGc z3KhdC^ufdbbQ$vTw^A>UbSJ{{J#HGQD4SBBwdBbkYzS%!t+wLh`@Zj)ik6*|F3bBY zi0C>-MegACf0>}0sgX~ri&+*45}eX)n-*LuWQyUVPlU~BkF`F z=gS{eHg_!wS{>TMOO-(mBP{RbC3lUeuRXtD-PyY9tXwos8;WEBvHV0_}5 
zZqvMp**#%fn-RD7bt9TlybASNo*VTJN3r&EiP-2=zA7P>HNtqtin-Im4E2ISuNc|% z5xzW2_FVQqUxh|D&($)9!6s$+n=Co*z5cM@IO=!UOGx|^iu|E(XN3HV`IUrjRBux; zkU;#*fvm$B?mRP%WkhSiX1pW|ERNh^g|Fu4uk^vxtTqN8Xu6)~7I&NFofv$_$TMUp z_!pw{t9@sNb@KwCH)9SJ~umeuyMytDUY(S198^*+J+HQ+yBL@N71|&PW`kA;j zgBPS;qV!7flUkqY3d)0%#@`HICvx&$8^X>9CC3R&tHvg zGM@J$bBT~!dvtcA(b^b@UstYc4wHpa*Nw<;ltqtju>@je6!Df* zp1|1}diHLNlUsPK7HRtY*}-anR1~SW8M^v6SumJ-h7Nmv`|!NuH-o|-OY|{{hVy{E zOB~bso^P?f*tn|6Z9&c~xWh(o=71gfnksLz)PD#Nvw%FU=v-EZ8O6pBe-^hJ3h5A2 zoPKToY6ijI({4o@GcI>%lq2uPH6R zD=|mMmd~}wd=;!uj8)x$M0|c)q$C-IQ$MCxRJ=N;s4O1~v#<&%T$R#r8mY>Mwc~Ku z@icDKQ(E~pfpP@1tB)Q)dci>6o9r~FroF#+3@b+980sn(uN}+1C(P5q9SM7RoP5%n zJyg98D|i|0?;-EDA!ClK7-;o$BL!3nOx-QB*E`>XrEx9-#TFfZp+P4&#|?!DLU z)ocCQxqIjrx66{1%jAopMlISn4if=QAU{38BR0@W^0^soA-FkrL>SB48+iQrq;B+u zQt*iqz5#}ckl!P;SCeIttwmlvoKI?IC0#7W(zEZ;*%S_m^6#C#y3{o?V)AF?kmadT z;w=nE5cK>Ilx;GaD+VcW@bbZ-@QN29-as&2u2#uc5O*hgvzH60wz!=&2NAX4D9U0V zUGZ^ZIUu|QJ}j^&F=S-bp%-ES&y6s%dz+Jj{yYrcK2U3GHFt3f%7eA%cVYUzudXi9 z8W}^|UlXwG4#n?8h)q|8BmHi|gO;`Brtcre-WV6AMY(>rkfs z5^gsGv;se+oN7xW4*bQR-hHZJ_tlbOia^ei#$IrxUPQ(<#M)4Yp|TD#x`3_1xNYi^ zZS|!~za52pl^3Tu!fBus&n|&-4hWCOj zW|iS^hYB*Y=Pj}N1kD`lbj)d%z~FnFWzar3JQ0xz;6hjzK4{ezqQ>2p1bWc+V>Cq5 zK1+2kmVYspoSY4}{s*WzdcTC@aQw7dF$4d%J`4Y*EigR194?-Uk6C&Du z;Zp|`;vzSHVEiVp4h_`~xsqLumkJ(NH*=oL&2ekbGF6%Iarbel_@%ETR5@Eeh^JK6 z$K3`q1_>2CF5xG>7&4ZCA@Jy%=i^B&yNUFl27>FiPhpxP)=4G1g}AXU+!Wq@v!5d zL8B8=B8B%CkRk32Moi%No1gFm{Bj&lJ*!Lzjpnmbx|brw*F$)9mm0BC=b-Ph+o%j& zYb5Kv!*|q>T#+E=D=;0FIHi{}yQEVxaO=^xsqV}?xi?7|yeL$LYir26REk;wRXTWN z^B9uZmQ^fbjPt%VC*0QGSrQItVH;ND>g5bF0u{Bc^C%cO(!CtPW8Mz=I)FtoH}OGp z(k+xzdoV3xtn2b#9N(m2P|1$;??EtgsZ)<1`CLHTeAZ{kO?>5^pqKCMMQ z2~TG8#2^92C)~qZFd`l9@-M{l`(TS9n}>HjvOYx_$hvv)2V# z5g(bPBCXzl%>Ke#WD;llp*#hIGpOE%ytg|}mq7|(M6*JgX2jC80_F=?! 
zKSYl_VcaD2kjecl;#P-vPJ%yy^@B2Un*BH+nTZBS{xR>LW$;k3&(dtO7H|u?OP+^; z{%!#xPQJt(9@P{Xv#=W!V@gQ1C8a>jlPsZP_sQZb=cuGxy5c34bt+le%Sjr%^z$~r z%-=wnFE6UDzi1l!0~VvgeXHrceQE!*+A!=kd?72zjt^9Jaz=pU zj8`v2x#ZlkNwFSors~#)Z?u{u?1UcGg864*l)raThT5zMvOqA0;epD~0ro8ztb)rXUBN@*VRyC)9f!jF*IKy zRNL7HRIChhi>zwZ*i=dpkdp-B_DO5^yNhmXO$~FCR3&09KR5o0l1^1XJ`9lMMy@zE z{kuD!lcZD1df4F0U4j*5x^#y;(cf97Bk%AU66!s{K#l+8 zT5}g_dJ}&NQK)w6v%kB@7;HD5!}i1bJ2Wfb?2-|xTg*qz78~GyflkY)9{#oGvR^Mp zdWJaFGcXE|?%B}$J<z-*JyU1~c^sysynYDkhw;>b~0D46cSKdB366a$hF=u0amEC%$9R0M~@ zNr@4c(`JZ2uE~v3XKNakbBoGEjY~|L*p~dszcg4uf9nsge9K1}JaW6}S6HU9~4MyBB z-V%XmP%HJGFh^WljvIC{>t1}?MfY-?cR(<{(${YwuC1^;Th=$wOs?-Wwp1ezT1h~6 z>+m0UI?+H5H*ai`l~gP|PenEINY)oSLZY(O+Bgc>dE}fQQe&DNYH*#5tT8VhmD4`UWM!v9S+9X2xG_`9xuSuuqCL#H+uKN!3sk?`STE9Lpyk} zN?~fBY`5vu@$|#35q?$a49_g=Sl`Jz_5gBqY1hd1f8Z8p`?@UGCI+9RS zd~jO9>IOE+yRJhtktCEMPGUfGKat^H__p?LCQLVr!kI9>@gCm{pSVoH3F?;i!mEA7 z0|{3r3P~eC4PhCLGI_Sn+#!4qBoqwOAPXZ>Fql2}nIa!R8- zKE+EJZ1LBMAc`owT;6V!BZakJ>0i^tn7$mtRL*2}^W~vsQm=xvD&|87(#mV;Nqc3S znftU!LVMEx(iTf(W^v}F{nVC zi+iBU^_o$$M?b*WIN+R33_?fA6iz{6MglMoi-{O-?E>_V^oiMdAlSIdBW{?!)QQbs z7%P#~k578ho!62fMxUT2#5V|)963)VIly}P z6z|Ss|Wcc&8`KXZ1G4tp@A}Q-nzcXh>U@$gA=#eF;Hk5E|!* z6LV^up{y`u0yK^D42EBQx#>WCkboS5n{KAb+-~3wBIZT_DCI*2zTluvn{SX&jxgd{ z6fF4NxU}0kJ$!H}%`@zdUeuR>`j-)7(IOeXZa8m!gX>O^|p)MfwXOFMqc50;DU-KW8QhT{nl z@mcSC5#*Lu4EOUN0UyF!<9zwO!M2~Q>%5+8;Y3J|oBVa6>4KB$g550Gq?JkN#X22B z+?=s#oH1z!19{%#qaM(Zy$)?-9uEzT;b@6mpg;9A8(pCa3N92?vr2XpP&Z&AnZt0q zud$d3#FYMX3OlxUX~arV%<3JZvzb$5=FIpWpwnPxV&(wom>SvL_lsBZBl%F{SEvXP z9_B~)E{m8-M+CRVv1_BuO_UeyCj#hILiI*8Z9Mg!gAxhbd%y_Y^UihYE_SBXI9(+8 z21ktVr`4<1Vhc;J!N}__w{+t#SNy2Z*!1=5a-7nKLT#LthA6n2{Lsrwa@d1#pcgJi zYx$n<@IJX63rdQj|rx#8f+K3%(A%xQtlZ z-13Zb5=MpI5P8YxAz&tmxngnn6+PHxMALgpqr_!>=K?h;Zg%Bf72xR?3B-?qZKLBtr}`5>^HE#yBEwKhu;wWBu84$nsyv=d$|6~7I$gFp_J%c;V7``G+gHziT?)-#3*Y{MQK5swKiEsA z)2VGq-Jc~Pn>wzmLFxWnVI4v*>*C(_HFW!joxzWM2yS~rTcz@jN~uCg6`mrBn3Lev zX_tW3Dl3eQzORMZH_85C7!*MD+#GM`SQHs3iM>X#Jl#o)90?JhF(wQR1?f1xat~$W 
zC%SOoHM)FRiuI>vb@SQ_yz%Rflu_P>%-+P%6@M3msTs(xjMhFqi0X;uY8(TZpJ{A0enL_ZG=8oNch)5)cu0I}Z zbNGWX(0nm6>MOSB9}STk8@qqW7~-a=duIP8Gh!PyWaw?qU@lBL-E;Ya+{|pQTpCBp znz!XF9}lxd|D$g;KrOY|Dagpsaj;Q)g3ybAXhNGLG zZ^=5irvPinteKft&Rz|N>D=Dw*3FeMOi0KQ&FUrt!!bIj=yzw@Am(>Zj+-O~;Y4Do!hZ+d-@Gapzx=iIiE|F+!FYAz3wGmY zn-s4?x!ZGwJ@xLBD{^rKRbdsZ;0`E-8i7u$wG8HFtY3Ov=)Wk9iTr><0ozUmyWxY? ztR;+2wTaqD|=A@Dl;r;?HeChg1zA>2L#JZEjN+^IZIh^uM7zt~4 z%R`qH?+CR=ZTGU6E!;&{PIW&Hk*#!+?z{2@-@@mN47HUKtgu)h3c-~erp0VZdKZ4e0>&N`x|pP{NkQcswojF&>Qsi&tp3)V(ew01D!wjvL;`+`*}m>baD=$i z&SA}4*L+U~DdRo~aSKLVW7I(%rk zWhr68<~-_<_>c5;yvL*bPsFGrwAYFXRW_SSAFvYogH06?7tDe{Pn!#c6s9;g5zQuGM{Zh zfnD%w1U34A6-*TWD@&g{Z^@A4hBN@fvFLDa%bJt$mV8q-of?yzD+^YT{3idC zr~hx7o&i?N_(U)Y?gonwdmInmdSK66o=OpP>VCAt@)oLJi4=2izlZ8K?hI=GFGzH!TfgiS3uO~aQnbwcN=ETdp24TS^(1M!b% zguq);U;pSzpGgKMuFTS_)nV93O>qkR!c`Wpga==I3a(w14$rgU|CB57?*#X5oB_KdP+ zgAnRQ5$6Ti@GoY+|11kFQ1`Y*EjOu-Yc~8joPGB5C;JV6iN;rECF9E|1z#b%DyScP z>j$_P=QfhaRE5wzmp z7Rg04rpUo`l38Xl`HVY{9KxZ0l*salZDET(X`Qpdkb*pX@=);^GJklaX{8Jcc__k< zkZB~x{)SNB)oG@)?pUt*O&zhSQKHdy_X{Ayr^)oeqrB6w3v3ZNAc^W2K!jsRqKye% zJZ)NE$7ldY2631&G@^!r3QWMbvwgTLphNQH>6#)P;a;%r8b^SlHuEC{H9{db!mIa? 
zlSce5tlhn2`T@_w^RbWK29|gkB|dAq$k_tRY9p95Njb<^Y$r*xFzxr zS)ED+L8`%aurP9Q;ynRY^6}!FxOAAXXMva=I8gUIkyRSE>nq^LfFyoa>YM@LS$=+D zzZ272mB!YRhime?kn>t2*>Cy2Y+zZ-y}1>9!g1h;OmT#zd5KKu^6UvAl?p&^*gk_< zP;pPWr`QM$IuG%Te^lZZ#%S<4sI4ua2r>!R^9kUt{GWp(j^%IiHCreQjwr*Zc}C2k zTGCi}4XqVB{|pmBw46!#(l`namNCNJqwGAN6!qUw<1sl}e{dOZ;x-gq(c?7aeYi?$|q2_L!%e{83f6b+oRRzq9uCPMT#x@Eq zQFAHVS=o<$^7h?#nZF7?JTHjT3% z{fiV?)*M?gIU2XT9u+ST?_E!c=t2_ruaBzTF)zjxk$~s6m7~e(mAg2cWBOJgJe5BV zZ;ndKbwytrzPA>+1pNG-u6^VyVYl}m_RbqB)b&RPJ{4_@6kF6q3q;muJl;iOi{Lp4 z%7Bs72^1e%i_Q~x%@!R3hTTfMj}mhAEgEL9MefC>!>0jCvU%FW_wG*Ff@B z`Ns$y*rR#CWY>hniWnU%a+Di)uv{8F&m(unT-3)cxMe`8r4U-sJVVq$8_H_ADi@cK z5S0FzRJf1%Rib)_=ez<5YKbA|3RQPul+{sU3u**aPCpUW*_+=fQ$gInf(prCUB-1S zV4#2r+)M2#rP;F0eov_LS*o#>UTIu4kXDlSrzABYnf@FED^&0;528LPq*DqDUEDS< z>$p)!P!QmtfEH0Hi~Kox)3))@AZ^OL`6Tdmsb`1WtZn2=w}(0)hCH&krE?ozJ^{ga z2&>@|9h!#Z$j1gNILKa#pntCH^6>SsyMNEIXDJVxccP!&_M}OE&7>r~ZD`&$AI~=@ zhUbdk7H0eu(FLc`o%Ye7h?=23Z5}@|DDjX!`Vd(D(%rf&x_A&0uig8|S8BJs6P<9= zI)R9xPKQ8%;S$zrBr{YJY^WsLTl#ZOn!Th+*3i!vUWX$SoD6=w#IUvk$GMp zks7ic1$FmE+aSf%VMF}7^$IsPyx1oRnEkLISh=SanztgwMZ9dnc!hS@4Qg_x`JED% zUGbtwzCDZ6=j3;JMRsrI(M4Rdm5kaDS&R;)FYqT^j>@TVoJ~C135^g6_+Zd+A`#iB zXR=uP026&6S6A*l8|Q3jf|$^s0rFzg+v zpT<54B~3_u*exhn(sUc7W-fPhO53*A066=oLT{_6+wVItTBCiHMP%lbOE^F9a;!(q zZx9h4#%3XHLuD!imzn#0_~REYqg37r=ATO%<-55D;pp4_sVC_71S zxHpvtsiZ69n+k0?6_ zWa8Mj3O~@~y^7`6>-IrI{S`>1>CfXbse^uBG6*w)%D=;gAvv?(ty+{nq-V||LhQ1zrJ#WJY!|c(W+a{@i>qf@Zl2ac)@E( zV91Rw@wUw`);MF1J$1w(w+ffWEkcf4L?5TcD9vXCnSq3+fpgC%nN9I1Htb*jn6I1p zD6zHEe7q>BU>l04GF{YaiTd~yhUw4tCT*|^Q)~y^)u#qmG98>xMwF*xpc^GDdp0>dPbG>-Kfq$EhMkX)S7=A{0**S`AG`32zH0QrCV%55oPL+pn@x%^I=l?6 z#b~P12M!BMu2*>CCdGt6?;~aCX1tOQE$X?;^ZBVrDU-Vn6se&A_1YaKYjRm+_ZW0* z!RCPBBTNP}SYrWt;Vh?!!L6jb9A+Bk^(t`Uv(C5(FDxXyBye6NctN{w#D~nyI~ZDh zsDhVcR4H#1jcCvcna`kB31S&MozoB?Rk5nw>elYxZi``&N~W(kwy8kJTh-dVTpLUV z@Bb3WCL}fl(rJ_AUqhR)LQ#}n<9mL@5YG`Jk!c%IqbTS`dMP^}*}S@#2!8?TYYt~v zzo5Co>7m5*1071yb6QrwHD#&HZX#3GSB$8%A~T!k$5zFU$G>N{{hOS0SY)ZO8w%2# 
zB^j2Kng5kEGr6q8yo*N(12!sSOz=GBl{3DIHrTZ%TWev9ac6fA_E6d$KAugNX3{d* zY+~@;nyOitChAGOfcg*FHSi@ap96i%Dbsd$ks`GtDt+=e#m zv_ja@(rW)z%&gC)a*(5?f9|Mqgr7z9)J^E!{5`R~&`-7r?{*{dSTb!7{kz-LkHOLmVoj>liwnQ$x z1R%_s1$Cpr8}ilFN+4GfqwzX>V9w(w7?UEUmNdK7r}uQU8Aww-Hk65CZB!!aK2t;b zcC7p`YQJ1*LpHoUxP&#C3UI#Eb1Y2;S1)BH(}ICE+ekMc&L2f%T$sVZPz2L<#iG7N zdV>iK@f(6#c>k$h1dBRxQ}7=|uK=jA^4!3Tb%pkDL$a=U^e`3&Ouc)0<66eC@2;%U zvb?)c?dx`%ZOizM4ZRP_zeys4@#AaynvPI4GNCkQkgLLM5V`O+osE$^qs^4D+NIhE zJC%i5Yxr~>8%tm&{62H<&2Fo=t(n*~P9q5`)_HglH3pfFnVt2{qfot%%>kyN>a*Gs zN+5Qu1xb!z%9@AbPP59i_fHU?oMt&nRV<~%T10`#oEWHtYHLpB zhyrhJg$g9x8RFK-DQnMY-QlHjfBE4!c5`bD)^<0Wb|dW>YTWC)6m6I-!u~wSsZ7D; zi)&>YjlQME2I@I?|L+8@c3?ij{~<$Vq$oc?Ay&zTL9>hlxj~Muhw+EuQyi_f%tW{U zSWs=S52i-;jQK!)>GtiyiE@UAzf*T3W^Dg{wxV^dn3i8$z6L0qq4F>4zO`ROdyE2Wt8@IXP&4qoHn2V6^| zasd1c#S9UwEOY@=pa!a?VHiL&pI>wtma6Di$t}esw_RFPY)a?d24?MM-Ik&23$OqE zN>1*ruX{2+6tWH<^_{=4@!yPR=A+Q{9!5;eL`>wvjXqB~t*%^#L8Id$agra-)|9{L zNfy^z!?Vq!3k2e80OgJFn1rR}qQ3ay>?3MA6`&2C+*wtV(n|}ce?6??B`-poF-nK4 z4OH?R!o_Kz-*u~_{A}j$+~Eu zGtlCd+j21lw|q(2;Qx%R|vkbD}K1xl|}sw*?9D=N?y$qs+C?QXGd8-ykUHVfvLZ*SDmowO{pzG zf`NxSO^SIJ*a=%h?hyS9ef)8&0iG}3^ghb`6Ovs3NGFtvfJ5d6$LTTWXwdBQ^qOVBP!t;*j#{VrIAv4~cr*1M*e z<$VZvAJDTX+@xSVU$RmGG!}4pziD95d6>w)6E>-PnvMDGc8J(1zrha`#!Y-eM+bMIIn)g3e(e>8l(@t4Es)_7geUl zkkXjtBIu~Bi59N=FmK5D7(e-#%QN*Qwb|v6QcE&z(e~RDY_lUZ0_FsE1C!Ffp%eaJ z6#wBw;uZnh!~G|UZTP2$619m)%o9;<)r`TNo#>xcp{5lt<_JMlKSjm~zob3^954|| zfeke4b;!1-NCZ*ZLO)(uVMgM(3^qLn1x1SKW?cZ_BP*`I)~8@BcX=NYXflW(4{r8qY>PGBI=IQuwY({(=B zSk=Yz@-(`cE}+P&Z%_#NSv5r7Mz_)}2LB%rAaE1d?&@5A)3K$=?+Dm zk!@q!OY4NdSF$fT-tX3pO-s|=~Z`FpYM zu8$Ipo)AlZmUJDCt~PF(+aPL;wPfK%Fa;!K0R@#0`6+8HoQCIa^ExJLYl$&X;D^qW z$y8IGMUZV&fIwp7WjDoXWA+5*8Vbyr>3S#FU{aDPdCLmHq6a}dcvspn~JcqSG~e7^5`EAD#R?|NJ5dQ0w- zboC9Het*WK6!DF`dwAI%`F&bEDUy|SmilrIyxtnTtr*;=-b{u&bei0fzc>B+>1Dx9 z3p6S6b_zTlcfAh*z?F#LyZ007_Rz`;+nXuN;SlUXw*T$+A;rb}l}H>3s)+U-@Q^*U z;4g>@z4b7w-MAqh4 zVe1}nV@h>>Pk;9UpR^D^`!nhPvhlt}^1cloK-c4O*HaVle%|BTWpaoWMl?A1s?B&m 
zBSP}F(DlCmY_qHEZRMpF>IM1HJ%O_7ROkI^tdTPd^&%S(QVOglnW7=88bX_x!MoVJTx?{<{UTZ|I|A0D<(->L{1&Qr5C5|6x)wV@l`Z(T4}Y-B`A$*5eLgi zzVtqFw>p*%?`t&Q^>r9h40OFY3*2786P4o}*;`kH;F;b~6WSw5CJD=rA=96#-dB1k zemSpZiVIlO9XQ3Yq!rBVdUsa2x_SF(&0>>T$J!6{A^+spAX_s$vT{<5)iW+_A1C#& zv{)N-R(xPzDoXU?>t&i%DM7Z)Z|-D4QKtHx)WX)=^p&8V1c$23!GGfT(=mF+cxDGq zfB5o^Lr%J~Yr={QdM&pkeF*T=p7qu_gxAhJ9B-#+Gy+-Lv+wThu|^*wcsiL*p^W=# z!-)n^bRZXglJAKrgVkZ3V=CEWUxS!q%X-&*dU-77Eg83}^4rfG=$6k(I1KmrNwe5R z0xS7$Dk<>}S=e2T>8W`{Y29ys2p^AWnZkRB9F=g~awBUCXdt|j*q1tKo@gJrz2!~y zx`0hl%!xWvQ}DPC^r=8e#(7R|H%FPyP=bJ>Scm$m*b$U*=CpE<;}$ZWkM0SXkeJGE z48I>o(4SWAWoQr>uI(@8)0vsASYM_ZIzMN};{;=HruGJeSqL!>N2}#2uViLd#-t!C z>1bp(+Oke=4sz74C!_xO$yplg8jOmtY+TgUDedtw0QUnMZ#VFMTv^~a;)Jh{yP*s0 z`}Jf}tETSOKP&{Um%l$IKmI;>KB?OFx!L=$MyT*hQ2urR`B#hGwM?8xHR}4Nf=(;4 z=F?1{=L5_iDDKF<2IZ!bbpuo+$$3jsp62JK(o2n*`>>}s)S#{`QEA!t=V}kK_YY4` zkQ;iu6K-w5#-~yjtQq7YeY@jbk{Vfn(KcP|Z02mxT4yWO_SD`27BzBlqdNX26p%0uyXBVFNr;*Sl)pLkXvZX z*N=Z2QSskmFVj&jY_3)h+OFjo;D(%9?gO@FdyDD_)V#eiqwEYwk^Ufs91DA%5Shw$ zJiD@Ay}$&Eu2u zX}MZyZ{%cYq>7%h4UIapVuTu?r8;e2yxPl6HttH_PFCh>`f8UM*%RZ=9n)N6ETK&P z?+f<++;8z}HhbKZEQB9Y5X+Yx6vt~A(bgSc{=qb6)OAuu+PS27$(TGk3^jl+^T}U- zlkYhhdF@lTr!n9DAniGMs%giQROwl>O4ueZ^CN7dIO^9-Bfda1l1+ehI;KP7;$^at zX}0{wHibiV06iWLtit*bxN3|hCwnL7uYI1iJ*B*n7BLy|A*1Q6DOCx0u~ zs$p1x;Vo-Ce!p>fSO5T_#>3+&5KnNxUi$6V3Rd4Qh0BYrTV(du9qO;&m&D0t#g5xw z=VU(`kA=x-Gz4yMEM$_VXQQu;YXT&D5ae8wlHS4WyiM42Co>`MZr$BzrOCdu$^;ae z`eBi8QVs%+gw^_vtGLs2DQ)ks)hv?L+3S{A2kP3|+Wt#0ec^y|`DgI6>P37T641FC zx}Mx*TUy=O2s);2`{2i?*#Xtn3HQzU`jN#j^%mK)tbU{mW~n9q3Brx-V@R*?=pqPx&wMd-o?0v#EbC;~uPat~lEeEIdcq5A-rKgFrC;2dj zM>RL)n0w}3@2XjCA9llQFL#enL5*_LNSe6nKlH0-qpz`MWE*0an)K!{nHg>4lAb?( zW)vHL9;F4udB6$B)vtfBRlG)uBBvpDSFnCidXQ|JwGKQJcLc{jr4PX4ND^!(xbz){ zpy?%(cIo?)4ctVgqU0SrzP-RE6%zP63tXXFGIbf`B<~uWSD$^}oh6PXR2=u7KN3_n z?!~Ns8aXnstsj6EQ2%s?X3$$NwD_S=jX|0d2iJgk;)j!zmgD?kXfT8k>!U|NE6z7X znAe!2nK~d3Zc;M!#(kMeIP(Y%QDPkG{7ZEvBd07*5hTV(Hc%hmInTCkgF+EgsA zAN!JPpIa$yOU-mX^eE5wpl~5`r`6gsDCcTB9)1F}FE;Y3%^NdBxbV|jOP*eQ-#yXy 
zhZx|7onI_q{q*Akn=)-85I65CMn8zJIgtYqBLG@WHX{fszVYq44u!gk| zddEJAUbasQ{T1K-;Ah_`thf!MMe~(pmj6k?QB0yZ=F)rf2aw-{Xlz}UdoruFfqHQ# zOA8j*^5;|3{~hTUttS`eqnG99*4^({jPl;<>s;QY;DhMzJ&hVLetgp@x|UOyNA1oH z&+?Q_h^$Ah>B9qGUf4ahFSAkp<Q@%%nBi%iN(ojn3LlxR$YfT9eLxMk$qtyy(`?8AbSA;o7(;9?vH~Q_SC0!DC zDMNB8BG>#eZ`~550m>q-pe4OOiitP4f9Lx;&rjmQ{V1ng$6r=>nVXG;6*bvI^|Z+Fqe^~msSd~Z@nkxfIAVIPr^ThmILQud zNp)D(H}BRvc082pun_z~1@YNE!M$kv2k++2F|_b5+Wk##h%S=Rgx|-%86VX$LEJ&$ zi8LU=jgHoHOuA_!f(edynG)>gFmbpUW_hE@t}+dR-TT=Pv<(3r?m%mc2>->Bx8Wohb|6vQr1s-Gy2+Y84+v6~&&;9l1>g=g+B%DHQ_@En( z`lQY&!Ke5aeYwi~xxk;9ZwcP{X)5~Ivpw;S%xKeYQeU40WT{S&2Ss~=`8LB)qa5|% z%Ag8FBlo&9@XCES^_EyW? z4vv6|Lu||bpD#6{E|ncGP$?XG+(5QU>Hv17KH&L!pg!++wD0=&p4OxC0D3zaInHsq zzHKE&_SfqGQeAqMMU|Ur8v+2Lbx^IJas17~oCAd39X$%Tx_n$CMB()M`-13?x&P~a zYd1W2LW(9Xbp0!w1GY#vTpXn}TXweDTZeDT8UOg$(EcQB8Q6B5z)T}C?eDj?6*r>5>7O(C4$*Q_9_m`Xj$HK_f|z}&dtU5FDAjXcQ9EGH^w5fcD6MxG zM|Wz<%0w!so9^E%VRaii7+ANY-{@Omner5U+!ehhXUKRaR(S90un+Z5t!TAhB;bF) zs}64ValUrz`?iJ+VbK459p`&C@fG=m>4Hky$EW5oZt5-gz#9 z-Kry((1x=E`%eZqfxtF&%z0Q{Rn&+pDU!PUMvnd{h6_cz5^<_nZ1HTI2}yA!y-a&@ z?oun;oFHrLBwSh{Vq4QZLH^_~^xMFy&5#!4nhxAyPeT*auFmd3ZvK+BJLaPpXWE+t zAwE?Tdg_zY4c#%)w@+57POujM zA=O7zTC-e|(y7xCu2%TB)+E85m(uB}xzPNggJfJEVH71@7^F~qey8rhe?}Sex z?(X}t9WXAyw0kaf3elRG_-Z`fDlNoX-1C>Gl*d~jPJq?#ivQb( zA{M6BpV4}D#&5?wNw7tG8&iT;gWf#ny|seQCQmlU%o|KMVoDH?6BPu)w_VdZKuRuM zI!fhwd!FwcC-U5dgXL0)M3;i^L01jjE6-C&3D7uBC;kf6&cW|$aT7QSc=(_^IB+ke zkI+V2;9cB2-3f+N>FJv_Ke*liA=!HP`ZkGrw~yXItdP<)LW+s4FSwJgxQV%{edzB- zYk|3EIHPmR%@;Z)wj>O@?sR*7O*~R}x;VE|Ds^@NXPTXqzPy|4w1+BV1cZJCEGZ5; zz+8l!zh4gn^QC^z@y6$L#|*dTrrshkn9fX6Eck=mZC6Lz{F=u~Wb}c~^{_|A6 z&aO(^?24?#vEC&C1wlP|?eyGlh9HkRL1gkwDb&Gl)wrtu9fI(P9+_Wu@`iozN&#Et zRkyx-Z@wdhk;k#EXXyuAge(JoazpWRKnPQ?&FSeDfpGeFlJA-R6akAy7~Jp$&d^_Q zu&o z`{y^AT#vY)z~g+6V-RONh{6kxaTXm&5p;p!kXo*+6h=H*uPSq3B&xTj1&1oI)KI}z zns`JVi;BDSXr=6+y1cuOCd{ZJQ*tkr?aK1fuEpDpdr)s!)w$J2uDhV<81OGtf2jO! 
zo~%a5fwrt1ps+L8fWA>f>-Qc%T-~)FVTiLJYumEGamns}gN-<87cIR+yZ&>(Z3?_mgj(|$Ro(Fol_kE&XWD$?&-(aJ5q$I%p z2ZN`v`BjW{F2@ikK@=4GSHmCud`+J@v~(-3RT3BWmrY zveAPFihK2cGYN0dqmEv#VCm9Vv{r#H+_Srl zn2dfUpqedr0EjxEY~(YKZ2$0|ta_C*I={O;pN2!}-OA(vfVix*HNsBm)b_i0X-zWG zp2;)1?2A9{fwA`#W`QWr%wDL9#B2Pkj=m0_ZNi#l!P_q%q97NKHwaL4^;J-G8VDpg zD0tY<^_mv*XN}p6PZ54cH4j~uYOd7(Mx~x5@OLNh0xJK+J1cwJej3cIlV3X!5*SQC z(JgqCn?${&PWwrNC~=BCgB5P>nt!%Xl_&=>5)MV-Qx)Z%Uz%!j5W6cRXaGy)XYCc- zr{~x65`Gz>v8u_h!?V8zs_O;b@G+*BN?ig}o4Hm>!ml_9xW9Ekn(ltee$5903`;h+ z97@g_fN6_`k=PaakTOEdq5qjpFb@ASn>_Dp{5+|D>IB;`3w82tb!>dy|29jy_=xmw zgX$J_4^tm*gqDP}26!_(e`Ft^T113d;o#12&YoAOs;0aAXrIs4Y~-WhwE0CYxz-&= zF{|L$0*&Mv?$Pg{(wG>)9Lu$}7HUVd&h-+rM*JtC1q^OFH}>R<`7T&YO)te@>F-xk zrtK2sP`4-C^n*Ft?J%kKl#}V%3b6mtcWYK};_Ml<`GRlxD4#etGzHfJMfblrvxIOcSqTYCO159E6WgQ^*#5YQ*(vd|1uSkV)#Zh=4X% zgFa~6CRjJi7;`51_4xGq7@i=ERI~>boF4b(c`VPF_d6B1Q7^_RJvsQcLilZ- zXIa$kxvj2eK1%`*5F_tyFOBGbhC{rd-C1RIF^b&#F(var>VBQoGiXqN z8c&4nugTn4b?B?SH{dcx605f}AhfDIvPGIiguCRTH%A~p9<-mtkhJZB%USS^$pOU7 z+iCd5vJzF0^7|@MkH$aVuFt7)(S4!Z71CLCoP8|?q?R_~JHm;)aL5r`y?%nEl z1F7a5^_4geKk}#1Y*)0!K+P7c4-9%L`aK$N zuF-MVYz9l2S@`o$opc-U;-&{GYTwYGN*U*NdS(#1&jbvq5=9r)<j7e{dW`CS zeJEQ?D-otHC5m@xwRbiQuyc4*9V6HZpgOA;#ad3+DdW?yLZwL&@pm~F!GgeXzuAW* zRTP`!%KqCXC0IsK|yCaE_@ES+BI@+p8bD3W0Qe{R6U(WN77B|j_1 z6+0qCdS$jXq!ZM*Nte?@L3;VOuU_UclP=Qb$FFw-cr?BWBrMaVaV;%3b;96W zjxZIxpJ6O5EA?N_h4w<*GcP`12(f zxm0T79fQVk3q;~Mx(>h+_sv*KoO?vS@BLYP4Q@9na)&Z5o_NEhu|vt*r2y{Ny`vdQ zHL`}-VhiF!LdNy0ih;}EHdE`t=rK|YlS{Ef|4qSHfw*JZe%U;!zv_3fU|qG~thIrP z!u`-;&ieJgn%$`m)35yu&dlUYhT+VChp2OmEH+*1dl{6rQtzCt z_pWkPY17r!Ls)nZ1{YGrHcg`i5MLcX2m)64KZi2+CY=9=ORMmzKreUl&eTB&)|q0F ze6C~TY1&nKA5gJ%8B)-K%Qhx*T83*mXltRzpR3(J{7mS(DXB__p>OD-eHLsT8C*(D z&N7=fMS?c>Xbp4920@p#T;!;rSbpMV&WqJx(9? 
z%8AVd)2ELcKbxEDGild1$nK8^Xy4n`0oVMXOj2(_c;L2zREl9qSI?W+RTWh09)=~i zRUTK2ZXa^oSd*4y6YtmuQ3ytPv>okj@!$*v$9fYH_c?9-FmrBi1tT}j-%0!^L~ zTdedMoPp!>%YkcmsA{|5fxYHt7fPZpin-pxqvM16hbAs&TfaP9C3P6 zJDz6d!CDerSy^7NQ&YFB5%{sVSiEo*4 z>+`OR7HX-=C+-?ovr}@a0>{(|G2@Nq_B404=zZ^C4-@m_GTAOw$%H)V=XPmoYwSLr z)LWrbaK6QUYahPK!l&~CDjDu>`&K>)IjmYEgp*v0_cwMb5dagZ&3XI1>j?%x{Oo=2 zuqY6k(Uj|AuD|!@h8Yvn5;(QQ;xO;;2f>yv)y6rDpRU&PI-&EeEksCX0rYIksXcmN zQlgxJw_>gCz&VKR({ii1ybdmLxJ+TKtwLIDN=xm?;WK`2kZWHLP96{~YFWk&-6fZ| zfaQt8SjseDkaVW}r)VMbj?r+%kce9@(@rCR%PX4UrVNN3{6CE5&6Q6bKEIe|>2W{m|D ziL2wkymDKgG_noh@*Fzedx$Dr-lQDi3}wD)C1=Jtn`1s1iD6&)$MPOygl&! zo9#4qOn!HNU-#=Ae)=myE?{;`h9YMum3!j%nP!KhM@n1I@2kqV`)9l%5kj*xlSqIx_u$ z@ebZN{(__{i)K>kZ95+yKOZi=|0t6^e4II~H=ebdEpX2qQ*WcAKYZk6>R+xZEAmkN z@npL}aGVL)1?`9pAotd``*shZkeF_+@}CG~OLg$hjbtTf|26U@H~|%>zpHs0+06~TnWqeaMU*T# z&3=FE@#+p?i|G39L!J|!Zt~)R>OZS*0wvPDkLkm^zt1Y;=S$u1+l^sddkDLbz3}l@ckhtk3D5#TC3i=qU z^(Vq--cDM-ILsKwJrX!cMC z#*B79T450_f2V1Yt)l4p$4cm%RQ-=99g{*H&ID;^bOIz|8QnY1qIx#o242OhTWjwi zrPZX|hN;5(aY;+PFX1%|MIYHMPqzw(>hgKg5i~0I8 zg0_o$!%NxQ^+@u!l-uMX_uGmE^Cyu(LMQY|^_8Czlaugq$(-J&2;U^eNBH@qM=BQQ zCAn*V|88Z^i4aCNKW+^Hl8{#?eL)p%JwZB7X!%md>mZ2d!RPPzmB!lY!}QxN62S>p zt}OJE-cw{5fp3wr!LKTVrJXNFA?I*#*iDl;E89R(mb243<-c84xDVi22G8Sp&GdCe z4pmp=t>pG#_%D1crb$zk6y;rQ_D7I!hd%=3k@`^EH8I%Rp!D90fnS?ipKuwG8VR>x z9u>qX9-uvm`r8SZW_Ei!A(V!bgk^L>ovbO2+9Kh^g zM1WSGuV2Z`S<~9&I_)l+j>`8zDQM4msTk^*ce0-dXPCP_bGnO4TCj2vmtfz9-nWS) zR>!g8L44YP8nzkP1Qc{F-ti#6Na_;wGc%MR;rR^dNxgUk3M{_GCYfg{uV6iH{p!ou zO}^U7R;*PqdO%d<$CI#etWs7cI4E1^$@u9Z^n!3x*lfngKb18`(0shydD_Go=cxA6 zOpK!8dlY=RV+^f>KU7~AADT7d5=*&vA~UK*Xt+!ZMKc;{EYMAVl8qm-8PGV=6@Cmh^?BQR&O4%snhL4?wQ1@JkN>AMtJNv#AccC;F{ zxjmwGPM($6f!e0@ao1O|1=I3tQk$!s=l6I4qo_XU0RLnv5Hm%1qm}o$2$uapR90T$ zFh(Az1q7HV1n8s*tpv%2vsnK@;8Udvumm z_`~8l7_lA;4io%V^u)U>BEYq52vabF8vxpU!4SnO`NYK-jcCb9tA0L&vx6pi{|7 zh$;}G(p`|+4YXTMpdYW{-zn+$Fe;G=p1#1SQ(lVE_7s9Zj&c%}o}1sYG{URD)YyGb zTngwnu-r_{SN?bgJ-84b$;X>+O@*N5n(Y$-9T$XVQ=V9rRkMW)Nr^|;&>pU!%k8mq 
z$y5k$JS&RNLO^x113LU(%~LGZb&J{!@LZl;J#rG(IH;)E^CicFL_Oh8~GM=@*)HpfxQg(L~on;_`mxEcpp$L2{l0ZaFL#)ZTMqCPolIO*DIsc@` zH;~XfP!T^r?m^**h)?r|&)^^7GK4G%>Zo1h_n#FVW9S^r>}Rjcitxp&M?o@pAsYVg za6wu!8q!}yav{ucC=uvT=V}wnbL!&g-XNjJ73mmXXt>c4@sar3;n-(I*zJK$%QM>9 zUu5lQY&zOBJz6LAiVZ2gm>Or3Tk~$FwFa;1cfB_ z@h_p;?)i>46{?~|ARA8LoE3pEaTOO_(oySRyRobMSkC`hxN**_x!N+=I zkYS9Vph-`ot~cr|JWm+>_diEgC`YvCBJ!*sG$AQrm~smtEnM`Fs&;|S`LOFEzy#_}>le7Z-x{yT~9ojG{MLe`IUu=YL z`%`YL;84o+vOAg&GXo2cKwa)%a6z-HJ+HGBI-}nmVTULAlsm9-9xx%czz4XIoFZzZ z<>xTjE~8SG`Ur@0MIQjr=#}geq=kE`0S6E5oyjdEC!;B`mr&2DGZ;p47k%`_g!UDfE6=t2z~HnI zdd6ZoqaS5%@PFK!!9X=PQU%EqLKNUe+e6a=z1FdWI!+ul?Y@{{BO>>PAyLVyGU#E( z1}Wowg#43APws07mX&gnlyKoqqx%Dl_%AZ`N~SZtmggJdB0rIYrNh0OHj3iMh5Bna z(fyqz;T;Y8;_J9e+<=86p#cO+pnvVeGPEsnvr=J`u41SSnwTx2>>!WVGcH1 z=;SB9pt_QEIn)mvHJbPh?3GJzwM(qz>&|lOfWcG0jX_EFj*6Wouud{bHDOjs zjXCv5Vg=6yXP-xC&U6DCQV()3NDEcX$G(;w6vNOYfzYCfJu9mpp52Hbe-@Mwg)A8E zfeaHHm$(pQ5OArPg!u5#rIx4k~P(^ObKTC$qHCs*Szrr|ua zbwaJc64whrjwH4UW4u8EXfBSVeKp}KV^4mcD&!p?5anhmR6%HY^v$osZjps1M1$;J zAZxNg?90fP8@L}IJJhe4i|!Ix`k@*w$^(tfr~I<(%P8~Eb%q)a%6dA<2q^XUmc_*Z zahq%Ypy@@v*LF6q-lcA~au$tgj3W;;;#o>*568GmicX7>j$?FK;Hdl_G6|tgk)!#n_Z(YcQ zrg1#i^jXUPaP)?lys-SrreY}VHlw=a-IR4pvXhYCIzIU>1xehdB*K4o2v?OU+YWi~ zW_(GT1ZC@;ljXf{09o!H+iz7Bladm$b({lv69@>IjOgC?~Go}{M z$2cRnTbZ>*qw~BI5RkAMVdfh(4~d*?XnB1X{@F)td=d0Q)5DcV0n08uV%OC1BX`h^ zu_$zKOGiDLBA($wG$_?tH_a~w`YGKqR3t+6cnWS)zY>X60$KIBo0|~385Fj>2sYAc zI-ZhK@v~hk*0n@ZE8d~VS*f5uat&$gXbJc2-Ec}Wazbj)&(QB8Gj1WB~tyRKOxTnkZ zPqh7+3|PIsZNG)CLCKd9hOdw)OeyB9mmX(TkjrDQ`xaRWr5Bql{gtGd-8ll*=;aU2 zKD2Ea;vOh*wDN~zY40Hf=WiDXtY0=|`A6+o^dq9dZooi#H#ev>dk>Iz*&VLkSWUa2 zZn!rCf;~h<3kaI*5K2Fpulfn2CfT~vSv6i;#G8akK$Q3TY=_dj+^n2`qL3Qhkyy2g z6u(D|zu-XUZ>D8;?-CKu%6Joz_@$Aw)?|-Ci;7Mvh3dfEFCmxcZcLaZd@wrI<+6xg z1a%;e0rzJ*9rvw&l3bxqXyMzg=EV+Nm2&zJS2MMI-bO&xW4? 
zr|lW>xWCVRL2fTOJDiWw2aEM#4Y}}Fs84OGIu&=1)+cD*TE03?+n?ND!^NxEc}R~o zZ-nnSz!qo|+VRJ^t-=jl4X)+#syXq0@fh36?XIsCLH`BpC%VhecqOKlO?wG($9{2( zx6ulSVBgFZgVR1y!EPFkOr*|y9f`g^nb_tj#iS(8#X1)Cmt@-b6dt@x*cL{X)u z?uSFf%&oA|L20Vk+`@r7MjhDJh+L7EFe`-PNa%?U;*F}LvHvfDwsmmbe9Skb#7Oe& z@D`WhY&8FMZZ~%jkFmE$jjUrqBv66`9aGcdQ5b-D(%gsjxBOvVOK}6Z4A;I+Q%dKp za`3nTxkIp6-hK2sHkOK~!%Zn$Ql#*02hl%6 zWT%dVv`Eb<2Nw4H;QT!u5%@T|fnoNHze0t4P7p4_C^F%E6@*(s0%S*TvE9H&RWmM1 z7|No#so9UsXPem<<)69wgDhtIulU%<&o}VH!li#$m;6Q|z&k6-6M~{9pK*dAr${Y> zW@s)~ajGN&m5eHsAh|}oQl0ORHz2z@ej(G8a5!g0Y0eTCNnUt@rXpD4YkR9s2UPjX z6klY9huum7W9|&OAuylO=<>@F_PL(uaP2m#JhNg26ri6t6XtT<_K(_P0AmY;{*^M7HrUS34=# zL1LA9zz$^Du9q;NcIP_w`iSY2 zwfB)liD*n z!J|hPigGa#0oH+hHIPV6H>(VQodWI z(yu-xrqga4NuJPcGh3u%{P?g=7vs0={f{jnLynL_58q<-S%a@K+m*IJ6>bBxRnMo|-lF!tXA?1FAX!m?7oh)TA(YM&A z4R;Sku(eY~tJa@s&cLD$NdZJ8zzF=(=sprXqVG3|Jy)MObF=kfnCxk5Dk#kn#Rvk* z*~!{6-_$F`OqkYhf0^_9v%Qo4lb;T=1&IP&nHMZ+iMckxQ4@@1ZPRg4xU>x?9eMxJ zCIFZER=l?O6&x3)Z2eu8JTqAO>mzMG3_cO}6IljpG~l&OCLE*9?}LxbdXunC-twN( zFhMHFgWfkFb~2eG{06j1ewl&3r$Y+l_0RbW(=n@GJhyH@@7}z?uHs~#k!OLgr@#F7 zL>NA+%C4aCrr504#TeYFI?)>U`k=Jcq-+TxQ z-}7f=ShEy_;EBo!B(s>-X>$;`b!>RSlPXLF>5umhJCJXi^YqAbqc<4OgT)^Ssqj31 zx??!Ponv2g;ToL99`vdjB`P9PlEnI0EBHS2b0T#3njN~f;N95|47v{wWzTY{F%-q8 zd^Jgu-(vm*_VTF&m%Jt|@dPAfsg%(_>YsnxG=gY~)3EUYBIN6mz*!Ax5;eaXq9&h9-aQVekCBog87~LAk2zq*PEDjeD-^FdhTo zALkC=j(AWT5V{?$wwDtJMyoZIFRNGL5naO2aIX8%hBe<1B|ryuoM8tE-Khec^0SPz zrAM0OWb#{wP(dpWHjpz?Yb_L(Yygbsn3c+HqV&^;U}#I z1X$pdzu_i^-mECFpT?llshUZS9oM&fBVb^opYH6o3QE-tjxf`0tuN0ezy9o;1xcW- zn(2&RGE06b;)%8maNe9*TGUs9_hYYoCMO)h)Y7EyqlfuO~$ zNsgOeT%t_a?gQxDDDfe?8NV=$Wj%h_-*7)1(CyDF5-6lH>&WlQUPG`YyY{+sVw{{4 zquS}iDs=-)V1M2dFGAiwo7N2D1&_I-j`tP;g>r^%#%j2K{rI7hOUD{02}epkv6%-6 z*vA{qWRnSoRO-HjawokRh7=It%_j}8*qgzXsRco#w?+4bX^VTqEYG+Ht+yoic@vpk zTAFa1iK!Ez3S>e+)g6t&2;)I7Q&cXe_+$Zg&^W}LP?piL>`WM9)+5=hvCOE*G}zXF z2JZR9$)Yuc0Zp8Fl_)$COssI*y@vnphv@YPms=34H#-0cpgZFBonCi-2s>{4t~sH} zMGuLc2Bts{JFZkE!XC8xC68eC4o)gPqSiU$Ry6RBIW$fVE88``?2TGl+x?L@9YO=w 
zYCCD;6zccxCx7j|sF4~48gbDE{9OkUw~1z88-;r0S5bsmaHD@wE>mrH`>HXw(lY3v z3b4AL-_se#Fr&H~QX;&@UX+TSA$2vLvU9xjY7ZQ6){j)mHANlck*m2!oCo00Y_`IvOr{6Hhbr)9K_Dz^A3a#>z~v5qOsrx_izziP4J_FyB=>3hSwTvva=i;ccLOC5binDq=?;5 z64^PvW?!k5?nk922Ojr{Fw(I%h=XW1b>hccmf)ngv_0lvhXV#y>7=4ei}x7E9Bogt z+oSuqugm9;^NyOV3{8Are;5L%8Fs8b+SLZE2b8B9OEosw5Y5NGLz`soMX-xz1+c(1 zn7v%C3XbahTGdvC+UM*$e8hKpa&i&cBBYgcM!gR?!oY4WkXzZSpqZ&v zXdjy3f%%S?FR$(|0U@aHgO&@KXo8Uc>a~MJGcYmv;!>4OE^ebUr>7oRu30Cs_{Q~m zU;Rj6I>5(cYaq^Zk#LF6cgfr6wsDYA$FaSX@tyVaOs*JSXThknMGQ1bO@rk$~loXpVajen!GxK%G_^=rnvWq^;9`UCJxC zaNvod8D3ACuzpiuD6Ho^4n$>yhgGcD#gWQF3irSzo4mg(+EsH*U2l+CPhv6Os^t*B zrk-Zn{A$9p6Qmy816jwln9^P?uxU&iLhETZCD{=Y6J_HY){lwpH;qV>y5V$AX z;o*-;&YriuXnBtJDASf+a4z!jq@vqVxrhx+P*)mj!_e`K>;8P|Zv5D-$zE^&;G`ux z-y6L;RmR9HU)D?n_Tgiv>(SgA4SRD{H3OiFO z1i{dfq$$Ap_w^2$Y7L~K24prLIAb1Ga>4zbbx6eoJ@b z4s|BDQs71_Slu8;j~ri}B1mW3N@ ze#7y+^U+{dakqW0B%}=p{&5sk2q_GOf7l1*+PE9X+0!0H$)iT2Z(qM0qsVs657)cE+=B#;)`njE`EjM_{)N z`m8sTWbcBXtjM9i^%N%=?*wB;QXYufe-IBt8;Auv)9lM< zFAx#8S!jjoSUwdGBVjPT{6ZVJ%7mztOTS^|akyBPkzOe_0-J99M9c#veV(#72zQ;b zs<+)mV*xD_;aUGVf;*HoFYqh>4gy09elJ!={Z+-YQ7}<1pGd`i!u{00SN)YeaN3@? 
zV=s4YKMwi*qpWA<{=W7jPT9D6HA{JmlOfT&bSf-wi%OJU=_-&N)ME}YPetpg{Ir9= z;?QePjH_n{*XASl3GV-C2fT&xm$$Ai8j6a)qnSoF>G>W2R0-41il(Q}yL8KHaU zJ;kehEwSN+*BAKvZJx;}=i8UI@|IFtLaY;0OmQoVkfxXi0Zc9nN9ofwnK1BiMwLzc z;?EB>Q$0r*KixMy6kzNMvyW@n>R*RNTcs|xnJ%zc=U`Z?YUcE&JMx|k-c&?{78GcV zkAeiJ3$-E>1OLw+=3D`r1h9#b8DDD|$B0wm?-zo7(tgj-%;XJ2z)l)<0M* zhfVaaHhduM`FFT@dg<-ZK>ilB&D(!m(tr;elxYm#n~N^gm&Y$v^_5)4?=IhCB?YA| z3GF%TNYc>aNN3}6{w6SSMEu7KfxTjjxuxcYMew6ecDE?1X@Ras47S zeQlR!Mc(3OqE(&mC^-$YiVOLZ-hMw&dd*%qhpwFjX=AL@4;WnZRmgu$KSo*!!#E9 zj`uQgFcEH3%#o=95z8C8Ilvs#)CQDf7_b%)83oCHR*nq9Mn${OYct*${f0l7YB&$$ zTy`JBe;*u6_EET>(Pz%svT*7d0HhRU0GLE3k{u5Y)U{72zIA=KU%| z$HC_1XJ)j$jB`19ixqlEXZy15*FU;6U`jVRQT*Iyex9#`^e8pZa1`4=%#W@Eym-M6 zi*;AcuJszV!6zIu$V9&P)_EOte!3^1TPbFCUY+Wz4UmUdM|~!Id&%h&2l^O}!&wlb z9c0oYXAA+TtjxX``$Z+NAfj0Q(*f8i*9?aR@oH>NA%|fsE8aR^owJx84Po#|SDBvn z`ev%W{k#MtkNOiEPQF`;i=5;@Md-)68_z2Ec!(qV+ALC`)A7o4b0U2@1N zWB?0j1SfV{>?`ixlVJH7;MPXV{J6kToxv5S^9Z2ec5s$$#ynpP^{Ugj1JVeq+<&Nd z=mE~hy_3KIv?+AOFa-kJF-1azjssE0#uWHMt#zj`zAiZ^uad*){Rb$9Vd&UaM|+j(&0JVqwEPuck7+){I4NRq zy@)beAey0~7|+1T_g#zAiiYz`dLbDfRYuxK*C~H-o5v&M9aqm5p;k*>lD-2xKjaeY zzw*8~Xdew1;?8r}fOM=05(zz^GYavE%SP*KbFB;)_BicREW1lpy`q|9(lRy3vc9K7 zA7*Uiofm2u;V3^Y7+qJ>tX-K`7kbo2bb}(f3J7a1H`9Y@fR8#^NE=Ayj0-n|wo+nR zyE?j@?>Z* z=1V_MzlgN~7W;24{DZI$=`(`FiOQ)J?!$Zx$?oToBhdCPLfWdEB56IwA;R`vM|Ao~ zEpK%48_6HWCy3}f8P0;PZn~u~wJ86EzNj$tXlary$JT!`fh?Y(lmh2na1bdv)dzM* z@7uAa?3a~J#gDPI4nSdPnGZyihjy8OXm@amNfG`4mJF0B%c?HW{O!i?NkO;@vaM{{ zDN}Z!*o4Kto+I#7+^~kidbzIvRYx3>>$x;=gL=Nww=?WkZ`{E;untnR@Bt-S?h zwy1#A8M8EhuWCR8eLA|{*JUtz`R-JbBAOU#F@SoO>t9)S!d>YTMFu8@#c4ACA%^?~ zwrM~)uzrDqYJ))MJJOUIb>E=soNBq=55GyOaMbB|$(r8^yuS3~fWxeSsL_5;#_dIx zB^-+d9SRSxa&B@kEllcoB;lo&9Yd^AJaHq6UbEm%UC5M%?HWuJhnE+MKHF42+&2E!VK*IiX7|n3RL?Rx_t3-k< zuR_9gmml12hKoMWxHa^nStsKbw2(_^F_j)-u`HLj5p_l0?^wEPUJPf-B}=*PT2u1wv_!22&+SYa7Rg(o1A=8Eg25Qls)iC^3YL9^{Id$y?e zA70q&Vxpr(jbkYg7vbHwW8&|K8A6Rz8wbR}N z^9aIUVXHJkL36#t7kT3C-HZdq4A6Wv)kHC@1aF&|-PO$`OzLLE=IW=#H|JjfmwDiI 
zWMG5V$t~*yHkYsmMK1-R7r?JRHl{_6F6LM;xTA(O>}g|p&lcJ^!I z$cFa!j=u?B?Z$GA=?t@D->Xj%q$?zMdO7Tj<#ufjstgsiaYA=% z6a7xQNi4=44o&sHEdZWjs?vVcPizDab~E~Z2o6?Kp6G+Mj@tlF7ZrCtO2bi z>%G<@bijgC4Nw;=^7XH14n$KWp1iZ>mr`dLW}WYSJLQ9=T`m-7^xxbTCrDfSwLbcN zM-c)0gbkvms_Jq5e^=2^P^&r*ITiequ(Ud6Cq`>)OYM*g+pbYnB6jc1Y8rDX)<4^dsnQGFuy?Z zB@>1(%}_9bztU_%5m(K8GO^v;D{K7Lk?Sg ztZM3_UEnGtsNO;CZ;s``eKGU2mrLnI0xAPnx6rrd*^YPmT;4(UJNT708bzxQ5^F?6 z5*_#1+}2vOu=r>dcfEK+!jo?(#A(gDXoek=0ZqZ~mWC^Q;di4cf)zg80M9q}rKh-Vq??8)8kK|qf5I@~BCA7&;CE?2|X`t_eh(yclf z9sqirDXj*Uvrr+pQL>W?UtoZumC!v<ui=k!6V5tMZQ76WVrZL<2P=ur-fyX)acF{7iz|zyk#6%i%%?6A1>#ZsMY#u5fPxYKitgpsgVe<_N&1Wr_K&%K=sa@0gdxW{rHr#Nxd{!+3mYz{nT$41u!A(7cWo1sS$&R+!U zQ|fllV0ShBJx&7AwFPQqqNWZwJhYRDyL?zRM=C=d!Yrl2E!n z^G~EGHmpW|CAdhsQOWZ^H4iqh<+3C-+v3o5^YFe3iO0$%txX`{*MPVl*AEY}TmjI) zF_GZFZ{6I$jk!SG`k-zcK`e89n2q$nG8imiT-5XFa%Y}#Y(>3k4&hm}5fMxsax#&> zKMr5kT18-vEh}Q@MldyN;`GN=h46y3^ExM z3PEB?J!&tvk!ua3G86YhJqPiW4nC}rpPMmTDe(C-+VZj;UfZY-8m#iL|DiIRK#3W- z-CR)zQBc`de~G`7qBNtDI()f_BDaerB#M5WUWPM`ByOlvXJib0ITrZr%nBj6I@0|w z$Z+z1Lk3m;HeDdkK{fb}o*pg)eJhrAxBnmp_tvcPz_Ue**O+O?lPqdM-)Joyb49m9 z_7n&Jyp-nm4tv)B1`VF}tp5QG(BHsyNi&{Q{*rx7-Zfw9-(>IZ0dF@d!2VQmMd#gQg>SRgyA zDZ5XI3~J(Nm~e-XEl`jV?oWg!3tV{Hb|+VPSS_mq&G1h4G8`~aJFgm2NB>UMg6+#D z>hKPcC-GO``lz(w2YV>M)0H=+9ov?Z$rIxxG3~*JHu4qTa&2MKR-`<6-wsh%Z(NEJVfP5r^Sr<9VDNT(QxoJy6U%p%Zj9L+z3VE z^o`Jls1%GHK(7E#zSy~v$x>iRIs8lp8B- zlZzz^um=T^iEw|2ns!C(5S(IY&%XKQHekBD|qB@v-Ms}uYO#k3Y^`S^+eshYXJh} z?+jOwo@*v|MrYY@fy66~D~^-a-vT*Kg@*@27xRR$c@!Xxd-kc_B^qAOg?*&7{F9{y z^@#AhIH1mw0@=Rk$lk>S!XPSTv_oc@1vEAGEjp9TZ^VT`Fwg`uD}c9k`j400HQx}U zM$$fGzK2va8a_+xzD_T~QP`nzaE;YliP1J|oU0piCo-%CD40}$*IaX)7%9o5SxU^T z?6GyMS{lC&l@>1%#))rD3RX(kl9It9lJtz1nV5WJJCP}BciURjPj84`O6x2*tbGJZ z5&8N5UM*I3U3g8WAJ{^ce(mxR)~SW%&X!MF)VV4=nJxH7w$9-R>ZU{g!+}F03&Nmpyo8n^=!0ImHB%}`SiSiPmB zfX85#{z-y@06e#imOo_Bu~a5eI9F4GgnY|B(1`iVvo&YZ?tfE~A&ecqN)zU&NnYy> z@AOfXmdS6KQO}O46ayJwUrk+QP}fAP6zs*g-DT`lNQ8pR;{av8LfAo71fRM2uby+= z8jkEF1K6Y8m_eF>4b+n0UEQ^aTGVE!UonHs{WR<+Lg5T 
z{u;jK0zF2na`e{8MN^Z`Vj5e(<(nCg$Y}eUK{6$fiEu1`i#hPcVKi-}_!l_U;Gof} z$TU{^irM7lD?nnF?}g^Ri-(nbQe}c(6rO ztiBg}9?)knB9wi3pzzP45at#{L{^b$)n~+4BnEh?gG|_O{99JsZ?0FK(%B=IuOoOg zlL&4mk#`RJ5a0LUb^JWZITxB_KHtY^@+Kkk=v~HWEKUmL?XaJ)n7U^o5rOAK19d)$Al#!uw!U~!SX4egj3&l| z9$ijep2=51Oe#Cu1IT2>F1>an2yyr@J+to`I|h8=1!)3DUowAe78}8T5RMtG`n|c{ zDfQ&5i1iO!Z!#tzMiZ)9YH%S>5k*x93(O2y7h@+HxbXb4_IVGMn)!E~&mSXogk?L( zM+9Vh8cj#v3ZOT?OxjjsGVX7k`GEQiV>;tGG9IkaU%-A;YRzD(3)j^ly;vb#84pr5 zF&3s$|KxB}pBks8K$bgo1K^a4OEI5S(TpIM5QW(c75!Lw809eE;R^ zzRWQSf0lDd?eEhQCL~xzA{7=`r_W2!ss0M2F{ zAanYsqC}1)E!}f}+}TD-@D0K@KjxH^gsVhHb99&ISS9K92kB*|0)~@F5vrrt26OtU z?0#B{E7^|bft(8nr1j}1E{n)q=du=3vLn@Oi~cy&XA&D4VLBWtV8$EzfoqR8XR~1E`z$bzDN0 zQzxjb(C@I#`g^;XZCCkur9Ld?On%lCXuX8Fhqy~x@c*d}jVwHK(TaIU=~9)R&GvS7 zYn376|87TeB3DIgIrPx(9wLotyIx|}-m(A()u{y6yXbm&t;#C!b%C(k7Z|VE;%u;d z31Wn8Fk?@^KF}J8nj8T4)pS*tRSocj?pV*-_OBkFO>{ug8K7#&&k zt$W9I(y?tD9otsNww>;ZZQHhu3OnvN6?WXQ*+IYcKX=@7&xcoEYwW5TRcr3Ge{0V9 z%x5~xn(r;4!^*DiZfHfG7aw}E<@Je8B?gdHj~qA#nP1oLKU@o;(ly|mcsJ%Ke;)hS zzh2G1z!faojc0pc=wPSGchHZ4?h>>*Yfdy2^uaH`C0sBq{NktgH4VxSUR+2BOZu|s8>qjB*pF6r;Ngm?NQ+X{W=Ve zsCWCu7h`E2OE{E<)qXhLIam~GlhfPqDEEZL-+Zg5NP@m}rB9dhE;G{OaUY=sQJF`0 zXCFfu&&6!NF45sw+<=a4e{19`kOOGK%si$5ZN$=SC_7bB{oz8Vajka@V@Fq4y1?s5 z@$MXyk1t1G4)*C_pOur(*+6i#$|2Jm9B;4Uv(HrQXgrWJyKc2ogN!b{k0WM^w91W4 zcj79}54N;MI}8z%SF=;R>_}IKX`^aS2$7Mij|+zTgT8M_QcTD)DK>=F01Y6u>Z%(Y zir~}5$%OW7x9|;kmF^LZ(Kboq9I*Z1l*VSRLmgxrJo!!bLz01KSh#IGjxEIKr;?G> zF!a9&1=m1^yC6dO%1Dd7Xk6^8XDWZLk_!E4qu{mPKn0a}=%2K!<$8@cwolJRy2(zQ z5mW`?1MeDrT&~A?aPSB~x#<%L-8H|Vt8FQZ=E6K^` z#3)iKR(oU_N&afKi{uS5IDg;2y#Q)<&-@e;EngY4aQXO{oT~@?l&1nGn0pWX?oLN1 z^xWn}xDc~kdLPO^^+47i$#`+*+^5P+#P+G!JtS1m%8tjheqFpt{LUN|*@F+b8=vwygXufavcFuPBNo`TPVYiS?sWWGx zZahYsMYE`=kUA=GYxRIU*dau&xg*>kTno<+5NBo*(_;%{wzKQIfj4P_T=-^+!jfP& zA7z@jy_KD>3r#yR2!xVsC<|=`vS6=^vqJYS;dQ@4*=$yEj{$a5=vqbmSDtJ`4^lP( z!E2vJJqqABsxi~|l_~SWZcaZ7^-ZET$x2^QNs|I4_R}S_)*XOMSp2;{n3^`RqPdsT z_+To^8zKzMrjqz;)Id2N4}(>*-J%Sr$1;{bzqu<-NcA8O7@{hPDN-lo5Yu@b$H{T$ 
zv7YhdU3Fr@S1c#9&7ylMvwp&-jvfL1i8$E(%D@ z&hJeQALD+TJ_3kmJkVXJpfZJMyz`|7;2ZRz^;EzKN>9Qw+30w88bZ+tLlu4eIiw( zz|cVucP*0X*`cCZK6;yjEuW!JdseYLT9p0b5fFAO#cU%He0Ije|zPl&OFAl zjlCE1&wS+wtc7x05eo87p9CI*9n+xW!murX7R zG`Prw)?H0U*m10*H!wk{-lp=2_+eb%4a@z9g0L9BMZ~?fB~4&S(rh|)aVxV->1+?i zCT*`q{E|Z^A$BpukroJ^hRc_bDyaLmY&)ihGCs8ekV;0=LA&T=#$Ue`?p-hR_}S_$ zoc|xwi*FfJKP{h`GnvRxC;^Oni}te}RM}@Uk%v-_OS%gsYvZG(hXUPVPl! z4j4hN%ypEcIw8X)-U3XR#FxB;I>Vqe$D@stqpLs49wB(fEn#b(W!7UtJ~sA%W5;B%Le`$P5V=@}V%G63N8oyF8&R3i>F zho3&OHRy8P_Ap>yfQy~Jd$29Q4Y)Pz@q3moF?0FOZq+`+RN@+o1ZTlx*tc=J^CniIjE}cB$n6MDNHO_ z4{)xvnC+!NqI@uSb-7EKa^xuFfZ5+w5z64?TZB*M4IDe55pTu89fjxZ75W*Z)oNGr+m*DZ@3U^vhNaY z5$r~gx|-asMtpE+oRCQ)1GuaBpw8QLsNM4i+^(6i!S=+WNw7JWT@5c=ru8HS z!(IRAgA(&+=uuGWxR=bC9s+@KbT zt~9>H%GJZ>5XeW{%s@#9NDVBm5pNM2=|L|a%sE(5i1&;_zfkcxiHX(UE%8QCI=;oC zV_LIi6v5ELYY$aGdSp_HW23pSZ=UFZv&h6(3H# zx6S4@4EJ_GlOSij+~VUN=Wp^G1QgySSmpH_7$wM}uVIzF(ECqrCSh(DtSgrGSExqlFE@hS2wY5c;NuZOyP@?fDbt_)Z_nT*5PMs$ zvXBNGo7s_v)>WO;H^c^O<>Ep(D1;8p$X}xS+Cp@cgzV{T#`(gpe!-$OdI4x*V)Vyj zcA4zM+#`R4G(tmxxynl_x~nJ=b9?|0p#> zP5h(ye+N(7*n$hqV0L2tZ+7}wrpklvU_}*H9dB+v*p%MZ`=M;l{Jn@oNygb$MqzPY z+7!T)oCnJJUAj|c#JYY|3yb63%MywtM@#~Jq$j|K7VHEpI~Xx~{M zM_EKt?S;cFVQ+=P%@4r%eTJ}ND-Va1;YxBtKVtQPnjm?!sXDSrboxoJ^Id%ErQ~rM zN4cpfVf`ll^J9-MO@nGi(qZO>5(FdYg$Y1JeS9<>LbVR>I*Sw`SLfuo9D ziWjSQiG@fZ;k(oWzMHxsJK?bWceBLhPxkqdrQZXi!41=}e@F_dn@lxIODcqTL?X)i zZHR{6zv*1D7;cVO*R=(kf@r7mC;XUP^FNVgIC35;zM!>FB5o(B(;+3cC@36gd1t0z zfj4pCr>4Rm?e|AU9%hmRm3|LnzR~vu7FJvHZbQL4nc%hSpT5S6#M;n>M&l~$>jol z8}5~W(W4O;0y}7EfmV?hek$G2!;msLBIhmw*tWc)#-v1mCHn=*&?Dg!?~(r52T(q= z6?KuNn1Glqr+ea48;ochJ*u91P&Su`86B}C-5;UpgDhaG!KPGAWs*a7!qbhSgp~=s z241B9LqX?FSQT)j#`|H)$yoD$-|_rJ4*2v$;P1B%6p44;D@PkrC!T>k4iM9byN8?k ztq%6Cso_l-JWQfm>VQomv*4pL6~wqDbJt8h9llV;mWp6)Q~UbeefwXJm*(WR{Ngbo z)tnJp0=U_@;LGG(>!XmrY@v<=#VzlRTqiS+upmxUE!<&O??=h!!DqY@YYZ~AIO|6d1`8s;f{m=%8(v=X&~}8Z7}&S<59Dx=sm+PQMEgeea8FjJ}BlER@8a;(4-dn z$~xV~4*6!bgFuJ|C2L%#DdQ#g;5US^f4|ii4>2m&E`XjNgpqdDFHNY5M0~%;ihN~) 
zsKBQV_3wpQRd6&tmumyhaF|dQS!m+v9KMrYjVkfFQ$2B+WhoLsUqBd3x=c1qFQ3UK zMKUZ%J0766(mIm{>e8XX3spD;8_2Tz)F<1@=TA-AYomVFpA7zZ(PC=V5SJ83-yMznjpYCSbsYm zS?>{8bW8jHjYfd~A8bT(8+96Kya;9^qxZ=Ajr}wPnY`k2e$IR()YyJv>;O?R4!7g4 z|LNum5+{fj9`t;Z(0}+U?gdPNyzLnZ?fb@y6DJZMY|Ws!lobE zcz1#4xFKItYPhy5J1zs5X8rsu7m+*D(!^9C!Z?NMjLt-!zs+41hR)@wz#d%Lbir>R zI8d#rS*;o9H~I+Ceybz@Qx1p`Or}`@eE$*D0Pxg;@}q|}ePb6!Ggo*Yy>jnTk7#zA zh$z2ds{7j74uPhir z%fEZ+a`k^NQESj`WuxT>eK2mzuz@Vt%k#O z^mU*aq#DHhLP_&{L19^8TX)VfQufy))~z`gTpE_|r(NE1Z`9%@`zYrZt>j@RaLQdR zRI#6%b=blaB;@o=|7}cEV`H+B2#_9ad9pUNyQ~>s7j^ zkcMlINSEFoZ;0J#a(mr!y4a2wZ2>Tj!Z~zKudlNnx2bQSrD@Jh;1?`GgHIy7za|;o zORbpb2M@qvBOatg^mX*~7Wdd=rJx8eBXYxJU;O2OrF7|R_G+h|!+8yFK$#z=d&C!t z&7v&XdYa!^gre1a8`{%1^E*-9_VxrF<^_soOAg#2CCBg6gWNV<@10;@fU!#%|IPV8 zjdPl`*0C;(z4zj74T(#H{i>-?@p`f4u4Ju7dzxZkXV;Nco@;Ed_C-M*DJ zj_(9f`g4HZBJ;%oSJfAK06rw(o-QtQ=7AFn)jNsd;vkI15Z^b#X~@+soq;_dM*!$s z&;V(SmnT)6xfnWk6q^Zh_zpZPeII7mqEJ%IDqI6ebO&lEaI$Dm_F!PI8U%}t7kC;l zv`IBo!x8+nL}qFnE#L0&?*l9ER$w(T{&gB8Z6a@OD9xCEG0isx`1OLwcxCd9a3k!N zq(>mk84dpLH9-9|K-w74OvsGmr+ax+GtF52+@#NAR5Zd z&We)UWbKb3`h2lyE^%sjhJ%*3-XpyhPJ;i2WfJX%b2a?KZs4fmBP>bAIg=X?W>*-p z20E6YKe%jo40GyYJDUhw{b6%_iS%PvaA2DNGt>pks%&)$pj!PnN_AAyVYWn{NAr4) zUCix<-@6CoS%fVaroOS~AdX(*KAL3b*vHrkYpjK7_(VC3L2ff_`)F}_ulRLC(}lKJ zWdG0^dM?zc3K!D^xhe<`jXkzM_3oLC;@E+_Lpz;F$CH`ycm4V1wpaaDkKu`^XS}X( zUV@5-wr3myK<`yYf*-nb(BsTKtRvR0=%}VjRL4~8sdL-3pcGfr2p%6#)^x>-eirP` zaXeqW@OO>KhP%kDHq9%Mp&z17qiINmxj;|9x*y?OY6Oew+%l*MEr6D4CyCLHdrGk- z?kmp@v4%N_3gxT0eP`@OX49-28H2?7IVx0J3VusFfXR>dnm7=*!wV){y51 zQFTMx`H^1`M!-zO9yje~si*1@za;Xa77@659d$EyBp*Oe!Tl$wsk-~7SzSQU5Ixl3 zCi|cq>QANA0BG&tY%o&(ayt*!xFMHeW%MSg)3+`;TL!}s?s5FvaOJCXzZxq2iXA4% zmadr_P&EGN9bKji;~%^uYG*FCU|z1DUD#N{1-ZMj!tltzH`R)W1l`WFB;_KFdCQKf>yrj#+~@mT~!_o z27efysXmbMVnyxAVrpiSAt!1L= zFy(%=A#FV_tESOOW9Yj6gfz&CmNIR?hsTj6dy0J-UQRYA3Z|Hia5QL1j(~Ui@l30< zhoEUdOP#`Vk3%)s2vX(G5S2H}Q&Toj29{D+s$gAL?x z8h=TJ{E9b$_SHGpOpf0WRP{94R$=bUl32TDK&=ZuK*^JoaSn5@IUyu1HD&^w27F4& 
zK<5(r0WCGz_|B9~Pk?Hz8SDI+L}pbhRV?)|{Z*Uc?F=8+#_Je=ez4&IJzYUH*6X6> zQt|X$GLK5!E+=Cjd%>hKRI><^G?63Gq(o=Mm~EoMCupU~yvp%1= zUAD6pv|RMwfNo^Ud?fZkurpe5?nrRT^HVCN3jj;)P)0S`#qjX?H)j& zza8T5u2+b#t5i+2LJnee8%3_7ZHF2(8$zITh+{rhYT>cYC2{%^mB1o~L*ONum%%mF z>X5{n)10Fk^thsH@H3p)K|9eVLGfTxzh6yMoPGm7p6RAu`8kK1+2!O*ZAD*hukyWlsLv{}?!WEaihc zjA)y{_KgB3JCh^KciqiZmxp$KN>FASCN4DpQ7q)}kyB3+qv8MuR&`(-9E#zpL);HmuycK}$?FfWaXkykU&lYO8YU{U(sX>TS6w{k=TI@p&;H)j zig+D@FXJWNoE?kW=Q1Y!d#7tB+toX{)MmpxzT&g}R+5N!zmb#TrKSO=PZ@XiDIF+6%x z1ZTtKSs-rDGD=d$txavEzg^Czql4(flu^SNPZmTtvH10&WCL;^6#YL;kBw0}N_OY6 zO6i&r8Y{>p0a^~)(60Su)vrlC1avXpy`aO+m4(=7}COYn7EJ|f9N#b zv6e|scV+hxPXfBp?g3;#zgn73`0E>iNyhEh#E|EVF&-vtDe4-W14AV;-eLSe=)?n% zYNMaYV8y$;jV4Ha5Tt!^co<>Q-_suwhNyVInK=!@2X(dim>ei!-^d9Z zAz-5eDmo@bf}Wi~kiWAN&d=(B#^0GyS>db6TB2egTkzE`1~6fsen-6GlYdQR>a9SG z(HNLvg*JL_+9BR;4DnE8G5Se2iBBCqe3zE5^7zcom~WDqtNabLtZsVVDM@cqC`a*u z*$w>w4$+{Zyxyz$fhScBO5Ve;WR^tJ>FF}SR!hEo78QsH)o>M<_IqBoldQ1mqF|XZ zy7$qHKBe7#n|x%EF2TES+Tnbezxrnfm24c{z#qtRNMl2c7ymuT3z5d&GqHl?mtg%q z9jqAfz-vBO)8oW;LMbLff23nI?QZh5LR0(&%M{%KB!a0}D%Xzx2xsbMs*J?I&H8{^ zy9)Z0c-)!VUdZpZn7-NwVAafD?N|3~%@atpi;qeX4BtobqIbRStz=Su0-PAZX*LoE zAk7I9M3bj}ci^Ma|I*7`6od6L+u9_gUEaF8D~+VU;>WSwkY{gBF@x{yK6fU&Jx;J_ zHY(T}N>O?;E(Bd|%5HuQigBiZKl-S`-O%55VuD2{F!?%m_j&CL2ZPv_@BWWkMwEjB z5DZq!*k^zounoJ48hu^a~V|O|hly9woZYbZ|T=j}NH5GsS_%=WGAE``P z_ietW%lcwcFEO_5A$VS)D|zy+tq!UKia}Ov7$YEdwv1P+SzUs&m{=j<+$wHMZ30SE z7OlIbl>fi>?%&CM|pw$>3j)w}BAa551qWD-sma*sTR;&fe49A6TD`j)=|aW&kg`yFqQf%Pee!@MA9= zK%qpZ5Ze5dLmKRK>17f6?Y)vLEH#1m)l^Fo+S1n#K$O+QO`Zw&G(dEb0@wI2vgPg6 z%}ai9WI(iVpVbCsrfPyx|4^sNQcMUE6~Ch_{CluC+ZBl~xft}@(%ZAtcy4wbWw>2Q z@3{y%*8xCEX58?PCrLoHp(XysqK5Grpt1SVY=ULyOwN_xiKY$GsE(nNVYcBxz}J{d4Dv%wN>|A@D-jnP>N?5{=0u@{+|DfH1V6I>{WOVD zFk>F~)?UD#?y}_5a5t%{+38qT2-CvL4`u`!yudKgu@41Xbq3EE8H%+-qedL9@l%!f z%&-nOS(E*o(#zadz#C)R+x{Y=$2ziMXogLK2&o7{BnpWan{=*}*V2^IM zkIHW5)cY3_P9^A%zJtBJ^nL+IyMh0Fjj-k}Vz|4(V!35pFCv+qXOEILC96wIKi!_D z95{~m_`IxJXTwpTGu9Q)DrUy2V|H=IXUZ>ZUOsIPn8&$IRwhJAM$=Q=MHCiuV|~k( 
z_A_mTDvf^k9FG_hfA(dgF=%00hkWpDU+u<*3bk*C)tOtwgtB{-@C^k};gdzR#yTGB zRlwlk&&qP(E8yT;O#PBm+buE#u<7#N^C9g*jX`ihO7AD(%0YQUb@V8k4jUyE47I7j zufSD0!3U#s_p`KnW4TszB2nLrPC+K_%ZH2ez~r5_=)$+w*6zVuH;uC^nK1H)T*MMp zVoHiBdM7)V&bLHR3G4#Y=`dwDs1AgGm+RO!BU4Z$u|rY)|8WDgzauk}+>ks%1*kX47 zR){Cdnq`{Q#=)9X>TlQHUKE9-;T0(AOx3@9c0`(RQpvIFeldEZU8{G^_$KLImYoz4 zFn>6Fh|1c$8)k*fnh#rxXo-H|D;G6|RU5y2wxE#Z8YW1?X@IwR( zy9fRqNqJUjxj6td>9CdDl{0iXdVjT=zfhkXD-=I}+0QF(lw%>ZIa}Y0E8Z15fqBzQ zc$-VJ$>^i`CwA&W+dLsC3i3VD8Wwh4JSh0M;n!J#IMCFTIK&bqFcYTwWS3duo>0vu zw;fV(0BK_&D-t4fDJ_v&GJIcd60iG#@>*gS_M6||D-(d=m%T>tO~-nVT}KEo;Ii_A zjqNo1Rb+q(z}MA8|E)fRN3<;dTFu*3Di;|4wUtX%HiT|x=#ZcW#<<4ORm-NXY3g@D zs$DW$4mQwSU1Y(O#3^QWxF*Afj~*Xh%wtt>Oe_6;Mgfn5>C#&Qk+w&~4AJ+zC7>k1 zkswu$Ee0ULFe{;(X}V2m4+)pCU3M70Ac4ZI*F)=KLb8eM&`+1K4VUb@WK%MP)vSaU z!-8HY27-5y(Bt!d{@qV?B-U8i+U61W%g1pkKr+^vnVZ2mBd@Q}@oV{3A6L8Io%y*) zbC1)&Tm8tn^cAvbxSEos&5!WMp3m;C=$Muwt$Dyv!b^dzNySQSQ99H-H)Y}LW2${! z_Lo(W;+aHOCD-Wldz4yEZE3WPnsqD-b<|bbc^d`|2KOcvQiJ+!l@IZy4A!P5VpKFD zLr~vgcEwKb?R#q^%vafG){UWxbPUqFo3KXMlhKyVTfT<^TlUKL$Y!dGU6#M-1d`)D z7q|cy6#!eKJZY~@C)^U0hqvJ0WoNeKOYeedTC($TXTGK|{M^LtTbP`8Dl1|>i%YRL za<%zZ?-#5Xz-vp-h8|kPrA=SKAo9H4;BSprks^T?8}NyDa*zUB{#4#dliunAbV&mr zU6bZLYRSixzm4XuW>coodxR8oa(Rpn{#5i`vOH)X{eqTS~_97z01;(uq|sp0@{Mln9k?zD@pJ1y3x%@ z*j6C)5}m?U(heLImGDd}2(zsV(4RH-MW2L)C^T_Ds2Ja+vXh$}*nli^cpja(#|2zR zS1^b9&&Ai%_XRU>+AdSvU*Y-Qk1cdMn>gOc{PFjQC{`Izx+P31dB};4Ru1C&IhdIAN%y>(# z%F^@3P5>IWsdDR40;Ax^FJE93T>;bVeTa}my-S7JU8yY=N3@G_4X+wp5otq$KS1A2 z*I878{|f6M6YdD;MWre=bVt4*9y_$l9|klzUpPk>=*(?!GgSO&F|Y@X8)7#qyRCrA zJpWL=HoX?d*G%<%A@4A= zx9;p~+D}mcEW7BT`3qE+6>s+v-N!KXC2q%m6_U$Fpu;O%Yw{S1dK zu&z##(q^32e8J%{)wO4>I!Mq<0eaz5lqybto`%cRy5hF|iY=gGVxMp;NZPOQ=-?#$j7tf#rR4|~9 zp$|Qu2#}NzPh_2rdA?Rz+#qt>m@e++hZ9P=(QCpwP$FB?9BB_U06O$k7r zMm0SLVDqah3@+m?)}lT!Pa741P%P_1{to1reEZZeMm6nj9M-}Nx^=_|nw2e#=Vnz; zh9miP)Au)T$uD_(oARkHAfZR3)r&6#l)tuFs(o+OdMgzG3arR$SJfh1h!F{KcJ%2_ zdIBspw51kS?6HBk8QUsZ?`D#rvzt3*^j{}5(*XyUabn3kxoN_ZE+11B@v)X*b_J5(++T7F5g+WL 
z^h9#uO!cVTbGEBMpP(+1&B>_o=q^ZpRsZP(zU z1>n9G{Fo*`6PG^p_v$DsVJGxi%BTUZrno0}=P&fXK*`?RhyQSCjZlO)v*ZXlZg4z9 zf(3iZIG9UOKVlXPY|-+IJomluWUT39VVI_gGRG9=CP=1f>@P{Vr2{Qf;UvqTb+Ckk z;pyjAaI6W|O-#9y3u_$li9Hjcwc9KKx-HNr>O#zRiJeP8d$YQ{aw-^Np)$`ELsyss zXt(1GG#Lul;Dtb(&|;}zj*9m<7+AEP6~$@mPZ`K>Mfj$)MRLT8Q1U<(piU>EQWx?f zmv3MG4$y8>yl7L{fiBoj6oxiOUtm{VxSG8xBIsW8>DU#t{Qc3}0FA^sPX+}n{Bo%b zmd#g!tg0-#p%{XZ1cOl~f{7=XK}{mdw{jnoGzXh@gG6i~kGYdF)JJHh08kyyX13G{ zm0_Al|Gu5ey*+sof=?|o(#ZS>#7`?7{)}&o@XNkGgivR0Cb)?}?OZTv(O>g=+>|q* zvMGid+dSk6+(X#YUIw4ZYyo&-!zZ&)pww!OOr?QUWT8%kwcejMpp*ZNoia|$W7HEc z=dCgMBLO(9+bnlA`8Xupp;lR%2PaCiNSaOhN?l6Kz2Ruf6zmJ8frp|^`rIN06-O9D z>RuPL2n zr>4VEciRB`6jkmyl3-cwI|i@~;RZY_N02{uz7KAOiP`}= zBk)UG_aua5(2(7fY)Y|&TiIdkH~(ZGiW{f~;qQItYorXQH;h#I>sT)ooMiyQW?bT? z9_bW%DoFVh6Dw;8Nn}%9zLcQ*>8nZY;*r$J!S|51^5$I7(CS7IN)kJijHW4qYh6*d zhI9}N8kZlm(jP_$v>HNMEer1eO7V55yx?d*f-cxtl7x}S^T@cADGj)EC zh2^AJWTsG_fEX0!UU|k1dli5`HD=u}xfs$cWyt_s0zGf8<=v6PJ(Ba*s(Ej|m|IzJ z#t5IDv{dZb88rNTcPGu^~d@F9IVW5iK5AOQvcD_2x;fYS|BjRWb%=G7>30E(ztn zlt_OA2c0sn9x2wL3f~PimM`YIsnW0OGJ$gk;QCO4-xMBEoI{-EYrX(mj@)q6qiGH!a<!)tEcG9#>I4J55?=5~rTWDTlBJ|UX;1VwWWzh~a%c=lwQunldQDBpo6A8g zr9S*$fFEKo@+Ov}DidW;>=>LJM_~ZyrvMMrK8-ogjD5P4;9O!28SX%iq_#wbXkjCR z+!O`36>^LZSgxj;Q}QBg;Kdm~#$mHm9c_P5bC}A0Q&_cmfUP88T*_C2oDk5QW~Y)-uXmVHZ`%J68RU zU&a;jx><-L58ab#d;KBZo5`xE8DFirM7fUF+c2n;ksQN`;VUJC3~9YF>An{v#@Hg( z94@o%hM8ZsZ#!ooMoUWg)*;}B=~k8^6ko#SPZ4UKxKg-EQa(tJ=6W%x2!}DHmHyCw z>daB$`q)XTr=r2BL8f@^9f-e3D(G6xf*AP7c@Wx+ zfFX?iumM`9ioJ@=Q=MRLjBTP*G!ACB||PlwY1097M*`qNMC9-aSoCsrQ@W%GGJPx&nhJG+zf0Zg6Yv zVrVpcQqEOP2Psj4VHK5f#)qp&)8$7t79fP{Dh%(!v=KnKZb)-buD}QMYO`hpsA7f2 z;t=zpK;iTcDemw8s)hg(SjnMT<-OQ>hsGL^GIOKbkvUhzi}^-hdXgwPHEBl8ss(of zunHEw`@KCVILQe#U+CZ?RIcTmumJGH9tfymM+|X>Hm16mcdnF{_b#I0N|lW#hG0}C zOMO@9^ukDb(*?kcFX?oeE9Yb3k9?SmmAv|opdbyv28M{1j?l(w{%L-LJ5U26wSr-n~5t~%1-}0 zxh%lcmU6cij<>w|Q%_1A%y`c5y~00GNCT-Pg9?Jy8_90qFPXZ~mxDgv{FrVg>`f)S zM%@kccgZnyg#q^UAHmo~mk_4IJ0~;4thsLrV@0<+%ucHzn|>5@RoHU8dVkkK#jP3&G-GQr`T~%s9%-lC^+f_r_5v}~+&?IG 
z@?FgjZomVkP>(Nk=z=~}C}FU|Bat5FTwGz|PWK5{$&1<{msoqHAn$qDVJz$7CVhX9 zszdg&?KS_nCt}}A#f=+*UlAw2pCA@8k6^Zfo|NAnjZ?^OS2qhO;kv@lW>=R+DBdX; zI;RP*&H-#R_tuwmaE@-(+x~HJaOLr%)22F*phYE z-2;IiJhR^5L&H}REM;y?OpZJ_c6oK2Tq}B=b+X=9hZFjlyT>Ta{W1@nY$>+M>ggbJ zrHe$bb(VhW#2nnoVy~mlj?@hj%C`gFopG&$a{$`@bV+olGFm@(V7m}^@*R8uM!I>4 zrB7CBOGA0OMDpTV-B1Sj`@BPI!e2XASW=Gli(5g>hUU z`Gkk&XEZiWI^FgV^~WLlH#eaQFp{+7P{M!pX~jT^GDUQB!m4{+(YhoU9#3E0xP31# z0pzf)g0rTYa;hbwU{W`)gIl=pt_M_#rzgeNy4tAk!@SVhgrMO517;^_UQ`z75R#nmPnFLN=)V7uDGZ>tkVddEiVQn6PXJa#E8_PC1 z2H8#qkrm6I=X-yWJX>zQf2(k<}%#++t3`D{HUO;;B7JRy+W1 z`MsxgDU5N}MSf=@!(U(MlrpPX^~50%=#lZW7mgd;MO$^Ia9!{&n0Cu%6-)xk8^F^Q zQ8P-+jl8E>^>@)nt(IF1XQT^GNcdhk^^m$+e$i8CK4RvCY48wyK|x(bY(yqt;!|>A zMgb=^N|`S9kwQww>;j4tSN#XJecsEk0Ewv!T{7b)4pY#^X{I1~B& z9neCkQFh2@gGBkoC`Fz)+o1dV(R46J*S{O(emTOvqi)-^HA^Jva1imd_>Lnp`kn~K zS~JBj(DsRk>x{%B+>ZUC3E*R?5%Bp~2>6(0%UAgBdNcS{!208cBI@7c>!oGhN#!q> z%EVZo=B8gRfY(Zse@C5zZ)=19F0Vh& zPtQ}xl)j06JyWf0Y}vBG)BYOb57^(w?x8z<$|xJ>|CE0DT_<012zX%|%s>A*iw6ir*vLM(%|#IP zJg6QFZ`x}w{Q4X5-*WwM?eYZRBN=dKQDq>?==&CXZ&hW`o3(Es+R*uqU-r2I5V1Zm z&;k6FDZ#3-L+9*#fB6S6SowF82?%(dF!`uYJi9f6s>2r|)=KPt_%j0d_ffdEW?SJa zxVM;6U1l$4DvfV&1EZ>vlUGG&HTSV`V3?#Q#o&tb>SKC^&5)12yY)$!NBPk#wJ|jK z>CX9ku&w8JyT5Jk9xIUO$Kpmo@tb`bk8!5N&mq0Vf-N@DozOSH50L{OOx~L`j|)(v zIuc!Scd6SxU)uXmg-F8o`wcU5^nPjaLD>a3BN6D0;?|fevcd8rqNG@HJs9^uxSD!V zN1RldsbZQPx9k;1J}(pcbvpYvegx=kW4Jw%Wmr^9TDe)SU zO8P-glgxvOYOHSm1h4J~vwuI?n0V+QIT}rpc#?V6Nnpaiw>PqXH(tebHy`{rHN6z| z*~Y1W6M>3(Yn%iZAoir9rYR^fa<0A>BQ}%kRrIaDkixJXuorp>!?2b#?nB>6Zbu5Y z*CS?ha-&@YqH%J1Bk)9A6Vq;OO>b%Esdo;&uqk10oA)1kk@B`n!1c?P>Z&Up^U=-t zL%@@F&sMxLOP-u}30W#1n$>o@-2q+9AC9Rl7a~`6KD|naj-C|eOtNQ`3-5yVw0Mv` zrm9X^?DlvHxDWLCz$P_x%u)Mcw3o7k#Q*c(>oob_f|Uw3Z0TYh>-nX0dvQa4W;K48 zwz)MEST_&LeWpONqWcg>o|}o)X{dI_hRXFbXFQYqA6zIe2<>J4$ zTgsAT1fcvXN`-g)%tClWBkV`}@W!TGnftKrQ_T!ZnQBP7t}LV6vzb^^rFO ztn{Ln?ape8Jr5Cvsi`l3m+QeFT9tP?!)Sn^0BnYthkQ#H9bn8Upd1wg#tV13#xe%u z@rS=3WSWis=>|SA!>P%pNkFj1)Tv&F#_bn=4*R6fkX^fMacHq=CB&`t&HD@ 
z+{rQpi}4yzcen$Cc^mw9**5&T1t<*jx00`VfJCQI0!Z^9;>LRv04+|oz+e8CeK;(wu1+D0`9@yM#v$!?=aQx; zR%D2Q_qQE=;ppChCimpOQg6w?x*V^D^Jp{4+NDTd*iEA;5VTfO`+zgwFcyb)D4+Acty$eZezQDH5cDe|T$T zi!?sDJcrrVuAYJDxrFQq`1d@~c)({_>wK=YYw!~}xCT6{%%}VYx$V(%@L!M666JP= zolo~|6RTB~#Sd}A88%n_>He`V)SOJMYzr*Dxcs4i3VQNw`h@6}Y%9i57qTh`0Ksvt z(dyxUxrf8>g=?{dk+)ziKk%lQJ+T9VH67<=>-uAgPpFQJf3N2heb)&wf?wfY z-%aIdQSWU0{)EjaD$f8bav{v`$o5h((dtwutR+7xU$&`_35f; zX(9YLOB7$Ccl47%-WHNleRzk$#7X_w%W`p==-m!kTRM!p0kp_Rv{Kk#U=7cd*#X6N zV)xFiMa=Nw=TH{yP|(fD;+Ezv%Bex;-tiPoXm_BEA{6)LGFaprojBE4z)!RHBT5tN zmE)~LyzAgBPb6%3Tqyxemvgj?NY%amZ6}z85o-xm zTsA2NL=wzDOFu(H3oSISJtHewI|#;(Vbbe^5ZSR=kHa+%$l_%~3%*Jz>g)&b-#qMP z_y#-8@$tRPvsyjtkf@SD0ca&?A{{{H22JePqeDWybUmYqayt&2;VVR-M^5g6;)F=o zSQy-}EAL^M&tIEdxQLe8ZBy4Y+*3-06j|7t&%ZEi!abf?T{G{7L`OXTFS6b;D6XjK z77ignf(3U7F2UUi?(Xhx!JP>Z1_ll8Hn=+kcXxLPJ`miUJ9*ycd%s(Cf1jB;RcG(+ zy;iT)y{o1V(f(Wy!V#e2^9JnMww93=WkqJT{T%-28C4S*h10gcF2B#+jH-3$^%3w3 zF)9$y!hZm)2-w};?X@WsT#WqehR%ym*vGe4EGPmTTZ0U)pv9q-QcBwkn&{OmPw9|~ z)>0YNg!rPRQCr?&xPc2)cI82nRd`s`2DME3pT2G&Nwkj>#DBK=ytMT@@M9=4N7%Vf#A~Tz_BBVO2xNNYseC^`-gKv=j3MHui!;29Ud3fW6-)%VTnX08 z-6R9=%yq9paI*?GRCq;B3$u2c9YDnP7IilDLSx_@`s&^xcPM-8mZQ-|Fyxo-zCzC^ zHg7~6xSt$H=w8C3iz+0{a;tQuMlL>&u~0oZYUI$F=~VA959NQ27&!%1ni7V`(gX;}T6SRDFa%E)Q!yyyk^1lg^n} z0M=?GdOV4$raKl#!oZ+><(OC=q&uWD zuZ)y+{3~aLLS>V@bEYfsaxR3g_m9wfa+56*2aI$NA)D0XQ7|3L6d+^(mTb!mu zNv5Zwj??%Q{f!Nq7#UJ)TP}8A+kX%duFYO_=LeIWO4Mi8JY@)y7bRkrud(i58(w4b zQ{INLWpIEdgoA55!ivXm&;Ep;0mT6Mi}dZ&j6U*Xy;F=n_r2dF)UU&^=gPvAscx1| zrcDMHNZ>5*aCty&@@GTk zO}~Rh{Y}}X|U+@Z%eB*4vUte+hv5U48JmVC{9A~XccNgcVeOdrAcs}HP zTHVU}cpKdVr=YkQOt-w>^@wLfb%zGG#?z}0S;fyd0&vj>PlBvGRnDw);#cn!H2Co= z{QbVUqM3to?3r%SH-Kj@-4=wmYcvG#hQ}mhXU{09p*=(|`k16j}znKGn`42Syy#)#( zKK55B@B6|kRegwJm#%&&ogiTEm}J(v$du+?#O1EB>EMJqe+{H|W-Q=1RQgCOyR@v2 zlB&a(M1(o(qyc&nCJSkUC&e}d*RA3ByksI_R`*5r1;{JNhjolGY|j7S+t^axjzo8~0=EU(ql_DI$kWpD2WtN>l4Wzo z+Kfw>L(BLnfBJ2aqD>zhbejm;jw!o==uXDVLMkB`N~$5|0jnv_R=4*??L@_>UVtD8 znu8Cq<|E>h1M*QC>S$ErLq=2f|iK4#=#c_~@XMRR2;hwE^fQ=pp 
zKkG~V+qH5rSP8q-c&pdo)#~nhc(PQkjYFd1hhyA<+#eq|73z7=(}eZGLHz@2g>zj! z;G(XH)1oKssb>;)#oH$wf}kTi;X-kKceJUU$qkOe$`hU!c#j zrR6FC>O|!NcmYFRlu4AN3PPIPgy==5ey&R8 zbe!8c_gk2}k~Vd3waF5Q3G-($)355|7q~1B#BjJdQmzDSo2`mfDnOMJ2HjQk{?lb5 zu!=p)8&?(`0|P+dEZ*T5mf*uY6}2b1#X0ihL8Fg)tA{S9qi@ab9yFkJq-otInQfkq zWtc~=J0)TQt{cgS>d;qVHY$mgL3;`=aZc8=H8;y>x>w@hplKN+S=wc--oE?5U(&6l z_-Tn6_BIm$yKLuGQ9E>e_At8u5UufmLAz!8v&{LNyUs&iKE^vqB=1d_2cB&}l_ z5QtMYV}pI+1iJOqT5{{?1g-9aDuIq!NC(I^#$E~x7{9fALieq+AeJ=Vq`tx?|9+(u zl-6+jw}t!M1qmvSZ2g{o)auQu(;2yI{(1*6?9-uUKs=P3q~=R*^HNGYn&LIO=Z!x+ zP&^G<#_Rh`WBqez#f-e3k2f+veqjT#&6nL6vy8i6wv_?157Cakl} z96>8!$HTi9YYW4f%sArN?eV@_Te1%j{8u`^iRj~)0~NfEbZedlVK>umQTLJLFGk4N z1VTChDis;qoLiP(v%a7VL9g*xN79bsx4P|QMI&TffUp5YvYJAM&{=hbp~{#Sni3QkhYE%B{ zaI!uIT<5&-ViieH7upTCd@2bMLMk-~C|LNJy_*f821M&=zd!#KekKf7^En@o{^C-( zITg-|6I(3z8gg>Ud@VB-DS_Xg#(f?M$+7;hzTH;Z0>Xu#vZhc4nOI)jt1GZ%7N}bK zfyAQg{kzF#>`(~f+#~B)I^?_(>5()<%p_$26yBGz@;jebyK4#WLfx@fU`HK2bH19C zEuhW~`<9FUJ45g`%re)Js4ODwENB}a)A>@+$Z)kaD2AJ4qh$1SE?i@ z_pP&~muP!~W8F+z)sZAJWNh#9c}A5k3R}TYCyR%;euX^dJY?AzdUeQb5Ycc4OB(<> z78sMtf+21_e!CE5WAmEHcTMBFr%u#((;x5rJ#Lhhbc7_`b6sXG_Oh1X%jWiQij zp$R9~1RD2XF~7rt0!;Mn^6k-cpakHqT+W%P9J=9B6J?TgJb4_Af0MzYn09Z4fGUA; z$Q%3qYqOzq*yNA=xQ?j|Op(ZN{txp7)4x%#?J?{GjwKkvmweY)(V!sn1P#{RyBbRu zYn(}5hsO;NBl*k_n|CQAoS^7S5|${V=w zS3QNX`G~@{oQuLQ6nRVoqOiJdm1B~JS#8f^M?4B-hEkLx;c3Y7l6)8dMNOfHs*T&Nu{xwCj@q%Yx|_)hu!o|_T^76cGzl$kg8X4I z9{u*X!c}gwqYzd}XPu@GT0^CztJz*fWnm<`Uh@$?DgQU_-u;U^j`Vp*$#R3yNwAxIK%dk`+*EwQ9AI(G7quKSb9J(TUW*O#jC5M$4h-J)BX0Xk=zB1mp?_J>PN zQ@?}93q<^DrOzHwS`xxD%WS`tm50PB+=xLC$5=vQZ%uScAefgNV$oE#0Gn1On>UU4 zD3RYM19)Qg;ZhJHUdywzy!a`;;V>h0tA_9uE}1S_%^< zx5`1q+pp>Nz^WK8_JDB75i)z3PslER|(Z-g_j~h_l-Z{qhPl$e0RQZdWEb{T?RG zPUOL})jJcM-ox^n%I6pI2p&8^`AJdxTOi^j82}gGqrMb}t(srnxD39#cGa8bn4{*H zi|Hk3;fq-gdiJrFhTJ@_*VWUKpvBl}_G3a8b+V-M;)PN|BMdzNdHdAU$Uce>mO(=fx12WTsFwY}I4%9iANvl9N6{qd15mJ~u9w0L2}DLh1LTPA-_!ffl~ zUH9>ZSLcRP2QGP%pLa`w(=;Lq+>^b2PJa#)b7sF1j)Vez^&NE^(diBdq6CCKf)1U$BSJ!wy@#7 
zhz3eRb(voXzC8lsp*8EIaHW+E+|W>NLU3(ES(P6{mvTuyH@^C9ZRbMn@}B~HOMw+0@pP88@^k&Xsl{(3IHuvPSK+*JNMzRrQL^} z!$}#ABkoh$3r@~3t>U)vmKZrls+DQ(aJd*5lxECVXYv5O5#R=RvY<^XMf@lqK9OO( z9M$A*Xal8LnLGLFOf&=z9oAw}`S95gf3oItHyDsWFgM|{g48}K3<1E7w&}L0Wab+P z_#1cHV_@Hvt$_wt8{GnV-#r{gM0$yXVr1+xpgr5B(Ibp@hcm|7;d|J%?nk;7i5QUJ z@^>&fy-#&eNp`XkFj+6-kD@+%bjWh%a{eePI?bca{_+OVcP7}F_K#RU zUQiXW*b(0$-=c}v-ld&J>g8Wr5>!!z8nO(rxE;=^Co4Uddw-oD&A0#KAk-lTXhyq> z)Q@cz#UY9&A6eCfHqYFB%o%}}dPXcHe<0<^I8R+i$rkXEfRC8JkMlzf5L8uT%m)wAtE_pw* z{Tg5JsMlCvF_2DUABBM(E{ev@KtsWCg>Is$hV4@6gfQa6+8s)iL}k5agR0(#D<y`65mg{!9V zO_bv~oa}p5DB&%J;Zw2GoSn|;D#~nf{J?7m2~YWD%`?I zaqwlYC|&yHuk~pDw%LZV#%p!4-e|Sk_$2cqF#|BunFEV;7T#~@hsUX8y1dc}=eb2(4 zd3i}dYG6MTX0PuOeJjlo8HNQ_xK~-=*Op%2VrUkmwwUtYo3SK*4_$Bl+H^tWT6tPU zbE_jpU1n+%8b&k^_IGb<)5A<}E~qpC277r8FRQM&>~mF^42LR#CR3J{5W)MyxXm$} zW6xGPyQb<4DEwBRCV3ApL1rBti!#p?vIuD8V#^HfFF#2@!Nk(Rg|~;W1%CFa#xtqf zv|S4-G9d4+9|aDC*MIk78zNWT7i`7}xCam5iDB(?Rcg=u`OC-~AEfK{GVRiHnWN#>taW}{f*Jl6Nc3r7_Wx<6xzFLGcxUmTb-hq8cLGSic6p6X zCf67doqqV8D1JAZwMn^~BJtlEWfL!787JD$&s#r{f%N4TGe5o{QP6$GciwF0$)c6XHMD?K z)&ZnH4+54ZQzIskK)`%O;OgNhl5=bw{Mj-btUI;5I6}$ew&@R_w|5csh(_}J*J+0G zKQ#2r`Pm` zhITY@#!f>@7GXzEK`libq>Vf^>Gg>yvGD!pK>vjjA(qxCH&TW^3Ct+24eWvJ@Yg@C zti*PApSko094|P$Ha~|Z(Ez{?fBK4yt?%_|kEUOzkVyw~*2w{;)^RT|7EK_yE6G_s zH@_osap%E|QfSpfvK`kEm~mlw)q?w^BQw-3^JCA}vKt%uj`(bto`1 z8pmB?Dl?kU*La*ps~J%(>Rd0h{@pt9X0vnNPfskYP~WX z&pprF()iy|M&WZ{Gr&@5%uzM%x&Pl+;#R#p7@@42_lUViO|H?)K;F>DNKdAjvRC{X z&t_#scE|f9hDnN{@>urRH88cyF6aSO&}Rl6t3tJ?PqYUeJe1rlFv3aAJ)PFrK%8*N zg7J`9RoU?Um&2g~6unA$-K8uUB!kqSU>F_R)0*f9mdE#iukmA=nd%2?xJ!1UlvlE$ zlMh!9)h#J$KBRsfvcrh3T@q^VJIoblr0kSOc*$;xCL=h*Hob#DGi7_9+tYyp^-I`W zh@9eI))`R_oJ&!!J*%{yn6*_s(@6j6PBTN zahm#QJJkgf!0!ocZ+m$!;?}P|5^8lG!MpJQR}t)y^GNHBIEwddt*VgHh9(TqB*V)k z|1g#^!AA^GW(!IpS;(r;*Z_U!D8cg&DK4c$7}Cf|&rIBaWB#l|v4FArGV;ekBtj0yJc^a|;Pm(aShZ z06;NHbie-9i@)6L`Uj#MdiPejEg93JiK0{Ze`Y)V>-0y4@|?W3+vLc!q{R-~avvp& z)bs2m+cA`|g%V9brY(@0U<>lWq%3>*i8E6o7adQ0dAf2mEWPH?WuiAsFr;}d3`IK+ 
zhM7X_;d1YO|1`E3<+8%&T*f}mu^YVt5HBm+Xom~*LDpKurPl!yg)+x|E$JaHB!>b# zgirp4GI=7?(&TNy;~$LKzc4_R4E$SDj0Zd%@2FDG;91N^G}UH8c5`NA#+AhnrMvLt zja^|s2uv*g28nrgpOPVY$f{_e_y7~XmhDs`N`-qg)k54MWV*rZZ1jnL4Epc^94o}= zv&A{2ni2g58fNU{ebtJ6X+ri4rKw6H*eM{xw#eq?ifVFVp?|QiyZ!xiK;EAy(S2_TWi)Na%mFNB)??-cixggZaj&F7`WckkHph>VVgD7D)ytQ>kFy{% zU&t>$g0 zzt!c0&#`P0dNou1BUEeFW6u}&EwNs)bZ-Qu8-BT=>`Re9y14-fzEY5!=&(f`Zdk}7 z#83z)WU=_d$a5ty^MHnu!>?+KhTw6zY$oN4Z-~$^7hRjAyPXJp2#Al#l&=QHRcq?G zm%gBRvg=V49)^|mRPULFAPv*N5{&`UH6+yA%%fWot?0#$e_rg4WL~w^AZrGDJ8MO7 z&*1pUxz4foWCVcweD*_?=x;;>VikYefL}1(fp%SE^m5)no?pXEcb*OL0%&2tgQl1z z0evdB!9*gZU$m z+5${wQET4_W&(Pccnx3GI zpOeL|M{j~8Nb{sdf8L11Lu*0FPFTW2udy>y3+2dXx2XNm>(fOA z@)aSH0NEFAhozZhu|X79h?OVhrgEmnRQc|Vh;zSB%G*$c`OHo4_~eth8JbMBRb19G zj3?JqO3B?pw4&JHbJ`PuNvBBD`$iS7sMEw)gT1gniEsZ_jN$%!@m|5~SGQyb=%et= zoZudDE!CdOuULlo<7{5;SnF+-2iG9y>1}}U^i&3f&DXet&^&u99#_3q!cBQ*KW_!^(}t7koyXUJH(~#VL!QP%QFZ8*@!miqONQ?xrS{U6E)T_J)k1YDW#8S z05pT?eix0$8k3ZU0H+$4=NbCc+hY3*&a!E`a_4aOBSgFkE}$Kw|6Ih`#0zS>6HM-K zK_&*+3it)587Zju(xfJIrL@9Y_Gaa$6%SLz&z;Jstb-Jxga&q~tQfTHzz|@CF4hPd z$TFNySE!S2iiO5qH57m6M@rQCs;<>{rwg=hSfH7WEq0sjgMQkv_YgZEHz@nTV##`JWAr&ko69nbx2%p3TU7*jOwB zT`Mr!L{r-Kut;bOp>s1 zJ&McC<{PEBJBG;(oaOY?J#eCR`lcryqWRUOo`!lT`szFlO2nBngzs9XEotDQ+p}5R z$=U`_K~VAIVQ*d~^$ewL4db6G=GdlvwjjAJQTXi3sek`He^1ibAGy1oI$)plp*<4h zGu$<+SVH7W%(p22*$Gfx(W2aMf98jNl=xYMLVO{s+(MuHTAnAetsFT|BQ)f*>21Us zBDvk4mz-dT?sqKTU)j?a0@G>1a?Vm2kN$5FdY+(1&gN3jLkvQx|dOq`}<{ui{@69+n)g2>$UwQlDwMDwaot_ z)vtSbG_Q{+2cO>G4qnlIdvFBzBWk>U7y0xbq&of|q$+sKWt0g8sz#LOg+2ccwrtk- zR!l<7l=L#~3g>Yn*R)WeT18g6nwGxJ)l|?6r{?{c95`Luq_HpSwDsR(&Gseu0MKY= zetx|@A8DH#ES)EJ*%jYM=#_^aU4F!NO5K)(_F^C^>=b^t@>zwN9xYh%ylgo==;rz^ zsggP_sYF9hIlbLqK%TF0k&?*n_SWJqFW}8M$MK>TI-Lop&=#eM$-ZhF$>7sxL=}zh z+&fgw5YZjN(*2)$lq{ZNiwuezlmONs-le(JM~S)t>2JPOR&;GswV2^RzCJl^AroH* zLr8qiz#8d)D=gOEye-5Yrs^NARRF@~|B5VH0S}_>V3j_kj$G7@Ey+Ighc-K&c-|`I zbkromk7&$OMT#THr&rJS>c2L+8WjkD!N?UwzQGs=AECKE0gaCqAn!KswS-$*sA$U5 
z#xrR?)jvC2%QEr5$-MkIFxEe)lSKAf>}359G&$sT>wfV?snS4UC)Z@SG16JX8X52S zL&g|lqKKCH+dCIEQUr*HFIGCSv2Z-tTK+tk-czFE2TMrRmvSI#$HbVpk)sx#6W?R! zR)VpV7y%esdEh=*G&^j{t&x0Hkgs5U$icq8{b6e_m;OZ^tYls7;*Tg*>HlYm44H!#gb^8mzY z&iPIHo`#Tsefx`p&8oJ!F(riRCTDszREG*1*-hS+!r_>WwJo#)+76DIA)Sy&yKMkq z7ylTbY~GbS(^sok<{>Vgm(F#y&|%^gD(6uL|JMF&p+FMLLgQ*FG7mGPjFfq-y_!Fq z=VR_*fo}l4mdWw-lSsS#Kl@PWA%HH>d|9=>3J35A*9!vv?UXh(|v)bGbqXVI9V7&nEcwGHu5^d zg!1Gu8Z*^VQ`IzZD64+Yjpa=5JJu6g)+AABXGPAN5*vu_TdVps!- zPlA>e9+CFp5HX{&q9nG?wB-??#t-*4l%@|)=2V!hLVB&j7abp6A1yQ2|0$jpN{}d_ zn15-*=){!wb?Nzbwl~rFobc6=_v#jXGVk49D+q^1{h`;N8n3iE$!5b(dPH`;Kt3UV z>MLUX(k&X|_Px>*uj_?3fNc8L+^{kLh3sF#D2ZRWer_T@G@kOPd(DL?-g_*>L^oKA z*Ry0Sk80O!qHU>dt?Y$jeb8w# zMsMg=i;><=vhV_!^j)E5iFXmkq6~2SB~4Dp?_*dAK9E=#X#t^6jd`fo-`Ok4y@nVU{ zpyS@$wc306@ObXXU+t?k7JG~+K3cmFufW()gCL8$zP{%!XaMoXeTrg~x43NdNQ^qS zlsgotIbO)D{*0?|ZCQC>q4rEy4NnSN-Ydv{a?)7q?R1tPch!w3$HL)9METHf`5!Wy zd6J}zizya={Gegyk#1d$)~Y~|SRT9$v@r@!Hu8ne-R1`8?f@Om61Wp!f^~;+3Y~I) z5AyBzLXgU3O@Rk}FAs8>g*0tfLz~Km%S`cH+DoWOM8?+MaIKsj!8Ea55kqjHCEo>n z7h4p*^~|blp_Kb}i;J0q|Nmp=k~4G9&`b6Wnbo!DEcCm1Y=i6{iRqMob=GyM1c22= zNC~kDE8M0sTBzL~47xs0;QI7CKwB%mBXBwMBHy^%pnbQj;+H`V!o zH`H89yWJkA2$)suOb8bL<5a^6lSU3L=DJ5Y{)rIfM5_YmVZ``tmupwpE6Qrm($JNw zrZq7@-YsJS_MeE8G(#_P9)f?dt4?qtU5RqR&tk>6;?n(Zs3*FtvcbO zWTKtk_-Mdj@oD{qgV16R+l0iq?!6>nlKr1> zGN~J78gk2ynZnCbfZXc__S8x1F8D`^LeuFNmHU!zHA5;Ooc{+-16ns~q!>yy{9chv z5J_M%5a*VyoeD3r`T69=NR_Wph?u?qszr4Kf4u$OUE4V(^%qTJ{Ip8nj37cyO()me zedb@$wUx7nf%elBUo)3F@xbUZ`*a)@@GYakA*J@0JL?t@dJ@qFV`wr4z$&$VkboUw zA{+^)b|p0`{O)@LV7Q`}C(XuPkY=>StviEL5+-v}2-xaYI3pGSeJ~W~;*rOW$Aou~ z;1}qhy@~6DXP^F@3W*aqr@VmMu>P4>?MjXq*5C(mA~`E)`khHgJ|fYu-FjV+DWd<(Z0@{7Ni+QY%^q`h}pVor%X)7ctQ~7zxaMN+gqtpqC z-%@qZuG#~Ow1U8_ZMC_6*4s}uU#`s4T^xaK9rvGqDE6#$H6tY!^hSkIm@>JRiavZ= z*LH+DOlm6tPQg$vJ`r%Y1cgWxYUNMs;`=C%l&ksY1PXyaa6&>Z9rPz7SmMjks-)Va zKQY6x>dfM$lrTqLK^Y6wgky2r<#-i*jG2L0IFH8+<>Jbo&nSJyUKP<{O3enH1zG(H zxoh?=g=MbA_Mr^Igq<%xIwLoQPG@M%L>EBxgaAth6b2xnZ8b)`MZquX9>zT7@r5n{ 
zU8_@aJ$5vvKxHlVainw4=b?gGt}O)2R=8UeymSxWj4C^7Y}DH%nz>g`cf9`oG}pv ztnA=rH+;#NWT&>Nt?V($jch78WBVnGCcr>&x_j{Wk=!`LuiubP=ewUzi{ApJuZqDJ z7;Sfi>M1H)Yf}r1-|C;9oJIib;TLmD!jZ}@WE$gKgi)m@>hLC+ujis>+JrE zcEo!B0x=@OuC=HwuXy#ruurA+VZkcMTOh3OS8xh7t%0(?a>phi$RR9ad) zGU}aW5}E!{`%h@$jX>tTB>|7wRbS;AFMTtw{T$*S*DT)O!Mf-!PA8W(CT*xleX8^VzuNpEdSsw=5aNgT?q~m z-L*|JR!c^Z@HfF_fHpoI^>h9e);1@f1HPN8R}@p|hb&EBSmEgu!i zlRr5deWw#wo@Jos=n(2Eq`&(wzvwfim0aKA>3$@GV+5ZJ{RWk0*L?Bsbo&Y`=S+-8 zRoEC6+x=mB{WPCA#o9iHKK&bbsS~Tw#0yOix?9#UC{#4TVRu3x<{6_+B+@#^eMs8|`}a0kNQCPqVzI-~uqis#R|`e;QNgRBO#FHt3)+7cS--cQ+zi zgHARXg`HD%)54nn0XEbN&o8}Nx9E`w2f038kekz5?QU_ z4kZ5WPzfjcJvSCxnGcfIsy+^vkOQVb<0H%qQL!#No@Dr-aWa6!&a~-yUxYrZ4+W?D zcwi)Ba@{tk<_{vUJ>1QBBw~GsYNjiIS>)_~ttl}*kHdZIWJoV1l+3K3S&ssumR&^XsQ`@A#UEdcqWcVjh&WQar_s1{t%kbb(mi_J?SY}Evl<{o5aIxYR> zn+(VFQz$Nu8Mk}IH|O4SR4xytxf`#XS1o^Bs>6mNF5#PK!Bfk|Q|3acO;^#=48;)% z`59)T-9FKEpxF_^fC?Iwv`i*|>fzPo@twdZVgq8BW1sueN2`i`a@`icgo#kwJ?hQvuF>oB&DpCND&7Cxz*kjS4mcYee7L=1 z?Nd<9I)$kuh3$+aHQE_Gc+E@p%Q_mg_-yo1z%W+xaDa5v7>?_L6=z~hfo8HpjJng;a`LDOa>Q8{FH{mx!5e}iD z-M?qAfS68@+EGz5zqbFuwd}N5#DDq)o#)j^V$evgW@-B8Wpvn9UtYF4-(Dv>8Yd38 z?>vS5To4E@yf2rH2m6B#dPyUC{FeSr8q&A#vDJCG?q2jQ-9aOK9mMQNc-jF?@cR{A zEgHQ&y@`H!Pqc(}=EQ!eu&E1pyL;-@iw8^$uI9fDR=Hu`c!y3HI4QI|nERunbwH(p zE4p)iC>>PVOqmEh&j4uC{~USlCG>Jc;(KEu?QwoJ5qfT3wh3yzSeUhJOa9~MwQc?$ z5{jmou8kA`y)Se1jc5&#W$)m5rsM#553gp0c2C!Uh z>QoI>|y)I;?<| zovjQVci(R5ZZ7mHGGAiyzVId)6_;(fE`lMLizvlTGRaBTtBjTkjztUo=C z>z0DtL0_anNWaLk2S zblF(csF?na^Fl|;rcWDnF8Pnm*2kAhFY|Y|A81Rpmtrau9FXS*N{4Ui_6uR{4|hea ze-xrtN>MrIl$3qAV8j`vW_p+09}ewOS^YL$rz9Fsw*Pn&+`8P79m1?WOdD9GNaA2> zX?mt2r8u?vt=;SvaYYxuy&kSws%`I4*QJ zR>%^VKi!44{ZM%AkZDXu7q}x|C zwbj!2_+H*FksE8UH>LhHoRZRxda-iPqX*W^+!ky0A(_x+a$~@u`QXqoY zYKKNx=8RtD!SC1;TFA44sj#PH#?yGaUE`T|vbE%YuHy0{uQ^<=`?NKnqf933Hs?@vy()_(= zYt6Hn630y+>I)Onlyq6S{%C3(OLQilp>)Qyu}yqda~T7%nB@vuC=}K4$<;}ZhEJ1H z6_M+ZlzZ2_2bu-PBP&7+j!HEiHd;EG94~m8W8I_*N;Sj^0{!HdQQ3n2HLT^HS8`cQ zW}Xj(rA^@yvgEW*=b>Yclh{7um0we{2UngY30{vq9g?S_k#S%elMkYymu6yGHjNOb 
z@yo)W4a>S{*I!gzQ6Kq`>txwB%?38q#o7OiOt3|_7B@ZMFXxxm)&Q^gORcjfa9^r7 z)^n|$C@qHb=mj)?xbM(>Oiip7b4tZLvOuX_q_N(MPV}$BOUdpwG;dgEp1S^Er9}t$ z5%DXIw*jWguwD6*c`kEo5)7z;N_LJ|uaBdMTgx;q>U}>+*NnkJN>F(W;eIC>;Z23g zlfwmB=^KirqG-G63wDbP%~{*sjd!q+vsr}lPWK{Y?tM%Bk}}^aJ`*KQQ~)2vAw#@C zID%4?Pied)O3fhbr!4xs$r4<4s~qfJ=mZF~PQ}Go`^I=D@ zsVT9Oft)q2=0kWB0&-Gus3sGZZhNAkWW_HlQxZ+9oHVIWRr}uKBrEUA^{5`c4iooa z(NN(p)1S}IN0}kto_+>t63xx3sMzdS?dmL5-bLdm;OJr{Q{kSi@85cjIi@}EdXTOh z8wy-!X-^z*DjHtIW&nNw`U#Kvfz0aqk3cx-HfZTwU59GA|gt3*u8^i1N&btn-T80H~)*~z6R|kWokuC2gRuOTD zv-9U3#4l+a%w;+l@sl?cqYIlEcIPN0D2qeVyG0iI9X>UmpjcF5w=>E;%@!xc}s{1MJp45oj^(1kG8grq3GTS1|Kisz1e zQ%dRWE}>AX*i)EmQ<}g!(kRJotX|D#4Rku_)2^b=yU8pgj$(_)U(FjXq& zt{&pQyU63)RYp-BR5m^|ci2l>EH$S|G~{p|>8;7@0Czi(ye5Xz!&LmF9*OpSb*OK7 zD9zaE+QJD_EBB|e=wdH!Qf*KAY75V4TN84iP<~`+AEyI+5~1j|ritEVj7mgAQ#k@@ zn*uYqV*SW5|FhodpRl41`7wjq3cXe|x4RgEtqVqiQWE8R-~z9>Qv1isVVpZNw5_uM z%u2C4fV5HSQGMjzi0q#5%nts#O<&r>c?chbOrP4aDGlFO&|3jmqjDtJnnyf3T-`{- zM5f+SHn?prF13kd%!y0ey1C?T>@G$-tX0ViLvC?Wd0MZTC3ObNUy3u9Bpz8za$B9cWSIfL&=4sZ}C>M}pk6r%^2!7=`^q7-N<389+JRlcSp+j-a4)MTY z;s9V%E5Q;P28GNm37X-z$lwsS%^O=OJtNx1bcJ)J_DS5&J2uuT&xx6>*R)c6*ta^_ z?>{DsxuVTp88?d^cO(u&OYf;9DCy;ZAN@R7CKRC_tQ8B zJh5|K(b6K*V^|4`-0tKDfS5jQPGzi!-5qVq7GD0q6{T%B<$J^v; z>_?Kuv^iPpj!^^ASziu8hSM-R|9kB{`4b#Wl_pIOL7+P`7Bi4s&ns=Ic11kfwD(~% z@rX)xVj_tq;dDz$70-wYHHp(?S&?+IG zMZdAutY&wfMGMMEnO+|25BS2NZH<$CWPA}QVaRcDJKF-*!7;N~CIthh~_p2imd^iJ1I*V;F`ZehWasK3Fn^%NFj4@%U+BfM9txlR$7M_LGX$x5W z8G96;v>X^$sG2ba(rSPYAxlWIXUo{VGg8RM#RBw>9=J}}EPm3I5p|*C778v+nk1SO zPFVvj0wH%sfC{6Mz+}fAw;yjj3a@KZf4j~iO9Mrj()hq zb`}y#RN61=KawjFkx60hlE)K~Spm-JE$zP*x*z2M+Z6)^x9T)HaDSX`#`cd3qe7)O zn&{dN1wHQ^N~gl9{nj$m6I$r+z9o-mm;~@J?>eJfv)AS+W^a1IF;mJdKdOW1VlEkUIJ(-> z@bt~|tr(re*(VABEQQDF$X(y-ingE5^FaU@xc*L(Oyo{1QB2dWY*Trq5X^ETzIEx9bG3|&L)%%=d%*slk@m!5u6MGzstn(xUT9vb=v)1cGH)n zc3$hY`(uGwN5g6XVo*-?BHMdPbISMYylY&oJg)r5LPf4mYHfWYz3q`uSuQ5Ca8QUm zLn_mV+6lmN^D%#?v}q~3bG;unJIqF#&Cys&sl+p_wI6ny9-YH8qoy`LC+abN{bAGw 
zvvp^CwiQ>GGix_u+iZ2c%{!m%Yb=ZdU9J&{_}+}9x~7v4+JaB-NOps-uC$QGI!}Lx zgN$}|b#)_exkvJDIbR!Wg>1iBTbN?4xS=JQ!##liG7h@1Wl=hGSJY{pv*g@EgyGZa zRLfPKqZ-1_HBXA{Z8~CxmQ=&*C{u)-^#Pl}&dgtR80z-><3AIn;3){j1gB91>4!rJ zVK|(L>V-s}t{;H6XZpYDC-hnl&eXexKG+dnD0w8GUeeB#dD#{-mco>uWmTWTn`rQA zL;<4X=9D3t4&H$CN|JQWaZK`$>k5cqLVQdrk`Wj3WP4s=anUo688`bH{d@)`O`L_6 zfnyH}5u%%r@L-F(O0Q}Pu5-BVW6c)|y=i;B@QM`*I0-ue8IpP7%b9dW0pv2!m?_Ei z(5IUJ!`OQVHPyZS-lRcjp*QJWdXwIzOO@UU)j|s(MT$v4dheh#DGG=fR60WFNR!@6 z5UC;t1*Gbm`*%O*JkLAtIWzAcnaS+zJ$tQLYhU@U&)V1ZwJ3!<<&KHM34~_BA{DRoQE{g>L1>GRgK=ITQ5d_?N~C zweD-4p8RXVrSWwZLAT%44cz9HF599Lv1(no{c$>V@WN-~&)!Hw`*`wFasApWVdt%! z+*(JuNnHjCoz_<}_X*QC;+X#sOdiW#%vs0JA7}a>Y873r5QIo ze(k_!o6`FC;TJL9@`~YIzixM4!~K+dvOnz$R@=T$bUU!cyBs|+dY*lAJv2?w;e?Zr zB6%^ib#Ob|iX=30-=Sl+QzcjXm^=L}mScy;`)duRS6P^_SbdSWcnus=EJTVlsdcyO zel3a|LViVfeWYAB3xACIb0qTjSR>Lr{G-Olk3M2$(zLp1^40dzEt+#U4N#$I^_oT+)Ex_GAVaRsYdPU^S)4tq!f{pwPi(^s3Xnn*057bUtk5N zzO(aq8AF=oi!SslC3L4Q*1lB|l{+_}%^nS4Q+GFad zE++IQt-wi{4)zf@7f^US;|oijdB#*z@4oHN6F=YTeG#%c_fXVe&1*7ZBNIq{1qxno zhW2kf-SsvS2RY{(KBwSAma18H-rTDz&yT0|Mw`klY?QM`h}Ay#%mig_G*hLl>$@m^`aK}`6!qV5t;^OP{gsB z+@L~`Gb(>T7Y#dkhMIlLf5qJ$UCdap73v~iD?a|>2CEV2F$)7v4AFz24C|43JyYz( zvH_cVd(jv%HcuA3KrXZM+TJ|W!S%*Scpx6|wsV#iA}W!}p<+%2Y*1}0cu-hu0{ev8 zOAo9lk*Vp^bYeyJEr7L9h2PWRbRi5FMu6p~9!{NEmeQ--nSE!Enq&9vhf9;Q)aMB$ zsC?=S8b6x6Jeo$60R1-yy?o%J^~sK;K+{0rw!{rbTWg2ip}7T!Q9(vV>$hWdXpY;# z)4YP&YoY~t<(v&cGtb zahea0RC0^^rJMtlj-=Ujy9IXm^pa0&nFw3=a034q1g!fuC0qIjUkIf34jG2n8Oe<$J{!EH;b1SxnC#Wq^n_paj${~z z0TIP@wc#g~wA{yJXf}y*soYdrfzMi7Y!ZeaNcFbZ>y-lodtyUcbpCinPCR6LRhsnt zmDbiUUK}tNM|4lu&70vzB<;lbwRTKkE*UVT58eI?Jm|j_7BWO)#edU<3TRbn>Bzqv z14L2-RagU(P}lKe_HT2VB8hG1G3*oonq{y68%Ec%->2i%e~Y8r&8BtIEAajxdOdhwSyxGzJ~?Da3??)0k5- z*kEjDIjsT6JIBGnATM_jV+yQIU);BeMBWwR(QcA3St@RdrPxEznB6T?lkyS_g#)*# zzCMYra!JsSVIxz^O*oWe=lcA*m?pEpv;+>I{?6cePvex5N7s0gUS$g=`ehRsyG`nx zmGFd$(zLbIrVC@>L@xfLE=)?YCwE6yvHM#x<-miQM}@Bjl?w_%`7d!vR3S7E6OmS# zS?dq)a9NUVrP>0S!yGjl 
z6kB-np8S-iI+iw4jYTaas;P7*AW!otwH+(_Q@ST5SPP6H+PNZwCDhdZi-MSxR&!!Y z0Q~w8#dRE9IV&JZn>1be*YmQ4iNYMasZ!2vIAUPw4*_436y_Y`_!l34-%7*xU zGrnBS@~1`(J__()YNSeC@J}<%+U(=TAB0NA?*#MADwt*2bWhn{&Zv>i3z#iG^mLiL zjh#$ejD;^OS=||VkAlJ@Y;SiAcvY`+03CJ1?S+qrut z!9#P7?0)zpo2|`u0)N?H6Kc|1AA5S&=>a4HBce$tP%;Ii0``cxSphB>C(glZNfGI8 z=$=60M|`5dstvBPZ9QOg1KKH)X=0u~&u`q~F2fc#)eLx9ocXn*!f2#s6yk&azCz?kX<2@C&ZvnWN3F6o=4N&O+!7yW-lHp5H{lh z6h!BKG|8FZ1TSEdEbrz{zM60Ph?*s&+gI^$G4SaZaDZXc`tuXUTmh7|g+}@vLs`Q^ zshJvKK(P4;6u!tMFBC9Ed}nmm-*hlbvm*6 zlob>_WMOHbnYa&d0e0Ga?`n!0NWIfK2`_togQ76oe7&rdVOGTZ3(;24>ckgV1(?9-l7img7a_~p6-ti@eFU`v z?yR!P$+9>rnP@*#gU@HKcq%%b&TWssXXRr&*)%a$)(;eMOj`85d`@~0Nlm=4rCdf* zN=X4|{@zE~O$eP!V?aTxlg0TnpgieJ)a4?PN>#2y%v^nbZ}lT+%c91xU8daQ_cnf% zYLV@BA!t))TS=rSkYskO*&=#Jp52A>H-^V>b5d%Lm z?Wio(OPZhxr#Y!DdW%!!3S06f75UsZ;xzBDP4}EMkY9iI>OEMCG63<_gqz6vnyB`G zBST@Ud3K7bHe)~h)G`+QyPwptSd<;@>5#cyyG%)j`AmmPDq)RyO4JrfC-RG3vP)?P zLCj%%0~18qa*=%1RtAaU(vH>_v5PZ6>6McDw2MoEZ`HogT_r`595{oS=LcS#r;ZAF zH|ootknH4BeTMwbWWpdX0b$&5ySdbNHA*C^a9L=MA-01Wc)t1#J9wTil=B&oZ(&J- zz;C$51e<$2WaRPH=Q>Dv+Ig!?SyJ-vP**mvWl9or5WEOxs?j&{Tq5_u1@OBUX~_%1 z`3Mh!4{f!8GSWH72U6GQ`v+C549Ex5&JH)6wzwVLXksA8+DaS-cqV|AnSm>5z%izx zmQO!;`c(_~hj{bg&%ndeF(~YFaQFw-i5OGtO2c%wKvBixHwSAx;ICH`MND2_?__C` z7nnW3GzFxNrh}~v_|~OdcHNj`MuzlXXC;(rB1!s9k@}RRj4Pj1d)%!v)|#7l{TjH-I!S^cre82tRhaXH?vtsk-^Q!;9QuJs@StesHBfd z?=iMwda{qo>b6EZGc!78yH$>3#FCF`&z=>d7hr<6N&*U6c9Uu&-lmcjBwp0{A_|`F zIlR?Gmhnq6y?I9KuKT`;Sz-CcTskCA99LJL#m4+hI$oczL{cbgt;9q7fuJ;<30b`0 zw&^f2MI&I_BB)0ny+jHym8!y$4XGrC9u>V}W@o11qp+bgkI@)Jo9%tFA((G`7Sb~z zqGF_InM&G_eaW%C&&DALGtPhNIz#lOSftzo*-W2^8iOs&ahO|rYS@tZ3(pI}sVIS` za&X8Dg4TVr8&>{mixQeliw!5+^&6p3X~0q!G~VleB|*a*a-U8zyrvTy?hwNO_;m}) z{07riL!_|PT4{CP^&uz5P#>!XKox)B#`%!CgB5O?_BL5(bMu)ahNzdHq^=uq$3!Eu zP0h{jn(W#Mb@#Xy#L=FRI+EDUg;dfr+WJ>v0-#D7N6<-@i9FSAgOY!kQcmxyUd0vnfZiEc|B9X%V+L<}GB zCS1Yg-EB>#xbL>`)uX3*L&;Ud0Krh?A0n@l12P%PM|Iq5EtI;c>@%>ZmO2B(?Fw=K z#KNE{YP=l~{*3`>VJ|1%6_1@sj#W+rd2zE6&F4N|B#(4_o9{Vo?CC(r6t;y!B@=Fj 
zG&WtnbtMa6l`M+q7qp&GDUw;{pI6a}`f6#`lod)I;4HR4EnCu5$x> zBLK;GgZ&@G()alYJ|~FCsVVW=)leiQuv!n8iLnd4L#`&?Y$!pKr`u*(AV+k{!U>#q z#%+dw6eT}UVw{NiOdN;uyBVN`AtiDAcu}cDE``X(S`9vtBQkxkHCNX7ZOg<$3B2ON zwN}xFK*B}qVY&BAuKP)Xq=sSmug#<38{s9Qbn24Nqj+ikS z0p5!=l@OIQ=qdU|{#fqjPJ$v5BP^V$HA3}PLeAFr;oe`OX-QhLY&(XLdJcu~txJZ3UpAA0y>NpFH>Tnr7DaevR$OoI`GoCCfzEI4;!DdhAF|0VKnWXqLj6Dqs`AiR(4NaG1>ASMgbA4Q=57J113yUL>!-Th&`@gjPr zv2XtTj{$6v&d@CPZZCJ;s?{#NMQvnHBk4?&z1{D2O>9lvVvC)z#qfWNJdVj`B*Cdd zXpKYoIxBn5ynEjD+XIlNHXrlG2lyb#1CbElU!NSYnn+&WJCLYZOkPo)iZb6BVGMBr zoZEC%a|6V$KlmvMmC^kqd#S5H{?1xsI(ZvgvIjo!1ysyupuShMO_80#ZK6L6xK=_q zz)v0eYJtaMWxq%}7=E)2#!+jyDuR?~8PQV+YO`OwfbIos{TXcc8?AR|kjYnTgWn&? zwgENNNV9tlL(CqXK``5GYPO2~TWH8dq7EHCxGhTjN9Z`plskSZTQe@gRxZqKOz#7e# zs~b~IxF==ng%eYZV1t*14APF3171R`iM*d}kQQ7E`4zw+ZT6#TG}gK+A{Z1;T)p3|9w` zdf`NRNv2m0TONgKxDNn~Cnezf5J9CI^czF_-j#TOF@G7gu=VBGy|XZ%j~;lj#X2j^ ztVhGS3C%N0ece(TMT8hAb1T~TEONi-ZuAfVNLbONfNP6WlF3+(i+`DZ+%!Z;y1J*L z&Qs^!+CQ)I?0d`)L7ukfQ%?ly1dZm%RVFRoH*z#U%NElLV4sqb7Pgb0a#2T6?9kWr zQDoFT;QmSe8me6fsWp5d>x1c)*#-Ut1~C`t*&ne=cHGy(-jpFYNTojoW^*->w&_#L z1AA@C?h;_c1o}yQBLrP2)wLsDpfyh?*<{Sxe|%0&bMwm=ByH=2(?r>GUFRQ90TN6Nn!Fh7xslG<(p<-)xq=2+^I@|VvTa} z(kX%(-l^HqY~JW|a}eSJxOu`k-(gk4$N&MXF_M%DEf8#lfqy!2D3g4yXP%ipW}UeQRr5*a(Pd zAzgt{?$LNP$~_Sod>@m33lI{@fbnO=!pStCcpviZxcfHCbtiWojNC&7+pTs}Q#TwR znY(ub+CM>OnFZwT-+R5u2fcO$4I*~)z=_oDz|+^Xa3&Ng-jf~Pd5XLReZ&bi`H)LC zPGveCl=mi4Q}kQi1J6~%+rbMOjYmdoh&TshT(u$0WZlgg_Zwf`67czmq!et)`F{3o zQ7b>P^6zOs@q7E%5@q9msIBzB>@kd3!y$gC^f68-V7{T>_?eBCvhtbt5p>VCtbJ$?UkrTw61h-slLgVPci=X3{u~(rTTu zjXq;eBkcZ!*6caXdmZ#z;_WRkCzEY7zb8NCL{D4Cr^l1F4U%GHK7Zp-hA_sOh(NXd zfrNUk4|o5f+4GTr^wG?t$2Wwb&D_2nD4SX#Og5FR!M8n9FoX>Z3rk}D@$ShV)Gy-K zu$If1&!;-MloF-sx8Fx)p3sEwX~vUY{F!+1^+AlM;Eli-|F%Hl61B&1?EINdDTTe7 zHMeju0Nyp*H1iWv{yP_?R^oIDV`#YWTU0uj4(P~2LKK8_7FfC!TXMsPFyD2dMgfd$ z-4w)h;vO12YcB;Qn;CStM!_cL#LSepHCiZOFJ8OSh?CmxuMi@rZ}i&d=RJO z+}(axVhN38qqkCY>HaMG@imO*S?B+t*uh!e4>nX-PI5`B@lFWrg}j(0UpqVjta7J4 
zBR)X(FY3~ZsD6S1@X@mzV0Fl9t`cLVSj*#!*}pkz@iD13VbF+wY##J=Z2}n5MGLH? z6xBWPM{nK=7uwd1#SV+b3*a#Xw>qT8-BdCHvq9%AIjK>hMuUJUr#pBr68 zFoI%A$x0&%e|)on@ysKJUA(PfzP!yY!~p*lvaF(KuXM2phJ7rFj97q2&pJ7+^N*kC zxH2e`KS}y>_0s!Z!Ydp6nv>H!%SKMQk1zn^I3D`C9oV^FUk|KOLyv%0z{smbCxAm3 z{BdjBYY~~31&J>2o{FynF6lu{xghIr?_#(m+g>Y=39#Ysg$B^# z^R>M1-7i2zRf|f3`BIz|>ja8g_HL7_5N`MNR230ygDYKSn7BDQWz01ay9I163S}y<=}`%5@XRg}_?$g(34#evuT(+XjdIcG%iE zF?`c`Ur>lup2_ltZXIc<|IGND<~0#Nsb*zzjQ;~D43NtdNx|jUk)!0J!6Q1jb-Fm_G^{PIJQ zOARZ~;;q;w+73BHj14YyJ_T~p^?KUHR=e?r8xw>um|2_46Q|;eJe^w$bFufe^h9WL z5zxwjH36;oI*_*PRkhqg3$jAnsx?-}oDW!lj+=?1o_6V!a@QNN5Py#6mZ9q=q$QhW zHFpD(3v1>%oCx$ojJ5%j`th{7zeiq_yolCFLKK69t$rsToIc|*$hzR$d~nNEySXtl zjuAw=PrrIT|SZeggt5}3ova< zv}G%BDTeF7&$wyuOJEgOim$|oQahaG@=Qos`RU`D{F_Y_9>8#k{In(tRxLr&j^vkq zO0CBBOij07+OF2 ziad*T_NeKnW&S}Eg&mSuw{q(m7TPGBF&X4-f%mk8K(phYMRNI<#L@4=bOzUpl-FEA zLH#X=bY8vcTMJbzEsuL#+j!(Tvp^PgCQzd9Jigci^S6W*#Tq)m)32TmR*>mD^+=k0 z;p9fkK?gQREBoc>gPCK@9&3Y6PR3 znbm{n;eh861Oz8o%0i^!dBLiX^CBTKzFl3E`UhXG0KqT)#q~EJ7IxRYvQZ|DTgK;0 zqZVb+B6nkE@UO&(6uASc*Q9Z%XCsjn;zS@4AC|kR6~h^UQWs@ORwZ%l^ugS>#7nN{ z>wxv`L4QN^u*h!|lVT#p{FNteEIu5;b*fL=ZOj{V4Ktib1<>ksvQW17CiQ%VI^w?6 zL#}raDNkXI|V_g+lm?Ur6bKEdPgAlObar}Cc_SGf|B!&l9zW2fw+XXqdp z$)5liiP3k1keiS4cxdqbifm_tifm>^s-?Ze5Zgs-OQD^A58(-i@z7k-IBar|0-Qi# z4K=aBE5Sm{;3q}BrH=!QWAneYP;evvD8#G(x~;a9fj0ogCb#jfzxY_}O6Ee+Pl%+(BUf`G67eJ3RO=Y_PLUHt`JN z_3;gCO|t0@0&N+l6i+@Xx1P@D2qx~Jl~vrPW;a2Cc&C0oiNA#3L}pCtWqxb35TI&Q zsSi>>+&P^lwELg76S@|byb?JCydv~?v_5D2->%@&gA{OtMoF5gmG%OVMZE@Dy~Js4 zX6&>>?4$oQJE{pNVtE@GofP_Cqpqm^Li(DLZP-k^vo{LHln|Qru%s8@Qq+qkT#Xcp zQ72qP@_!!VuL10C|D5&Deg4-K|9pom<-%#tdhKJd|FkCHnUunXahYa)r1=KQP8^S4 zgqKqvVm41#{kIAld|W&ZVmgBswmJuM$V2>6Qf4@~0#Q-|2K2xPVtu3r0$TkM$1xcd zez~p(Y*HO1*Y5z6U_ip)?P|G-G);Lp=V%-cP3#Tj`?a*@x#FyKxBf$c7F$T3J^!z` z&1T=5M_mxkf5zj>M?fU?pp@JtXI#=FwgO(Tt=!-syy>wknj6an6nY@*8t|hf7)aKd zTC3bpdZ0d`x!Q$sh#2inN3+)#K1D^cWQqA;QtfFfFa9Y;CDv! 
zWxQTGV_E(U;IBTaj8zxso!r`t2zN#2@)>=hQu|bLop4?xPw#U4!+3}T&oV+@C75#b z_w!QM@xq*k?=$Pa*9?=Q8EOlH>+#~i8^;;uz{E-hbDVd+$8@i{VVluiRi=&CVRO&bP*Qou@$C8b(#Y17uMWf@ex&)pxDO9Gk|HM`!b^ zS{KSAYNv}<;NacPDBlnq_K!v!18N>{gW%N`v+9e~LmAwB!C}OnmO~?%|8|5gx@Ma= zFouoPcBdyP4?hAYnt)0T*DO5<%B%+>u%#5JB5!pf7QHSp|e^s+9uur~YOu zV6^$vwQ=T`onb8-%d>{?e5mnbfR(3he+vtVG*o7)58KTE@#_4YcA{ z1bJWg=NE1Lt6-SQi!4Sz#H)$*?l;RiWdg_s;yq220~^i52XUa6Cat_Zk$-&>aTL#V z^_uk@ts%9u^g&a(TCQN9;2dW3AH1ut0L_l~6>Zqoq~5TAq9hE)j1I*qCWT=tn7wO8 zV}{X}FQawllo9a8-!kN&a~%%Tlqi3Vdk#%0)=-OXYdZpbojfy~PL#v6@HX``C30MVO-CGpnPGwUnk%GEM8E@1wm zgzabQ*hfSwN-gc=IF-TC_jKI19Ud)Lc)v(pq}W_(bx@ZD#_&d07Jj{Nbvq>wO7E6j zYL4Q0w@V!IGX3GbZnH zd53Y3Y$DafMwM|st-nHc3=?`qxp_x5po|ObB1C=f4pOTiRL z9%}A`^NT>;Xwk7sk40A@PghgLfR|~o4i-bI$3{suu}i-huFf$7AjUblkt$Bh)kz&$ zb~aexG**YGxwXhl^Igcs)<-9`XUOf_z5ovw!pML>2gx%`4Wn(cQI@ul^Npzmc)DeB(nT5u0OI z_+XyYFz4KY85U3#+z9XUkV5L`^F#gU?uo~HjPn4TB+g58O-Rn8z6=>2+FpQ zyTi?;@z6-YiOmdomn&k{hI%>6ScCS0eNGBu;cm=yFHM{U?a-_t^PIK1$BVWB(IpA` zp+(ek(#!mODdlrQ!}mLA4ij1>h$1i)KjJ>;58OQ9v)FY7?avr_K<;+7w?pwd_PA!; z#cD&0XWY9dWrU#OEc6RSgWY`5z7ZMVEkq4ES;RVhK_Mwqc(DE!Po<2Znm{kAg;+{r$xfn=6d^f<>?dK&jU%a7^qe_*MQ%F2zd%HI}v zYTYY2h$PWf4!$H0$EqeZ@)f6Z!Za8}`T+^g-#qVKV<~;W;6m@5$4V8*Q{l<3 zVT{+^*C=H(v@!tf{A{*FOhgoTt@!ivi|5U$(Ea#HLee`qA_BKk&E*#T1gS8jvbsJ#J94Z8QIOu!3B5)X9*8UH384 zQyy<$hNpbjL~38a>t9shUyy(GB{+NpoP-a*YD-WRG`t`F zn)hQ3ww%|h7EP6jdp-ne+SB_xdOXkRHthcf$E^Dc2l(~fp`M9f0GIEFKl6x6bFit@WP{5NEK3Ngggi9=tDK zyR;0#r3d(1{rdQ1@By=~luQy)7jOC@$Y*j2T~E^NB3gks)*}%QKqX>x=HN&XV4B6; zrw=^Mx-UgRD|RdjgAPp*D63nzcRiHe&e09f6{Ke+x1wbL>c~wS>7QCigB9cc*k@z> z)(y~+91vp+{}S{L>lC{8t!>}Tkcmw}mwBSZ@O@HjVy>H?TX`Xk8PmZC8SbpdwrQ0S zbZ8;D3p~NI-SLfQP_3ELL_!wMAJ7$e+D;D- zZ1I!@V-}ePKKQ<-wJ!opKlPO`#?M0w5<9!O5xy@u5HCp8r5+6b{Mn`w9v02R1Itrh z#Vh%z{jlG*;GdvPOrX&$)z zweIwDlce}_n>0Z)$Og=-9z+O;d@C5uUWKcYE(}4g@Jn2u~AT4a0%w)&4CrV zN7!$MNDXy8Z`u@N)ik2~bfN)kVIq>#)CpQviHez-5=SXDTntgXu-DC?Vj-zKw-7o| zN2ec&kzH$vSYfao#6>cHawJJ+APcQvshv?of0y{*O?@*gcKPCTJFy;A>QjLoSp@DmVqYIq?&Q 
z{|QnmJJZ4i{^4{tW2?T8i;6BE_$2?1&kS>5ShQC#=XL4ehL#F32#J+FqrJ^DU`yjz z5rU;aS~nBpJ>>uQ8__1ihkWmc-EQIZ)~zDd^?OmaHTK;TZ2BM zc@NA}p8$*EEsVj>MB;J%{OSn$8&-o!9$3bY{UH`Wx8%g z+=I`7N{i>C#Wu8iD3-(w5l%_qX@7x>pTZ8;ubNm9lV%^g>9pCQD0eyu1!j&`r!q^n%2J?toZoQs02zF$t(Fvwmkl$Q!7e^&O|NgLg zxOB|;DQk`}Qw!7)qe54clOWvMxq*Go-;v_zroxWHPiNu3l>IhX_i(?~8%PI9tx<~! zl{`<0$nyG5F52x6;U)8#A#Z3WBa+=YaslcRBQcPdP2L%CkkjI-n_m5gX|>w}})1}r-*zw|I^2c#}70-Glrh5ZvSnSAZzdu4Arf9fTUklvIZ*OARSgbQL^I9YIqsfpTEz?Kj6^!w{ zUR1yg64Z-?I7-rzmn%HMOpB|O;QKuqD_k7HO6v1o0q0726pr1~K}O9a|LKwW@#P-J zf!Iko?%P5b?rj%6dcj2-&bo3Jup*Um5Ao1tF_K67=2bc4^FlyTuQ<{Q`1Y4PsQ(ua zf~Q3B?G^N(^9!L;ba46@txpw=L$XGu!yYx+C(I75<~ZnZjCeH2Z0j9H3m$}% z4xUkh>AZU*g~;+RnC^#6qn@jVM&Sn7ARXSr(G7P;0G)227H=~A!yHg(80$$PK8<{N z@~QlGwuCR6zN){+7o@cA#TcLgD}vsn`(;npL0(b)Hhnb({Q~mc;1l`Uc}miaqDAE> z^PQaceMCzUF6hD}ZQL3xtm_StDF|JE^KpVoDfV>=P=a@|AYLJX$a6VHHth4ZJoV|P z0M>>38kaX3`Y3fZ{wCFjLgcSbn9?_tqg+V~94R0j;WQL4pCWZME-H~b!T1y62KAHJ z>pn(e@?(f|G0~6ILLH4~-tJ)L>dC1$i(%CgjI;DgZL^I_tszeBGq0#~X2lFifOYJ7 z51~?}a4EW?mT7?K#Am>|0gxt)q(lVc5~yyWKN@-Om-7_1$Yr!^1KjEw3s&+d(N;ro zYX3Q6YT)>yB4WPKYZE8{{{u*$iw^CR*%?nq7jrNcOh03_ zeYf33d!WcjC2A|oew&-+lu(H^_t0XWgzvU&^hgg3J=Sf*R@eF$Jl%ty{u-yqUclTv zb{NjpBBciy+(tJXU62f*+g}oeR;X~1#Wvq9?TJ9PXuP?C=LmN_^VV1W`1Y%`?szTe zC#f?jTj}l)`~P78pGA88I5sG@7^el@_b-fB#WN>jKeFQ;iy`wGE&T2i;`+kz`KuDo*CVaRK@=`Rf$9X$PkchRig?!;bAr+ zO-3CX{FOk$uRbFG@KQK1i%u|w1p}E=3U#nAUZ&E(%XAH!y*9fWI3s}?^AC4&Q$~IK za8)3{9L!MmX@M!GfYfAVkBD)12layw4ebDhzPtWo&~Wm+Z!}cy2S4TLn{)XhF8R6) zjrh^*ArgE{f-zDZnbV^tGWYB8WBk+fdu4y^{N!IrAE~j)kH~cke+k5=<4CgCF9rFp zc<7>ty8H|z5}>2$I9?OH*f_`}GPybYT-fp4Boc6Mbk$!0F)xix zk#_FlNU2tjgDs-&pKjy&$?{kR#aOOaz~ZgWJ`sC3l*MINB)!7B4424o3?0?yjnqr3 zzkX-P_Kl|4yK5?C<5`Q=C93mf-An*dU%?XzrSAek-ao5CSZEYd^Wc@TJifrg7gs$& z^vS*GwJFTatRL{cu;_7Jg4yU}?6|HkBT0PilMlBqhXEMbrX=i!^-4N`2l175t(^!g zmkC2bn9OMuE`Yzs=O90|-k<#Nejm@^CkaH-^IZQP{)=6z`4;xOvbHxB($R^)u(D;T zwyfF%)xMff$)lOrTP$dEm6Y+5Oi{xo`>wnlDPENFEqf9oEBIcw_=i1pV;pRJAsyl{ zxyFF>O&LA$eU-J~M?{ff??*_*bm2xn-P 
zLL+xIh6=g!)laG_npoJ0(!Kog{bcf2m<8h#*X3ToN7XAF!q8ywUYa9z(}}VQ`E9_B!_p_)$E8rH^NoWx>am7q*8dFrS zk?~;hIl894#P!~vel)V3b@Ne~%77c2{vd7W_c!u5j4?c>D7@5z2~DoXIIeI83EEDF zYp=yOZ-62$H`o%BufN=yuqef@(hbD^@pCX{bBMKs|1fjugx^?bdXUetw9hl~PA>aa zU2QF9=}9JV6hip(W8xra1#ZvW=*ZslvBNENlzd>dbrW@gCDKw`#FJ(M%oO#0@lS{y z*X>GZM_Nzh)=yog<~{g%bY3sx$Ex7|!WN;sJY2@1X@^hpU5K4Ce~o%}R6Rd9`SQX2 zaUxN{t!OF*&F7A$)bs=qCCQkI$OwMWis{5CRxg2<4yi`JNY@6z-dLEG>dn; zYNTN0ah&d1ftW`P5+0Ot^posch5emufxb(g@<2RgtgUB31g9?X_Mqt7IG=h@v!2Lp zCrJOJAu_W~>`v1% z9&UIy9#wNF6SMP(&m1^5F)-bejRm-no_TgtHNGBxzP^r&ig?&AJ%}IS#LO|7K+(l_ zE9T!=R)kbSzAd(YKT{P57NT$UAKdJ&mRPL}Y*4V|A2LP-m~Zy`Yl^=s{JCIs4}bpp zmL!Dnx(CVL;v%(j$OM={7-q^Bc|5#DzH?9ev&P3sO!`j^RmZQMr^@xJ-T2AU6v>?k zsuE1mjv({(kJ=hV&!@j#_tlqmzXsM*;a*X8O+R;e(tJkQ<@c$dCNoJrI$fc=S%FI7 zNX*MPlSg)0;6y+zh{YzJimfFWw&FnS9BliW$l(x4wB`kJ2?ehIV5Q&aYzc#eMkiw_ zMLd;Jal-PIiocmSzBvHjLTB`d)6~(SegJ=U%Bx>ZkT60(r7;6sXL*poI%rvH!&sUM zpgB#5iI+jkz>=ZT*7HGXgf5rI7)2|*caHM^9-EQ>2IpzAzh^fQPjK#U@I)V*zAEC> z*FRioL>*D2Oi~^WfLaQBWzIg!i>W(;2Xl}6>V6t6Lc%x()~kodji`|KZ+NLHs36%w%mmNQ>vBIc#`JJvpcmmH>tIl@x@(RVH0=U>X( zt}b5k|04|+XIOEbd^dhklhBm;ASvDx|5&^t?x6*SDPZ2x7!tM?K5Q=RU4_Hno(mO& zz5N0ote`Kc<72*?(U@88{UvnA#gD3&k=Rg@e5IA5h1>uXALf;TueU=(OFE1vy zd`^GgD+naP4#kAn)^hhnM)w;bz8pHz{%>7i+Is#X_|!pcrUn`C-?HGz?f)$c)Gs(r zewSL~(oB@%oTxhgepr#P`UR@^YcV*I{`GY_E_h-U(@XOFH|NIYNTE?4rN?R~d3iYI z&88J@o3TKz&>+5>4eb!~RwP}+@-Z!5Nvjj}>tnkXymx69ch}-7FyK3r?VtjTxC7Zm z6XYaAG%eosYNB|cd<1DUAsju(ug!ty!Kq06Z1W;IoPd}>iYbmQnB_oDgqZsd;LLsy z6-?BjtpEpme0wthB4PCRKh_x$lBZ$olj15N>>!}p^^*^y#wM7a8#o8Oa7bE)JdKVr zH6_DRED1Ek*0h(VNM);KOkGmaZz2l^QRa`!fAEA{({r-mo0wBx##z zCXysQDK`MTT&B$sKPom`#Y^!lv}AhFOLkpsypX3F+fY8v^5JcqF;Z5G(&|aFEs0$y z)gFA9MdAlGXOoofW*fSl7ND7odgo`i_eCf18PSR~zrvkcOkqw{N04+qsgYGX^vBA^ zrX(UH168nInu48CC$Zl=iJzZ~=0txtWmg#GFS~?wD%?GeY#1^sM-WMh9N1xNVt0Ny z+wLlzCp4<}cm6Pa7kLlVZITy|>%`@qo_uRz9=-#Q380SzyqoN#SwomQdonu(;Z# zj%cQ^EO&h8WyUh1QI3vULxos({}RGtBjFDYVnyG!rvctaDvnp9)5uC zT2oi=)4B#*qhp(wnYeU^>DARf74X?sRlaUA*7&Pg!j?k&hLin%Mso{&+MivVptG1v 
zw$PhyYw*fl+vYEoVE$YZ>pnIIRM4m2-yx4?saxt0y}|joNASX^!n?eWtG}QwKY3&4 z3RnOuw1JIYNYCi8?gb+??V4W9?Q$t0ZR=8vbiZ3!qIkl z=cWx~p$?0rI_vKo=4M*<#qu`j5PJ7c3eG7}pa*$oxueyg4M#UO4e&CeS+_aNRv0S< zr%DlpXge@x(vVNQxQrRKbj?pJ`WM5xYkhW~LJg?-THzMHJAQYA@`5t1bR4epx$4fU za_B19i)C)6y~Pnc6Ef}{6cgYuPRttk zXL}+ln)B1gkdt_oG>47H1YT)F%uC>ogkpRD0gvW_K-B?loX0@*&p@SWo)P%mCO`0J zofw%_e|;+rZfo0qz-{IcO^fD;Zv$yqQE|c5*!?~otFsr76&oNZaBC{bCsc&S{U}Dt zWUNddLiX;D0&^I2W%6}-$1XPI&z5fC)jeqImm$HBHJ|UL{L$&*_1SKFXx8eU?$^Qlro`t)Ob_T@ zfc~DwT9_$e`$i7xUV{OHR75Knhlm1wafg01XKcBGi^`F~A10&s#AW_}=t|AQXuQaRYo zNJQtey)Q(8qn~cDJh5`KhaI#m%FoXpG^N(Av_4jagZao;$l6%{Ov+E;zu&rXQ$Ua8 zb>1t#vP3Uk)P9Q0B^vbUc&x|*TSFBa+=v1mTz(1@lKz zWeZ6oW625_6A9iGp)Q1zB}wWpI3$5?Z0=zl8+Dh1rSBll0WtFmwzZR{DA~>$0pM7! z783vS=PMudl~2T3Jzuhh{e^7M;5bdj4|}VG3t4lnd+dPu;Q*!Tx+3EiunyxA+jyB5TuEB!_C%BW~2^O3P?(PJ4nt=p&cP9|s zT?Tgx&fxCO;LP&x);@JtKYT4!-F?r!M+9{vn{*L`oC!8i3Wt$75J=c20IBy3dGZ|d z3KE5;Lk#_n65MIQvFDrM6A;13HFbnIMTq0GkUi*8Wf9@F1B}&z9@9pKsFI!Dq&sG* zmw4=z)*cu1#(@i=td&go=Xmu9^0{?`zeZ>AVolJQlms7#V^?%j*AHx_Dt9)z?!SUA zRlZCJ#psNw;Vo`9o!F7voiiTtmc^3Xx>-g%7Xs&&heZS-rIZ9up=D^M zwQ}CaqHj6bqk=SAvZx>DgfwX_`MTco-OY%|8WEIWbrPe7A0n4L$OMCtS)m@e1rg56 z_jH=F<}T|TxSpTUY5uRvID@x;6~euXpQR4`aK1D1(&V6}+xn7@<44%^zfDH1U3}b( zzoTsg%d|8ODUz}4Y~1Nz)N36L|Lgdj)gl@d%Sc?~vO@RNO z*iJNhwO}WP<@e5r?q2Xww44ZAfV@Ebo45SQZk{UYTSt7|j37byt28qB5@jkcJh&0R zR3{+#pXFoEUAOueJY9}jLn^&@56A=zKYi~%GxhMUD%=>!8b4hmLYl@GU6P719&Y;L zE}-DIzaqkNw~WMZ`fV{B+WNiIJfw`U3yjUab-Y(X2-n-mre9qIcRL;_Ksl*ts@ejd zGk(*G$)zjpIx%mFRK7xO);D868|<&?h#zUXTXa$xbH}(o*S-;`DxFqZw~iyaLSw_Z zB&-m3zAfDcA3dtNR;0r*Y*op~M|%_C$uKt|$C?V7d>B{}-iz-^x=PwRkoYLT!;5f; z1}jO7OQ{@d;yD*?c6m1!4upY6V*NxPknx)_u|0fvaK0OUo+_1hA^Y$>JBQ(~_v@D+ zF;f7#oJk4t2!dpJ?O6adTmx9H{ z5?zfP4T)^Gjep;ZfYG++eaz8pg{owVbXdQu89xP6BSE!j&wqSN0VqtI1xvEvYhT>c zHw3~zELFdV^})L@%lMZrDz1Zy+{cZ5!l=(3X!4NV)+;Q)E4Z@$kE(f6C>%pD%UTN) z)!7EEbvw>wJ*3MYos(Tfm>lz~A{hd%UCnszD1!Gv7h0_O2O;4uD80X&KmIIH9)2ZC z-$b1XmJKXz8eg`V1Bdnis9DF!xIDsay& 
z$4`PDA`3~WYAMj0j=muMLU$}Jq%}DZn1>nLhbA`eHL%FUz|n!#(q9E6eU+64aS=s* z3$%uZGhhAYfMw}*l%7#tBo0l+FQJa%hs4PW|4nitit_lik)X*8;2up1ArewY?{zC4 zdZGzWEV}lDJIVsXGy`Yup4Jwfe`%Wh-$^$~Xfd$NGW>-BE*DW_B=+Kh^^>Xd>&lv7 z;n)9QqN{+2$;3)1vL+FqV3iym!F8ij9h%FoE_?5C-*K~)$G!Eszmz6}=Scl40JSs` zPDuFj-;5z`xr+FWB3AxbUjR8TKxa!uN?vJzjkVnPF0#0j***jzUf_@ynT(` zVK1-}{k@({srz- z8@;X7_?(icPkiXV*+R^87ZaT67vV~iy3{f3HWwQ3lF{EFyeo8K-mXw#r7nbLit|i* zyjPf(s6;{Y+)#3~+g<=_=7$9lYzO^N+K=&ka}Vc-cRygj1bRlb<3h}41zoHl5#Ap- zBy(e8e*tzn{TechR`L&nN6HAt=+{+IWe}+ZF(e+z>_t2d>u%MoEB)PTv zB--m8&nMz8;`icR$nyCPz$0$^$R?qH+4I$!a`e?L@l}$9O}^b~7*nMGTqUx7?RQAZ zV8K!%jRL59R%CoyzhU-s2;G5P8fS$0!Q@V?0zlX7ontj_c&OQy4}a*W?ZMWblUx{y z=+Nh-(s}2N-0IbpEms(EH?6~NUKJ5IYd=P$%)uot%c`%u5hm|?Xvt=dg(aIm2fdW` z%i_!(GA%e`i3c0|9H)hA6Dw_rgw?4 zIX~tZ?9JkuzA8rFL{y!f3_cM4%BRmssv=}&cWtue=lz|=l7*vguvz%AY$P3;G)tR> zklTKs@Qfpsd)QzyBS5&O!lV)*O8&}klLCGp=t{0#+r}NTY}!(A=8U-l9z&8JcMyUP z+D5-7EltLzd_$Qvf@9s^H6v@?j6w-LT8%mL&Lf9YN>7bf5f{Kxz||5~{roF^^jZ}5 zc+vQ+ZP7!x8AMH^KqT{70k_y#Nv;#r5+0qFmWl4SXh=$>l_;EE9n|iVpfDLF@B$cX zTgUyB(VW!Hi}dsm#ebA7&>%3@n4LE-c0~Ad6NH|yBQ)}5qn2;l=y$km&(LUlRMD`= z3BgAh9D28JV}lQHnPRv9K6v;bN9uE9$t%Ub+w$QQUGF{DPV;VbF+f78*Q#ycVc((Z z1i>h{q%X8v`DiO#R^@Deg2n1KEAoLpANAC6r2IhIZi}{7hCX-$t3R(vWzuv?bL~B4 zGUUJVW?fdDy{Ak2`U=q^9}F`kw}PoZbx%T_PsSEPpBY?QIC(#WG3W}8O7R+fGdT9; zDs4`GrGu)C^Un-ne`fI0)oD!QQ{JJnzAE1h8WQ5GRLJ8njWp-qNsF1#TZ{(&T;^(x zec)X&jXslLwiOm&Thl5X7_hh*&s0~q^8VwSr^Owl?^a+F$d>D}`wg0Fh0yLuTqqPJ zV7d~;b?0uxi#=KJ)bkJi%RL+*Y-KqvY4A-c)ZYCx=P&c`BabKLHw1-;^kk#?&cxxe ztfZsC`ER(adr6*hgpfLr)mv^Gt0jml3ctCCZi$wI|LL`aiWGuP{OtZB%VK_C8Q`^t z#BmCm{%vbXqQc{23Tr5ivO_P-E>Ucw~chp(~mX3{%eb!$}eLA@3 zM$p&GK4z0Vf%^ZbtX?Ql$6$zkVllBpi;yOGb497@_=Oj-#;_j-=d<|b_AR^PxtJHd z|LPkN)ywx8Czl5fk4P&9zypG;ceVo_UlcaiBwUhq*OezMNPISGhqH9>WG`Qo#t?N< zK+5HH*4fO~+T2`GRvW-S>P;u0jG|Reu1E0gZuz2hS!}HXB*%5eHJ&US{wUM(bgYBe zE>Q4757EspCn2AVNRqFQ{_<`jPR9s3Y^;gbbLqZxujvzQzmdvXQ6#N6Lf+hZ1Z5j~n@q7QN zCGUB&g+D^2XLI&E$l-}Z5&Ho!&$RIyrM`vH$Ao1d-}$4&3i^8(zcqEz+T;0^NFdS9 
zNO^srR{3F}^V^Tiwowolcm)~KrARqMo3mJ*w)(fpPr0*v^xBTG%T+=H(WkrTwd^+a zme)QfqAhv#!l%PXC@x*M)3d4+(b=;clekO0d+B_^jO{TD8kh~Ct?i-3T`@Save_ft>W4A{L;5W zTUI~WxD&~f-{TqRQbZyfi1%cAATkrFB4wkF^I?b+;3gdAZmS`A`>R#Rp#6(2`e$$zh8M7w$fMZ{w)#FnA#YMsPbuI4SW{&6boDJZ98xO188cFRTt z$VVou*~zMW<;eF;T`W&bsAH2Op5z4^HN>5R8XRmQQT#2t|J0_X{zy555u{r}ybX#o z5Po&{A04v;0`bdti^TEK|G@ey$+#sq*Ccx`70Jt`X7^7H+p8pAgD2?< z_qF)tWS(gp+wuyR-#^?Qx4Vt5J~Y(he|QWXePgO2kei2HbAUljFi_rb4@`CcX($Caq8=k%Vr4IQ1o{^ zy(LcCiiLWap8iVA=(K)*II592RuB*i#oAy@h{6A}p1f+Y~=#Ytk`wdOF zrOguV`z|h@>rJ7orPRaXXu*1aB|IzoV7-_^410p|CW*;$5kY>&Q=`WG+lscXpWtv| zWMb|v?-8)~Ti1|wvmgDC*^}idbZT4uN*RtZ(Hxx~)Png1`i)#S$kFjb-O0?!Daue`6CtK0*F_vA>Y@)kJ53v!TqAID#hZk32D6HMEuvhAd3|nd~BVt86RF zD{0;m6NTh$?2g4N&`<6s|9hhiu-FY~9Np1$`rt`5yM3i_TC4As<(4p7@xw^1PJ!KfvzTayI^#enMQ1Q-8D@vHznV-DhPrTk^tJhMbCax3$9T24>5@ z5W#pGmf|k7TN{SrJ4j_h)#i$+jZ@tLh)rE5UodJ)%ky#!<3*+uy!3XyB*(LI zPK0$p3b1A{TY-JgxfEwtTnb2@c!C}|klOg2yB~C16p=seC2taA69PJTuHodvJyn9v zIV;7Cosu6h>4On!v@y-e2Df%y&0yqp>{ZEcix$=N5Bt}2MIr-^Ssen$`A_doB!nra zm9UCx&nO1J9q#&Z;lrm*OZ3&uF|b4o&oFJJ zgr6bmq$KCy*-LL8XLlv$o78)XAY2b&;`XEX&xysa{eh)j_n#C9(LO0~r(|`sQWM*s z?l#X0hWX+@g=Iu3(xRF^goWq@NIKV6y*ZSqb#dr|elPH{hz9rw>K%bMteD(u|FE?V z|F92BFQ7s#`x_|S;Id0;9lvvjVIs{y1}(2HWZwFgJ<)Eo z4^rNQ!=fvQ+9-1?9SUpmMwI!q376==Znly&_U~f$J$PI(=`vWPIc?=f7!u-=eJL9s z_#E+)PR9HI#Q@M<-%h`slC^(@Nw?zD9x5DGH=Nadx$%f6pn7>2M*C?4&co-yURWO7 zC-?>%QQOld9&El~njC`p4uuD!8L@HFh`@#`Y}-S+Fqf20mn{F&S1l>{X^0)6tY)as z@Uf4>RyJF}PCw2JGFfKi&~93`^@ez_K#e^81EG6yF;J%oUSP}Wewbx=RMv+nrj_R& zW=!~hl{ZX=3JVa2&{sCL6_HrlNNdt_)CM+$hf9^fj{rx?1eA3qBdZNeJgTO2u zviUc|+EP`ad1dFO_ImqYd>6=3`pv=81VgBa9YKehlz3(8V6*^U2!P!8{`=r2VI-DokZ;BK`Lh5@4KB7F zXFGT4nj{ms+#ACu-X8?=C=E;TxFL^-#;X&^SvzL|(L1oaqn81q>T@{}aTX2|)~QH_ zVPQeJXFr_C?6i7t+Oa#hmck=R91J|U!dlHL*`7Xr@F%^XdQuRzI~Z&L17hG{+uKWb zK$3)+{p`_7u-NYs4jy;uIl2ySLOei9Ua?hAO98g}@1Tt8d6DxERtZza@gs2^=C7>R zeDO?!IHAVr-eRObr6)Q6g57rK^PndVok9d+=Wa9#+&_Z4_mBhJEOX|d#YRK&JU^dE zS*+l1MOGAZJVOZKxkn!rKskDacgbH&fs{h$(tArd^GFU2RSl!%T}tb-%gV(k3Z 
zyjH#px(el*Sb^O#`evI;#w|Wq0@+r$F?BhaQ#hu7uRLY;eR-L=Ab;-MGUqYYfI#zU z(pBr4?{hy~Y(Yvxs+|bK1stJ6VNz*ZYO+ddkWZbx#JjOSe(XCU|Ltx~vfuv& zW?P}@R)A*<3WTAbllfcRx2{vpPZHRk<>Mm2?naso56*6v&+iE`M~hImq!@}PvbT4! z=&z{nu9yzQ(hyIf+gD8d*u?H$KsLCJ+6TYty&`nJzMxBF$A(Wz`de_W^9zr?GuMN-&(JWk~X)jeNIUZAuDEp=p#cTA4S|gm3%8jHVSaj>g*z6~ZK!}=CnsV9U?<)zWin4_MGT4n7T2o6XLd$Ek=ffCSPtkvN z zH|}eY+YSvN#y1==Wp$(W(xpB2>G_f;-rK}PxiOgbmT}#*b6_|FEpy4!Q8?@|)hJgNA#BbDAUSI0UyYSJrr zvKfvA^GA=>d)r$-KwDniEMp>jh*)^hTYlU0-TtXDf1=i>mtRVotgKKaNE>nkE0SHE zSKH1DiE_eho!VpRor4r7cTWi1557{z*+V9`m+6j1UV{7>-J591t@2o=G%a-n6&~9^Gd9<-*s9Rv0~YyTe61hQA+2Sz5IC3B z!;cS>dP8@)*{GBRKG@j$4Kwl$fC0wK4@knUAq{-@eK+wffqvf|7RM>3@(S;sj%oLs z)Uz7T(%Lj@JXyUAyH{5nh55s0Us#nd?T#}05HVldHIB_0L+kEDTk2x0p9@=~Bn7LX zf}RmX2Y1dIfZBOd*zuh$U|0A$ZQN~e&s)FC=Fk~yteABk)#AnWG1Nn|k{Hk-5QMGi zNtUMaJBBB0rRui%*7OMVcAKlYEx#T`mUC2|ZLB27o7J7T@Tn9_si8`(zm-(L^t^Kl zj3*THzqjT4v=hTg%|L?tCspH=3YY3UaD0!m_b?fVRt*p!J^MY^UCY+;JzJv-szmcU z&C+)PjeP8?9Woiu|J6PbaTh`6ZO{(i=U8%1?;+fBDx1%@@S&kr?&{9?6S1B7R04lX z?Hx_Z^|6FgDEMr>;lkKMuQK6=-Q4X1PU8tU8Y{2e%^g=qPvVUrK#aa->52x~78*hB zNqGPuTb-&Wx>IEIMF_uPw|q7@e_-@P95?LJ0#Z;vu!^HMq}l&b_mFo}#4n1k#yYx( z$|?L*I=%|~Uknv0xe#DkLt(6Wy1Jmfa(lQP6zF%#d4F6^D9${bz?!5Fo!i4;YYc5d z!r61;@mnE}>o*j-PgcdiMM)DkF@o?a~MusMrd-OxCf8+y(g@u22tGGBDqA8!cc&-H0MqmdbK z)XY$B)}k1^Mo%g5EpI8qN_Sw})oWi~SQskB9Xqm(c7Vnv8aW#zxl^_t*pz3s)l~!P ztvQ4CR(|=)VuEbHInU({Uk$&Nt!q#wVB zRz6`vo PrMe$omo~^6;1;?-i8>Y8kgc4x;#s530{|2a@og=&C-iA4Or1E38gZ+ z5X1hCMjXe&S5nA6gd$!YmiIt8P;G$vw$$M;S53=>j@UfUf_9vg49KOA`P+c{w0y_J zAMR(V({Hw~d__U%-OwWi#oqgWneXwE%TI@`E3;^m_?Vp4h$(!gM% zZF+SIu?d4}1frkvw!6QOJRA^LIr}$__t=A@0=_Hz1n6T`@Z)=QpUhUH&T_NcNA7FH zQw|^M`_Wk-yp!~!Jy~D$ zXf+`U?^v69>nxY~QD6|m7`Y=Eu@$-BkAqzT;#z{}$?NyYlI$$*VCb!(QLL@Qy&G;a zD@WmHx;_cQ49IQd*82UNVi@FJq>OVT4slMVxQ9Xr+P1HYFoy5gkqr=Bm5YtlHzprg z8zGw2jO}W#gy(0s&nP+^WCysX2AQdS1w-*q49n+MrAkUmBh3dLuj?CCs=L;$jD#Gz zoM_34Q@7H-EgOF`?h2y95^~Uw*UgH!PyLXuzl>Z&ZjZCN_oVB5TGd}$H29M+2hYl2 zoi3t)gPATooikPKC-* z?(Iw3Bude=#)qQ6MC(S#!7x5$ydIWohuBIp6@aM1;lwk>MHr($ 
zRoEiZ1sukLuk)&yaw}_&9~H>3h{y~oZ`WEE6G1pjAHI+rO#>8UMx)S}ltGWeryxk9 ze>tj*L>Vz#8S9l`Zu6&l3Ugj2o-|7FpHY#BPG;w+&1j>^&$j&*=RAHiYy>1fSe(Xu z{V5#3w9h#Z&Z3oGLHg z9VaW5x-52+Vn=KdGbt}2#6{fJC!TCyxOO1jb-j)s;8?dhCe}~DxtFu z)b96pr&-&JXAZ^l_hHDC4B4&FNtd>ggTip&pBrlyPeWkF7_Z?>Aw~;QpGI!%PrWsm zV~*Xxp8JhvmV*@X#;aqHP46QbgGmb;zT)^Pjj7|?Pp0=VCNcb)1SFBfdpMu^G0H_| z4x4`ECPOOPxAqJ%8t-qdY3f!Bm%myN<`brT6bne@y?bx6LoH0QB2sOw_^q`J(Ig-n z<&me{J3->Nw~fcje(Q;%AYnBB>3tkO7As(k2_eWX$X^%a7Xa5-18STD>%i@{ z#Zgy5YJSbt6&*>t-k#O$-M`F$X?x#E=d}(0Fr;0uoxhO2sSA0`GB01ll3QePebt!h z7<@u&QWHeYHTaw^99*VKGm@uDK|zS9jh;0WAi`^#jL{{}8P{zX`?hStbO$PX=)8u_ zObH}8cUwx+ut;Hl#V8wl>;@}i*xYcqVrk~EM-^b2sA-4YXz?_+S`O1O3WhCPFwQLF znT{gVRn}5t*0D+>owobmLNI>1)2;RYa^`%W5$7rf()fX7UklfWDJ>z;%Yl8KeDV`i zCH1OUIi-0e7WI0bg1K>??#UUhi2KnLD*)YZ@?TtZtl+h4rD9l2d`MY&j-oo6Z@6Z% z5K2+#0u)oxiZACyOz~qDj7i{?i7~IfBDGc4b`9*TWE6;qR4Hd8+piJ2w04DayxS#c z>oiGh`Zhy?R;%@}WbYIBpX8`^UvQ7V^FHyO_{a>st^5F0DIzcqp=s*#hVQjx7vM+j z%l~}k?&=}NiTf(d$Jp+Sr+wJ=r0&Ob$kNNN#^re}>2bt?d0s}7zDv<-S7D6MUv)3= zK2qo%(w^W@j)MHvMrO!Ykg`)X>h?A&HiV&+euM6lVR}j(;+t_SoE4(rQbY@W@sT41 zovV(#SzO!OopP~a4#`m$9_p}7pq2AQ@%&t1A)jfB} ze%qzZF5K(MC-}BOk5XC@OT47g>e}S2LC4$qn$-T!i&=R^VcI*ZCkj6tDD5rc3LiBg zb()BG=l;{r`>OHS-M%oL(?BJ5*gpAW_T#4U@}NL8D(>AusrdE`5#m`sU~R0OSpW-! z2o&ymMs;0M+Kxih3~#?0wI)Esks~XB^N@Q9*`Mm56R2!rjzFZ$0tcy3*!9AFuoDZ* z%RUvrDq7ablw2l?w83M$M*LcNrU^D{T8UULR4}rT;yQLj3Ov<~Y%aqOOF_3v+TNs2 zh;MtUJ=PAc%~KlbtSkX<@z8a~HX??~J$kjhZQ{!XzKO!6-h)5>=_1}kZI4W4se8lz z9cMLo%Cb^?(r$~30mLS>|Go|dVK*1oR@Rtea=Ioq%<@Yx(n7PnsEQ2#6Szw)Y>+o-|5**_IG;(@Vn-#YE?X=;25GS6pNhGK%5c$}P%%-U<wgGz`!-umJAC{Sy? 
zz7kgvDSgG)*HGL0QMrkSmkxHebw9aJrpsqoI*eX)RuHQV1WQ+{4;s-o_g*{_Q@ zy{?qw+sF0)@3*@FE|ohqfxfSA%{bFf5-8Y?c-<`*t!;iD(I^y7ghGy8<{AjZk4bkz zjdEO{5NAnkl?tnbqJQRkUUt?;kz7$`o9OpJ`=;2%N^4K z;34B&=!d(Bj#YC2dILU54>&zN>MDLfGVm33boWI6waDgcC)m;TszR%Ly0C!hC4nJn zev`dI%q;%oqJ*om)P%>kQwCfl&KO6G z3+It5oXC*)J6{E#3oo+6>A;g-7=p9h-_esrY~W%E8b=+ANjO_YRD1Q0jtM8q-wSQk z;PUyh&&(!16I7->Mbva=26BFELR+5ufs+q0|LPq?e4U*@MO{9N-GXC~Fb6zD@`N$F zd_;rrWw!hjTv*g64r#7`-1r`P6)z|%^_7-xdUczNR+RsDd3$IZAp@0mT#9Ty-|E}3 zzH*Z}gwb`<-S_LhmBPklJjfZ11Dz-{Rg7wH zd2^!Cg#2VcPM0Rslp;T=wf!k;1VGr`PQ6mzIbNzdR*gLZN;rVZT1#|QyXf?FRT6`X zvpjR)1(+_*x|NS&&iEg%x_Rrd@jl|p+Nqyq0&@QJVg6HKecJEKB=S~0csqTKZ<`)%(Q~gGW=A~X)TbJscj}8ka!6Wz(aDI_R};zxZ>(t?||hX*Yrk?frdU=774Zitiws(HR(e*D0l zT#7y89&Bb)UWGC&@tLeac%ug>1@s~)_@V$m>&u^C;Zv?jl}Woj%aHY?fdyqty76|E7!DgLGT4MX|T#t4% zG;;v{LezmIc2jOA{sp}%Dbo1V8mj{k$k)+cc0PL}x~3n6xIr3%c-kz3AG3;fDuLAz zM{<4`Uf3I6j@XUy>+1xfG_{lswdxW{I+`aH$IXFMB1#52mdSMpgqUL|Nfo?>=%?`P3 zrxR{EyUmv1oOLH#A1NE3?b^581(}0@qr$Sq!in!?Z3%jQ{c0judut49MhihX8v}M$ z_KCk9(W=$mU0~{;jtt(jD0R8{ZG`&0B{Yh!ZXt`5)+|+7zE~Brk_%bd+oSIo>}}4k z%-s^_j2Nx=%5UQHCmwmTcNuUCObJsXK}MUJe8D@o;mLol9EDDj#C26vUmY0!v!L+< z&G2_|sxcJ%@2dE!$24VGRxE5#YDgrCS}vLgoYP9wo*1%r5G9jl45!f4EQFFT2Va z>b~X69~qYl+!d4NnF7g{?C-b68tw*Cmw%po zowhz>CvC4bu|-MQ^Ia);lDvi)T)LHR$$FT!ymCHDcl31LU%u8vEmGAMzlcU|S7Wub zdvx>0(5Jp@@r*9n_hA8F{hD=^3_uh|NoHav28{ z>GQED_dKyi<*4GUSpeE6L;L%u%Jqz3yozrimw@Ke0_;zY|(&51))xO;I}=DNq&rF zG9Eku+jauz^o-y2_@0h!<#Lv zBF%b#sp)G}S;~2xdth)4uUN>rKlHwEd*d73Ta_MRvFi;Z;r!U;&GLK>wqo9~o#1q) z{0q(Z4Q+GyP17pC_Po(%-{h{kTl3FEzJ)ZyXM`1czI$I zHT^<*)A){(l*-6p$MfW&GmndzuIN=J;Ns2cr4YzP^lnp0+p(OwX#;%J9`lD$_1|TX zEd-(I3d0KADiPBr#x~S`AJ$6W_}=IMEd|Dk57x=n55B=x^(F9qRnIL{SD4sQg_RH@ zEdL0XgaT8R_$i!)1X{l_%|*NxV!?UwJNgpp0$_t9+%P}-E3jyNnhg{-6FD$ zhZVI#yoW2&6bQyfp~84r5>@J~mEi2?f}2LY1mtGZy+jr!)K_RyJ|m9xg{3c$?Q)(^yi z)xa42e))>JbLN`r6otHl0|i7~W`z{qf|*0el3E_lD+T@Tp@Ka8nEn`~!?)2LecUJ9 zJK1rL-s@E7?~Gf2XB=dXW*^qzm)GRqkm|n!sWJ-BPm{aNyR&}_?tK}S?7C{`Y|a=g z5sXFWn_UKDB>imdLK8YB6IC+0 
z@e8*~`5Rb9Q1GO^oW9yEgrk3Q497TR!0i{@DHihRK~Tv*<2-r-%V+b5emV6pc7SV) zU5LWtCwK>OOTd?Ve9<~^TIHh3c-^_)MIWK;&{0U(5LJ?ZwI3-ZkL+o_h znM0cUzq1*5bV8dC?4cw1Z-&pNt$`@Jvjfa`M&(KA^M`>!u7RuyIVl{Qy3c&kc+jY8 z?UQ7o16$g-T^D$W5euyG*Mp%>Jj#KONcI5fI0h zl>uKkYT7QrSk$LzpmX_G+r-$s%H5fU{zx+MGXP?VLSfffa{r6f_ML`AU{f=D!ITwT zO~=Lb_z+RRXA;K56NozeqtF@Ma`YbfmW za|-Vl?QJ|tVarVZ4UH7lL*8v}ImVkSg=R@*bj;Z8(0Mdd_Hf9G5cwCe-L& zD($V~7pN3NOr0w%#)mAh%)3W8H9E9Xaruwl>_C*Cuv|Z*^nrL0l^`M7W1#|4rC!7z zz@NeS$gk-Quwth+bCTQhmGwpa(aubwhC3GaBQhZ$pfq`|-~p**?(AqDz-h@p8^tCv zc;;Ca+^5#T&P?{PvU-~*BIA)p^Zp;BVoem|sCCqQTj7VAEB0WzGyFFkr9)X?5*S+J z44UnYkcRdCG8zHKMZ_rkkl-4y|tBhcw~n2#M)FwLbc)N-dC5R;&VVQbSJbE z^>)s>2YPXNq%8S^M5))7M2!~gJqDXl48-4^UhL;N1pbpQb%ld=F`R{5`K&m$ptlz; zDZ{|M9)V`{s7v#i{go;jV7o1zOyS}5fnACjxCZsJr3p#`9-!w#*S$FJ3Tgkh@Th)Et2V6f{_01O7BE#V5-s-sZG;@2$??N(g1SyJj?qB{i zz==}zf0SLqGU4&e-cfQHBXlx2e~WB?i__L*#)7dC3C^q~2KmOcZ1gQ4Z@s(QybWNe z174%euHP3?gGJuaPO1BeKgeQq`X%AYu}FJ!k+^?L?t-}6YfjJ$Ek-M84lqxN6Cj8# zE+;(-R}>%_AYFcYUl=<70SCIf#YA0zX~Us+@dp3-VSyOH zcK6TK`z`_+kUBjDznsbzj4rjbh`n&z2ONDs*!o0=aO@lk8f#GDvRr=ec(xWi`&;TSXzqHKIkiehlWHt-3q z?M%7DFX^K*CM}l|#kNr#PV*)5Tp=wq#Gf2TXMsedbQ7+rc{bAP@PbvSfEX1usUS`` zu!~r2D8g^Mvr6lG0L09xJWJ`ZBpZ3r*i;^jqu%U(fZs69PjVSQtW!RNH(?f*z)FzB zXXN=iXM?poih8M0`%RJX=xI9{29TWTVy@a9WXEAN##YyRQSW6^J~vWXC1k0IzL<-% zJ8+&4W>iAW(#z5*=WK^IFX=3d!wWe~TKh?B@o=H*v9IOi7J=PsQJk>cP>*_+t2mqUom2ydAE3MfKHskR zcbu(`ah4_2D%F#r53Vh!($a!1F(Gy_-$@XYpPq%YciQohH4d6NU5dr3)XBjD*;Tf6 zZ*WJZFks2V#A8*gPcufZh-bvPO8x4sH|AO3uJhJ|(Ich80u=WjDOkH4-t%=p|mLeD90*zx%&*RR zi=ku=7ydr@H?KJV2~TuTxZkV5 z9*%%VfUWs{Y|jC9&Dq{&fgte37?2dw-#vf=mq%IYyh|757`@#2mBgy7!YGNF;9*Qw-0#$HB5H5xs_WDW%`#7#RUqL1#MNI4fK z00$DX-=`m7L6g00L7nK&Mf#mSyU8$=6JAxs+f*}VSnjy#0npf{gKv6aa^%#jkDy;W zFrK_5Qx2Pj$x!p2tr#5>g~wAk(g_~#i@on z$(7^%%-V|IJgmd&8Mp0)!oA00QQQH;;vAec0=t-pNh3yXDfK5Fr@8|u^9W48Bb%=m z=mrqq-go-lLtrm3$?MthHL`POgGK)HFTYgWVb4CF!Up0EIBuV=FV-1v;Gg7!g2GQqBA28 z%SS8-H?k-+YhV1ui8Yx_0$Tz;I$2u+`y(I>JxcyOiV3y3>t0kkLA+BvcYA&-R&{h| 
z@b80~Z`N8akmW1h`t)?@*SE1Bj~N&I{skr_GOio{9o0+8|G4eZ3xvD>P)}ZFzp{KV zdi$%5^l@(NYl?y}GNbDA!xVd=yL^d6xKS_v)F|riT*>T8FsVVYLIOu^*(pD2L>o|H z9)I$<=J)K-;f8+u>*XaGb5Z|HT=LfJ9gRbN%T}gFo+Gl8s4r9xA|{#F)TPHebz~L( zgD`*iK(g67g96NlukyzLs(vR=tED_UAu zdF!Odzc-?{72mk~_`Vzb7j6+yRw)K(KXQ1alX8!XV{knnyXP{5a5~0)+9@OdiImaO=GDg={LC2*GPhi+x(Umt=bPOun9SPpxnH(#Tk;|D7zFV$igSYeGi8kO?vCnH zTXXlzwK&MQF`2BAh6U4!{Q1)>V+oOOUng&`YXyC{(60>v*iq5EEArjQ=&HD**rgN5tc`ptdZd?_ z`rO>$B4N7&vj2k)S@5uM;~$LR+TLWbmXq!U$w&^Q739wF@H{e)e_;rmiOVF{e#Yj~ z;N11M=!>3ld%mC@cAFE=R$QN=)T3u;YC`@j$J)d40&?r<3pXnuEJ~WbO zf-m6tNQ(CsbR1*(wj=H|I9b{zm-RcJtzQrwvgK|+&6Z{*f0V-hfRL`8?E2qU|1h~q z3Y!`zMG}pHJ%9Yz7mZ30n|-gi z_+nx0-SvRNGC{6ppTPLf^lSbH-yv9Z9iUNTs`1ZF)K^cslo0BWs8z(4XrAyTUoEGq zUR`_tPw%6kSS2DTgqziQn)t(#I@2X4c*6p4r;(YX*mc{Qdbzxe5vM^|E39x^r$_aB zdC}Ugx!{ z>5+2^>aImE=u#g4E)5*U!PfR=elzU7iJbL6e4S-flu`HgB?m-61f@ellt^& z*7*H^pVdxR8KZCZ)B*zhI8N`|7>_iVy%_)9Gk@b-WCDpdA-P2tw0X@cQzbRcg$bVq z=6U*~PrFO5I9t&#+~+s`=GId0|E{H1+e)5BBfh~rTKgwBSLQG&KJxfT;ARu-uvn?E zg1I@R4~igwP=?{qU7;y)2&DLsTQ%Gna)$ z*)u7ql1J{HWv3j#+9}TcYwoaN2AUZ**8S^9@YXQ*5^q=+lrmrhTZ}IWm1`;$8oyLw z?Z|I?sd&#+>ehBI{c(A$T}Z#AxPMY>J5_^u@s+9Es+?SVsjnrVA=j>T@z&U#a(I{v z+&WQu2h>r#A^&>Myb1Mux#O;TQf}#u^oXlcA1PM`M_MT|c=egc&_!LHLCG$h^;*X8 z+{@&asmDJ2K7YaLS=P_{cUQ{4Ej}OoysAz+pW)mRLvUI%!MwlzUH`IjV}t%M4KZV2 z^1iqU3IQ*6js^<@&EX}cm_3b~IH&THTT&1?RmGUh^!KL(%fE6RO)}}BUH_c-P}#Zx zt36I&tKNU!g}4?ANEtch^*1)e!TvZ)iXGtZoDSNMN;yYZTlISznIuU+CTa8XBusr- z!{0epHA*M^kO8yQQiYSN8`&GlM!-|`@NVpI>qPjsyn4->y^-{rLzkh$??UW4ZXe zVT7Rbk7wgpKw?FE;D=wqx6IFjN-lJF#?lf7%W&D;!nLXfKFQ1gv4m-f`DOHK`nN1R zHS06{N%y$Wk1#>9w9x56NyfejY5^<9dHkh%pN`oH(GDBF%knzRX?Q%@QXl(#z3Xq5@_ATQLKiSBaC#-$ZG zD39Z@-+{W3Q{wXR7Xm`l#K|?z(%K)hgx{@N(;cw>DAyOg90Bv(x(DaMFXxIjKnEM^ zkM02lr3DzbTE;RS;G1OPLFuzezv;qZl&*srnz84IeHW7C*^Tb7mm4xHA^+L?`edbp^`DkOwEJFGbOs+NKj=LB!dwXyzHAi&#iM!Y-Nxs8O*E`Yk=i@WjfBXZn z_JK~?N1_2J|IsI$A4=7hExsjGlZOgC@r3Mj^>c}SsRjMP12~+I zRfkCnD`oVgzB3JYSk^|+m2QBbE9Why;=JYUfiHNP#YmK0=C^Qa3d7LzA8%e-KR7tN 
zQjojGgBp1W>~-Qj;}KHWx=W<{t&Ix$%bxcPHVxlnf>|1=mWJW~ePz&PS~A1)?OW{k zrO(BR2SIBGY>4N{eOF&&=-oc@lK^U6O zn3$6v>~>CtF|=^aoTSHa`AIpX`=R~&@CcMF3VLU}+EACAN$$JHlDUgWg?jB|=A;At z{?-KMpQ7B6W$kZH(8<>ecU`3c0>0VgL5*|8rsPA2w`W=rr?da4o}SRzGBZ}^nSFlq zi%7|4zd67*&|fupL5m3oN)JhD)C! z8#otW%4b_G>>@fcw;)^RA=liG!{Dko>tCjRK-9kr%)@}66SPVyeQik_E6UJpV^*JO z&{Fmh;}NMPuMfq^kLi|2ivoI>M+xIKbHNhxqjE7g@?)(X9%@oN~ zNok)dbv+PGK6aBjN*_S9?wUH)$4_Q`%i)_L1w@?Bp4VmFto8?vibc3%9*_&nV(4c9p@iH(Cj9Cs~ls&Ia9Ce(-W?#G2Vb{TN- zyN9CB7=FG_Pxw@KAKtWX38nm0k#-}*^c`)tc}xOaaTL#o&=tQidhM^EezoZA2ffU= z%&iE|W{LsnZAp?>h-^5OO8aTHR-b3g+mQ@={}OsqF&ZOlZr^yo_&pFaA0>(b0;1O7_y?#+7-hrwFH zVfTS#N3Y1n&c4p=AcX55YI3-!e=9;n#!R23?GEo(g{p|B+w#pdrLxJm<~Bif{(kCz zM2iFE`cW+mZ?Cny=|snueL6;>w^U^EB%~d=+sN@$uMzfN)xwk8KMkCX4|ZP-0KU;G z_#scHE9Cq&Vb$cj5ANbKk9p4^G*DpimONnSd#KMgdyWZD(TD23!B1X$2o(tiSXV58 zQ^d{uPU2;_iHQS^m`gI=Xaf$Z4B|$_j$KpBEYe8J#2J9b?4F}o- zGZh_fzFYiRG+o&4T|nxgGRWIMe?_3bZUfI(!ZOd;PSy&kxhn2o{7}W&W(M%bD@VOF zYI^aY>H8uVx4FAl1gKB!tS66pS~~=*^LNzC@R8(K61UifA(2&gplOl3!D0lj9KNb- z9zBK$GLK5u%_|v+M6Mmfq(A-+B#*-7ICD z=iMNBwva6=%*_I#TnDVpz(fyTyy!!4B}&;vl9W?@{O9a$E0a(iY&;5OwcdwEYMAf2 zL(sJTd(PqC5i4F)7;9hoMROIWA=p6oIj;!3hmkD*-9b?@N$u_XK~mP`-qGypZ*At& zekLqqW11hF(KxchSQ#O`_kq-E&pW(C)bRMD8@i!C$L_5DW zJ0*Eua!3~@SlmIjevB@s5?x7^Nn|1OE6lv4;1LAWD*t}d-rxp2b*1s|!aqSHe!bC0 zT7Q=Zqh1-$V$*Xi4DM8ahNj*SA8X27^H&0?ROl8XuND>`Ea;H9UQAe0l8f=VYTzZj@naSoo)nyZWNMz zgyU8I+Poz=1@NZ%D1p+|>F5&qr^;@N+Xd3^%Z;z?=JSixjGNag^5d zcUn3W9n)6Ta)-p2nH}PZk?`U6H-VFhkeQc3%mI6V2g9%UX%q5jSCZa3RF=VRX`)YV z)5rYdD!U)7kX%y~GLXnSssiso^)f8}JnHxQQof=qlACIYW!bW7g$+vw96vl~*sO+~ z9jn6u)OJVBbcLxgI)l=BW>YmT6P=q{;_q+M0gBYy&M2vQgLAwa+U5;)IHT2e+4Gd2 z@n7vJ=VeHo(d`-gshP`&*&;{~9K*qYKU!WnwodIca*&~=lQ`A)543Yj_s)8y&z$8G zbhO&{nCA#a8OX^ zqmXFkUo=MY=`CnHPlkobee#I;JqayYTGxdU0jtc9z24W*bOGjVk_v-6IlkNG^IL{q z3wOsS{{Ict8qU&zPpzp#G&jZ{P{(2z-PN_1r)?jvE-uK2n_%DUVQp$34q#jUuSo!emyABJf`R!(v}5`)@o*bw zIm$-hdgGg}PPs(X8Kwp}U&a<^C#y%|u;p_Ks>^`0b<81<(SCgl9K$f_Nf;*KOlXNy zAXK{SKMsZFF48@BO~|YZ31T2xc?~d(iHOq 
zB-aXOl)$Vm^S=}(r`TE=IJRuO_9EaT2ACK!iBPfi%*3c8hn1uP!G;03hO1XOn9FCF zBNQc**NqEMtV@yRau&xAs2^f(7ec1xI4h#hzJI_BCWvMQXj#EOauNCgGHami)jeDY zE(Xevq4MXO9=gtt#w5{$MmkT2iO+-BuI0%ux;>se$0a*ERiaq5+kz(Lo}}b^;mA8& z%}1>+!JM|`F!`PUWCL%#Ue#Lj-MNqa$aC|WGawK=x}FlN_-B={S`HZDr96Lf_)yGV%#4{!&`d~FI*n?H$bESX03|3 zt9Nf?di668bjFufCW%MS7F6Po$GQxKq zfxO;m)08uHdzSFUgkTZm%f=>_ii5?!`;Pn%`9ZB4K}>eCDvL{>iK6T`&c_IsiB$k%y(X{ z-@DqS7y@j0fVgR$Vb=aKa`qinD8^zjIkI;_TW%W9@yU$b zpMZ`z_|)g2#Ro&2B1;&0Q83{?M{Px$?24%H>_MSabL=^PPSd;}3H2i~m#DAt8!wdt z%q|pGyD2ugkyVRrDDAPmzcofr{O~*}&xx;$4)1q3#&2W>Yb1=EXuNdw>@wVg06M#Y zFBfB)4;a*le;x9pLz}vGId>!_+n**-Edk*Nv_WbGF^sN@`Rc+n}pR5;mO#dqiG(Rf^xPu{)eHtT7^Mvf2HN759L^GVXWtpUmLYyh!0-V7-rW|=2^f3);dF+) z&6D|*@3Oi|>Xt_GTfUQGE3QBPh7+>k^5?|qjeTMEep1WGrYZ= zxLdo5WqgwR|kJKqfbh970YU%(go$t8RewT1kPrNP4#BI!9$5q|G#EJYTtZV!oJ|&B@pm%^b%C%(5EGM zZdJWZTt8ex{N4{mk;=S66GGaLSydm_4sLC;twX+Pvgj=SX+isYWdrW7nhdWR^oZsk zG3i%AJ48b0L?@u?C!tJFfe`)&?S6)MP_-?v@4I`7_xkb4kudZIZ={qc=K|=<&{Zb} z=!|NSxdWK8z8NnAn8Cqd3>%UMl(9jMZ2I->%wTxSsv=d8ootbROaaGN+dB&*XUgA} zab(1Ar*yaLMkyWwXk;ut09**6fFWr2Xs=HL&QXrj>)9h_zLHAEn$<00$7p0RZdK1E!h5oA8E3l)X;H3UFC$ABrp=YJrOD#l?$k>VK;AyJi}wq;8NxPhyL>9@Z|h$+ z9d5;>QN6z9Zi4Kq=XY=KVsjjX4U$d1a9 zTqD54Dt)CddGuiw-@qqLdYPSX`=#D`!-0P!1$5!@A{j{1_Lh$V+jnuwZEfwiZRU0smuAwJ;(A6YGSL9WW2$IV-;Yi)6-@!p&A5JFX+>pBifY%RK z>$uS@M4wpHj}YIQs22GJAFqcWqsk3@J(xwd5)w?RnfhOxy>^l3KSLRfXsDPg+D&@mbtOwDeM}FPwCM4e1kh9)2;psrX_m>#l}N+UKlIekrV{$wuwj{BHD$u5 zJolxnzc^)vqlYL1z^57|J8urGSD*VHGo&Yjx!+T{`+I68Ce+Mb<5X9cy~p;!z8ePB zq~t=o#X*wKhw}Fjej^sx$q2`=)uZhPTW5go5#~Xn-pt|lwB7~LRhXcH5OQ!%j;jEGySRj!E~tnSi#P^s#YMj10RfBmag z1fOwte-WNE>MUE|eccB{#?1QF%9XTT+a2l42q9H3ixNJn#lzORcIBprA2_g9={&SQ zu8_-K!UTJ?Z zmkUhWYPS+;vh$o*cfjSGS)k$#js!{LJZW-cHRkCbn&-Q0w~P&A6=a zWK>mEMc%yIZC=Ue^O&C1iMbfr12z?!`pK>5#lN%pBcwHrI2aEfCn?g+F0E$3|0QP3 z4bV1o)WCMX!bE{dzY=r_)O8l}`HAQ3D<+YTC1C5iG=1slVetL=OGxF?cXPLE&xG({ zrIC+n)=qu;cQwh?p{kTU5eaC^E~ul5N!&$gyQLbz*SEe~$pPi?_>dv0nsrqtnos6l zwP_i|yZd6gEhXd?Dz0Vi0=j9f;5>pFcbmR 
z8+VR4;K0T!MPH+-hGzCxHdPt+B8(q&{Q5jQwWv4WYIiH^Fa!MBpHGkdA* zjnn103^-mcJ8eOPQS4akCbg|6S@dE+<7C&gjW}c9wk~oid<61t>e%f7ntQg&NmY-D z%0VO>Q60!cb@k%eK#ZLHq9zD+p=RB)r5VLCX@lB(q)(gnCaTN&-%JYq(0gB2-$3&@ z27sS@(4|XfK8rQ}?qzWq5$S%!ZXh9IJ*EfB))SR{zW+C((y_s`&oqo_vhaHw@ckyM7!A=~6-Ql$FBuVzYi>MNFJd+`U#q zq56@e);sRc=ZxQLe=JxEx=y;MO-5;@(JQ4#XY?k&8;$FBj*ZRqH`6tptF#n3uvBr^ zp-Q5Lb(4?y+KbZu@VoHzTr!%F2S$Ck;u^#Q%vf%bQLxpPc-JLpvPQB^vHfGFjNY42 z6;I@mWbLP8I#kxun`V#qv8UJY%Nyl%0qE<@RubQ`gq9!0m?9qj^99+wqu+_2tL;xZ zZA%Ml4BBF(Wg^@{E~f0x7&b#^Q>gbT#>sWq>*LxWk?MrC{WeT~dmrxDffzVAaW!Y_ z(9g-vJwc}K^1C6g+^@^2hwjq@hVhk)DhrY@!ID$gskB3j+$ALi95I<8C9Qw8Yo+a% z&-77bThxg+10lm-Me+__4E2xJ87!p`nu@UN)mc}*jR#-+yN*FjHN8OS)~+FL?T%6h z=Y^eJDhESi7X;)kc5}1C0X-(QF066c2*hD^wO6AR34keVmCv(0PbN7_)ermpX2w?O zPfRIu?=K>77H35dCWI*HqUTuYDgR?ZwNcH*c3gT zTBmIB2*_>laEfereAJeC(caW5vzD-Vk!5&TSg}ag+;%dR$9Om72T5ne~u+2Xi8u-a{dyaW7LUy=Q z-ly>^iKS@UlEWA};Lp~+X{KH#?g!)5pJ~@+0a{c?@M`oAB4t`bPXoyk6%rnH)o$za z3xk=DzJNcQ?LfI_a)Y(HmqVR*7>bpYC*U|&Wob@43BEsnGV7PyYFYfxdvihHfoDa= z%$^abO>gw>Y1~T4DY4()%E{DOoJGnREkN0G9QF1>@T(gdW|em*A~BN&Ge4WQ)s+G( zxs4rr&^@xj3SRt2p^tZetSj|Qi%Q3ATbI{Efl;jhZ|O}K8V-{b$V^T@vwA3gCkh~; zvA3CH%|7EExz+0n!lwdMUD1kuQsm^hbyr^s20t9DIF$^H$y)y%^oe`9`;K#;3V z7#2I6Wu@EP^`$j+PP4YRM#%-39tp4vpQWes@cglpYl$D_=I*U5e0{H!B5GTIRq!Yb zIBaXZ$)8K}T&0p7wn(c7+x0~5 zr^GGZTVV4G1ePXgN?&s`!UlRA1kXp2_vFz!2z8YrOe{SX=TipkmE`QKR5fX%;Wf-9 zi%qGa0|F%*o6kvy<)0g;%pXqerQR8}TtXhyBFYSm=2S9lMVmY(j>YQ#-1@%)yf&V$ zMH9}8%mK-kF@XL0$gswws*i-9S2=sV|$`G4F8*UYN8y^4fbHK^YiM)9O2)LOm%bw?k zsLhSD9JP4_RH86zQO(F>ELAA2+VTcqDq5& zg)obxjThet%~~cE-Gtsuyt)N@ZQoPft-U{g6XM8xJ(gaulk6o#>aNZ$`KnNj_La)j zOMdgy3|BYemeZ@=Qr!VS3nMB4c^Z$aJjvXX8j+iOKwHV93-(2X z2Ow3WTx6RoeJ`du*)MHvVdP)#OoS%*8)%#CnnhQCz%6VqJV&a-qFep+C}uq3SXwT# zPwD5=7x%G2pvAo}=K-I*a#C~J+{jSaIxCp3rXe~EK4XUu)%@7SVW)oOWcC1XhCYtEI<(oNb< zaY(VNYBZci80GZ#^eZ*VAtgDwz#Gk2q>EjPw8^v-1OG za0hPJ2@%?j1YKa>FuJanFGr3$W`x|gue@KX>Q+&y+3h{IQl1B)?N@inh~E8rt(jKx ze5slG)U-HTl$^vv>PECuZeuK&NjgbAHWZYs=nFJPbB&5}WmY4xPioOAa4tq<3ZTTN 
z+2YT34L*N--TO9YRw`4lQ&y-#tC`a7*qYf@)MAi69vI9O)&+8Iq0b+*9kao4sQ(iW zu7q!0(3YI4jZS`xq*Uql!Gtombzjg%Eg0n%u1b6jDBY~+`2rnbyr0ggaJ*LD)9rp( zU}lw6snlnWR@ECIts{iBI`1pS3xw#NPR|PM!%c5P5kA)ym)Vuvlmn7>ju#1K^Yn@9 z^{k{ol`Boa)PY`!j{>TiCcU?3J8jDwksSA*XpomyYX35uJX)8jL{Q2gmwIlrB4@h^ znxH$itD{c>Th}HA`j+jpm+U zD4uF{2Qg41V2+qRneHDO%I1p_<75shaONMqPk9IZ%{Z20`ztboBd-e3J|1upey?oC12i@z`)MGbOst&SNXBN82 zeVQWjFut0(P z+YBCys}EC?g5jdt>jUD)QvXSz86a1|C-p7!+Y8Kc4%Hs&c*7+j!6tJZ-*IBi3`;7x zS#{EcA?<8{pzA3-V_v1*i4>w;l~CC2nICvSN0ds#(6s2bpL`L!IWsN;V*I2s73H;e zPUe#U{d*6~-tc0Hi+$*8v#}`E^;h8p&p}-b^<%+PoqmIfTRGAF-)V9a$6MIJYnZE^ z)CB+AG-ENbI@~^v=l*jwwD9}4rZwcMBb>!{McN^NV_fLL5pMUh+hk6Jp8+#&n*}V% zM0l20-DY#+03m4YA<4(wp?oU!WB6YWJ11j&8+l}PMEFGzDQs1x-I%IuYDw8ncXVKA z$dFCwUkY*R=`dxD!58Tg@O}EXqq2$c;*^XNTEB|#aNnSHbFfi;ul;!9lwFL-!RvVc zqai84nEPy3eZ6A6R{{_#+C1qKc>#g|e{zN8sk{&`7=*6bU9qqeyE&r9X3pHg`O7VZZg;jDG z3UQ|vDwC6I8t2ULF&TdtRGVdsvQ^u}Yzz3Th2y}|LU5QX=KOl27_y|J?rRLjl_JOA ztnXrI#NvrYlCe-+4rKuzTzayF$3)^=W$TaR1%BW2ex!z+qHI+E_h#oHHSbZG){v|} zfysDpqHo#~zQ*gy1v^b#&S#&(6n2U4YvEymIR6Fp21_L|wg>u*Z6TjLuM`e90SKtm z`E*e78c{~WTE2o16jPoatYk>(v+&;bkH0BAokKV+$U3OBxEdu^u#R( z^g)XtwvK02Nm5e5d3fzvsBg|iKxA6NIL_^b{vnUKgVV2O-MU#u|7fZPiGI@e{t1Sx z3&^N(jaoVL*%dXBk*#C1iQd%OCJ7&D+W?1CQAGP9{vBA4-BD`%I#7wT_T3xeG14V5 zpP&!1NuP9&sb${~`Zsly#e*}F1(hcQ)MM`*5)ou5vtHd7;L{8(Z@N|D17a^JxSUfT zH}-rDOanoKEQg*o4StF+2H_s%D(Ty9->2H6=$IiACL=*-v}nei(7y;HFuD6Wx46b} z_=)gB+nv<;VUX0r*zXqml=Crlu$#fuhRZw2t@BPw-MZ@6p*Jt?C#7Zf zWF`J&lZYoTKH_P~XTksAqTBv)T4{i99;DKix|r@UwHJ_A*Zt@KUUFYZOs z(Pp)b|4i>;XHYz4XUiZaUy3L;a8u@CknxNB-_*`Fu|(13lxJ`aih_TcdF{*_5l5z%9aSv?7&H_?(ayfOi0NQ=QJLA|6T58)YR??Koz^zhe9P*GVya~)aGt^;Ps;e z-0r|AtlbEY|MjrV;$x%@m4inI5fM!Zmo8`oQmzu`HP`Ghh>fkfA{&bIr?nozL_|H&<^H_$9gPH|>-I)bf?h@-7sVkZn zPHx`-`n)Ti2F1n|>fB6QpsTuDZ|QRYY7NX(>&|pG+I*(=%PW|<5qPv!z!XD+3Zi6N z$w6K>ap0@SzYe+N=$^iH@oP{#m%(JWq z>o#V24q`Wt)_5sP$%hfkQIDOj%&Ss&yX!9&8CtJ8hYz}K1Sol3Wm@;P?`_-U5-~sl znYIhtC*I=Xx{aMLn&UvlRhh4;*f%(TW;)N)Ug3K}&J^hGJ%jw#d}V9@GsH4!De%&- 
zo+3o?=uf4Lf*HhOe$0`HIV@f0$b?_y#Kt-4P|IPr&eag_ba#srW2^USSw;9XBBieEx2{Z?a|iK{igsWb5prO;8en5O z?$*6|)bXJ!$$Iddz@L&>RxzVeE&nC#N^x@_1_4}#3k{y_Ey)leo;9w38SmV8PNx~I z4&L2cuqQzLu9r0qaDRe()a`C2gJXqz*|Rgxe}po)5O&^z~SV38bbSclB1-mde*LjbV}Zot%cjTX2XHG)5+ZpdB)3V(7#^pYE&puvAaAvxy+RHbsIv%+ z5WVoop0_0vur_^#y&sJvayD&UzPL&J6sVUNo4BnW8hoiwkOriy5E-%{wL1qqiSp>^ z|I7^iIa6Ey8~>9k&E+{elZzo#h2@J}XZR-#??(#FN{>MiS^Ie0qaAmS63b9RywPfx zd`siilquao#P{udczwuMQh7W$DJsNnJ3fQ*VH;lR;MR*g$;r}GaSjIgKTTB|Tgc6>){hVd7PTFAL3Qpc4xp* z%Hom1GBl&xbV{?<0X2yZ#2?l6rCco-{^yd{Vj{UH7FovGsPO{JB|p-u#Or>Hqf@3^pWhB`9s|?_LhhNldefHP^{;~7-wL|@> z<#PEC?1`^(R+^PB)t#n1WFE%8Y!95aB*P*1}|mgps>=-01}I_r4ynzH6j@ zSLBN6i0S}1aN9$ugATMi?%dOvsmkyPMjh0?AJ^ zvt!w_Dl+|hpUbi2^kv{3^-6x>q=h#c+104pAb-8bU*W)?O*Jn!VYmMs=0Z0z#CSBG z-zv5u8oT6P8*}>osY><1UeGjgOrzb}4+k$6v#lLK*vk>8b%cU{S)DhvBdqOpTb(l_ z?aWi&3Y}|CvZ64J-g8!Ykodzd%TPzE#_$e*=uGuXNgC(rRH9|U%Z~ySjUHx8?b}oL zY`F-ds`g^n0x7or*z1>>u*7$znJ3(574y%OcaMMBcv}8Rz9iU~=Ql3-Uc)KC2tl$U zAFqMj6O;2-L|30M?elmKj`01bV6|m27X4>iz;}HdZv(Z?gu}<#cWLxy@K;Yaa6~GoSe9`i>knnAo7M{ z3Buwr@}|Z`Uo!t~`Mrmr)s<}ifcXWjRoyL0|EexZ*(WW(WgYz7 z`U#{FR{Ho3=c^G3ywLwqebntoV1*Dv%g*OH6%wqdxpHvnT($Dt^!JC!$>$pjb~ByQ z@4uKjn`#!9xcE{0-n19Dbo@C9pWv8R4}RY{$Az21OSwzwp8WjalZ(sSQNsA{jAfD_ zl7yFP`%OCN@Wmh z`m{k{;28FEC(jZ-^g#tuIgHEr$WRR5vpwn_vp_lCfYO60eZ+=KfUJdLXY;m+jgQ-l zzhpsriudNt1;b2fcZ}Oz5Q9%dZT3_6ey7JNvD^jZRB3HMKzrB!dZhtKVn7S*?Y-I$ z4NUJ?Z^~ZQTZKtFvutdMcBGFRyEA-#?i-}f@69(y`ymcq)FhEbp26t$BsZvx|I~G^ zLu(T<*gswMotscw8u>`FLO6$bnJ@CkK6Dkw)MtGiaprtJI^iD-3znv-m+a*!#e$oj@$Z^7yj+g`+WWWtvo|0n3=SEYg(mIQh8QqZ#-0<-%K6{RR*or@r8 z$tTi zOuL@Ksc@HzKCajRggc(h@{Dyy5acR};##javVVSz>lmh6?dEqDWGQ&tRUI5m8HACO7qB*a zwG54MTUB<3WfNhWH&p0b(< z{Tk|K>I7ajp@wWDwr110a-*um%t6#j|0J3p#}(1xwzNY% zeQZP#RPjj*xIg1YOWC)i_1NT1L+gsY-(Qvvt0=~+&En&Tw27V0LV@qW(slaF0fjgW zUIvJ<*?ZA~9MUE!ekCgQhHiLUuaEKBvQ8|vVuHvR8fETzztfkvNb_;7RHkrdh(2_7 zPBYEQsVj2wAjHa`=_n8G^y55A1IgF;d=MR(t#Z41KzhWqp;9vOweb#@6ROLi#-5__ zmARHVee@NKo9zjZP@@tN7){ekwI-v!qv 
za19H}Ix`p?=KD5rnFki@^{QMV?=G?zMmW7~20Ea_Bn~XI1c&r;%g~-#Q>lXa-wC%? z>dT&1!14$>NNNCKhOblf>XZ5bHsd3B;0^t}ukg`_=fl0DPxy%6_xz!sOXVOo1=Eli zoQj9*?;S-Eco`4Pn_6~oOgwOZZ}JsWKX)*<#-h6YdD2`I{Yhq5NRmMRlktFiy!9-0 z=>juu@YVPE`L=m1i6Jk?SkYu@7k((~Y2dQMKf*J}yhCYu?j4Ou0Gm%QTfKTe;UVIH zkZqaf^RMl;71SozyCUnqMO-;dmJE%zCq#{tj)Y3qETyl*$huO?HN2uk}hu=Elg6-pvFD}Cw0<~7YH$ico+t3bw+$wztb{bgQ(CRh)S50{+2DQljP1J}T z?52a?9xPut9!f;iR*hFau5nD^9lVBbxdCcDQJlqrUr$LpZA3V{hRDm=6!1`l@9oWyw7&Ot?Rg4NTv-cK;0H)Q4iqt3|Nq?3B15-tfRn* zZg|kZRc5SW6st-XU2`%uJ)b%Jw4RIxM=Qv1(CMm7&b0<1$T?;!KBf!tz*{qWp#>Kk z*$0umE5G41JCoSW41552p4paJf52@{Z?g9YajubShkDi+Q zx1aVeb0-F|QCJG`$2^&lPi<-<^>49JHXqO5UciGrNPhU1M6a9ZkN7+RS#NYleTp<; z0M|mP>t2uOl*RkD7Q;Dn^ri{_youc39c;O%OHKoXW9$&j7klIsTOKP-AXtLtp zK{RITrhnNX0a6!h@T=)4Nm;HF%ZiW3>GEoqcnV7Jg{zxkP$F*08JJxW!M4 z+9JmUi^GRlkMWOQ!k)`%xeM=`GIq}4~m~~2~wC%;H zk9Fu$Y%fF{zMjhd6Z+oD>T%4D5xwIhDFVC-{=H>0zx&SqP(c4O^w|}ang?@(3H+W{ zGuc8p)>u5Nmjg*^6AqQY^Z)1|C52pBVGxObfYZW9z7-V~jn_(NzwDScAp92JX7&%` z6?^fP@wB3*t6hI-b+i++T1wD^NPoutJ##BN0LA?J@BwQL^4>eWO| zJ12CJLLB^~x{d zaJNp}2Ldvipr$3QQ$&qKMv~%Y_-6(Y0`6dj#<%RwdexQ-PWVp6*fIx0>&KPN3-c<&a_V^#N%3X;5k0{E7Oj3T>=#Bi>eKgc z0!|8py%h?=#ra-Vcd0?{(wQoQRNohCo|$w6bZR-O^pGM+8cCwAiIB|z*RLdJZr1jO zmd^Wx2sCa61tm^JaVu#D{SR)k2o`2BH;Sx%`+|S$UcEG=+C!e&Sr6)Gy2MK#WgmUa zV+}k`SkcX?-}^eKnAh3(x9a>H-7fKG3LDyP^_5wHQ3&QN0}S>TTK|zohvX|N!Nq|Z zW^sWg6Ec^hBhuRk6~_}2Pm_|L*@TcHA2mvS?U+%mp{o<4;W$)<5&bW|-a8u3w*A*< zW*8(!?=^@Ly^k8AhG@|{5kjJj8lub$K}3t*J0U~~qL)Dsy+v=McSh7G$(}sV`|j`l z?e$yxUu)sM*FAIH*L5D}=QyLZB0dPoGpz>8ykOQc05=3=bsboRqHal=n0H(GLN?1Yf?CkNlyvym%nS%+0H$I%b)Yz^)59tfBz$ICJAd8lN8wx=h*dc1%dXpV7UXZ0cc#Dssu1o$KU z4bK&V?DHVF@qTybc^55MLg<39ABtaRhMfxEkdH7xWBK$}b9dXKHNU(v3O1;pIdU zg-ac0;sf3?M@Jt@_X4eF#Wv`}qP2flo_`k}_}`UB5PP=={n( zE-^4j`r|N!WlH#oic$~mA-lo9tIk^;h&vL_vqSlHHeeqsA4~!qFB% z76awca?xeiiI1{u2p1ReB3$!e*;HYi;~@-g%7=Mbplq3QynOU#;r#x4LW#0$9KpPe z+m=X9L`p&bh7g)fXm}74?_PK%=vlj1WH(z(NYm0T30iXVq~7OK+(Zv71EY7-9687t zqAJqJP2SMXBVStqtO=!bwOPqv;m&AnK~y+f-w#0v{crP6*wmYa;a##KtoV)ClL7j3 
z1dflHD#;IP0nNCU7sSYUmL3{%16zpWyZFN?Q0kI={|6zv684-$MrtCl zN5$5|_Mzd?2kCwe)pR;0=L9W-aBfgGgBoDqNq>REc^mDVeG zdnsI)gJ~^9*-;;NG*5RU=J;G2Jy;tZ*=^T_(HrVSF=ys*kYc-}zDK+;Sz$N<4i#f{ z6nPHXciLlv!DyCHe*oB0T`28?35vJ1sdi6bc46a9q?KKy~R0GB+&grFP*Ac@!Z9^?dvt)sA59rN3&50Sc^>AHssK%&O7 z(lWkwcdI}2QuB{gSjH2V4=v9cWK1Fv2w0^YUVx z+pIe*d#SP2nGv2i9sEOkl%*!{^<(%aN(Pe}0)vDrcE4W%uEFVADk~N$><6p?9 z$8}|qx-#RR17+9N1rYg`kPG*G!LpbDz=-(uNTu|&mv|2Ix9_sP#i`Z$JZO}*CD&9E zDexnl%aCsZ5tlPh9~6NjDUzPq z8XZvM5F$pFYl`;fxF3xi0Y>3WM6%%eb{)O_92b!B-lxQ)@4fW z`uct+_wn}L$oMTsashmn0b4O6e#qyeA2>7B_FrUh*8$@D4wq9!~Gv$IPfF* zfL6UStQ}liP6{d8WFNYmcB7e3hg~>bfSM>b0!Mg4e)^E>2ftMt@}4_4tT4T#GB*GX zcv0z+?_g=+fu$ycPU8pVZ?Amne&j8I5Xn{{PupCZ6yC5(j{ z{%@ravkI21C&_8tc;h==9zvtja(0H9Gq0%=pDZn=f6%l>gJ76voM^@gCtvmdIxlf+69s~(d4dMtG_zQ*qw-qUt1s8Qal z2vWAV9-k3@8Ws4g*@-&{Ztp^puaWgI^VJ(6!Be-Ho7<1%x68$VK&&nR9!!GO2TfG| zRzTl|z&VnsPRaXzH-1I~4iDCF+u;ORVJCv@1_hoVw2cx<-YL6D{!)JOr9_95l|MCi z+m(nI>|*MO#T9KP@yDWblqE0({(d<2-l$e!HfW(ur!D=@PaGJHCi;+z4Gn!IYMg_n zrG;?6JEcp(vL1#O#5Ft@J3RzesA|y#MO)1R!2tr0O^Qoa8+^c9kTqD)=F`VJ)G8(x z8}Cqiw>VU$9$urut<98;5F+ge^9vNOCvg5ki$?KSD+~5K^LGs)OAFYN;@yDOVB1@T zyLW!c!PIzMFCL~)`F4w1<;#qLsKwT=F+^(UNw_EJ&10;E_8;E!ZK0t& zL+QV~r(fY;-qQ!1ioy+z)Imf0$N8hv^4`E=?JT8*kjV&@_(r6;Icm49mn_#jzC9Lh zX}pID>;?}G+cCORAZJUf<_`15#8rv+g_;<>vc47-D(Hj*6(8{XOTw@-W#WCY{-+({ zW&^#inUm8N1$y}t_VIU~w$P{2;qJ8Ybj?KcCLAJr>$nzyuUp?CYWwt?dOsva2AQI>YXsFs?Z*Z?f&9;h+$Ir2JZKx2kC zv$6VYp2)zi#uXoB6it;`HR2%i5RQMi?ioS`(;pyu7={W1-b+8lb!pDB%g~!a_HU5E z@?vmA@zq!ZAD7LPl6&qde;1>_8KN$h7#k$Om`RVufw&%Nv?wJOtFb;*7{GT^^6sBy zL-YpG0W2eQ18t(8Qt9JxJR;*r)H%EAwX4>iivs4Fuy-t48i=f(4gPO ziK0(qD`oYiN0Pp)FYwUJJuEn@v@BX9wgOo_Qto<|2`A)Zi)w(s(aKRlL$d@NC_d=g z$c${O&Ib$rJnNo8@J+<&ya*}BF3v9{@?q|TnDmQ?(Tx(1JhW4fxqO(@^x_R z#LniFt1l(z2RGkHkM>G`PwZg#6#NGD_AKN!8T>`mDN_` z6X0P1*pE!_NCrWN)$I=?y`xANkd=NocdLNFVA)TngTz`&UfLF+{Geyop<7)3I4Vp} zE}5VOsyoD9WtN{|E-u}(dj5R_UAqHzWx_FcbAbN^{z;u*j>h(Hb!_CYFBt4$OpAI8 zdjZaU);gsIG~ko4{SMqox`ev{4TeL)xdndx-3L>iXeF>2n6boLC-8WFlMOSvPuym* 
zLdX)iXdq0$Fu%k|%2}5TadjnRUTpX}_|5{gW|^#F;~+0PR$WwCJk2}=h=sH8E}-EL zL%xYGz0v(Z@Sx?ae-owG_O$F{v0i`UMAfU*RKA;F|34PSYZVWk4o$+Ix$qQ6e$$j- zNCeqCv74(X)Yx}2G&MbWIoG$knw+hb#dr>~Ca|cw8~kHwKmP%qig@%Qs>n!@pxRpk_z%*BbU8Xn%<36}OgF>5*(%JXfxB$>3e8GV zs_{9Hw^xv{#Aow7=1)g?b@Jhh8upZGBWC=z?OcfZEdAyGg8?yoo&CSKzx^n-kc|aR z#vjlkQ1p6%`;Onf@dYOLC#1$lzQzlm?M=364G<0*VUrhrl|;1`t;XL3e>83PQxSkaa?60M z$Z(y*yJ>^$3sc|%yfFAL;0&4D>2*YpY7(6B^OiotNsHwbe63CbchqR{jZnmPEdFe= zeGL%2DK4)6-ilT6iubDvaX1q7nMStK5R}JUrswIYzyf0Qe2P~I>TygWprM$JXHvIeHe+LleshxRmjLo+UHPWfl_;-#Dsadp;)OnM#T(Wk3QIw zvXr>(YeH>O7m`!;MD}3a0iW>g@Hs)}pEG5F-~CpJ+5@EI&xMLC(jcV5d+k`7=O3VJzO3d#o(MyWg?MD2V~bJDJ>QUp;+`Hh3_*8n!iY~ zcsifX;JX;hA_RDFbgya!b%2Yq>+k=v)irg#LI8!3BW2NRH?MQ!U&%ohl+bP2rl2c_ z5iq<-+i{1Ltf6IW7hNg%16R<>TOh7!oT}E^c)i~Z5hi#Tw+vkmTB;2A zv)^uY{ZPk5r7xx94>5JCGMT$I3bee{WFs{wIB;pcjFdW^A1vo$dxCCjSHx1F+YAqx z9CX%wV0W304G^o@U%td;dgbwnprmuh2R^yC{aa{Sm<#rNe)b`ZYZhC-C%Uh^pt=A< z3)rA;CApW|BUn>U0kVDEmjah&2Up`q%g>pX^uvd_*3P#O#snUp>iYeIxs*{vm4scm zIf|?oN~MjFyLd$AscDfYJeE5z7;oxy9pShqxg#qsm#Dpuv_~3I757HV17WQrzh(`!z&PFnUKNl`|E?v6%-VMZcqQJ;%P;4V9A0q?gL-68G9o~hDg_(sDQStJ z;=^t=G-3AnIbo%&o{xfeFAzgylABX-$FwuO$PQ*@KEA4^YJ@io$Mr#Fb*fcBK|hDE z04?6vVZA+GE$WL%w}spIWq|ZMepf((2*R9!4Ao$7ubw24qVvB=(8X_tN$zz{>;T@9 zcjnSi%!TCHqI(9`iWztkmjmW0u_4=+kyE%4RM3tuhpSB;KAvC2FY zzAs_RDx7?VduUg24nd#*Y!Lt$+i*qcEI%h{^Pq#03xCYZ80zbQt+R zC}_p|4g+p4E{xeVoIuPNw|fBIM4*wHSNemqtIDuOCjwibu)8nw00pH#`6LGvL3VVSw447Y`dfh_hdS%tI`g} zh*I2h_->!r(Ab*jlv?-A-ke`lgCoj#L$Tj+P~DpNi@mvkctk&ZEZFJ2uni+0le+Y+ z2nMrg(TyHlq!Y(TJ7X_j?7gt;5^NZm@MUI}4^ztN>oU|YDoa+c`S#`BHaf=Q|5JXQ zO^R2h1n+3BC$oMjVv?ZkhRU60^1guOM55%M{iI7znRYm=ge_eS7G<<9i3Eo z@It^|_4T#*V^(Tva8S%MgFf!hHq}qiw`gXf^09ReVP<|#<9It5(pc!z4!$ZGsjIL& zM!pTtr+LZGG;+>?XA?38R7H48+JT3qIO{P$gE)G!ZDz0^8-(0H0)?z^Xd>Eg?e4N7amP+8wA{`z+{8Hei8Du!&2oqu8GYRThNP?@fFO6 zL%8WDA9w}Q0^A2C5r+639_}!BNNJD}7JXE;<@4`;UY)L66SqeF3x{5m{a8x{``#mBWZi%b;|NdYm2@XyZ0i*=VGABUO*~9?p2aXy3^so;5!rTf`m06Ow^Ibp zq1Jo!8Wn^qLr{AV{3-551^Vjl=3A$LE)nnFB0)EFH%&!dE!N{_G#E0jOX@9LuD0P1 
zUDOVK;o!h#2`D7>A`Dp1j&qTWY$gQNor%!-n*aQ*metli;EW{TPB!{7%$u5%N(V=B zS^Va%hZqLF|D4~3HZK7m9?~{Uc!6LH;%vS}RD4&6m^Dw+rT@W0zE7&L#SU`9mQq>F zI5p(6uqRonD}3{?P3LDq@^)xaU(z$s@SV%i4hrTbvW8FC1|d((v(SwB@NU;eD`)>5 zGy-p!kl}nLNPhL?E@zXl)o4VS}wT9kmx{A!rCJAWOgg*N* z?i{H2=a*oZ0yK<-+c5u2u1~JY?YF4lus%L^!8rA*1E^wNI6x}{LFiD%c>7O4{wk%{h<(4n82`W)l+Qg zri*t*@op|2nW3sx^mXqu!R}92pYFwK-rfv6x6S)rQLtu{D#Zi~x>-J$G5J<*8f_sf2XD+x0J3_oyC2uDkJ~5r-3@4V zp35=#bJ5RRnK9C2nQnEQ5o$u2(-{>}Y4#&Jm6|Ku|D{_$e3 z?F&UQ9+m!uxoG~(>Sb?et`{45xoXF|8gELPi!~HzFgim@d45@$I zRhS3)iM+O8hLbS%Utv}jDsmS$^|eQo`MKGIpF||qY?$zD&H=f7x{Yg$`#k!pPJd{Vx8=l1d^e+^ zn@%4>S_q{-gv_;4#>ykt2;b#y?hL;AW15~ETvuU73xzwPx0^2A(^AtF{CAk_8C&#v z?(w$vz>qeWQ9?&}xt21EBjBD|;#&#s1eGHi%~Nf`mu%03JE3wJrtDhH{hk>zoBPLZ zCuWVwkl9sCrEj@F^Cz)$p71BfWq>Zj?q4lvl2aj8rGU;Uij8{|Z$gX`quQ0z`bI*Z z9*A^rZNmp#%dBtd<(?|I@i23zeG1b1t*usG79KjhwBXb_?!fSS`EGlvlu?iZ82c)R4O~3TVicho|lI;|V=V`s9r>%$h&MnD1 zIp;@qH4j5fhiosFz@b{&xpyd2WV!@yXPT+}&){AuPwxoO?0$ZIUrL8zWh-LQTp^1Qi8Q7FJ^O@O{KZR(MgKK2htr{Pu(93;q&Jz7mDDGEB-JRMV0-THwIQ`NiSpIHox$|%ZnKn} z*5|QMxuzL#De9Uq)DXNAT&wq^*G$yPi8&T7@4x0ToAKL$FqVy&_!~)Ol&JlSi|XwD z<7fvrOVz|t*&)8D(tw_y)LhZ#v~zhZGsoC^jg(itV#CZ7QtPZ2Z}{%qhK~mVzHVFo#q9J&(b%u(pTQ+M>sd2_`D~V9PDQhYClthS+FThuinbHW5b(s46^)(~ z*ibVz1M2P){gaXD%_@)iYq92^JkHm=&b82{d98}T^~YZ0371kchaR?TJgVpajee@+ zr%k=9^<|LOAL@w}crvK+B4869acD#nmZ=le?r%9W6{~(y(B@;LQ_tJAjkESH%B)IN z)RPUw)MV29Zj1G=3Z$X|0r9rjtgK7+)}AL~Sf<#h!Ws-}HH;hXe*L&#cOXVtIeFt< zch~=HT0ZZ!)$g*S(CFX$e+3|>{O0wN0(~wjawVUc!*ZI>Ho9JkYr&V?OJDlBil1gn z#gCmX-Y(75+=f1Ne@TT05e&LJA4@KW-z%6%-yU5Yp%w_WNEdsJ0RO7+^WqK6008a(LT}jX4wlC z)gB}6w$a_kK%mMPJVrFY#xTf8_&lQdQ>mdZ^l@FGhs9;U7;3mLcsiuB>Hh1Cy9{Z5 zM33t?qdUh%yx4*qs12P9H?KXSd>u9rDCLlZog zu{b9qr-&UV{pG;bEsmC{`XtS(ICwcV0KGMbtwJ;gU~;$L!=>N55{xVvMYXB9TKu#; zM`&A-5f>6>j2RdeuuD3hIEoXO2S$xs`%iG#Mm}cxP`pZ6cf$I)o7-$C;mUq=jAiL3 z4(Pb6cb?qk_ZN;9;WEF*6|>f{CUcqJZ2a#D`a`O2-SSXjCvoOK_?O=vHXy`TXR9pX zHWaf?uzML1bUjoNQJ^hrk?OCKoxh)SNZ>Jok`*sQPUK=g7 zvhVjR%@Q)j*N7M}Td^Ez;JDMKVR;?RQ 
zxMoBCNqf$G`6FjnstI?z%ZY!g5h|$td~n?vF1Sr~-F=$g=j#5laO)jOX1wToFB5r0 z^rp-w;21XYS-kc9$nLB2vuBOjEZu^`>+{?(lYR@E-)Ha83_Diw6FZ-85(YqnBC?%4!1AAUVU+i4kKlGPOpWJ;WhE0;rex_%l%NCQ{=-MB5+{~(=YHFsS-`!*# zhHtn$zod3vvNbOUzf{x7!p*Na(PmBbb|9uRkzFR|eQ9Bl3w$MZq6*(xIJ>0D0tIY6 zZSgp8Y>;f%J98QOeFXQLl{tF-7$fzDTfvQp_q}R{7?Sz#~Zx={Jj15 z+4Y^q>k;)4F4=SZqjuufniWMj16-Vu572PQZ^uy8l!<;Z-l+Oosml0-huWcF`R2vP zumcTn#y)YvyuWnW;}34}1CI%3mZamK;i;BRwb<+$Q=C+Q`w#yyN4y($YLSNepH%Qi zPlgYCo;+dcYbZiSFVA0p9|Fd>Ezt-2Ot2~~rN51~|1AGksiZEyX0VO>y_j7N|IVa)vKd|69!%uSs2x<&fc$fBBzPnp?}p8 zbduS1&azBdoT^mP@F=tLt7po~3>SSXu8Nz<SSFynp@`D-!za%1BSW zFcZxIC;x#v^P|R&6o0YAsPfGmATyY36F0J53jX|_4wp@|;QOiUU4Xi;)G1&P1qT;k zeFyHziTs&P!DX<~z4aA*&e`8LYH-i2mbJW~kH#}o5BE>IM|X0220j-285_KPjfNh` zEA--L2#(4()|>qXwB7x(CicIQ!9%=eV*6rJM@4FPuQ}gB_=?>erP>?1Ac#f{vr}-9Op?m z=je{wbWsJR4eC&nv_$O&KX<<-I!v|1_A;oKH+lxK13FH&UbFtgClS4jA0Unb)GZ{S zWR0o3ib7$HAIBV0R#}n6F_6s%#*M2sx-g9=7cDFeHSXvonuf}AHuM5B_CV_1vLb#> z{>bhv^=py^y$JaHtsp=CAqMYId@Sje{=0)8{yxn2Oe6v9hma1`@$5aDIV~G5QVe7T z6*>P#5cHc+p2NR$!*?qMmB~hiZIV--`SX?@#%RCe!*54^80TG2K* zSs7^GPF`Pr+;iZOcoX>nNm;Wm%T^Tx3fp}o5bZJI{@s&MX zRRw{5iqvD2+j1g&uLdwW62Xx5D1L(4ig+*z@nb1*cK_R2-vP>QK4AgbET*hbWSRY_ zo}XSV{^vyeP{ZSbnK!|nVU|Idw-@#kh%ceQzgV((28$(A^<@B=!)!<@=*P6i)+da- z&_DI?qe17}I>2NtT_CcYKF9sUTO#AeWSci>J_=jcdY?nJjYc6)*y5ZJE*UPw^>k@% zBjdO|>^1^Wz|Mew@Jwepsn%OB1+@(R73cgkvfME*1#26&DDoZoz-p7mHPV^lAoPzf zDhRERa8wW(r3s6HP~)}6+sqKnsKb|x+ShQ3fCz=h9XmL*^#l#%9aE{tgfVi-@)8%`Yv67tT%@6*q!R!D-x2j@5C0s__amDQEfIRppWo^ z!uhL7e^+YuZ-Oz;T;)$dhNWO6u}u7;Sp(DtY{p#A^@u1>@KezkHiqWTs!SU%@y*|+ z%d1REg1+vp?rtt^5R3wy+}I2IU`_&VA*w5Z@J84y8`Klf;;<{In^PA44WiLTs!YyG zLcS4$500w^^z{{uK4?ZnrqsJN!Q~66*b3J0{-KhBP74l?WXQ;;_2zm>eKWP|8wf|k zaMjE}r6MkzOEh*!&t4%xa01kR%Df7@uZt$P#@iZiT1Wf(@hHyZ=*g%z>&6 zn7|3>$HSKZn|q&G#hKkk#Xy-Oa>ZZ%u!y^FHQ@C|x*+eLJQ`fKzOy4C8&qpWrcwQ5 zn1wTB?}1W5zsAmQLLc4Tjx25;rcc&IFQ0JCBY~Cq-k1O7k%a@nZ^OyBgH|6|Dc7R8 z-Wb$Wet!Op5tfu{v8iV=Q85HXoKfiR-YyLuJaJ?`2eE8QRe3%xmQ zn$7BEtAaP;#h(8~lApy+^#{jrtIix$(c5<{co>-&hnhsPKc@`EP`L^LR;&6T7GOx` 
zj7RTrZa>8;s_66%Sfu!iJ-Ulu=H7$L9FH$gMTfn-uyufTB_*-|h%TUI4n)GY@U)0K z#`s`v)V{}WPz2jsLS@!1EBVfn=Ge4#gp2j@Whc^O!9wpGZM36(p>+1W=x1fjsv$Vk zLO(dRjWTKIK&b{C@bz!#PR0NU_r&I?!p$+qY3wu@-{~=2%u0lswN9m6I^N+BK+uY$ zAy_$sX5;uR;kOE9j}L*AaPZNtISaS)m$y=BpMs@`OGO{E)=a2d6spfF<5+e|n)&5% z*b21YYcUixmTv5j&?MP<$t6T%;jmnazr}{X$9MGpvvuK<8~VG1ObT9f6Bh63UEN2Y zECBw7&z6(oP}f0%m@9fbmPm!GTV=a~e!+ImKSu4_#!d<@I~^SPaLEHB^XgM1uznhc zt3ZD=V=q+VSx~`W=6LjIoowS1S>EytB5%(#?)_NF%SfBF9ClEQ$ny4sU(TFfA@ffX z@iam&V;*SXyw*#6UjLB>nWcv6GGW|Lx2q7#L9edj{h5vCm^u9B^u1v(fs=6PjxpymyS4jwAFf>I`0Z5s|y2wI-oI zJ7IC<5xlDkl`Fp9y)LRCn2gf`tntR}!Jp z4d!Rh=**w40F_HsG{HKT*lxwX&{Bhj2Bs>D{v;KTbCc}Lw|3WT2qVGu-3<~j`>*xh z@k57N*1O2oc2`n6*V{@j@l@3YY)I%%#iWy&dh=` zDMnZxB!O*7MIPVD6tueVQDD4l&arG%usQ zkxqg_sr;pA@hlZFs`0lO=p2t9*6F>Yz%%0scPCx6w+vFbXF7eR`$##sa@|KFK`|OT zV<5Unxm1VAlNPl6>2}uyu_4HV2YoGhUqQMGUK*!HZo8A*lP>I8A5{Rm-n_Xi5YF3P?oc~YY=zc%);0r1pMVROr!8*m|{?!(f#HWGlFJK4nIUbb5e#S-^ z3d=+YV6%@d=*2xZPL;KHPDjCpJl`jH=mYqy>@9d?N(UG+iSs2u()Ydgh!;F?!$U0I zrsR}M&>|0jI#8IbjRb=}CxFE9Bj5WF)kuZ2WSgfFuI5z8C}M;6-zV$*q|CF@Jmoye zW{f|aPHGLlW4&cB$_BzOBxAAPOYcIx)^S6^=X8NaMS|HM*4%?aMH|U+Y0X7Ff22`e zM$6!}U#{ym4{EosLY|lX{PmwGS|V@C?K3oQ>M_l7hluqRE4znN%rSCpRP$`(K_||| zEpb!$k)*KGHifKd{4Vl3on|KY!#^ps5A=VMXs#*(Qs9IpQ3CgIJ?$fK8YZ5iwX3lt z9n62P`mZE}MhJcLRn3EtH`M^QC4tO$k8t4veW&A-E=q6kumQN$9OU5fi8=NH!KJ*7 zc720LxfXWz)L+G=`{UZojQIlVAv@+bnEtPA2DVJOmf!AD;t6=OO9B1T-k>WhN__?= z2&Pt1$iBDU= zhUFc$(i=tDdY>36fsT*9HW-u}Bm9NBO)~GQ)2o^!{0ylFfFo8MYg1wxct3lC)v?9h zR>T^J;+VuFDKEboSUS;;HcIOcJH_e+9Dc6RT7WJU+ebqwY}w(vARk0||0~)EDYg&T zghwv1%O)Eu*K^efQ-21#o2dW9(KPLm#{l${Du|;2oA4-ho+e8vh{s?1jO~*=&ni~K zH3vuGgrcXa8G`oqTBh39a2z>E0fK+?Xc4(D8D5+N!JN}Nb$F^d1wjwTm@AK~l|Kjw z3Ao@hz$p(%+F2AIBra|bg3x44r2kJ8e7kEEBgC7HW`Gx=m1Uc zp{UM$Ug8brOZ)F{9V^5r?Y!>%pNX_X1_(Pm#N z@h;H4!$Y2`-cMmHd_73>8G`M{@C=y@OFZn%@wu?w!sL?o9nVwKCiVrfT`gfZ;-2;3 zt*0w7k%UUCz{r-JGsWTq$jxo5NFodrQ-oD`c&I{=US(C{)Wbby{v z7ZAWS_oSFErQ2{T=e_w~W)#sL)+G9dkl8E_%^>5_7V<4{xCAKjcHu({-uD3c?h$u_ zzC~{cL-sI{t1Q@>BDOSL3M- 
zY6IBr{RmvxjmLh)z^=w^9R#{-)R(APTkByQ7V7z7NPsW2vJ;;KJn!uXwH6zMHs=W_ zKkuxgzc2qYswuMpn4?_upx_EvzrbGISM%IEEvVRcm>va(aF~1>| zQzk7zulbU*{2NT$T(RY6u!2iM)n-^sb9G4ShCkBWcC`PKOq-XDFIZ!#P2w(C8?mtx z?hVIlR@Y`(jFuWs zeTPRVh-^>^`8Sx>GYiCaZGy2IjqH15VTWPB*AXBqL#(;!4%j{LU-CF`uUMDIIjnDz zyDl5smZ#%85%@IFaZ2RwH(Xml3YDuX3w85Gel&`}q|O^_oyp9x#v9h5q@g46DLIBS zVvMk3PpzcXEoySVvHMj#mqAYnW_O%erV*YK+3w)aOckykoOoNA`QsKVWo1m!_*!lD zRiPqZN`;Z|V^$H`Y(i%KFf2fz^zBob)>K4d9Z|Hn=B&DEaJ0hwg|!9_b7>eEEv5sR z+S{;4gjt#D82nL+bdh6j{Clx*oy+aLj~W=LI@UCGN{zJthm;E01$wm3oR4?l_Id^d zZb-G)KG5V@eI^4P`E`Gf|B)o0H?mS0gZE_WQlJ3u$U2z#39as8sZIg*U?dHc`zVN( zAn)ife~fm$64AFAyC*)OsY8`Zql@$-H4&OT>Zi4{vi!r+rS`Z(4X%Qny=ketb5!Yd zd!0b#b<2>DvrQksb9N{7>BJlixYQ0}NEKo&Z{3w1U4!q^-_%zCQ&bTt71k*8X~~=X zAItSm+Yzzx!LjeLli!+h$73tK4m!L`{bhYoI*;6L!-I^Af zgnq581Nn~NkmXDeyqWMjgrYR3gFe<}+a_Jx^6AHqY;%zmXSVJ?i5_O8FIKjrD$SX* zWNoEyPLe$SIMz!_#)3(pc*-8Nms^Oe~6rA!69TGns5b=OswSOiWx zi)pF+!G83g>tnpqi5_624PfyIi!FRys3QeBrbA9Ky((S3+ zR$>4hU*%B*j!fa?l%rVDb?~rJGd_;}S}hZ^(jr1f*@b!f7;PLzfk z+Sr{9w(WTs9x1BrjsEQ4)vFTxm?f@wi_co-q_3W2q>*y@7Z)SlPL%zU-!~*h(b0=7 z83k8^YCzo`Z1ZxM`@vvnCy9I`RlC8D>C71fwgQc9FbczRcLt`3eHj3x&>8BwO8xwC z%0pD$sv8VqOoL=NyoNZ_`sy8|`(ayept&_Swtn!Vp>jyR!uMaUji>ZO(b$;7ivpA; zIHk=(Zx(ALGR@$jTkSRQ1XjL%jUrf(kb(6y+WH{Kp$rp?;}6<7)cKMm-YA%#`1xVp zj@2{XEZaQ#7CpS>nX1xLOcM~lWL(vRwd{`RME zSqC1_u5ib%01y1z`|wVS4q3DI?H1|f-onN2=o;)&k>tfrw}L18BHI`WUbz?ffTkwx zZnmTf4w#<-(_n&0YTQV(Q3d|D(sxf$1Z<-T-I{c;*;TVJI1&PU zp82`xKW}u3#%=dK&OkW}>$M?TJw&0Hz+X-N?H7`-GYMlb>vnt zI_d!f)Y0&Rmz;~D?3*R0eF++-0V9@J0kjzf(=x#enW`==2jQ;GM&W{i$%Y`x@( zGo@a+yOng5Rvb|fF?M8$hEkB+G$4}s-i2x~79qO>1fnNRVV5^=)0lC9pcUbbd z&bvGOj7fbz#eaPL2;VgvU&O6N8Aa7F6lC+HP5EUl=P)2#q0(KIB0bMb^?3&8{3=4` zV5lK-Oj$=5?j+#g>$foe9Y~qwUbV18cr?sQI`o@C{n5*dL2S6<+4#?^YZaSzQ{c#6 z%s07PaoCGLUa+DmNrwwlvW~n+X2OX(g^9HFjBg$T7b?>M^{?<%Uq&bl8T7J*n5ztA zdPRks=XQ{yM_ptIw^<;IT$gbR%^Wa6FyF%{D(LAPJjG&b(Hqks+_R0MKOzGN*0q%n z(&Ab%<0{cPACu$W{~oofAR;p7sir=>LQ0uxA-P?9!hzWPMgQ$*_6lpMLvA;-?<{CM 
zPkmiD;|Ci|3iCycHC9OiL)uVaTv8ZfuPU8;U0qToLQfXchPaNc0$jAAn2aJ(Mf9d( zO~3BKCzK=qL{vrcJ469TRP4yHVYk_auXt-}s5*r`wHk#P-LGzTjEt~1cl^2pB)R&uyj=lTrSHLvM2)4ZE>QsN}<0r8{O=`IRNCg)%6te&36l zN?G_$M>}$Kk-|$;@L>ichry~8CioDVt!*0on--h<2X{sM%{ zXp1ldo|2ZnOIAT%#}h%>uuagT8_j6mHwdgV=d>l!w=)~UX+w?6$#XIjB~5V;Vq`bI z^H$EWMhK^lDEx;U;WgWb3n56Pw=**n&av5@F*!mj=ov6E#jF+^l8>Fhs|Gm8AL8KF zMTw=?w7l|LrRX`oN6N4L2^-`g<%dgqG+{fXah3j1Oi_dM?UpgCgYtIaMVoWnp*4-F zC>L_HfOw?q=3Y`raAvYy6rBG*ozmLaVXcubL2m0lrS;+ zV8&JjMMu_-Mg`|kN9C&kTK#Q2kBJuyukhJ(v*`t!7=~$H(}GuRX5iBCz#}5OHiHo1 z5Kf#*&NPBFzV2aiA(Wz5iuW(7 z(C3zcc8^kV_%bb+aErUD`J0T(l+OB~i`H7tSEHL9RV(S%f5}^b!)bWanLTmJ2T~5xI zF*5@}Z@>kq@9SaoB6!T0hLt_|G&b=KWMSEfu;s0{K*W!viq#%LK8dn%8XdIn#{EzV z!AoXL&Y^HnNVau=M~S6Fv319e#OO%}#mDxNSfxhW+1J$8C;=azL9jl%E^PqbK#9w2 zM)budUktn*VO?(^T~}z0Qp-e}XM(W@q&9<--?P(N+Z(tK>u>Kdym&@^#TbLUuTpi$ z!TIfq&Az3;3E8GLrUUkZUkySqpX)@Dl}(KSSd^G9W**1<@pX^_hp0iv>I3yeM@w)J8 z^s_zfJ76bBfOla*p3i4Mg#k%c<-CVRB_45NpSG_KGH{xW+4Y~rmjoCmgeqFQyS@f4QvT(@2(1-7ko}UseG(`68>gG(@ z0MdErym`f@bS!@YoBLk;)Q^ia$H((wo)a%EueQY4-&|ATEktv{GI{Sa>x$13qp|I> zy`6O6cK&a}G*$7ZQZ2+RGkEXIa4o#_#`W%Qf6_riW*Ty9qPky}zKmpJWkViqO5YX8 z!rRnzN|{FeD^wkHlfGL$7tVT_3pRY<>PdPY9SkqWo)&QJ;0GwINLXeJ&}?5a1N6ss zlrx^v;L`O+xqcHNHJY-Z@Kd+Jm21mgiSiWvYO#Z@YD9nYjZWmuC)^ph)sU`Aqlx&h zXQ(lO;zfqJk-BFM-H5h@>k753YczANsNw(Ab$o(Qe?6-uZ~l0j!KFz!v$vMkQ-nBH zV)NQpP={Jg-h~K-XMf0#BRbKlNQx>n3f)AG!$Lq*y=8I;2T`Oi=Xw%HY0>9 zlR#=Wr1V^{6a;pKLrnaPlL^p1el(ZweQho{4PDCquIdeb(FjnjM1tqjEtoQ+F}ZtNyy9HrC-;~-z*~At)`oBq4t64iGwQII;D0Wv&rE! zvRV`>9%JIm)n(mqUY00oc^ah@1pL2N^i)Ft*>}|F3mbvQ@c-*YkJ4*I{VJBI9GP9=+iyL^Y$Ou707j=tMe_#r~4!ql2{@ zNKokN=fvaiK(3g1k4=s*&QtkP9)vMODS{D8GEbvWh4F{GkwpPpv;P@=*;9c?^S(LG zp^l^y1{noD5E(1>*if#gOI5=gsQzb1cm2N|{Z|Xza11b*m9Bn+aaCgezZkR=?kC1S zOf@N>VWh|PVhT03)R4OBIJP%R6#osr*haoku306X44s9%ISWr&A0vcv3!)3#DLpf4 z7E7+<^1l58PAe~|gI^lZq<;7rcFwm?&{F>`SKnuKH5ouE2DWi+|F^F%Ua})RIKFq@ zlme{o1B$|L{%~UM*F{owlSD8_(|Cd}1zrDZMz=jf|10MC7wdmWXv&Fbd12~=PMVDn zc$m~bg%P28z5uLnb~a_Jt*7gQ5xf(RX=? 
zd%&_oX%(tc$Q&&sJ^GP9DfE(^VH+rfc?mG9JgPLU$oIdznUh6!7Bvg%8P*3~;6?m( zx(nriHllcFN$-^4s2Lc+z|+Z^AIm@llSf?SxunbWV({hiz^qoH-m}}pJJ)&hy z2=5d95Y~F(e`p1HB3TzS^v<;CDNa96n4#iVLqR3Rw8(1n#4k94p`Gh<7_6CRuPBjs z;QF>~w@Cs@_`HuTZVK7D;}k|6$X7y```u{Ph_Q7M4yW6_l@Lazl%c9!UY-{A58nEp zP%q>423A6^Dzkbr$NWEhu7J-LooVR1;^4jeunJp}n!S#wUE9TYON~8Je$*V6Y-h5@ zlwDHq_4uDO*0pYwT|5DYwm>t%hGcIl@v5RQ9sSU0@cz~i*=2=2P(S4c`3;p&)0EfN1Qdn#Q3`1Ov)AMT7c4Blewn$oC#15 zxlWjc6N=aH%ccQ3Bji9!-QS+VOTt41j3YakHTx1M75EXLKZNE&22#cB%SO`i!S8?m zID8gzS~m(3u%vqxmm}gyTubSn)IgKNG1-7@hBEpS)mUEw!6x(?pZl&}@+Cz|R!=KB z?Z}It_m9ZjjCnzEbHKjB{nI|gGDzl;*wQ)IeaS)dMYpS%1F1;h!h7d?e8Tj@)NP^fkd@P!>=H5B8K2ZCn4#M_l|P-{HeoYYZ6ch(5a55+S_VVC^X; zy`c=oS4^#EO_SJh0_g5YiDTO?nn3iwl;frJS&vvsHrMx5C z`70wCs@tepvLN)Y>t9K*xP=)kic3ituG;N(3H7rdAxaWI2Ua%V?iFMfh|esc5DvlT zIUaxy2ke-E zfIbvl_Gr^!olLXo(H%LX*N2aDdY%>+PC7JD9uTSk#aK5x_HMfiBTLpW>5J_SC&)h6 z$nrN1`wH{rwB<})#g(M}#*ZE!K4uUQr04m%yN`bcHycIv$k^>kBAvNdfvp;r;w%6Q ze@--zdNJ=1E~elV73@F4EblN}1K|AHfH2+y--&Gq>IU;=D?DV90J!g-{hSp@OvYar z-CY5jCWK@UQgX~jm`oFjiD#|r6~rULAo+a6O5rulamn#fByNMU$S^XLR3g})4Bzdq z7Q;>pgnHgyB$@Cb4@_$yFnTi#z%`($>0*lKcoSLO1|CW}KbNoB;2^;a@9Lg9bY`eM z!2g6U^@hOtqi<@jjg3EqA4$0>hV1k5V--}Hu+(Udw`0AN51JMmo!MnUFQnGo2}MkY z(3X=M4`q1xZ8JmRC+Iu#!Bm z>*cy$d4~4!IiVnGI@@|=K^~k&K?$NKs(GzV4w;B#BRJ0eIL~mkDla3L8!T4yQA&Rh zt*gI>4arkfb2S1NEgek$x2;b5BObDp91zUUEq&Z?ML3y&g5tvX|Kp(F4)9VOUzD0) zBN8~4Za(@Slo(sQ229S4RzP?^+*)o8y_X*Z_{;1NQ!?T_^E;Zgi3qFQp#BMGG~@b~rWP7DsyRY5JyM7C-yrHWm|0!*(;5|Na6J{FTEgdHb>>yrR|bo4ksbe9aZhm8 zq@eZ=D?X~XVUh6>`53f_Upg2Vq^$ArhF3b!_=tRg)(paTG3z4FV^8j}>N{cG(9zyl z=oFPCh|O-g#Ar(y83NX&n{us`OD9!fUdDhdwgIfQ{lBz*O9UCMKP0e?Sv+5dZR$!L z%Z_83ix^#Gc;mVva1#3~G&)~e+{S-7oFD;t7uvaQ^3hrsw_bCZ|LN6Thuy7cQ<0c< z@QrwWhzMqB%V_AfF5hYepI~$7q)!WnnJ_}?Z%y3!0bdJrEFg#!CKueRQWGYxX+ZysJM&iuu8dzblwk&Ee+vVbN z)h#caBozUGpkg@-!oKzL-ILu?BNE2kOv<#xTClZXJSBwwkhnv4_@?KNt%U3%?Im z8O^wiWTll5&A8cfIOW*Q${01k@zeH5#8`YC+%O_HvJFN|bQWKcNMwvX_~h)u^D{1! 
z(R)-G83@uodpJyk0k=T~7>VnD{_z#J%*e6KorQuQ&%Ie{7^mrYbSTA0Igk=otDkkE zp!XZ2GrGZxMZ44SCXaX6-nHVR10kJsoutEE6JU zfnoF3f#Q?c-9bdqL~|&R&-_*ct-n{KQ2a|s7Ff6a{(KHu#DOV#w)#Z;dVmWVH(kt% z9|-}m6Hf&M!z4*tt5Jb6@_$D1zNg?6t?FQfPrPDmr3~vVm44EQ-uX(x-zJ;l>ZX2_ zLo^f%V4FYY@*{*#8aI@-YJc2w^Re=jkppdvRy z=r)orI(a94KIVn>z9=q~^gcss6GiQ>l8*expYo5&BH}&y_6jT0n+?=J<+o*|VI#}8 z^FtqNW*=>Y#9}B8l;nN|#y^JMf@$77wDYH+nBjjcJCyv?sAr0|Q7MOQ*Q=rbp`uGn zJdm4RILGrqb~ZSYDv{`_tc+$Oon{^(Q}qfaBnGar+8!ZTa6lXDd1bMj3F3Gc1Nnb7 z68#>kMw0k(up$m^$DO$?9xa(??01UHm!-<1!IUd#s_fV{kj`|%Stz4nbN!(%hj?R! z;V=-XSN_RO*z;%gg)O$MC5qgfRzjXSk4X<048BIi5Ld}g{;I9rT!u2YSa#m1ktZz1 z`EO%k`1%{BM245<=nPkL<&wg=7x*yQgyl2pn1laSU=uqYT2@qZ5x%z!S)pko|;{ zU!%xKc=h2SQgaSu_zQjsb)|Aef4r&J4b5&JE~CLTYjhfB_RisUXel=;>T>f%@8;%z zmf9NHZNx${6OsgT3+8( zKo1)tt>F2Mzd2(o39R=7pMVI`yDz6f;o8H2|NLYqn|#YP-Zu1xo0$m|*=1Osn7kV1 zfBBejtjT)CbQ0up(*OvwOTD|tf9VSE9t0QNe#gjq{MAyG`0@^GGYm`YlW8W@mj2D- zsp;ry!8lFrB-&fd#)0C1`bKWwBL>72k@cpTZr09MdHw6&3O1+~Ml4{9upMGi8hmHG z*#O1rg6jy1=pnd|QGb`h5?)$k2BWe;vM)jw{-Ykqc7r}5ed2GV#8KO8p-GhF z>T(jsQcpruJlMeV4#%dwCz)cg@{XTG5PnlOlJj^*&TjB^L$?tHZt`yJDn{dr6pVM7 zwnBvGdZpvar<0#xu^{Y`0O4XtcLOI7V-}{*sGIAJY!VOxSkhbag6BYyR0Vr=9ts^Y z@Yg&L%CKku7Fl4icW3mboKq?kmR{!JhCUWq#t#W78a%=%x}4b8;?NR%fjrwMy9FiC z&=+(HD`qS(wpWsJZ@$3xM&vi6 z5^x;o-d_Me1)cp%)SOrG6WKrS=ipu@@CuFR_I?mYYBd-uIP0H%QI#%Dp!t_7#wp3- zy|6>zBUyDV0c`QD$sxfq(sJ8TIFAxiI*Iv%A(vzuFtBGysBomrh zf&+CJ9Tz)pgrh=9{=6821|kE!xsN?GEwZC=CNu!=n@x)%b|LyTRWkS#AqcYAC91QB ze@p_Wt=B&0t~>H(F255nK#thDzwG5=qeu6Uh1g)0o95kuy5a>uT6R@mx6JH$srHZm z(`xV~(ONd6zsYm7Bi294N8P8Ol~$69A5lRvov@Ux2pf6eFT(8n27X9lOu<`ctDw>O0}Q0YPc zqt+}mvQoz$Lm7uxtJV3tVofwcwb)M7C7 zf9STgex`sW3-oQ6^xZ^Z5_cG~lTA**GPICVBh=EFyO*iloEf7GWpPTN8>7|hZZ8{9 zbhCd78mx0y%Bzb@Y(7KaOw5m@hN(0upd3zF9b%1A_+PkxqwiPL!TmMRqL-qntTjB? zlB~jJaXFYf@atIXItF*L-?vwx{7v2{e-ov)0)SPddS=5HAkq=P-kzg`y}^U8gD(Jy zhS+QcQu$uVD%98D!j%^gdrQYyEYQ=d|KQtQR+o10Vap2%w2kmu8RmClpAh}RdO2w? 
z->h44AJvP{fGSeH6+4HCtC%y_V1)CYq4?Vid0X!!f^XkpTvUG>9=XG1f0%x%AB8^v z1Oy+gb>kUq{zu=&i+%9>UX*JjPJ5MPB}{0e^)F2R94RHWBNxfe@o&;W@Jk~@F!q84 zI(vjJMy+im;ewxshL*ZgsVv+?yK_1J@7XS7=8OQ`k)v!OT*2(G9ns=BvjEwKdwLnhJt{_jI##A^w zHGRg$=a>F99z}Cm2++h^@S>|Ny@L^_D?$I`;MN|_hU`VdrVT@IU86Kv5Jk^>-`h>E zt$Z34So3xH?k$M(De|;WX!nmf>xbiC`T86A%m^7~XEFg}-mLiq2(6p$n+%|}yuHMV zp!m7CY&QtkobkqZ1=UGEQ8Dqth%leu6ln?md5g4GGaE8!1bs5S9i5G5V6>*&_~KPm zC~@g1$w&J=JAF>;Hx*n+Of;p4&{3$Z(s(0pVBn4rI;# ztdXM^@Sl;j@`QH-7ZHNWAh%p`0Z}IjM0q#?aw^X4;9YJ@g zBqP6j9h+Y4v4ihGRfe%7OWYWu%QYiaKnZ?{Hz&o$;Co6%RwBm}DX|kPxpmRT2d|ys zF5f)yLAnCipOy?@TVJ1;9{4h^aeUt*#o^Wh^Rv*w|I6*u4oX;-pJ9leN2Gv1sz9btSerKuEpfKCQ0lb1)Z`c4Tjld|hM6`Wr5qjP zBrZ|)C==r#ozr$Gb&g5pKOOU8p%YI(&cBnT5kOAu1`o^nwc^k4O|^<?Q&r69<1Q| z^6Tv$akERYyypt080IC!sH7xe0tA6+`a((@Zpwj~2_-W?yrqBJID>y}_s^gHSyeI#Pne$)M4LvxLp`E7j(c_U4U zSJ#Y00Um9UlRFH>g*3&p^vKtf2rtBJ#KMxsnA51h^7%j$X7{(cT;q|Vh8_T4s$9c} zNBGkB5>Q>tJ}M}KXRfMAX!X~S_SOQ!6E@#0j^hlqA8VF(@y`g$hC7M*k0|rf&|@Z( zx#@nZDGE+kBe+dJaq5!7OmZ=%lXE1Ho{GE9cr-fBm={i{NE&3vYQ|yD-U?3_J;FNx z>2Zsd^UyxU1Z`z7d&y*R2RvZKzdV#?r+Yx`d@?)b(0&;CvLa7^7=`tT9>Wa^mnma= z!I1i>^2IF4%y0^EAPEbJ)@-@YPY_a^zRnose7ay5)tuRYyEzcWA_SYY%WrO#*`r0L z&{z{rc?D~l_j(Q_%o%qbK{4r>N%OpveUX+Y%JHNz6zutKLN0`p^|g(k1U5&slljKPB*eSMJ&G37xI ztzhbefX;2Sv?C3E_lr8Mfd(QSoJ1b-JYmy&SSc;}sLzEEjq;QitRUoJZXo zhWIg`fv#hRWDP*W-*rck@B7jxJ5Im_5MeId#&jRfGGf@yOO==7%9}=aA zPnmVv3}hrje5XF4i|MlgnCK$0;O&lszChAIFuN z``1l+z-yxD7i@kC-oeB~oVAlOvd5SF(fsaE(e@=H1^qd--YDMeV`@+WbCu7baFl&^AJNW){_ksRJcV}-5LY-Xum`qZThS{%np&&B~ zVkrYAvf8pN_?kvicU1nH3rq-qH^n=DWAhIr_Je~-z`7pK7Zv31_%0Ihp191Jn{!MG zuJ~V1ho8j^Eut23QJ5R}H3?klQyw(qo` zfK5;F_b3hh$(y)PuA+~>h5S+_A)pT;uX^B`GLq@_Df47v&`=@IK}wM5D=~(bU#qhC zson?B0?+=5(sF|09oyl#8wSKgP5N64EN_aHFspiZt=i7e1q(ukL1>cu88pY9}&-8EDj2N1+1_@5~)SPmJLEx-M;_S8I zB7uzDzlgqAFDT@Va6WNMw z=<|C}dGCRq8>CEtmawRG{?Me&5}Rx1i$?a0VfXAMqO79b(P^Q}xq%hpHP)Yy0N!6W^w@4<=!)mg-DmY8VFLh*1TIl*!BmxdJ7nZOyF-Pyjbv zs(YkU!06y8+f^U>93EUtcHTLvvp9%e14AX{EO=+AV-u!&GhBW6ZZW33ejw33eR#b- z?Kbx4&-xw-ja~ 
z@t>`izAi3&Qwcc=4`n|dYt7;R+g|+=W>+^V@ta(D6KiTf)r*i4-fbtA>Q0|XmySVC zb34f|%fF^AOWcy+)+hg=O=K;$=p=6vKps{7{Rk!z;lMeY$YZVdsi6dHsE6~Idrz<- zC=9yc1_fuP)d91`_GzCkYt>?WTG|S)TubmqRn`M|dMYjeq2omTP4fcUmhXqD^F`}K z$30Vu{;7sclhzONlEQZo2dt?tkva;;AiG`)P|FwH7ue=^X65hiMtBn>EJ4cERR#^A zf_^VNsl|Wyx)#w-B^m%?fAY_Cw3KHk7k@U#$I?9C$Rn|+`MV>E(T zo=e;{O$qicM9V`aJvc19Ll%R)`hZBh@cXKG+MgR5apqRfX1`X^#`hW9<<81b)*IH) zxE3K}UUX%`AF{G7r{28ag97h%YIW8>L;#|IYhaD_mgw7}aS*Tv&ttEm@_9MAL2wM= zj=z$?U^&L~wYv6stlhu$(JWi$(#LfXuedW^i6S2Q*sgyDayu7itm`tJ&Ijv#KWUHs zT`iVqH>~Sexd*d@l3AD;M&q1`cdT&^zmt=nwNLil=%2ZszD9Pr>^>Nmkeq+W?}_Jxb$*h^N<+6)wx_iiP-05!*u)K&%lx@SmkzOc4Tz|?=92B>3XpudBNPXrEb5gEC* z(piQDoA5QaC-6hP5e!pZ#OY;}VhNHi?7K>p_C2bS+S3Z4GPG>PEVlI4r(E8JA6G;p z&?30*btlDGMARlolZ-S*-m48{1o3Md7Fxf5B1dAFuOlJy+)-Ni@4{ycpuRmZbcn0} zeYO3}_E=(_OCE!Y^>u=6EpK^jX zZ7rq|8mDQtu%C#K*AH_qo#=mLf(2h}&_^woXdzh)7%;{gr>8~s>gg-pHWG|kxb|j=T(2_FmR4D^Z`t2%CX0w=X2zJK zzhJ)~Xabjs{96A}W-Rx>V*0KJdQSFy*ES9B8hCvDN6b{wED;RoV7y?~avR%-WNbJ2 zzPq&Zw#GPN)%Sc~jQDCpZlUx2-jA7K4bC!rg2c#g_qBS2*D=NWr-U|7bb2`R??ul) zJ;ua(e|UUgFtiUF``sNV-Lsd1A$HLceKWwt`AxF;vCJ#(dvnbi3h6i8lV4RK`Lk1c zi`L(t`19@O`-ikgz;^Hp>-x-)UR?oQvk@1G)h9%znd~@qB7?|{U^#bF@;Fb|(9YNA zqeLr7hL8O3)PdZ=MwzYWY)EilJO#Iy!#BOKxm*$d#meTF54{Aw$i7{{dxLM`%N+Ts zJ6)7S{=tt7E1Lq~ zO$qsr*K)|u7c_DG^Zpvb$r{6ZVd?&EuFWK*jK&YbM|$z{^v>`uGTu(-&;be&$A(vA zFkhg;H5lyO`*Kyr_3hlxw459SE!fngr}|y^ACZ<*uOg9&3y!pOL;J;h+^xaFWOiAbX`rqK-IswmLgx91ZHq8y$6% zOZhf;P&&5R57${;{u^uT8&6tfIy@^?)CIHwqc%>iWlv&z-0(~aPUJ<3uV=?EDVoip zMe)>^rdXO7OUQCy@~Q}pa(|yLRvxsAD#Z2LA{9Px@zZ$H_Ija0O}(FUP)p20P*7-}GAP3h233TS>s=DN$TeukwDCQ|Fia|ByDemMwF{$fLhpt)7 zmmk&wX1u;$_r$g7F+kNZs0n!3YCt{c$s147B`z>D&j&e|e(=bk=f=rBf~ha*YVstP zAk^Vw|KjS6cP&;6zz$C^AFc=;&c+%EG)9U6hu{z%vazV;(>_e4w`y)J0rF2?>r`$U zD}jcXScRfLe>f*!oy!DD5;paF46?s3TEuHTsI@p@jtilQS z()ELVA*9f?NaroY-ONS$o4h#kt%_Dod5 zC7uWw6e!DZg=c#kn_o5GqGIkAS3(mfN0jKLV|ImkWi`lTYxN+8?AG${z8!xc{HTr= z!0MZ^PBx_bko9rlk1GxAUP2r4Avj&vaikV5Z@t}^lT&F;kH3JQZV3up!NLy!6xPg2 
zpO}qYSr2y6tc%q-Pel9UcF$Bzl$PX&F|=>0BQ$YIl+sNg&oz)Rc6!A--aP{2UFcFN zKasE>m&20C%uf(nWow^+`W9A`L7+!M&U2muUbdIu$i{T#DT>-NdZ(DpL(^sI<~n~K z_x|JR>AsIIe=3WycFFRK^&I>H+701ucM9gs+q5mMm+Ct8@0ZKqmqvejBT1%ySxz)% zy&c(>`Do(ym$2&;q&3ejM)k_3?+b&7skp-{G6x46G77Skx}Q0}`=<5D^%qTKf*w={ zZrVb4CesqWzO83;_+e@?vDR_!9~;qFOjmyU_b5l-tokLj@>O_O!`D>v(AUHj?kYbx4@e?9lnB7~QeG zODmtXL}ByHuqK$ZPQgr|3vi0qp}l_EPc)y@Uph!FQ>hV?wRNT6fRWilwfKk?Z(Uuq z?rA)zXLDRY|IAaSQ-V15%|QR9O~w7eQSp@h!uJmlpSdIQ<7aU!dK(Q3E1{`X$+Gy% z$S}I2^iNr?(E#aOX}TCG;!YOz zE*IyB61yLaW3zC7_^Fl`J`ezF5J@<6^iC8m%7_XD<$R}kZ}%4KA~iCoLt8-{xxctW zGxnDzKjKYtob1PZA#7mq?TE{Y{gy{v&Qp*YF%VqeNfD?}vYo6kXQH~E)y0BV56sae515j(^6{`SY4A;N0H#dt z&^k@WJpswH0A(a#{GfjPcY$rzjU*=GlK0o6dMf^iSiwT0q4F!diPve5wZ6-@YmYzU z|Ad_0uW5q93|9 zS(9;(o+a^&j3?Er91ZyIlU6_)TxY#C(fh@+W@MDkv4;D;y+c)Ic4VT$zNus(s2j(Q zNXvPRHS1Gka=RC&81-VMUUB?gF4p6(?^CCk8+z6?1{0O8DV7BTV;p}cEG51K|3NEq zXJ>brHhJcw22|>+_HtaeP5rd%r*`5)4lbM>WXse2WWDMo!45fC)7-u{a@+1<|TW&9tlIL~g( zN2}B?RaJBVZHBy@m65j}pUVSD8}HA|;S}nIF&hZQgyrX!$DAIc@Rg1lVL6bN3)NP& z{2go)&m8i(aXPGc>FM9h=956N^Mu>y+&Ennon;BH({Uf(YRZlEg8#Y54VQ}b)Y$v) zGbIp78ypOKZO@85u6SndMizHj4Ch$vkYih6i2gVQcnzWJy$I_wX|&H$CT_h7X=9p$~G#6>$qW(aX}ql z9xNPzF+AFtth94z5ks7P=QYMGU6z}^qO(e6ne1;91xHoIaaCcvnpc+pm@V2IE*t{s z$S$Cals|qess7OS{ds2S^Foev?90N>=)rc+3p)Erz8b=)sm0g{4NL2|vuO1|_9mWb zCz7TmbA_@1R*%5=LzUFXiooFSyX z$L1K8mFL(uTFmw-Y;C8vR{_rj$@poP_ow*cl^Au&x3smaf9>TQHrVmhkCo7 zS#qxWQ*P1ehUn-^Jz4Pvcy1zoJFTlM;*V=xa82izm(ssrUt)6b$2Zzi`R%6@FU8AC zfU$5L2p6fT>cH2l;>@KQHH2fb=~VM(lr)_LmxcblJpPB4=_iAn)?EP=m?@Qb96BGV z5PJmz>c<0^JuR*wzv7(P1IH%cFH+YBi8zVOnppqEPu{m?VXjf?V-aH8!Llr(nNbPx zR5Kqs*qU`$+u@#AI-8)T&U)nh(G?wa47BSUL^0g5b!02-;JJ-qdJz_v#|tP>_Qr!vnCk|KKd_Sk^&iGgpc=WEkNrm&4!VmvIHi)lr`x#0a-+IWug?W&vs9N zdKRJnX{BoPNZrH+lO<`?IVqg-Lpv~qsJ>gV3N5c!7@2!>Nt+%1v*I-`*gRt&=oG;d zEpv-!O;w+1If~b9bAsFRJCp_U1i+IF%w;FK%Bof?91BdXAF&;10CjhYq%uUE`KfCf zzTeAJ_VWhvX4@tv^?{+jKD1*oFawqkE+1#nYkf#-@v8I>IlQyfIo!6t zpJVbhh0SHv`A^q`x*gK1FFbDq4*vdG(s6yfLxIc)JZ>4D$C&S*>9;-lXYiZfF|Ab@ 
zm(23Z&$bW$b3>k$vt9ftpuY&y;b09`D?%NT3pb~_}}&Y!tL)Anl~G9y$qyZld1jx&jRm~YZr}bKcpvDJAEfpb*RqJ zB`Bt5Ip5wEINJWA?0Mfq2}*>Y`EOb4QDFO@Rnohj2cHy+r!h8IpGAG$Hh(HhtjRH& zxJVw-83Du1n$5cQ2J~mz;T9?%so7NLSKkC0FXG;7sIZK^krX8yp7lw6W9H02-Q;g% zl$C9mey4F`eo;Q+%s_Hl>7~4CQ$@%StzO+IXsTzUgj&Y_Mnq12ql@i91`@tILeEdt zV^B+&LMHC?h(5j}H8X5tgG0G0F3U>at&0X?S^rQMhFRha1Vaea{T!3|bc<)+9c|H* zpB>SYJD|u8d+}1fCXo{X?_t60w9ba8v*I%QJx(7biTf06Gr7Z!iPKO&D70Qi%%RMW zn{W-IH&?8~!iSJ1S+^?^^_5iDwrUjNh2*@tbqAf4nJ8qjq8-PU+U-p#>W)4CZ=teW z*^JmQ05rsPA_lQZKjPSDcCkeDR#L|jJa7Fzr0U%~>131n8``xk_w}OMn&nPF-`_Lf zw)5cs7AopXQQsECw_zAMF4HMZd^}CDdWc~h&K5@st(dPoa!xxMqb+h4>-Rq|CrAfW zk~bZ%H*X&A39I&Acj70*$$9~Y0qX()WPYmFrTI(@WJ~b&eWLcm>Be1>&uu#C%_U&5 zlOi|n@UZd&B5MBPk66QBAJL%PQMKT$4B$R&e}4Ix5ONys&L`KI)5#Bg`Ir$F^7Phz z#5M9UT^K*I3^z=ZA^qUlj5`zedu`^gc*!}6e=s?a1RuN)Qn6h4aSA4dj&kIIlpUjv z;r4Q}-q){%)NVP1$jDRm&42vv?c*RRO6dsUX}ayCx$91`O0lFA>By%M-Yi@BNEtsK z&F{g6*TJ9v-6_5J6#)xCx}3l8JX)@6@;;tdO~YAtq#FIR60`C7*aa*3q@wkQbIG1> zb4e-5HpU~>W*Q!-jg>_8w5Y=kfSU6LSI2#naIeMVT6n-3jHvpTM?sn3DE0b1;JANk-jqM!5mL@7;f2XPCbe04H6=v_W3{+=c(qb`>icV65*>KuK> zXwN?}PP%AR;%Uj|hn?4~Js(S|W3u#xIF{JgA~amFo#>Zcyd`k@Bv}bfffVlso?_7{LDm3Ni?`C}YOdV(s{q7@v zvxr?rn%|BrXW*Jlhq~~>T;Rc)W(;+|Eykk+j zCp2Yvq(fJVpgxMR_#F!|0*Du-64S6O5-Hl@^t0P!k2#xPI_uYRmzb&Sw|Y&WLct-2 zG2~VI3;Dh!*6mvyzW`NJIQYQwZBX#WBXt@}7RGdCxOc92h1t_IKOXX!!IX6bzE*Dd zWVoH#VPt}T5=l~h2C@DGaow;DBxap#!L#0XY3!py4+>$AI-!c)(K3+g*3b(g2eH_*BNVGwon$mvQc) z6XE$lZ2VK5-$|RkQ>F&OthzK}=|*_nkqv12>?=NGtf!EtEQ8bm-VP6s56Y9b-s;C2 zF(!oc@0;#WgM(3ow1APJg&b0;H9_uoT-*0~sT#bcDH3V-%tCaQkcJnb%`-$-BJHhQ5#_mE;!35TxYL^dTXZc}Uurd#i_GsH1+w^4YcM34UU5Vq-ISTi0`ZKYAQ`8j@_j;7j z*J}w6ffnsdP{UH>8zyngfGC)uq@cC}AK^ETbo^At)_B&qk&Qo!u2}6bYYXwKH3Om| zMe|5ht+oR&9V1Naz-1q^HmrQ@V^%C=;r!^5!7~|PG8S@QD(J9Fqvet{19}PsUI6oh zz-6?9vJ^$0RRDEE*6^HqU3@XVZhW}Rdqw$B$nQGm)W@KW6S5%XImR&b!x~KJd`R+# zXVBMT(2(#_oj;!SgSy3PI8#xGFDBy`B8#h{5FiwB@Ett=!UTIsDiaZ5G8BO7+c&fN zx0`5#yR{U>S2psg#LZrpap0U`@W=lJ^I|H&)+gSHFCHiGs}-LcgMjU4 
zSST(`xFM+vHAo@OWXR4(+6i3`f1DFJ+E*=!vv`=$$YjyQhqYpdC@6JFs`=-jsRF>?{ zFkLG*PvUa%;Rh4ovi|}@zj=-WLUbq;$kJcTYOeVwSHFd_bkkIiko}QJ_H3{%TQ!th zk(6niQ@)=(;kgmrmjx7<7pbVSqWvW^25|*P3r|`>9>(_z`O{*TNrLg1cF_(C5qt3@ z{O~Yv@5fgM1GdvFVgw-{s09}=SB7{*^ z7WFJk{Y#7Iz!YZuk$aJ9UY+L8c|-6~$|zijb#YADGsata@UulO%0ouzyKL2Dxd~K3 z-Q`vH8w)(#Uc+cyh=KHAaUy0fd*PpOa86><{3I%*)-XcwYlR~Q1Ur*xt&^ozcj2o` z)=LrG?IOx~{TmqZx2XAjPuJQ01!7BHgYBa_tQiKqy3+x6v!O=iYX18O5&Tr5@Q1YU z<282qw*22j?yVpfp*#%cNHSWX0NPmCnpf|}MlT5}DO6Mj11SBVm?;^FX;zZK6NJhM z1@B5_LcNYd%`r;x`9NZqJ>HlFWAQsNVWjYH-t-=3%(U*Eb3~kSH`$6`RV&|Vu}AF^ zXJEx(D5l#keEo{xB|#MZ91@<|QhE9aMMMq~ji@N7#`lir*&+yjrmmU0>^Ry?cDEBT zEKBOOB(Gh60}et_NM1G#OM{KIgy{uTNN9F{CH|+&qLWNwL3oNY`t(nWf4;>m(XU-= zUaPK&Q&m%3k-R_}2UwGG*-IGD*w|#gS?+3wy_keJq;TP(pgnBOUW%22A-q2f)|sMui8ben>e2b_yg}kLl4zA zDH7g66e=EmgvOrN(sU;*++g@it+WVtR3y(Pn7>YF(1JVzoLdrB8IHG%whPDe#Xzm6 znr|yS+jaTc=>jiJDBvkBnvvb(Z~Iv_4+ga|WVj~IE#E(Vjd^8;B;j%SpnR)WwD5w* zgBXvjSMWX6n;8w^Q}C@(Q-0y+K1yV6{*9vh&$&!KLs= z0gEa6;k)wP*e~mT`=%SZL3f(DYZIQB)hQ0)zT9&3US826=bDo`ctA z1F(6&0Thw1K@VHWp>OZi=ngpK7NN*`9H_Za;PuxGQ9b{h3&altDE~mmb6<1e!0SGc z<~NGJxZ!Bag_FpT9y&$M|A9^$X$;0q7es=)jl#@$xr+B^Y zh0yqJ#urBL=P2t>jlkW>8{`f~s={>=n*>{N#sCjx7Ns0*y=C;xdScmQAU4(1eU#Tr z5Rf0afZk1AtgCM`wDi%&Iv$B#m3vQ%9}~?n;1&1SX7k5DW2SjDdDCBn;SNvq2+zkm z;^DK5OB+q+!Keh!3z2WPq8{ZqntlFq4sk!J^LZ5M^WJllSsK27Z1 z77%~w5%r1ZwOTHGP?N8KlMDjKWJxc5DThows@DT|DZH=i+P${$7Ulc18cXx^xcXugnMM`jYcMH@Kye5b9D=)R zad&rT<$b?z?foBnuY+~^9Au1SjNH$4&ujkX%*2I#Bi-gJ7?j zkVjha+~4}G;mj0PSJeY#Y>MWgB^z>U#=dXCmG(}FbZ8mkXd$w14Jp1W24F{)t-!{L zohz)M+ifsI!wu3bxV^c@8Oe}v&|MzHHtOa=i)*oX!etPVxXsJB=)Ioe=r2%J44hed z?(lO?KITxl9*JxPS(1jOkm>EyMuY}gbD6jCxnC)yP316b4%h=nCgVIil14u;UOp?g zTq^USkcQj$&tC&7qY}QBAMdrdpaE=&$}&&KYN`zh>>KR-Dvfw5gag|#qYwYI{~X?% z>#iCBFa6mmm6`P!VlXA$DYTV73agyCJ|GhIf2MPv`*J2c>X0^#XI+&|f8E-uYWu0c zhwOVg9hrfTYMt-y53Wdyeh)3i$E5U6};vzOnLF%93n{jL4L zd{mh_7ErAIO0xI)^p@Sjk&JK7GM-PmU#lX#8|Esr|Yda_o1$^TE z^0O8r-49WzgxR5B*ipd%{M29HO?>DQRFJpVF8u&RM}1s>HH~LqKbJY`nkQ)95Kd>v 
zMtg*Wyo-ee6hG_`y%$vabW=6$eH(`(%-}^{XBsxDP!Cig+C+0`-+*c;GPU2vsQ+;z14t)~fS4k6)O|_{(#wNA)$Oj=IK-P6^|t*nWAE-88hIVB2>Po8b#u%)Uuh zxzcWxSgvGnsTL-rW+6Dl9$^DH|WhPksthXB`0&!tFbU1fov1$;+DU>O0yhCh8*A8WZN~I zaF@gy{wTft-i|<~Z}07w+}{K1YNh9!WV;{!iE!)7+hTO(e$`KEn)DR0(4V$TGL=D4 z(wy&_?_;sKZCcEWT|X)f#7p(>*?aytmQ&5_D!hpJiEHABEPdynZr3im0gP1_ldHU43mlxe=~PpG zloCJWSNcty_rt2guK+-|IG|=m=COuh)!=4?L5%pc^mHr>U96 z>HOGJCe0O)J9d7;qG@(IsH(JZlsd_Y30~L50lFdQ(AhT0KFUoFvn^-wbK7rQH^5_m zA2Vg54Wiw+3o@;=G9HQYFYuKgLj^!0IsEoH7mnJC?RnQ*be!+8tTy16tpeM4;R{zE zy`;N{TaY6IUwA$Oi0NRn$ZVSe0W=;Ttjh%<_!o~Y*MP;N#(Cnt`n&EwZT{;vw$#8A zVBTtHa<{Wd=z}dsMArH%Z$3K^d;8-_m)B$Mi1Ak(^4q)c&KFNyB&(Ya@+(4(PrnV4 z>oTIG00G7im?%hjkSH~sboBm*=fpp5`$NQATu+b()I}} z(QR9Y`0s=X9})qT9Y1Kvna7PBo2QT!NE{J%M?c01xia`$Df8uTsV6=IjXAVbi@TMQ zt8!kwzGNXW_eRMDgBe~zqm zU_p<|#=~OEqs~!ifA)OLcpWv6k)a;Kn?DQ!H$m^PT9xf(D&mpYM{;!$i;=K z@t01MzZ1c@ol_vq9qAQ~j(Iu+F{@mHow_>H8qY9_#(M*T#;|s@9!z>1^2N3n4r!(Y zcKIkni}WO&QiMbq-!t*1?LATeWci1viFY6N)5aUEuYBzf?n9kMh+GupuJk(6M5|j) zB7n!7FMFO3+k&n8%Rb~)$d!s6_%HK0?Ga0_zsvt@P-S61Bv4ow7rsv^oRFb9V;WtG zm~7=fRNmNI9L}I?{kI$qI38#HqE>*as+Yl~lYyvdJrN;3T2()iQ|t!-d48}Aa_iM> zrUK0dR+?;>+l0iL*+8?vjw*xwZ;{(@Q~Xl#9X^N@VuW}CE?5+!ytgoPMFNnVK0=Xb zkB?)PtEA(CI^>X57MEqrjq9=TOPS?y1C^VpF7i6Ox`<j8SPC>+mS;B(b}D==&2NRF zFXaLowriLFIz}z~CtN;_&${hMGT$8mT79hFELP~hwqM5H_RQNlSIpgb6}C;)cT~30 zA=*cs>hr=U35Gz>iGwM(2Pc@mkc}tOadgEW6F1T^Q+U}nsPYg?w>p)74)yfsxYs46 zF-893q-c9??`h6lC{}LRh#qd_^n4O%9_<69$suzo;N^1F$i^#(%@Ybj;&v@C7jPn5 zHjCkply`z{QWUv3(@$k8-_hO(;uK(O`*KM5_~Y zb>1GUhlT|QdfdJcgN$z?tev)=06qkxsP8j+bU0nxPl7!9^$gXBU}i8!%2W8lhy7XL zQ){Qg?XI&(BX;E~z>cL0pQ5oI^QB0fviD3;$VHUPLV zg_EGDVp^4OSPS4Yu=6Kq>YJv?`5%L0^hns|kh1-(bqN^q2OA^dP%%l83~`L-a!3G2it$rdE=uNj!ev3ST~aZVh>KcBpb zJ#ev;bPb~$(vlo-OHQ<*+T!U?kD!Eh18Tu@%APd-?T#w-VE>AGI(gQ1_a`bzCJD|I z*eWX7-G4RtQ1E$;6ngJM(5m|0`{SDhZuiuGu3jy532kQ1p5{E!s7R)MXQn8Y&$Q*{ zc9(8W8h<=>(=mSTf|k(7LS)kRQUH6(-8$^^cDwkI1*CIXDEQ^>qqXSp53d6KD**80 zrokYd-Kim8vP_TF;^^3=={C%eD8oL`++$TkJHSA6d|4ikRMlH1^wWK-aP;Uu011Z` 
za*%|*=MPgKJgm{wEA3FE6gL28`yAOw=8sO)BnRUpLDBMWL_0q?Q%o4>ufB7M zxsj3R6`V1?8sdOI_t9lU8a-gwYQwAE>{@8}5)=o&F`G0rWdx7`UDH z&{sC$)5vyX;p8Z)Z!NmT}Q&fZ{WmVGQp4ZMq_|n;!gVp;kcfpGz?tTOw-Tk(!6!;f_Dug zW|cZBoPUN|r$Frn@|=eCjheO;?{)XY^-F(jz~0{IR|3+INPCIG1gZzmYuJGqi+?)(ae_1)w-fRcQ zw-SyAU(tN-Kd9s#cK=MIF5X}cL!0*F++ib4l{Meg@^kVckCJ*@cw{w@mXY@pN z#SDC~ZS&SL`?|_%N^%CTZyZaMEKgXZkeLhOGK0k(meLn4#v^2p(Jl|iTONLOA4-CtXLQKcn@5L(_CES#?_qWds78WGI+`Y|<1-Qbs6}vnu&wp(kr@xS4d?>zyB%J4Wg=ByGlAcA5p$CkpcLwC5qPrJB83^|D#eef1|qk|p@C$p=1uh{cK$OT>fni8}a<-`LK)8@nj(Xih&c5t+O8M9TQsM9kGq# z!v!%>yjWm3&K4uF@zRCT(G&E6bifl@g{=4^+KsW;^;Ot1nB5`6Fi!*gvUTka_p~K4 zQ*HNjEO|{RC(PEL-7(me{~qw?51VfiXgGK+v}go={SLjT7>^IAO3}nv8vpWzg|JJ! z3ggDS^XN($_$Yxt1x`tJmc(e2A6HwHq_Deh1kw(fp1EJKd+^x49-r4gX#-R5jIDmp zR^uqAXdeA30sdT*LY*=$*v51u{=q7#AJdDIXxc2NSZ2MgV!8~|iV|sc>!^;qDlaVe zc<8oeLNB8+%LsNJ71g(}J1}OPmwMi3y7c|*)_M&BI<_+MBkfm!Je7UL#~Bzqp8DK( z4IPz+efUMn@l7#z6F{elH}Ik-KG(OOKSh6_-ik63toHEAKOa z8<_fI+D=@IKsubXKGUBobveTp;eJLRHd#cnlzg|BATa-S^nFWHqMNTqQ?V5acxiX) z@t`vT4Z|H>n!OhzF#i_8ZC+Lw-MF?YU)t*eO_1X5o(NlDvA~T#m&(4n7j|Yen{h&g zf*!8)xA6fOE7?6aGx5S6z9nBI8_#{_ZW5KUv8EP)*o%owcgA0YnMG)EsIRZDq*h0tF>0%Q8dCg36 zFPilcFEVu`9@3Sw8|>&g%H;jHyS;3B=sS@e!iqZ%sq}%O;pccM-n0OL6r(XzkOmD9 z6nG>;vzQ9P#-bd{7z5*Dt}I+Z0?Ww;mAv*SP%L(2E4T@YZv|cxHqpTdFbmmI_dnn{S;~GY zhg(?Qjpp^;C}Ndd(20QOj?NFc{{aj#vHj+Sv=*gsJBR{?qtJ|iw6OXR6!o^+AW(F` z^M8k4;fJ-{zukws$7>_GKx=FEkeZ6v;e7u%T00S17la90FzqX20#~R zRW_mgZ}9gngRv`A{K#!f*Jx+%*awI!f{_#TgQUk2Ik6KLGFoGSAqMkmwZNR)n5!xl zZ>#y<;R7&x26qTd0*;{p@m)p7XY2Ibl<4o?-;Xr*HLp!b<-Q*6(-To23+6_VJ=@Uq z+zBf@&EMIc3v)pSa0fqumMIZ4d${Nlw0*FH=QZDBqdp7;@ju)Hk*qxZYd!gn3i>KrOx@B$NeG?0BnF9ws(#(O?~L}=mZwTrQzB(6}7v4PAtF}l5F@5yLS ztv|1moB*uqZ(jMg@zZYH-`l_t>!>b$vsR-q2@6#!7iq+e0f8%b6r2cd3uI-`wqL(v zRCI)){wWF{v9!M-fE4jK_QWa_l(ZuNLZIa@s=se&;l$U>o4fsL3OsNHw0AY!$1!b1 zxwt{%<_CNGc+`k2UuG|bGonbzdtA2{2>@-9B5pA}4RjPf-mJr^+E>gMZ(e#A9sG5M zu%iF;+=6-25tmuu0>r*@?P665BH)!-EuwYaWsO<<5lBSkjw$v)SfQoJ;8dm9;L(Ya zxo8T)h$A>1Di5iQHf0?x?AY||YR-G8IvFcQDzCF;>D+FPvpHSUG)Jzw5D*l(SU6uv 
zSpDg-;b*Z?iK+7@|rtu3j-6X^t^&jr%l$T(Da`0)( zS8L-Z&-v(d140Z+pb-)M!jTe<^O2Mc$8j8IMB#_6u_850^%N^zLX#Ak?PSMota%f<~gUw9dG$7dG z8;cpOekIi_@cTN4!1ROpg8Ah8*s7|_7l)dd;5CF{<-_?C0vhTMS~x(PI?*cRLczO2 zz@u>gSgI`!@UH;o-6DfeCwW?v?g-6S0REiu_dsYO+CbJ7Qn@69E@XYGfXI(wx_m+lhQX=+$)?^fM4VTqZ zgb~K?)7)}Z$EC*kdCM9fqdH^V-*7r)k%MG>yQ0_%f&cQh-Azyh{gWvXJ3P+YZ5D+9 z3UKsUCiVIoikzm~oRgITns~F@)j-z{5YB8g;dvQ3>yfTfPBVv;j9iS^;6R@=A0GChc4+zey z^Sg2Ya^G8)4$d%Y^u0vb9GX4&nzA<=A4~^m1lV4!JxlNNr?VfgOb6(>?sn9a?sh)^ zyDxF=YlNcIyrHhYyBE<)A<%+$h^Q#iooo_&3UNz zlZc{;7~w&)aBChdyA<2;FP%aw+}^Q6_+hgVtPz4pyhJ}{$IWjKXrX|EY!+p~>TLw@ z@&_N}7+o&-{2RRxr2wc-pN@ErZv^M8BDN(msh-C3crQ5}1^ z4gw^tz%DxaI8*^*P6B}eA*-Fx2NGcXs(~ki{z^69k#$PMhwyYGT~n0jhU+L7rq1$pY9O5U0x=CY_iTT$*HchP-- z>p#vKZ1_Ob4bPOaoNC>Fzh zZL^Ipc4&#gRNmSP0Vw0Dc4Z2z)NmwThpbip zHCNCMN?WE$IdL#;FiE!pxPuz>Rn1%~{)W!1*c3{Vf2feajuNTMtlZ;79{`XVn6~hB zPJvW-7)fGT9eht;YnvAvc25lFawGA0h%Z53KgmIE(C!=4gx_k?}|C>m{hsQM5Nw;t@)x{fBi z8mi0;7j#2lzB>e7t{OB?XkP&A2+xe#H+-bp&T|9LX5`~s1u3ERE|luiY*ZjQ8RNl9 zLQHQ2Hd;SDY_4Ib;W*I{(@&G;r1~6nzg@HKp#`^4;0ZVKGc9@ayokE4WpUL$Xd zrPCeWSe1~<<&&)a&CjNZ>jmu?0h))LcHIVjSNM0$h6(#`@6WKaBg+W1GY@)567P7g z|9X8&z`s6(jnYb7yUu$FW59Y2&#fXqnG^YmfqkMNW&FPRcZB=VVZ7MoYT}8qWq?kL z9#l;bz@N?)=LhWkXrMbv5+_F}aMr1EMecy`0m;d~kDLqjK8R!Fx>!LBllj*;m4kv} z?N{dN4lnQanInbVNz2(NY9%c}2fE};5}8W^BP09Uhpqw!;->a*FB*&tu*eJPXVBrJ zrA6RqvHoBIm?MCYZ+8EOG1j5fBH-jyDC~Hcot-VWOn~s0$6fMXgiH2j#L7F_z`57c z{G>RI_d_}?D^%p%Ulw%=S&R1=!3Ypsg$@~EYflLJfd-E{ukhp4r?(I<-QNg=s^Yyp z=#8sjXt)+51BjkG{5!aokZ4z6EnDhBC75V$IO9frAo}aZPuB-}wEYN(XQ^uI&F^QT zU76oLO>6TaM5~(9OM7yqd~#;D^h}1kgc@z$#a-qFI_tn#p>uG;ED9ismT}Q%iM37Z zOL>B!v#9S$0ZAL)u-dY)=SmCIt#O=r`ssbu1{<87#2#IC>ibgDDE7(nw(mS!NyC0G zy(7(6si1Z%qDE||!~R%F`YJg8>iT~i4syo-aX5_mIKSCmWvgABQZ!09%ePn}b>y7s z%q#;Xode;V;Tn%9K|Yg?E0~OXA|);X&ppIw28LRjT+Z(X`ycGn(Lp!HB}SK5Wybjy zS1I!tex2nfr#T(F0AtU;60d`+0Y4m_sT!%5sx5E5&Xy^^!b^W)u%Mav2w(s<*XWG!3<^)KA2f>d$YP_|llt@hqF zN|zV8mUl8t>{Q+T)z|Jx`YI+zU-aUd{=>sGu}I24X}GN3Dr%H`9$`S1xvzULuz1an 
zEUfNc+}-Pq`@J%1i__E}VK*gkYro&9av61ZhAHnE(P?7XH%gi7_iE*5=a@LIJ`G`@ zTsG%f3}#-Yu${!u+fVq~wk&tN0x$LL(Wy7}5hNFKic~)xc@HK9$r!OI7Is_{!+(x# zXp1!^8);fhG~l`%BJJ`2g6i)+$S>#ltrtzWeL%u;xu|1hn6}W|eDoU|?2SGT^-Jy$ zbRy9SR)dF6E@!P=pNDj#=4Eapf8!Scv-}jxX*NI_9K^#e5T4@|f+jLJ29$Vwii~Am zp-Uh68&Wp-1F{Um2O3;_Atv}pbp4cD3IUF^HgU|t!-hUTu8Y2i#C&AuL5Tlf4CSB6 zu!R2*h?>UH+RA;x-_5wkxF2+iF1VC&oftt}SuPoI!I|TdL-T@+4@;x@5xDU{W(BpC z+pDIrZvKY>8(Din)H%V0{+pOuH0jg9Q%N`YyDm zXLRe^nEY~z$$qMzz<$DYNLmY!VbewKr76I26wMjg!93mnZ$q88Sq6vOvK0^Qjp-k$ zRf}7NV1jR_UTU?`QMkcnHOMXwdKfjvYC4@w`E%m?KBj-y?%{;6czt_+vHPMZ39Hfv$}4Qz_g7O7*YIthS00z1N7WS0n~@(f@M&Mwi`*zj^z zi~Mi1kpr=Qr?7m%g~;s!IThn(A5I7>wTIb z%aqeS;2@C*_E&@}k%|rIHvC4B028_mk7oRFdM8EA@-w2Ja$=7%|1jYThn(dqYHLyP z0H`j@!-_NUe!#buYplIjLF^?WKaw)ktU;)-&^&JOShiqqsYO zIB?*{$YiCtL?YzyjE*8Rz@vSYgfvvk@69Kp-hMgIpwE;uwwGM7~b^O z^aLiW4XR$6kWqkLVFHo~GluGOyV9v2_5ONRg1dNKYQ>+amJQObgVAJSsfTtn=>(%Y-rIfW3E zA9Z{s?M(`XL4w@k&Lu{@|DaQ`!`OBE9a$$x!18)r-T*0mY+A$fyjImwXIJoWipc8Q0)u2y{xz4+wz<1A1>e=M`tMy;o{d(QOD$MEu z*XNyI6^NC(ha>n}W|L^gF&7vMWx_R0MvHrN|A$Ymi=6nF zf5ZhT!PRz|X;3cG^B7$-gHda6+*(6OE=P~%+p0O>_TX9QKbI!iA=fGc>59U{aBab01SdN=&-<`iI$s@09{XTYA z7w1Ki*6z^8;I+lAMCxJbGoj6zk@+4iUp;N;jC>2{&Vk#}T~(gRe}{L46nKNLBydPb z@(^5FZtT~od5U{Md9c3&*zOE5&NBVI9+>&CY}}Ifcmw$ctc%MyWA9*T^%;1AtiNpu zr{@2LZESzLOV1h4_YqxS=Dno<^G>%nf#$Ms7he-%C~x$znDB*uh8ZBVd89BqkQ0G((n0Bh8od*377R|;5ACD$O3Sw!keQ7MQ2;6 z@S9kXFM`5|+Z^d+kMKr0)7@y)MQ*^A+~Z+WtaO!*PN{b0v^vhoEx%uAqVLamd>m$v zvNhs>TF)+si;9J(p>f5l-JM2c5mmoL?wF#)PFOTh!hKG^K^&pG@eHfFOhgPkaMd@1bEwo%vfKRQ``u z$F2gPe5P|8$YN$1Ef~fjpLmsF1#l`g46j@+RxFp-1-ff`d2bekG^#P667A>00T1~d zw8YYFZfR9*tlI@~MpuD01}z7m#keSoXddFuTSwkcfN87i_!SII0^xXdRIX3_Xesfr zI3D^j*MwmC28o^d%0UTRDr=+njr$ykCNG& zEf>E?%liHb5(4d;tQl)5Z{}Bgv@3`ve8}7tGd+u8}+;7v<|&)org|75uH?0de5zW#g7_W06@QtS$Yew{{GJ+ zX^DU|wxlVKktn4eMvI0b&qRAdmmoQJke(zrd5{R&9`N#%lK?k=ZYiZs|0|KY@uJ*^ zXRz9oySWn;AgFc*wY>q6dv+?)U-{z#=$6P=2wAPL$vfWEF3_#y4NIC$*Yb}QSjZMl zJ1ZtnM|e-~^hec7nsq;&p^vIGGve?kes77$ON)^~GUuX^l$mDp@;V}KMI=L>F;Dh> 
zlWgLVS>W#i-e^g3-=e9b(vJo!zcjPNvOlJjHao^-HQvKjj`xk7s5R+R4na{woNBkEm=Hsd*4tR_qvH0y;N8nfy7n8=icp?an^f^+nqnlMr zw0r4Vl$AY5aHJ$(*|_;4{xA~YcX5^!goV$-u>LQ4 z8~AeYg!O56Sk`x)boKLm7`pS7I$)cIMK*mZj}f&`==3LA{t1><_kx|8^~9T=Y(}vF zjaR|Y!Q?G5D;}0mM$Jq41Bz&o6LdXEZH!ONMCGxXcr#WA;n*?U(GRWbQZ!^FiP!(K zXvdq*nXxN3gXm0EB=WA0*@GQo{QSiT4BS(uB$IUQWio2(agpIaojlnGkH`l7sv?$bb7Wb~JY4 zGG2CO8nyi}{GZpIxBkV-Ra-JCcHJ-ely+?gvc44wwUI;Pp35YhT z_`BeJ%8Qs6UsCau*sPp3d_Zx8(qM+ zTjo@ErhN!uszDDJq|0pcOne^b(gKETf0OZH=5<+@5-~xjU#l-<4cvfCM|F@3=){IC zQ-|1RNeB1DF53!h^SSZ{UVf>gMZ=3>zmSe{ejqg>Lx;QXM$U+id|r!oFYJ|;*dfX8 z>O4w*wAkY!pyGno?Y)kT=mp^O>khh(K~$M>)C72rsTO41F;Ck>^?vA>3l(3(6&9n=6Wzw=85uuKEjzH!|8SsQLU$J-h z+gJKm@)%mH){|LfzX9T5t>qM`#_dJ9<*TDfcE$;LW>2%M4kcTqjlw!=*W-ef*LP)D zkSNg-hE=-Gh`j`blzqNl*iYM)jA?djN|o;9#Lq3F9M#ji-_I}x0*4fkvM zhFpq3E_OCShVfO>j>66CuPyc zbU9XN4X4+LejS*-?Bu=eZC*Lw3|fW;wzQ$jPI;f>f-$1x6Me0D>wbm}g^f&cO|SuZ zJx)RCq_U7<6>;=3<@~_TafGPon*Q!-ui-P)4$i$zB!ZZFe;xKGTPnd(y@xkMIet+O z&pWA0SrJY2H1+6}*@&8KngH2bK4#a%x~xRimPfKyjZ9fLvOuufyo5NB=mq;Zy;^dk5R}-fGs5tG|IJ304Ho-y_X(yq^!?m@Bq#eKtR@eT6k>q1efu zI1<%ieCy2`{ZJP^L6s5|zv2`%S*T}mwXmfxpjR1AL?!sM>%!#ZZZBX;4Ssd^4E=LL z=QHB+eKpsh%L4qX3bM}Xi((}HbcnN=^6y%P48IG*_Gk~egUf-l|3|d{WWDoDH`@HY*X+w@5j_6ss z0EyrOq)cBFEe`aHz+y*;=B4W!Iz3CiKdh~whKMRF;H0gi8;JZN&1brF6*E%@6()uj z(`~U|+hOrd8>PD54GQSPuv$0jNn&Kf6WNz-WbaP=>s0T*fo_wYJTc7##IX`Z57JR( zp*w&6fiChmJIwETAT$X~UGcwOfDKT0?5e zkFn7^e~}KT#5f?aK)}!ZSq&(z6`18Og7|T{EzTpGa{Q+j{>y|-TJWuuDf+Hi4X&&* zq#xE!m;Ph?)K4v2V8>%^I7aIT6LS2@xf@AlU84x`el^gY3@n=rRWsrsVS?!#oWePC z{7WNM{6?lNqip3}=z7Eqrup^j>>@l^}OBPy6F}{0{ro`xIM;Jl5t?){E3RgDwCn^e~I+Jgdy!~NKODe z*s~UQ*U9f4>3BJw2^P;=EH3aL7i~jx3jjD8$Cm&9T1IP|>~opSf7^m$E$F@xw>LMJ z*pot=*Ee#XiTBs+x@>cOfQvdbP%Yy%dpl$CkUtB;KD$N^?4Gx&{p1oVL+a7{Q`6rk z8U8qDM}6{K*(!EnoH1Li`%E>^7}ThLpO6~E(;>NstiJUeiSfkmI-F!hLjh>t&(?95 zDskDO*ZfD%IF`R(T3E*9E8wyA)jO|eP{;0ho+f0vlE>^tTv8zzK_BGA9}{a*m~?TUdX#rYVNjAOJC;%yZOGhi{8IwUV1|% zxkDJ>|IHq~FSB2VIL#3MSI;>4{&Cq!g=g{1=U^Sh?nveryR5T3}sP 
zsOC+M%@yUjG5h*bDX=Iej$|p#fmSH7U?-fRYLkv>qj#x=ep4;O=(Dq-&#xSqfhHe# zRJVhF)s7vnlSQnaOMulQ`u>+W;uU<3+k)?k3>$~AXMk z;D32V;_ZdagZs-){gKS!F^noQL%(Z_klO}5_J&chTyV$eM%o4u5eAt7lMcdLwMcMBbDl{y&K;EUzMSfn z<;94knsC9o^%uxc+lUDkkPTJ~eMb3$Ptio^<`-);I=maAi#1adGDQu#zuZ75$Vy#o zEV7U?`R0XJfT+H8xPD}{JO6Q|3JGYm?Sfm!PEE_eGW!Cv-!f!CooFFK+0=~u&=v{f ztdDrE8wK|q3fe*1xs{dM01rvX|BjGY&z!I?;e7b$>RO&^42_a!A6mPZ6NMd6g}-}i z{6y%|7TujG0yofUgND??cg9&>%SMi+lLeD@@Q?apI*mCACjmHhx`{1P$^@jF7E{N8 zH@Y{4s8EI=9#inx&Hbz3vw(8)Q`W4Ele3!=i4^@%;}LDKRbUtVF$S&Ih};` zBS?AF=TNF))`%Km3c8@^As!sB`r69L8@*NdzdrGJu%^mmR+ElluC(_LR%A030AOgc z9e^ATGaxq!u7=Kgj|>xO8Mq6RenhP<|HyzLKyK@g4Li!iq_IJa{)IdiAfVNDYyTm6 zxq{GVgU_B#t(kxMT7Tnb05s&h1>R3=ffp4?%7X#^e_{DJjl9yl|G@Ix)S%dW1UO-w zFIm1@eNa{&s?XXVN*I&nd}Mk-*0{kY5@(daA}X68R`}HnjD~raJpyzU5}HiECAt2b zTnSb9V`u*L630g%u|gL?RM{mP_OKbvh@fkJVx#1m@iOFpu=yV4Uk9!$f4@2KZbbpm zFkl}|#B%4)d!101qd%Jz_G8+Nv~`d7NpZ7(Hn5e7cCM@931bj+nM`iekop)Xe9RkC zkaZ(V%?dO(jO7(7OA@u%8(y}0t3b8*(m?j0_|xADjsTz3+$6jZ3&eLaD`N7DlP;F! zUm~^vu2M4jztQ=Bg2yRP3Pt}(sQ@O%IqKLpZ$jk1gDq>MI>$>H8I-lEMYPPbKi81E z78}@1xo#LC5e5F>yP)|mLEoJ1apCx(y>Wt?Lp9yNsXHzBJKVC!%x=o+!yg$>BGSpo zv|wCE+dJ4>at#3?pB`tp=B6>)jXjFyDk|+IuGdQ!8m#2^5a(id%vywDV1gJ^lG!9P ziKS)%eh5zUge}T7u?DtYl1uD|JM^I;a+`13iAQe~7#TIhEAUK=LtMU36?s$I;d0u2 zBbbZFi2LftT`7#<_}OYVXeX04lzpD-MbRRnd3@OA_5Ksddw9hHzy4a*X0ua<(9}%t zVu(zTFd(#*;YLp787Jcbq(R2zaipV0_%+9Wx6&?F(j*-S2b^5I{5X^nf(gV|vAYkpvXX*M(h zC9|U)eck^W*ZiujX~zvb4{0mhwAjCl_zywf+U{!`Y0g_0Qr)E_@SY&+Md`Yi%kmt zYA$})56U#U@(JK$F}Tl$p@51bshzc^EsKWwm2iMUpEe!~pC+h@s9((EP0IOr1xETtLKadvKc8$Q;rLzpf92-~vC4f3BJPF@~^Y5bFLz zl$IkRP}XrZr=%}IiI7~uEO*7tHb*xH_GCOYg*HB^9#O@Gq`8g8p%Wk3p0W_Q!aipD z_$?-6Mlc?Pa_x<0=pz+-vojf9=b+TsV0J3Bu-i$jaIq-n7TA?TbNJo7VhA@0TF z{^PP;*iO$TNK&lTtMzYgVK~vIKN$4&*#zSZ~VsZd@??@C`Lhid7b9;LU<5wQk#R>tS8) z;lvoeTmaok1|5a38IA+sdTy#h>T=ayk2!fyU}Aq)#b8yJY; z4cYA}9{k_2sgFeDkNp;0`N?4^hoD{WpS%!w@X+=z;+A=gZ_hjJExzVa1=7rQ!TT5v zU=fs}V~hoy(ER3;u>9d+?}f_yL5N0i3)ArqlN%Tr5W;hQI9>VGDg)z2>pOPT6kTsX 
z=zZrwQd4$(us7p7&B=d)=&OFdoBkhYjx`ii?~lV_gO}Ns-CQ_`>3+W#Ckj%UPUSlZ zu@a2Cy%7J%&77^L6dCQKZddx2yniY-Wh z8P2p1d#|0}M2$$t;@`~tIC9)5h()>9*A}OeXh7wd(G8O*asf`$iIGc&;ux*#VybU1 zoO)T?yJkY;N{_&|O~Q#>J;5)U{jHnd#+oREqJB80MsHH+9KWB4ms6pmj|C18c>Oqr zVhTT4_n~X4K(df__73Q34Y&!WUcZY!oe-y00*nYh*rxY1AK-jodz!^n!s`=%u0Gw+ z8*$Z51jPj@+NPT$|GkR&jHZzh*fBn;e8e@nniTPAERG3-3hI%L5pzms;#gvLc76$7 zC2CmcoJ501FDI!Dr--xbCqyVurRlUENiN+pz3BzLBwl}`DGvXZ1Z*y(KS7rjqK|$V z7nUd&UVxb|X$h^MGaj`l7%Xm8IWYAf^D?j0xtS^)gbtm<2*;*5?$P06{!pVh)ZV>5 z__38~>s@^(^Mp@CRZN&-0K{@KFN0QJmsHX*+ly45ihLqiWsy@ z>w7wKj`Iu2pVXsVz1x4X9)o<#`0TvsjijCF`x>6n!9Wm-h;YN1d5LRjd5fu3m3h;CzcDd4U)5F{WYz|wheVxZY8*uJ$DXHqNk3%aOMN7! zbG|C0w^OG{#~fPtWr6JPr<>YKQu<$BAGJd{>QU9?tnSkkE4)l_$$cv=V2+n*$xHDP ziype^)#-s>C{s<2k4zsnAY0-=7+`u2+xs;@|06vW^5;=T7lHjroe|N~RXb!<`Vm_( zfG9P_>c|%qs{f0ww~UITi=spW!8N!B5;OsVyE{RHy9IX-4n=~y(^#Xy-QC^Y-5dAD z$?(mZd27~NGe5eje^jq~ZdV=Id)Ga;w3&EbT9%^{&e$x9CI!S;+gK*Z=^Xh-*8Zoh zI|G)wX)V&c)k(Lf50BT|60fh}A3X*lgS1v(7)Lur=xpK}+E(^T=cBfqh=CKZKfSEp>6A@*ltec;IFeopcc*^A3C?B< zNS-mQS|Y7SJ}zAcWuC0bnRFYu4?AnFqZ*&{Fjc$}Lh0@5ix0TGTQC1qTxt2LX+}IV z4vy)pz9WB1jP+4~|Nedqjua4nuapZLB2Y;22?;K{<={FcjSDankEM5jYGv5p+uPZJ zxv)|KT_4NZvyXPC1=)3$Cv7ZKl5$0SdbZWIu&j+j-?I+Ph%!_Z`7o()Zj*@^k41Iq za(huDl!^Tv!5uI`$G)yLVXhai^%@M=`7F3l&GnK#;wyp1!9W(iX9!uN6CVpG?@mQwm=eEDpJ{9JuR4wK% znQsnD?vtbc5lw2Kx=4qog;ph4kF4~*T2b?9^$}EXu=r53oq>-XOMpr(QKaIF&mD&LqOT=L^ESe1&jF zoh-h8`il+2Njj?>$$)+fa-3dRHxh?We!l(u-B4$JIgY5|oywdVihVjaKAh2znbA(C ziNRT|urKRrCl8wkd0+o1VcG(2hoQ<0%E>(fs9v`(qnm|UEE2HTS94N3iwrg*X)w}g zO`0`qXFX&n#Jtcpw>I_NO85Nu@^+4`DEqs|=sb9y-nz$SD3?-F7DI+}+?)H+pnI0v z28XUmfY#65)EeVo48My~*U?+H`2vQr{#9Y=LAi=d0g;&+SNhY&HCS5*D@we9+00}= zl~RT>dnSAvpj<8UE2zg;7uAfqEtHa)K};<%z?^VVKFh)IM^FD}=l!!W{tUlTm$d`~ zj>$^=CNqNh+!76s=4}*P31{jknK1#%6wr{=c53UNgaZ1)dNA&I#JA!Gkv{%}?grZq z%?x#l0q*ejHcfOEiMyCW@oJPG2L0+x1Khz$SSIUDCfC|Eu{@SoF5^0W$A=hZMI`e& zu|09&dN%ElEim<^%MS7L(N*375t+59`KPibf2qlDY+#t?6J$@%fGgIQpr&bo4d&I+ zu}vGPtHk8M_k8cWmN!khlXh$A`fK>;hPqVUt3mae7^A>(Zr#!Cco>C}eqRdvzk2oT 
zk1z+?65<)M_?gyt&tgtxY85&|iM2#dbH$Mx`tt8p{kybKzMnbSCbigfo+7&^C2P=- z%f8z)V}V`CRb2^0Jqrv}8588LEa;j_VOz1^zr>?Wb818&o~KxNbN!ndv)nRB`I(ob z6HR<{F9-0D2=!Qw2sJNgtG4ma|K!z|F#>(_0BMeXtyv-${4KHV;ssEZhNVz##HVY) z1v&;N3*VGGZLOr{pF=pIE}BkN;bQ~>tu<5`cDnusdhIv-J;w47w7(o~7f8*f`E0@< z0g)fkiu(B*4!irlJq~PK{w=k3q>DsIu{4INPn;G09Z)K0km_7YWR#S ziq-LrLw8)fUVKGmcra=Uo4lBy`8uTPm9%A!6RYK>8I#0LUvfG2X>QniAi(0kjw8>8()YN(5>hb<;L=p79e^?EF9@L?OVGn>-7|NRSgrD5cl?4_E zlWKbX!!d+f+mF-F(;a^XO_1VY!cx@I281H`OF`I!r#ifo%l^bFOf_)H{=u-DU;SZ8 zNLy-;+G~_RLbt?isWOXK>=@T+pOnEO?M9YjLdDo0+glj#;i+`IJ(qPYHyD1wBG%gh=DMi$0@XL%5d&yhSa?Cp)XhUgeez4&LGD&~*irQDOI#E!* zo{s+8?@Pr&a6r^VK^w1@C<1BwOS37ZOd{yqO2sj^LpNo3fdt-Xeta{JIvfM^xBrZa zgSbNxaG||@l9>a~ra4lvP!@5mNy~3f{6#pY=0NkL<~LV17d6^WolR-6WUZEi%!a=+ z+a9x4t~r?aYxV^9NG%aR`|of8c)bN)9|2%Vcy>hY8K@{cCudTD{%d<}Z1RCtERd5w zq`S!-PILe-75|{tK>tIH<3ZbVWXj^@X*2*lJuUpZ>NqGF@x6N7*?66)`1ceI+>vKy zuD=!ty{-YbLSs6wO+KyrnFkGb&(2SSPp=+C|03=rCk_f;zn;Gk{G0J6`4>>SUNNzg z>1*G4boK8k)2xjp$l@viK)C#oanJNB)h`tK2)y1m{M!L0g&CrKKov!(g22c>o9jA*khnZ*~t+YA&h~|br8$oSs*>Ani#cuRt+CyJ?a;JI!nPB|u z`*(koshgC~AWxzXQxHF1u1*_65w)R_BPHodvW^^M9) z{hGHItR?RH`m^v^WxdJAdpGk|-y7H?KYxkBf2xr7p4lE3XL~*$^tI?EXM3)g@(I`h zYOiL6QlvMfUQ;fK@Fc}uhrE36ChWhMs-=(F%dDkbpyK&TL0{Kzxt4@z1whb_B%nXYA1VaQf-x_*M@J9$GEBcqI}_oM2o^Ja=Ww>#6M7o%d7h!=s)Sw>5&9!vyT^mYUTr?%)uSqAM;4LR} zPuszJx_{5>46Enk`wg*x?|659dk`e`S@3me_W3$v^HL?{#!dD>X}#U@(z$)FK$~?q zZ{pne(Ks;qxbG+0zlbdR0}bxyAiD>;T%fJ}*mXd(qsh{1Wz4=B7=L<*%m$>O=k8E- z=n;@1G@SGj?XMaA)bXGkstE2C*orzyNi z10!4!(c(aF6rEL|k_iQ`%EFv*?PGLiw)afORAz@A+`-WMb^W1lyW0DFPEO;dA8z?u z4waXmlPRz#iU8fh@RaX^tz3CJn0sMgMA;H`8{l}lTW$s4e(H_FK~qE6BB%ABzat%y z;zgVKJfw<4cGUJjwasFZOUO&!@A-&`Lv7H#>MZz4dt$(6LNMmxitVgliX!=Tqa)*C zpD&Tsj!ajO+SF;~hiJRY%;*5f|K(vsYK5_W{cl2AC;$z-Hx9y7HDC=!Fsa^6)4|Z* z18r1VBoy50oJ<(Fy3WN*h7X&=S1dN#=j@q{^pojN3Jf|>c$8P>{DBF(lpLp^L32aZrNQq?d1QJqnNA%z$5>oJkwV*im^CkiMm4Li$r}YDq6;>q z=P&%35Rab|7teGyg8LlyedZ!803yy=H~y}>U{2Ao@TX zR^PVyQmx-xx$;vRB0FN{m)G~Y(f-Gm?=!@{+UWI;SJfMdGc;jB;{5E85nskiW~ICW 
zH-HZ5HIxO(gK-)H=ywKe!bCV2JXS;_2t+Zpg-OO zz}_6M%dF}o%{qBli!R0DJTTa=JHc$m6YsRgf$C!&@Xe1Sk@HCD`3sPcU-q*X7!rIAr5)Ezi)ZP zA-jGumH4&!dy+jo&M-u$-F?f&5wyRkph93W0mwS=pmA|`JiWs7M#(XpEGj~}#q#8aR zf@70jj!75ihG<+FL1x<(&EowzvFUmqZ|(JNRRLuaOYdnfC$@|cczGs!m&{lsudi*b zj@t|0*7ZUnK?`DXDZr!aPuQTClj(oR+AcLl%1GmDDRe~KdN0*!L03)nEwoF#x#RXoVHkJ z>cZiQsXx54brmM6)Gn2yZg7Inkl+UQ1ySF`a#t^Zx3!vo;+_jpqc6){(n|iJBXh^| z;}u?GShb2nDITzKI>*{*V_1}w=P_&}vLK`XJMRgyq)+3A=d=6P(ufKO5pXj2_wL%Z zDah8;0ly3#xe~VZ74jsQ&W!Nngm*9FzpApQR#^fEV@1E%DBfgWz49aY^Cu#j`7X`! zO2K*4k!4Yv2r8)8*S)k7S7n zKY`wnU{eAK@1Bw)Mi5-z+QnEcu_KRSS|QKN@x!iS<)s+3#F(qBxPShEY<{`!|2we` zPB#l^3-Sxp8MtEHDHt=Nt#22iBVlS$_Q>UAYfAicO%TM|6S;=oUssD_<2=tn#|`Hq z-1Nl^-TpY{mif2@;y9w)p}47ydSXC;n5!VIc9aCXdr)QK{*}mM-;|bl44*7SCumsa zKP9YZ0E^4~zL&hxtC@;{P_MWmAnM&UuVFb{+x8y40#Ey#q3qGZXG1FoPoj+F?g#zt z0rUK0Tc#L>pHzxnmhYJ{u%_W`yuLDh^~0+ChC1I!{(A;>6TfpfMRXUXDdO^3%3F5U z3l8ZA5RGi*fOiIu9Mf}{_Lbpedy)1%E~0cKx@0 z46t;`tBEA3l3n0R!+&4LI+w{3{k&~d9N=O)0VWthP5Er(Gi=oAgArK-m5>~?A}hQX3A{?UT z0FR0yCr5FCvIM>}$d6aB5=Pf|S|c#xr#MA2vvfvB2CNm{R~UR=`O}1v6mL2cvXR!= z$?xB|dz{5!PmFK`blX`p1KCvGvB4+dg#wlm@5NI7D9H*p$g+42AVBe4q?KR_(35LU z^e#-%B>Y;3T!PWP`_nf{VXwI);C>luFnj3aW-4twiDcQcs=R&wL8D%(g}FUHS{`}o|kXD^bGUJC&9 zx^BlISN)-5in4ONCM!)gp!J?85`mAJlGc)GQkpQsG1LKPmLyj&?Py<1SlfV3Yu*bM z@cWiKPZJOq#_FHtI6JD@tj4r6lBdGrhC_I%NLb1;c-21z5#3;!EO&O$-G))H4J1XVxK5?h z6tM|6neLsrin=6drLuWUK#F=)C5g=82(=sxmYK~K31K+5w_4YCU ztz}WSwEj37HWghxlIU^5R!QqC42xEx)m6(3alU%iD(J+p{4dN}qtJ}2ExqfsEDjKa zPe%*|M(MHo5jc%v$%_y|W>dSXoi4oK99mc<{ZW+q1ke4!Gl$angR5{zNu%C3r*rSp zy~^*CR56;mZF?h`ly)k4j}9p1HWZ5K%@DN}8Dsv@3D^JX;>Y9rf@N%?-HA4bYIx4t z`K)$c-4pWe7>007)=yKKEpLN;Ub&9B&t`=ccwtZz&l=V>+>PpTdJom!FPe&Nowr)D}Va=Vd6r0mr!*4eKFUZqcWbQZ4v2xWkw&qFfI!)5p^in{`z zBw_g}ne!wb9)^>*OKe5gmTJP}Dk&8`&<`>CX!ZRE!^C_NXvpLd!Wa4dytdgq_3QG#$GVPp8C+JW#@X3EOP;MtIAfU^ z!F*vDrFFwA%0H~tXlrKT{a>GN8IKO036Y}SA=V=is*lu*8aRBD-iK3;d^h596YaOB za4~nC_uKmit}h(f(}b%cvZ8=gvdk%5I%#_w`GnzvaQF2CaeolL!?$K^hs|GsxJ4{- zaYrB}TMS~7_MkCFlAD#U1Y~z%@2njV!gUJEmy^g8k_%62TrOryExl 
z+Q~&nMeEMnbH&(bH^2F+!q(;7oKF0Op2#eT{we>k-&7p78s5t5Yww@2JebBK2;-8# zC0gAlm^%jys`W3K;vr3zQ;SZ98!0h~0VMv%q^MDy%#rJkeP)0F*ZZ&?A_aV&VJTy* zOpmDcwo^Fsbbqiiwc!#dZ&3qzKa|6?x^fHZs%DrOg{;V0jL1-}s4{O=0hcOb6*vFq zOpMYHA;at^;>GvEb}-3rXOSHB_56}O!?ob)d?}=}J`!(P?(2XYj)Oc4vu&&x4)L1& zPBKZwZht=JP68k<1m%RHkNgD+gZv?&3#GGv^M-J7Pfm3doi90`khYT*$5x&oL&@%^ zA9hogB7`}N?fFb!C=5|u@VjmD))P&%s-7!Ra14M#R(rOj}Tq^G*-&mp<@{4A&My% z1Y+rveCuUoUa4l(wG8dWkB}~=X`KGyIoY`?BYfWOufyW{y;Id-L=l(v){k+nfJ=d) z3_a;9sWPa>0fPeO95nBMp@Nx9{JUA*WvJ=i_(u2FRu72V5rZ05kO^eqh`|Pf2#R;a zuzK%{Q&tr8kHZiATkFUVyJCgK4zmVX#l;$Bg*K zYB?kyU;NUQFzz(53eNv+IYoR@jA%xamuy3sfI!_j@wPiqzk|CWoC!0?glLn^0souX z;g|t(@*d$l#}@hbKBqPbL-(DL%Gk`IuwZtaX@`G_weQ&?Sy;E=Zt($_{_j(+m_l8>FlT zT+Fjaxbk;Pj6$0z(P3;beg+#K6s)4mQk;Hr@?9jA;hI9SB^X4TI(bZqRo2-p&{Xbq zxPM#r^_&Q+c1u(D$W#F{W)3Gf1~yNBuwuIEX(8x|voqEg6ArLN_utYJ#s()$G*&-k zosG4lvPH_a*Uor_zcc=8(SrECDoBUrSZ4Y1G6OET%cw#?kJ@z^%8vfbmM(nqHiq^( zo!*AUYu%5P8fYY;ISFZ|rTB-P%}X zlL|?B2BB%liCwR#RHzY6u^W>NMXBeJ*UFO-DSSaj>sdr)6e!IR zXk0U4XPrTcBoK@oUT`JW|GxiPYXxpM2ZevgS}QF3qX}U6x;x4W3bfxg_Di9XdP~e?Y!P{^kXdj@ zr4#r2SFIDx1zF1G%0{1pe;uNdwtiktVFWT*e%;a$!;knfYO_$Vr70X| z|CE#w-|{)uzpe_G4wbszdJd*JGEqrnhR(2lH{lMK_-;~yk>UJ`4CZ)$+IAYq%IxSW zA=baWT5HNMWDSrWtxbyskj}4b?_7&Qld~bmBvq3kAto+h-5#ZiZ67udZQ@goPpUn~ ze`9}Mrvl{dxAlRcGJlZt;+mFgtv#9{KYySMklj!7eqFpt&dxJq3?}gUKIU?{Y1vk) zu1_Wo%ER=$9O-JZ7}1QwjJ6jjzpSbg%(a~OpcmV4`kTzLxy()Sww}G2)$qx5X87+w zhmDJ|$(jrapO<|hq0`AGo%Yie)2glt-IvQ4K3O2(z=kA)OO&{MJ+g!hL`_f^XU9!YU!r!%=k>=H7n!jES*7X`Hf`%F=%{-bD8K@wiDG` z2g}_S!|dw@5|f@dxo9|CM+>qVp>Qfk zJwMH6lg?F4-sFQ zkHv8$&Tu-~@-3}AaGR9o7T$iSw*msm2;SB2Oz}cLD6oh^V%sPLrME=9UAI8q5ozAy z$Nnj!eLm)-TvQb#g$~9E0^N1Z2zPrPQ(wit^dsd5syjqBQEgw-`IdfgPLaCqa-3JV zDfIT?k3^%Fx|yVgLJ+6&z%629!^9pjLsr!;vxe(~iuvgA$eU8c4{jV>+WWFE;M{&dV5jcv6EJ5p3&ejV(XN4g}D|2b=Q}@qir{ z-`j#I{cj_ZAypG8czj7_{(l*k$z6hyc9IG7E2HJ(ViJ zK2hr058VK0a7K10kojf!Rqr}DBGstWJkr83+vWA@(+2b)gP=GycEQnei=ILZzJf-3 zd{B21?K_`K=4yFsQqmci!N}4D|4`9?puu8Ah}FgyaA=9T(45N+g%0N966+MmD`W6+5`;8fNdqodMPQqWRy5_r5Rll0%JP 
z83p%sLw)uUq?+LJlWLo>c4e!-VkRL_O;*Zf3W$g+WR#GdUXv!A#zR|@DJ1surkOou zzATvZ^8Ok4R-FNM}kdTFvSp-p~UPgE?)`4ZGQ3vIHORJl_QoNPkpxYoiE z73j;4?eb08f#{78C2s#_&qYiXk$Wf5Z_PW;T$LIJQF>T3Unx}$4hlDi5(v5-Ti9>L z9lu~%T41OihP%Q_IO`v)_8+YGd2i2;O8uHzD1iWIzBigae-$N3+jd665IKkFuugduojAOBd``R>KZ$FFSkS>*{`38HuIKK-fV*f_PM7H4M9V4{Mt&L05q!tIni}O) zu^tT4D^(${ON@ELM>4UVt*Y2Ko=o6eGPI^1@$f!s79D&f!q=1+qN2$!hJ=mLme3Muq$Y=W zsF*T(KM1@klOHVso4fp*A2GA9-Q_?D`>S+W>rlE>2Lj5eK==Y$u*8#yiyh-=M*RgUlb{Fc7uu*puPY-)sg)J60GpyEp&w_U{O3|$V7`|y?qjXAd?=hk@I9Onb+ zfoK+eJin|6>x&)`5SdZzuC!Jd0)7_>DM@hAm#f^s0UtyTZ@Tmp-Igvbd_*L6)iqk{0Ck=$=8Vs4?2yV*0wV zo>1?px6$NaH=EVLvaWg`A>Sso>fKTw#3wCD>+C-GgQR20I^xfLjKRRmA`3L6!XuFU zn%A9S>oOvSXz_=am_r|SF-kfFSm&=jGwP*V-U%w76H)eq^&7bGkm)kt+y{B$P{oes zO;NKl*3!$Hkm~+SM3fGVD^0;l+;YjnZe~@h&dv?ns*%~$I~6-tpY?~xiMT8oPYo)@ zvVlIgctD2v;&S6eCQbP~O(4T>gzWAjAT)WbwPmTb#sEOfzxZJZs-KZey=YLWebE(LVZk_t*i{U(_~`DzCm=cr|LZ+t7Nvs zL~d_agTd*?e-5>4O8^;@!fReo=ADAVWou2m7zk8DKUiY2x;B*UY#%i5r%`No4qhDs zZR}JSn?pn5ul?Nk1GMSK%3MwD1~;X{7Yz5FZ>rC z6y}H|#xV776`th5N`+K2BWxr#QM!_ZN@=ri?e)GQtU31yWJ?lc-qFfgVJ0#@hyNZy!5_dqoav>0eYEvJw9+rco`LcD__2MFO>dPwf_H}YV zxefx|l=iuOt&jyVD}I9$R;-11bld2&DHNhqkrRw2&FPDjd4S_0_$7|82C52KmUixA z9*c=RZ<_62SpGnEe40oK_J}5nU6GYBNfs6gY3S&UezoT8j{pZHq{J1LmV5*#mWK$I zX)_0tMp86yMh5V;$;7W69eikL0;(-JSY|zWo=rJ;O-G5t@4`r$YscXrKQH2yft!!4 z7=jCmE@uZ*-oKC3zU0#XW2ekjH}_Wiv|Vdt$o@NqFy99z7dVJTV zR`VneH}5g_auLz;uTQMWAIm*~zy=uzFA~z{w(C#4cY$e=L01IkNM&R84|+rtM>9Hc zxrm9AO*3MA{S-ZPw6$?o_rt4I6zYOCQV1;Eyx|Bdb&s(>OL~9ZT4G_kR~pIXk@N&W z%{MkIGSh=1`Vz?~oE|YFUH!*NBySo?Y=*=n5r8N%mKH>gDff~yN(m-FBHm)(XV~BEoIW}uCQbT+QYGuc(YL@{W2FH$*toXSvt{3X9G(_cUkFnn zXSOH8Zr5)dK{BgY9~S%D=h-cP&6Z@wCjjpbSzjA?pOSmHtv8@-4sY8k`_&uY&V?8e zEMESrH=$o@@_b*CehAB*vsa3 zvINSKsyxK0K29?BUCOmP#3%=kr-|PvQyxqqo_EmW^P*z&s*u>e4~8z|5s9XMrjK?A zQ^hQSftZ3Ws`gU zFM1R!LDp8^@j zbOW9pF+NFaA~qXv|4}$Cu0SEq+Ex{+!BQjjLFxBnLKv`TR$B`ZYyLcjnYG#qo49#M z?hKg)LGd84IPYwzQ*!)<$mg5i2nfoays$Imh%qtqH8GG0*7Tc;d`U^=7!b3jP9^^8 
z3Q5gQo-J+CJ?YAx5+m@|`k`CkCdtp>lO1fpDb&_Tok_OgnI+|IQ#N6-%qFg}YwMLI zm2P(oR%-heS&p{oyL2N(BkgsbY|1Kqro__cbq#zsNPLK|<1jADjiUe3jFOc7^L0`4 zG-EVF+cI^6pFPXb^AvN86@DT>t8O41O*WGZ8I3=!%G)W5v24vXx#i=cbOl+@sg4<1_F3cVMV+^zAPl%*+i=K8A09ym}||u5)64_DCO=R;1CA z9IOLp2{wBQk0J0klRdnd>&=_8)-AA?2?AWrW{t|swFBW1Cw2UUFBr$tqeZtr4^i6& zlb>(4lipOR_itadGpB0JL&^tN-iF~`oA1$3wU^BmFoH~bOvOsq#|XeD4U?=((+nM* zz!OoB$vW(Hh=X3CfSHCj{x9~leD4NaK#vQlzJ@3<2;xX@d=Q6d8INLxy(b;C=%U8c zpU$c{78T^(#>d28m8-Uq*{8F#ov(h|_xk5#k&gWvj#zXl&rL2@Ccil+=Y6S`8(o-= z+z95ISRv&x;akNZuSLhic~IQSVK^~XLMYs`ssU&bNCy*PH`9@JD{fdRw{b+lbCaA4RcUA9UV^2YqD&5HkdmWl{3DA z-17iOCr*oZN*?p+J3Y-QUM7sB@ zv$919Sd6_S>RDZ%(B~PYvJgy>$(k{a?|R03!k5*tSFJ=O%xM7*UPtC`nko2Gb6A@0Jaz>)T*cuJbb%`CoFvMvUJ-3K{x+z>t zx*H_(q;jEi-DR%EF1Iq{fC`p{zlNl}K|vsX=b>NeAr>8&a8no$vPG$Ky%yWY^?` zoVS-1yM@c$y}z05EWmCA9G^eXJhd_vin+n}nD2E;k7V5Q0D$eEgYu0>RJ|s;?~N<`NO*RS6|vR^wNGb1Fm zgfum`+q%^bNcpV!E&!T?OH}R=zkU)n(S}=Wotmvh!MncFnSFXYIJi_ppZ~w{pO(ZV z#>(XX4gV=z*yCPX5rs3U?KqzeV)pYlSzSF>j`P>Z(sbjjAF_q;#&0;1$dErj@OW{{ zCy5kr9p7~*y;cEE4cwmFY(Flv&=0G}S(fZ8LBr0~anRxfSdO4S7=OOpo99krK7jaj)t`fay)Wwi{x1KSFZ?5 zbfOxR3)u~Y5M%E-;f$f&Pyxn0i0W@e%4R!MaL0S>)I;@*6#177^P7IQG>aTpiH=2H za~$?Kw^IOxgRCq#g5-B*6_VXaX)HhUKTS0yOd4#crOV8qVhd6!4vBA>p0tf*ycXW z)j3p*O^ifQRQ2=?aE9W4ri9Kmn5=EqVB|WV|0#dWf0vqs7;ZJ?F=$Z}Jn}s2srB>F>qCmlfnOV)6(X3pJ<7&`JJq!!mgOn>Z8>?$N z7U-H9D!=E4QE(}3_(P$eeoWaspF(Xnn9L=%ZMN=_Maq|3-^|l>o7DXVB+1t_X!^8#mk4@$C2L2@HCBLkEkg$t2SlDd8dZl}r@jtRT+-plHIavo*b%bw194D#E zO*{KetK+Pe1|;mOlwCasKNd`nx~cV0TWB%g{(*-F7#g7@f=E+GR#o#%KSyY~zchb2 zEb20X6H*_AQ@zC>pQ0j*XX`2g7E0J9gGAX9Pz>6Yc3633v)4Y?5?tnp*UIlEH&GQ_ z;w7G8BsY$)SV%^==DgY6Y5{H_e{BmmAZSvoIOZd;dzp;sv3ctLuEfI$bV|r(GWEb~ z+py(81PLZW!B>aj+8xF>j2%(d#vgd9Rw&!|@q9;*H>&~$*hI}@3NL}-hU6aefH$&i z=l!Xp*unUh)N__#zbDVhyR>Vc$4gYdJlDTomtgW=iLomu%cTkRoDWr4 z_}=RrPuN>Gb+kHfTTchL{2 z8uu&}{x!W%VkB)|{i}k9p6$+C2aCSR^^>h5O!c1%xEi7#EKx^vUVL;?1-FBE`vq(+ z{}&g%fTJ>c}{N}0o zw`;=vZJ!4*FBON$!4HVWu!xv?neSrK5p#pBNWl+ZJQa%$u%)!GbdGVGZtCrsLEiZR 
z&?I)3S3A^YY6{i_ac({aUD#gv>?XozQdZAvEq35~Je8V3YUA_H^F&36i4H+YvSlJ`;lDlCB z3m_^_-N0r6_;Q%sY`t;Gp!ve4-H~z&QR)VtY!7Cr=HW7H>Up{XV{_7>rhD&@WgBpz z(3R7wm`T^D0dnV|>_x;%PS3uxkR$-{<*#mZq--z=KMnJ_tn=rbleJEf2zVO{2vD@> z&+(Du>6m1ZG05^XTdSSwv9qR=+}~XTI`&G^PNp|+k&@tgvWIJ1@8D$nQsHs3Jqz`e zQkKO}_*A`Z^i>B}MrZXm? z%(-8B0%Rb~gn<#orq($`p!qAVUibv@J5Hzi}|8!?d;r?D&W5vHx! zt%P{Ukt|njZlafB$vsu~jg3a=+DoP6*XZIo%zmgn@EFbO$iR85r@S9DD8Vjs4vv^0 z`J^i8fv1;QuY?kl@UN4Z%4^%Gsbf&@O0;zrYD1XNv;0ge z+Z_co$)~x#Ju_V=WY5dkyZ9rJvPk$OXyDY1$%DvswIBv=CtUb(i$-EOEW!IR9T~d5 z9Zrxt7?SpsZr*+DMRGsPqLXtUi`mWqs};eUaXwl;iUgp%@Cuk!}rS)xe9uPT}q zrUofmOI9~2OfYGU;u$}W&f?kpH9z*Fy1g`2e6-X!I+ny8|FwK4!+Eu2-SOnQ(gj6C zYxyxmnSCl_AVA(klU(2--)+0i8h)81a`JWYOmu=|K3)gE+ZYp*n7^cfZ;d*I%osNN zMbl(**+u2nw#{n^b4(u48c(;-);gZjXQ)OzPm3O`RPXDI1^)?^3YrV9E&Wg!yR&~! ztyeQY5>RMkk4418j@hv}8@{x2c_>kfS)YwuH9P*sgR*<{I5}Q~@k`JgTSJg`73tMa z&nDZ-w<84{2=}DvWTjB=!UBAttKb_4ee*tPsWNSTOMRl z$C?Zceh5V)9--UF)~a_B`vdjQJ|1VhWWLL~uP*f#AE86Y{7=-4ZAX~OnU1NFtks&= zdaK9lgo7J7*Z}^*(oizWYm76tQH#?X|F~QI<57Z{+|Z8=QaPJfB1wdy%iG zv>jaBDIAr8hPg{C6^I;PLa;k_32frYA^a?Fg^p4_i@u2i0{yJr2d>}cU9gQ-vXF$& zxRwJ**&7ss725+SvBzsGlVYZ&UUn{E(z(VE5}>`#X_8Wn3VLA(t+c3>Wy~%q9)8a% zcx!rCIE5#i!YOz1o4jp>{n7|jY_Hl=Vp@BQjr>AildV@OdJsi9t7>G);<}jNJ4N@1 z>}5Az4xbod8N^5=m9LYIM!`hkFW}-q!XtdJO#7zpLRQ&w{}Xq4Bj3c`*_IQkI!ZnO zYevT+HG zK(&pQ>hJ2SKvOFTHqjbCusC-RTOb?%$Tj@oE$^bmH);0wWES6*4q2Svi-H93Bhfye zA!3s8*Mg2GZTe*R^m~R~Zw4nr2QPk6Dp>0h!1vJj$KDg#{(o5)_}~dJ(lb zw*EEiwq)LW>S6oh%-HxY%%Mk&FIO;pO zyuYDX4kb+e@Z`&h^v{E{a8RYC+}wY%i8$N30M~AJd1AB8U%qtW$d3kzUa<6*Mu$^i z9AV3Pz>ttN9X;R`TqE-T@@wG#aBLt0eLIlo5|SNBYK#X1c8Ao6STLli!JNLGg$SF+ z5kiAA-$woK!?=(g6fPsyPdp{5Yi>!tjH+gliO%@HUePcx_gAcL2cwG?oSN7l?8N*$ zSD)|UV;8N_YtKM8iq2&vU!vcpmVkD)Sux3}v^{?M2_v=7Gf0VSBkxE`4HOrUp%_sqMiK)g7okgg{WT zb6g;?bPNO0vo~u(zZV9O;cc0}6t0b2_pFW7Sndvts@TfcSdBZw{SUs*I;yR%TlYnZ zrcj``6{onn2P^LGuEkwa+^x8~I|O&9xO;&D#e=)FH+{eFob%l??zn$tjGbifwRYCn znRBk^`OP|w8*PE4%i%32)+1V?3hUGqKcV*5ueIkq8637$in(5?ds%1{*wZ&G<7B4V 
zzC)kw=5%8Q)im2sUjW3*pSg>$&OUSd^A1WPVJ+<(}Hi^OP6euPd^K}5-_#FD@r8LSuFzWbhns%~9fi%h5Ddgpu zVNBTWZtPB5FO{9Yx4XdS&zi?*bU2{`#(E(f>eLzDQIkLa`M369@~~GC5oIClnSHL7 z?G(?GV|`u8o-)XF)CVMf?cVxRWUWBl2qE=;`Z@JF{k++cuQuOhb$Zk{LmYHYG?kiE zfF=X-I0K>mQ*xvQ3l>Bs^~wqj&DmW*ek0nCgxpcnvh4sMhcraR5(<=u+0Uf=T|R$; zYQ5VYP_H-J{)iqtE_jaUEf5DCP)4fqmLO+$c&Z)9eFS4MfjxQ?etrwWI1Z-XijWn` zzgCNxdM4$#1kSGXq~4NIsM>)f^sFJFpttf8yX)Gl3dgKhduzqf#^(}QO85;OMj=w- zcYp#r$;7-vTGZm#GUYKoYf7nMWqJ*5bcidGJng}6De)OW*^j^UwF7 zjIA@!Ovk;jCx`v&Bv!e7L%|I#q{`kb$uRa+_n`qRhkFOV1uWs^s#rH2j1; zAKms>rlNDMPxH4fnJo%Eh5CIt^B1+L(f5vxEf}2m|5PQ{U9x-xTrG(C&OxJRMriix z6q)`&Bb(<=QW4D`jX9wAmd2A4>+up&>})I3M3tfRpN|Aznw;S#KSmJF%e#zP>G+t> zE|-7vJ)M--r~nQOu|K2cLg#){toQe2ZMJ+R!%W=8AaP7xUxjrP@7k`Vr2X(j9ULV5 zSDq4&;ZsU%=*^OZPnSC5^(V;P8ui_WcL{wzlIUJf#|5oG%I~t-<>ae0XPd5}xg`8e zQoZNj9~un&R~^y{`()L{tH#9gfMd;a7+Qq%seC9_F!b#Iv;K;HUUCavDEfTRfq1Tc zSM|-YnpK~y8s02*Qw~!71l}zqA1m~~xQg}UlpR~D8D+JPJ;~lA4CnOK@x4}h>vSI3 zc?kLfbE*dTK|>-CR znwrz3(CJ~@EoeoX#ElLcMhX(hmPSWG(q`96Kuxi?TC6Y<6W!F`MQ+yeSyN3p zBV$Uh3MO zZmso+g$4Fe5>hN$0R^ge$es=o(UqA~KtWozh z`J2TNF*#FBD%9PoT%M)Ije5S z#N{R|dh&o9#}Nvc?9yFA;UFfu1(q5<)S-7KkY)XngQ!4?cZP0KG$iHR^Qv#T{}JLa zUk(;~-uMqrKb3wHdJ*hO?db6+9467zaQle2J1#_SGg27GgBrFROK@KO`JpP#u%6(A za(I!&V)BR(0-}sM)gQRPcClIUWC4ev%Nr{N8+K=X#pJPwJR=C*dSp6FeGreJCvYlw zn@#^zl8~^jHtlzkT}956G_Q>D-rI(|1RLLd-fWLQ_hDnT@N-8yhRbOYaZs0U_3mLE z=65Jetr7n)>t1h&hG4}1nTcslf=M}Zgi^)Kx7x2g4J3>Foi6O|3Tw^75jO)4Q|I|6 zk)JGVgoK2{(|K~=_djE)u;5d3zR$oob^{&l#~HNaQw8@QMqOU(u^#nEj4c40y;ffT z&&ySeY-Dtm(h0`tT%Iq1s>~K~Bc-^r1`=YE&ZpVXuyuCtozzK?=H6$}P{AP*Xiw?H z9b!BqG%1dtzFIhy!L}G=jxH~V*<}@2w*!1-TWzpzF7aOQhc(=qVF%!Ai++pfuU3dt zBQK;JU&k{WJyeQ_4vHd7TTPtqV9F9m4lWEuNm#(hl_AfRQ&b@m0HgQi`+de_o(Pvh zx8K97L^>QC(A%BW(lCCP&Fy0a62@CQkucNJjCp``w9@*#tHkrwk>HV%d?FD4C+t!V zS%l_LcRLt-%hicv8HcQF=i3#2;O65(vqgev=ao@wnycpQQ;07S+tQEDWYqn5B|^eK zVQZ*);8?-fF<-OV@YD}YnGI{POqa6mX5Y~{aKEqW7yMTW1wx?T+o#@uZ%07gu3*ep z4OJ>4rU>?rdgzQKr5A55-s+Bus=-!>iE90+OOGh1B4L7#zV;?qvSSaky`ngJ 
z)e>Gcwi5hyGthsCV~eMpDk?P3>}IS5!OW~;%X((Yt_1YSS7jcnpKYYv$-lKoyE9Zl z^QW^_r3Oh(;O9iX1Y@bv@s{(Q0&z+;vAD>KM*qEFtHAlea)0eTJerQw!EI`dx=LTZyo(Wyhqxv@kKKGc=jDq?iZTI%* z_Zij8!P&3d&v*HQ*fFgK1^3X;X!F!`;HobYi0PHR2V!T=sCKYck18>KAEk>JX>w=I z|L4s29?U;iL-jnEq3MI$Jtm)R8(_+GhlcHxRUjROLenP>V~sFsrX~fq0)!S;@>{=E z?_HdrfR$sE$kqRqJy4PH&e_@=PXCZS?D`>F)uN$e{ec9ZPQZHkhb%OAP-lmauTk&# z&)nfTVJ3Lx{)g-X^g9wcW?g3nVw&BA%t1vdVT5gTOq;fLCS9pzGw{YkVK!zB=JO!a zd(3W|4CB!{pK`emUz!wDBci`CIGDzh6#}W)(3rj$Sttsfw>w^`Va5{BiK|HY(nZ*@ z34V6En91!({V&nCmV^imiT8aV3sJ`AagAfM4>AN1xO_%+dEs)8S)qXbzZdnahA1?b zXuqT20}WQE{}eTszrjoQxM=c0raDeC?Uqj@17y1tcsIoHJdH zLnmh?td(RPjf+Q|>IIqqf0pkd^fI`WGn-x?IFq$>qKUbR4Z)HA{W+QT+G13u8Tqoq2}d|IL8DQl zM(FxA9jSj8E_BHMJM;gW_!~lxo5k3Kexr6H-`zZ;N(v8_IaJ2*2Q9^|bqTx(9xf9S zK1KYDeJ0>Mu)y8ETyb|RPKf}w!C9r#>&OIrL(>I_i^Dv2(|;?sA|Kc+%~!!H1$A{smD`@e}Q98G%hNYk_C6qz3S22EEj9=OF8y zjIh4g^2Fn;Cjj4j1sS)biP>aKfak`j6ORY{XG}*~o_NeqM{bEtPGl{VhNy9uuEJ&C zS-f9Vb0ZoLZ1!;Uq(IJ?yuz>>GGy8pM0y4M#NrxJeyHUNn#$RyHWr#CDt)+@21Y!S zSkqb;Zw&>apqcu`aU?~bCxm+E%4`W(n+QA#&M5)?<2Wj7Mg1i+6w}WoGaAg#wkX~e zld1#X@koNXly@zQe%|~&$Ez!{KKp^xF00w0G0lrYQU2+8Z$AB;(ti(g+ovexIIE2a zl;Efo3Wyk04&dF1 zt3?82OHl_H{Io)cHFnS1TICI>fIANXm1rjgo2gfsmUqcMBjF=={dqb`g$g;SG=m8L zJeeDoLf?ks6Y%H+{bw?C_9FYiVZr9#QLEiplgdjMUB+NRGbmKfUS+Ab(rH}$<$lH- zvW7W|>^BBk=AbQ9=A26o4lK}ONVGgfn{ZmWmsJF0MW3dYzKe!?mpDpDq5?E64bQ2w ze)2RNvG_3t(?L+}rIW;D%#~|kTHXIIHbD-p^cBxAl&EL-QgJ0nxgwZop11C3-LUCK zbGDoeMs6$Rb7DIpf~Ha}Ss(TvFg_a{sFd*;`&zyvB12z@yfEu$6F0J@C>&swfysEM}Sk>J^oJ5m% zJ_b~ILeAjcURx>tZ>G}rKG_%GzPBwK(8f8Z`F%$rz;#HysX?hj>N}hX*sB*>ryRf5 z^xRRQe^C;+Z2n>ra6Z^LVOBAej`zMwpgMZ#IkI$JBS0w$Jb>_~oes+YmD+yupwiTd zIy3^%UmybH;4csXyZS8@h`{6Pf3mKC7j(N!p95(Mmb`m!q=^hm^s{f6>KGFDvTw@= zfa!{XHEj!QrY6XMN0MNKtXGDx@d=flGt3(`)y#xUtjr{Rjc5tu4eS0BO|d8RWhAJ7 zB3fjf1OG?{`KCm(PeDruX~dK9GUwV_5QM-NuDG(SVI;{2a~hxT3?KSjzOA5bU~7m- zjbMuSd$C;%mNL5DyKZ>L0!OC~H2WrcCiFG?1FX13vjeg~q`B&d?MrIPDWZ!9TYnpX z4;~CcW|5*udV`%#E188;g(8d zf$-z+uLt+98(Fi(PiqVkU%l+DQbBW6o$y3FzgF51vs4~UQPT*r3_?qrFJ3erb5i3o 
z3=>zm-t4|7lkaU#z4``=2a{v6tc6qMq)}`#2d9KQ~?_KT6r{)Hfs3A2PO?>hr{o^mOW&KcxHOtIWzY3X-XmeAROV zrO=<;JB}EYBW<8*^rOMNLk88I;`fgE8NoPv7DNfO7K9?`3?$R-DY3TN(_dUAX&4#K z2Rf-)_p%V&@xmV}(S68iD->I*+_Ag$_GEvTtg zAQKc5qU)8^tV_4z*H<2}ZM}tA*A$}KyV3R4MgHvxk0lCGg|gt=X(Z4LrqT!^*Vl2y zCz&%mNyeE{iNY3iYO^q{rpro) zwL;-m|4eC%S2lsRu5hPf>>bYLgRs#@=`5eVEx;IVJTO&turSdn+`CiQ8p||eHJh+VgWywfk2e2a? z7%Wc;arM=b|0W*Peq6Y#;BK!nd&M53DHJJudbpG66(tzG zRBDmx=))s1K7Q(CpfYlg&KB}oucqYrBim8H(=Hu>sq{UE){XLlAZYS^JnRrQ*V^v!|O@#O535AJ|YC?vx7+4`>o<1Uc&tH6X4xxJVI)d zRb9!)wN^U9H$3v7=RsmD zip2-gD*M#ENa>G<>y*0ik1(IeZs8NEwH;|%sld<_g=YSVYn1*W9 z{qmK7&%>kOulNYV-!H z)|X!2VN&VyZ?bSP2~`tVFsvQU7>h@5h7aL1l0P9Xf-0S)AxCI1ge|MJj>D5Mt4kk_ z!Kk%KI916{*#%Y1|V76w;4b|S;F^{o(vW14U%#@gF&hrw1{PmDVYJuDO4d94tK~!~lG2;_g^Y+t*JO8>Wg{MH>J=L*2GX zkCLW=tW_NHW~4*898HdOQ~DwuVIvg4;a1uWSXyQXfNI zL6d>|M;ULl^jE=@zA*!odLtL>H#K~8qVuWd#hON;I)B+Kh?ovZisXYEU@JRb$6k_a z0hZRO`fVh@{2{TX-I5D?-qEzk1)qJb+C1;Jc3YGZ=>TsfQaObH{){P zN!MhpHR>_Y(K{+<(HOga+vKa2nWhQlfYb~D1jfLxF6gj#5|_xi3}E<$$Y!TDA`3?qvi|}^s5PL&)Egr^c8-2%-5?TFpEWlX ztE{r}#eMsTbDB*IQ{RB=^F0$y$)GjS_`U&@0KJW?&zDxI_X&YNO9kq$Ue66lk+F0v zLs5gc*QmyNH-Gt=RqohZApQpyqIblY+dB902dwG@mbVdGXLNe4ER`qQ%9w6lvta#O z({B~XQ#Yn}YhQ{#^mjd@P0dQ?H2`5tp4NUzIH_=dKp!Whp&mC%57dway^t2rz8U+}B1*5lAnaKDAmaSh)?HvN(}RQTZQ7_$x_nIUu0 z_`FN`@H(PlUz)so1!{?%aN0cNv)=N_ z(^Lqm$vf*?+?upuAnX;!K^qgxPoO=_Q<`pLp{>GB3ylmV3nmQugPR_yDwvSeIWG2J zMm`XOaq7391qn7)#rU#XG)Qz(OeF0Ju(lgB>Vyk$jBYQ}-&k?EI#F*SpM^ra-$UkZ zW0txp6a0Q^Okk`58(`V3Gz({*vNxhV7}|eU60~~E>I#HRMR~iP!Oob+LCe&Bj{bOT z8DV#GL>Yvua|#TE9d=T2fb1P21H&02iYZX9m&!8R56S?1oBGWVgHxxUjL+ncbvOAV zL(E}IVYqSsuE?VdLoRVk9Z>AMjHBR<^|}XB z0kTpuMW45l@?5`%Ta2M=^UqY&2Q^QV5Aag-8RLbR9T_981+9-r6l(p-a}NYj)x0=5 zhb)9&3h+@?>Rk)C9|qi+Ce$}+P?gBW;#a(7512U~BRQ;i`AfQRWE3hb#$KvvDK&&@ zwfO;+zUKWqEriz@8oyS^(!J)%*qFRoF0yv>n^CH~+q$2Z|@1-LxB+CygI-p(p4A=A|Ym2=j7IVEFHC*&zZ_B^t2w8N=+U3uGF&9;@ zs%)RRcb9V=ZyiYDt%S|w zV|A>rGybQQ7dmA@4Zvq2?-h!50g7X1o#|oyxVf{s8&PKe)qmU|By1)BOgnehOvh** 
z+iM*}a)?p7{;w(1!!;XnRm*ultfRD8PKy)Md6sIb@!p-*SjKjES}BMt@JbY-h2$QQ z{NkM5w4EnzG08N%+#KA>(WW1ghcB3?JJbUmb^AS@{-)aq@-+M&YT-fZWaW?zY#XYy z!Z~X%e|K*BYV`!Xi(>i|8QZ@{m|#uPtXQ7Wfm!jt|?nHWt_ts z9q7J8$!Vj$0(cea0>FddS+-7U?-g}D^R&YQq{>eoV{4z6Jmo~gy*Orpp`n`J>ly)Z z{wQxtQ*Z~9zq)?HTys)kWrKjN%#Z@2tza|5hRc7see09EOTPxsiMy8S#qxyYf{Pwgj<7qoHUK`@q5?OnzpS{K%?dMVB2#OK;`l(#~ zC4))2A=i`fPJ@$GAgi-|)f*Ld6KnL0pRVj{2fd?4aT8Q7;4~Uqa^PZYvDEpayd#(t z0QnFN_+A2xOXHu2RNXWQPU#c?O|>W|$yk99-s4ZXV-!TS^Mqk=>T8H$H6+J;;g|~m zfE!ual8&{UN7J^jMbsxL!$iItk){}rG0DZPz}SH0kLM?Gm9!*q_Bi}>3zB1wy7NP>K0@QyZ}US$~5_s9>% z$r0|A^Cc5_5XtCIsR*?o3|`ZVg<~L=xf;;r0JdZ~9LbaneRiCH6pgNDw8uf@yg&PmB2KDDFeuW~ z`X~nU4u4`*&uBZ%gqPU~(8`wbniF;E@p<#d`!rZ(85g7514G?iMzwe;x=c68Of}Zj za00#REm}{MbotnV8xK@|U$$|I9q5ED)qhTuPZhNLtki~qU(remuXk)b&6>c1`xeZ+ z31qL1WKHIpG)4!tC8eVajYppz9+C5Vn5&ds&a5E-d`77x%jVNLsp(=i@u?^*ZX?am z4Xni(@S|i!cWkIW@&;w{W_;Jl2l~J^-Pv9(#T#$wQeio~vJIql`PBMF`$ov{nJx2B zN`)dK%lU2tR73Xyyv(Fh&FXWCI%Y;&M^5}Ne%7A+8hHUp#H2~HyXD>pte z_Ar=~lv`Vs&{T+RvC&YA@xvYk9R!`fm_xMQFyX5RP)LS~e9Ky$&bwv~&+kCk9=~s< z7o%>%&Tc}=Mx}2r2#foU*ZdAQ{kyvp+dTQgAK4HwAMxIg8J*NjKG4eV!T_V*NQSp< z?7Y8c4#z#nF-nK$=;zaXF)tD6&+0EO?Ud#h_>700BBGYVVVG%$fNISnXar|o{@aJjq53nyk)V8H+XUk#76m)< z8l7_`M*!0rxE()nkUInFH8kM5%tpkWW@+3Eu?P*Cin5!GZ5n-FLiBA&%jFD5o|^@m zxVqX20i!crmeMC0KOzc1Jq}IOEUlNk&~i^;yPd^DCNE~q3?uW{V@-qQY*!*0Zs~-U zY0yD;DTQSSl_k8U294Hc8@q+SwbVKjdWI+}yfMq!Tu*7ZrlZlmP=%TsKad?@2=C!( zD?Rm9x*oGsj#(mSFZv1tX#$_af(R)DYPPxJ&QQKiANKMmA~-CEpc?$F{4B$ORS|AY zl>%7E$6SjTDDd=qE8ITXZpBPHbA&zVRj~ld3Q=mLS1=n*9SuLt_CI_|$<%ho$v?qS z3|L`H3kGmr4*@k_Kk|A9k405q$EpbQ(B}T> zCKD?B0Ab3v^=XVP%op5@pH-miiO=U5G&GYs$6J|_kFrEC{L+6t9FABV$)sLS5ylFl zzaNV>^Rx@e`*)77%UP+ zz_rwsL=-U#A7d=^V96&i$T%enbg=;aT1Ymm2U}DxJTMf!G?42Si0dJ=%$LmB0;tc2 zp=-i9fo!$mgSc(j7YdE3G!1ZU@@W{mbOxedhPu~inn1E;Sw`!s+W|)DQY3rw@lR&X z&CRMWj|smwt$oUk9qddaBPJ(-jii;>w*1>MaBP~p1=Y~*bZz%_fW4r#EN{>zk%4rg zOukDwkb>L|4l->)F;Go|xYtuzQZ(%_RdyE-DHgRm?IGombHCD&{6RpVOC)`OPV=#~a786{7e$=K{LXnx#@=pKyLiA-R; 
zHM@~^fC6zUYGnAW+ipb;YCr@(+Qy=Y%YJUwW@>V5DNOV;>$k5_32FS8g@=-;{9A8y zU+ygaU_m%$wFCd*;_Lt?SvL@lB417UXBu-BiM0=>z=<9ReVyL6WB&g-nEXit;nCS@ve_6MI zM2Kl|knd_sxhIMb9V-dOC-lFHBfV3o;t1NOY%Mcpbh`+?`O~)cAsee}30#KKlzLit zmRAw=%@jWyJSr%v2vw+-Ny5x75t~-ca`T;j$0F3Dm45{s|B@qZ|Bp4YRIb>?wK>n) zi-B$M77W!Nxr$o;k}{tftK&yoB(|Qu3?zr(Oo+%7K957(@7`p-VfO5z+Uiegd*c9jL ze;Fb}jVJ1DTAL`l%0Hl6QFDv2ha=~Wc*>t7^3UF1)7xs7H-~2a1>|TNxjw1XOz+~h zyMM=|Y9vkhRiR++OVTqz^QD^KqR+6lfc;_C^wtS>EWGgkiYn8SDphGc;q*Z48W;4V za;@Si&ng_SpY`+^8@=0n(7tkE5USMY{|3z-Fpd7PLoOSHnQ74dvB#A%66-}fC6J77wI(Hy2)MQs(AYW_NNOWNgig!MEWozG`d{wp{{l^da(8&k1 zDg?oDFjd!>Ja_7 z#J7D|_{QV{B5OIoxhtCEBA_Phc7oGjrEC+zzWJCT@8VuYdmr0vC}p?H&UA@SU{&LH z#c6&ra-#q>>7}81k8bA%11Pp%>67YJ!a!E9_RWW6?jkV6!XPWpWPrLq62JrLqoM<0 zqCgt0aUGy$fWJ^m8m!B~wt%TKVwSmHK1R)pBdDaT;_WE8F3h1%-+XCMf<}^oUlLm2 z;*w}cQ@Na{+*3hSQ?TIt;jAws>MaY2m5i&l4(BpMX2VF@Qar1syZ>sbtZz+L6<_W7 zjCMaTCXJB)2IPJ->1z9yJJZXBAZOL8z~xospj6RHbW&+|)_b%SBdv#n80ycX6JQaB zNB;FYGT1b=)yk~ZmV(7)LqNz7`@^k2CQ|2>I;nGqJ%?V_uYYaP*4m);G5x%f?hy0X|S4 zy8f+vIX5FZ&f+&_;4lZj(6YY4>3imip!Be*4JdJn2x~5Lm0NLvfw=|)*x?%Hek?Az zlQee(vftNHr|?KA^y@~=P6(5PmxabGiTcrB@9i}X0VoC&OVs8l*_EtoJE75DCRp6p z41vz4(IsboD)pPj!vAB7a|T(-h$=-EG)H-<-}gATyZXizBki7}^4`y}>F23M1DWb|yya_T5c0I!Sw z;qt14nE4w%;X2XfH7fiY&g#*+PjXY~UpMlFElstC@4tAUH!k2}15 zm}|CHW~xi*znD?e-&Z83@#xTaIrB}$XIRm@&5Rl?^4<^arngzYi-EfHKnE6so>FMx zTWM@;ho#z_qD6tfnAe5LuYSP0K91kTwdO>LC8vj7x2;q@m6Su>KWOZ0uyGLN`;fg@O$m@cRZ?}-YH9?v$Gd!{R@(5nAep6P zGE;R?9UAJn^`Lar^TNiuJljh`15H)-~l!cpRDvDi`h%VHFmDlob7oV zr)a7f_9xegUo#+zXFGP%uGA$q8W%D&eRl9~$vD>30g_|Q{jzau!F1x}x#v1CC!3dh zK5!Vyx%7WlT0B&yA0^{ekBMeI2co;lJDblAnWiVPap)HD4P@q9G-n@f=;lAWpAVFN zo*f8!003PL0xx^@K0(_M1UJ)%Mz@LYDz}3r*;Avfj6n->Wx@M>E5OJ)VTY#Zeg4wK zD73W804Y)@XPt9W-x4FMc1KQqi{dzRau$QqzkaVwebuwogFCKDBtBsuk%LE!>|gJp zZZBL{ardr@`wi_-3vu)UCkOMJf`t61YX_XbN#H^9GzqI~^|)*7r!a@Xj;UXUF40TC zVF8PLP)d0E=W&udN=lEZQ5K3660SedO0#tv7#SbO?gtf)nTb91$q!Y9!woVO<|-MX|bFeiYKL4zwVgT@|m&JlS%nhQhez$KkKbLREQ4s%Z0Pt*x*haB#Z8b&y}PQ*i_3}y 
zr8;cfy^u?F+-Q4Se6h8tSYu^{nwZ}*OY4yG|9+y%y8caJ)jdn&IB`z~#u2Yh%Zi!z zg^vqJ8WQmDKySIimmWBQ{Ub4NSF&Aep5N1#6~jCu_Q7;B&?e84mfq95E%Bd$=P z?ExmL#oGq?M19m-zn2J&%e8prs*_k=P!Uy+K~Mz&NUX`a+~9T@#|5WQ|06`BK}+#h z049t&V2`0g^&y+yqAckvxlw49LMm6#TpI5!E29DH7YNG_Cv#MS5K z3a#uN&AR!r@1v^Ou{=c6oH6DHd%1ttYFbUMHBhx&O~`7Y2{w(B@}Ub-@aJ@`R(&cJ z6H%vip>L^b(aQW#nQ?wJBw(McQBW&beMJDwZCZ3rP1MvXZ7Q9_Ap{SWRdh9$m(t5d zCRf*;ign6d=P=p~9+qgVi|$*4bk=CBqDyJ8VCxO7yASWE6lbn;ttdB&A;wj;g)zd* ztW<3V#^3q93AXZiNW3Srp75(Q#!R#^ts~8Q1?y%s@oZ=p5Tp2AGctYOmj(-AzwM4NL6utxC-r* z30wE{bgQ~j^@dJ^0V58S*OoOK^T4&yW^_HQ#&qkgh0{hR?D$cJXGcnrC5f3|( zMe7I&7m|Dl@7O<6@*LdFGCPK^$43OZ?>ODPEu1)Wn=y3IvDIz48IiiM!Q(EuuP5Bg`dlN6N%@} zdRV{GExY>p2I{;^w4`E6%|!s=3ym1QRT4(F#1U*6o(n}ZfMg@)t zK=WE7m4_*gxg?l&BFhX%${2~4k;~S5*Ztgr#XPWT5`O+5aouUX=|Xj^%{lISdx2QD zkO8(2xyBG0I|mL360p(|Q{#gXT1|wR91>M7@vjP#i-K&i90g9GEiS~G|Jz_xX*HFS zca0yyU9xJNuinJG2F;V?wM2bnhlwBLt2#$gaV5gdd^rv|0AkB7Rb9c^Fq;!#afMSk zQR$W1T-*klm?z?J1EQXAV#?xZulHZotX_1bJgF!;#JEpK?wP0-*gK__PS=~e?0e46 zZIWdeS~T8$T*+x+uufkWW6Q#s_0^pd`bcx)z=g|uWSu2pI5KHCyoq^~iO~}yzh%~x zAu1bxP+##i=dMPQ<6f?dSi||)n!@7LyS)&(a%}*F!y)^o8@tehgF9^WJ72)vFBAf* zBOIK`A=1jCUrNEh@b9$m2o=%W8x_wk47>#jD3fxu1)0FkB@D~-8JCS$mm&-s^$OFY zRhN)WwvIXQGIEu>gUOE$@_s}Ht*s;D^|LM?Q!0vm_1%ezSaTxxmg&V-7DdZx$Weoj zYE=aY6!T-(NI$vW-h*p~6~j~r91R+Bv(RPe$jA8OvT56LXsw;#6fOUh#;DPZqk9(> z+)>7trZHUOWedeO#}VW|#!FTpAa7Yg+O(%4#>;#qo?|~%-ScZf5F7dn+;laeX-mvg zN?Rs|?Y=|-Us-}O&NVm#-$%dGVry8k{2m0AZSU7?Hvfvo6xDt=+YK$LL0eIm7oKh| zyJ$f|Wv;VrHNIMDD_T;K2tHe&WvZiIOOs2A4+>#dqe0zc$;(y=qZ^gz&)>4ujoJN$ zI2^n$r*)1Lo@#f6nZaI5KDU!rEGv)I!OL7qn>WKsCLyYndfscMQGLfE)iy^)@a z9<6fEn$LuQa%p~EOwpuBaxc5oGg?S%lhc7D3WvjRVjg;8MLL8Mk0l;0^(lf5^6mzM zs#!*zj6%udfho!6aJ$BYnAyKUxgaBHB8CRDI-TjsCqge?PGB;~Y?!bw_*p`CgPrqhwWzdKCF=3#|^jrLahE2;}~eA%m;(@G}`ICazj6LhsC zO^c^^Y$msrYs#QUMpN(Ds$)MIIOU_+37GNE%*heAyBLa@sr~~toy)glJb6~ikcL37 zoau&>CAM~fi0O%0ZI*MB;ZV$Eg}|y>zW7z_Ug^7=2*gx?dX+9Yzh+Uh89dL@#0VV%Ael~80>&UB8dUVSTO;rO&(zLz9$!LDBjdgcuX^4Wsk!{90Z~Li^YO!zikuqr#5LCQaeoZZqo 
zoLJweasLMW%Kc4^qv2MOjkPK^~D{zZNHG81l#2_u5~a>0L(W0=*ZbjluzXBdNuCL!{zpd zHP4UjLWU^3W2yg8Se zY0pcW_zy)>$B8+)`LAso9VpoQ)*TDcf85OJxzGquBVq_>)KA*9s!ei@ySRsJeiG3H=}Rt26P_ceGdHA7XAduVNc z_&b4x#2+O$vb!O^6Txa&s|El8E8WZQ-#8k#HCpSDjNYL+XR8@47-3(u&e!WWhUXI_ z0#<4{3|*hU(RflNHJ!~#x5};$Xer1q3GE(8_Ci?p1#9cf$OK~?Xl6-trj4Gv^W8z) z=97R!d^ZI9nVW~1d6J)IsPV)ew_H>7w*&%tW*>dT6SG9KQe{N8h zgT1ORj5b_iJ(P?1i~ng%%uXgL{q=_Ac||mj*x1I31fx;1O7*^M(>ei<5mi9HQiG(7 z$0BAbJXL)Yim7u*b=yQU1ckVzEX*ga3GNSJ>5@-k;!AFr`e^Ren>KZ`~Ui7{4OVLNgpc$kmza=tQ^lhfnAz&&7=) zzY1dv^l@ATV07G;<3JGum)t93sFDtLY~72v#bi#@W4ID$&2@{fdG_b7_JfE6SKNmW z7+?Uozfq5Ca~;KfkaqQNPJ>Fs5P5Ex%t#der+SZP>`ycEVLNPU8W>@pEyM}n2)vCQ z+O`g7e3d&hI(>TF{M$O0+U7c|J@@?U_;iE)-Qojx#tj?vg+Y-8%R#s=?hC((Q_G7$ zft*sASAiI`PU#&;03sf)x>eWq*Lm*r`2K)I1oDqGc=K31 zG8Mh4J>b^KJP=-C>yPcSSQE(=r06|YqxDIX?IWBV|ND2&F$--Hp4+xRqX$&_=pTQh z#g3l;WcA%XlFY5)o>9&175mWRlph(hn4=z0{3Q`vFcbCQ6R&Op#n z0G#4a-;C|v^7GzB^)H4G@`Oa5fweYvU*I=C9%AA>%pWR(dyH3qPH#P9Gu1a`(ZOgd zti74$e#^8WoLo1zK@AI}bc}}An*~<^&ctHTK6GI@7`pPk~{!mwRS&AjTq{wWOS4(Xy%XOAFRVK-7=!#k>T&)q#rV-kbL!g4(PFYhV|q!#rQ~s z_ctzFEz}l_1}}ggCf%c@qj?b4I^G-<=Tw9SSn}7-sID*7Lq;F5PotAY3&U=a1@e&u zBnw#4o!-ObH{AC6;PeC{;N7`AY7-gNfl{bAmG<6Jd~}mYy%%u&1DI%YMWkLvQ0y~} zkko|-J%;|y9Pdv{PJ2oP^=I*Xb@>gYjKgn^!5VCZ>PJ@#v_&#`qA_m6_LfbSoWN!r zHTF8Gk?|n2wg+qFZeyF^OMPJ@nE6xA>K7wx$O79F;K4-S!;}0Weu-16`(q-zpzYSjeY}OMMI-c=UzxGYGoMH?+{+() zeb)G8^ovsTdr#KU0rVMp8bd>a#Dr7Jq?>ua1(&!2xb2v`O4@u8?-Fr8!S;9Mz>NZz z6f81e5MRcfITRspHs%eqA1pk{_GN!T72=n=Uf6x#(qHs zJ=`J)Zr!Gs>EeYf+o+>K8s*Qgb&LQPyf!0vD-IhP1vuU0{>nOIC79uJe$O^Av)v zZKkSq z2C%#g%P$98D3sE_*!`#zgb??Bp+G|tnIuJfb2&k~QR`-|5RN(v^zkNHL3U2qK@+I6 z3(JpZi@n8}otU@I|J;>WTI~fblT29dp<~(aHkTc`4J_>doC)BcRabb9mG3T_tT4lU zO+Gn+38GtMEdsb#_dPmAVLuXE-@-6s=*gI0gRT3Zo6{V5KQjY&Siio-7IlzEN8NJ+ zS}oTN68&Vb5S37gVkWJDsrzHPpM=|8B);#cwF|bT@0*#!PBGR})U)?6@UHlfJxOyD zDr7vFG|<$Mkqcz!QLzR8AQU_mFJk`kz-B*%GZ{@&RZVRwl`E>Q_VjISWlWiZoVCm8 zzQ^&6#RtzR-X+$uk@nwu2%fC8HGlWY>#KpoV`@tCK^LyF${XoRNzLgofo%K?g-`f! 
zMvdz#@*vBh723!djJ7ReIIaW&Q)G-Gl8ME_)7k&*zoSTbSKXOoRyx+(K1IjskLZdq z{xE{35ngs0FU%bFidha&G*Ilih_AJ!J((ag=>77_VrPgC@?xMykR6)z+5)`Aluc5+ z`r;{6z2c`@+ncUd;>Rzw<5_@@kWG4;B^6ONDhbY4MA1LKQ?fR%)q8IH__6@m3tP!( z6Z_7b2Q~{BS~r8=iTQ}DXCb($`z)0{Fn?4=r}eiIBS=Mo&SnH?-Q_VI4eH5%>sJsJ zQit(wWrngUzv1A58sV2wqPI3idn8NypF~H<1okaS2KAyX_`eA2v`l zp=-q*NVF28q*dRJ+mS2Lgg%?0EV&EnACfck55{>J+z4aF#~nR`RpYQdYhXwwOdB2i zW)q5NVWlQ^xeUIq#g02#l<(8T>GRj#d0zwID+B0-0yvh*C$-f&K0aFJTUyGhlVhJR zkjJeb5!N~4Q%o(K4OI@x_oP|}ALA8XC<9j#R27gkpWQl@knV;d8i&J$l&|v#WlG0* z#d&+ks7(c6K|kMxJK?eRA8>d#G%CMKL&QLx{~a9C*35Nt3hX|<3b!X(Q>v(|?Q~3- z_SOo()pcd3bEiG2kdGt!qztZaD3~Prm0%eAAbravOqn7ktO%l%TDLY{br@k{+F#wv z81h`K`c^0c6-I6AOLs4L@gnCi!{4>5htVQ^W`F?m9UiuaE!!q4w#qR-I2Pgfxq;0_YDKTFy=A2H zNSJ0*Li*og@B^aCe{kAzoFd*A9{kN59$L_KzFb!kcRN3v=0^qBitGM0e#cI=z~gBP zjIzfau$o=opA~hHCc`P9Z$UD!-@CCR;#WoA=hK0?e32pV-O}#Yn1wGfan=^2-n9C% zl^p`pTVTYEbumBKt+Y?P%&Mb9G^PhM%*Bi8fH z4WzRMvBJeiI4SgUA|_h|w_QD4B(g=Gu|BGz!I``}i+7iSM~ zs5`ie*XzXGxH~*^2PF_|^ukPl8RI0m$dnk;8IF`6X1Hz8i(# z35R`p_3KUOWay_qk^HmyzwiBG!1`yNlg>Xm52ZKUgrhN&<~YkF@PGgt!blEtd>IAO zO|Ffi5FZKZ6#3hM+gBIhTc$CVuHL8yj!nii@8e~oL%1sBK)MkesQoLDrrL(n$KgFJ zCXpeE)b%kJKZ!&$w=m53BPfo!bvX;;O+ofBe9}u9Un)Zq7xLp9hyi30uW(Mpr;hQ) zQz~hpW4s_hMTYdeWlw7HDmTy?)t4rlPo}ZA^ywewJ?O-wfqLp+M&g!0mVO+Q4CGHk z+3h@#vg|+*xyg*ErwD7^y~$;1AwCvnpbdB>>}cc0^)27ZcOlK|wff+}{S*)=x8O~P z!+lMeC$;^)`9|}Sw?vrOZ-xgemeff_?UWUx^})VxesD&KoU+YNcLOfXTzguq=Krbv}ge%K-6Dgi7Ygtt_c2o`pod&dSLqp!_C zexqXq)7eAQk(8D!R=CkL=z2@o%o zDuLRU-MLP;Mez6aLGKMcvLdUUE%n;gz4#PiIs_t;~-# zKHR5g7JT;Z3)N#zOuHV=lP5Rq-Zua8RHZf?mpd24W|*tW7b%x#to1@@jZzh@@B3XP z397_~+=OoNH_YY_-4pR>^pa@#-g~4F`Y}&uEt!@`f;-BhwZ#O*#OItXO z@*z^WtD;`hNz6VVCQbJhdvleBeU<6s;6(RW+1ki(;CG0|JQ!B9H8G3Ot@V2QEj!=^ zmMyTtL^K$98npCYK%pHjr6g4Fj-En`?x&dJ8#-LHY`6tP*9 zF}KEXC;#LGl%0PLn$TWT zawcXruC=CMVp~U0R<)`eXgKQq(76dL=QCKB^X)VINobdg(P&Gi_@xZ~tGn&(LKk2& zbUl0`HyC-AeDq7^Dbtv_+z1 z^c)7rphh9^G+#e%Vu32NQRt;(0|pzf=iwi#_(?d}e{gfeLQQPO9)oHAIwxiBx`DdM 
zu#5z~4Y&8h!J_i2OXQa;{RBu?E1(tnLweO%h_e>0WXE!tmlL+K^y+SiOImI8wyLE9 zDiTj94C%BJy*WL~5{nKWHn@Y6{(1LCml;58!`JF(*xEag<#oqxIkf9K?W{yH_WG4c z^rz)WiL+f4cQz4uD|Uz}O~ZB*kh$Gdi?|6@`c5d$#rBLe;7!QKP*24x-baic)Dpa*7$DB6^?Q7F}L ziqi0J+MrQ8t9hOXO&Pt>oB?BkF8=b)PS_3zqPt4*Fua^bNaiWhf~RdYMCUJDLK8xf zD)j8~lJch(I?A=ebMV&}4N2EMB;F90=NJgYm!J+nXZj6CBOJY)Lqol+ugLJctcJ?!1kCpsEmRO?k$HeTCF@{qJzsRnS8Snm)f zJs&3$q?K_{XeB#D`-l@cWC;ao?WxW3_}boYlf>1wqIt#rDT3TDLcEj7!n4W<1_4Sh zD>9l$3y@i0ZyRF>5lXAopmMQ*VC$CVzzR>24rWO;Zn(d4u|PUES8Sj|y_@PtEo(*1 zFtEbO3D#)46o#8f_wW!-g}l`6R(EQKnnR6DYd<1xj|%L2Yff>Xm;BA^)lq^XIxL&7 zo>zkg#A*S*%Z#`Jm!)t|Py&_*G=MZ}#@dyDFfkq#q_J%oPx0t5a!U5O*;W-jo)>`6 zBe7I9xF$>Y>Y#O*@K?v>v7+MkaOUUfJ9~JnF28y4lbhDnj=4FYZa!{oqSSn~C;e7W@EH0&Re)UP*IMzWh>TEI_ScRM?u7hrNd^v{9li2wA(8NrSDw$iRx*`<*ZDN=%?JC!CcYk#l6pJ zxkD@tmP(>uCOe0g{2u674l?WP-5Kux%MC0N5F^Tc4{LOIE;DVBU@lq^;H~?uvOBNW zdBc@DWs4ORoVyC(zTvOQSVIu~Xsf_7n*nJ%aL`ACoUYqCG=8yaT=*`>!aDmrY|jcd zx4Wlz?UhRHckM_^bO8vuZ-&Nwjn?DISM%(>edt0QfQhpOmVwyys^ zIWz4O_WLAPXWts1)+(nc?7tDqNB(*~U{^m>&VJI^bF%s2Q*Dams{$oiON6{+n zXI|3r-ifgotvNMX2{aSH+NE!c5%#dS<&{wN$=BY95}OQDD1j!m<1CeJAQn~XR49R< zupgrRWY6v@0E=yB|AUj(ak0#;xoOY-!K5e2I@)GfS?$I0iBHVmxu@={+um7k0P-CG z8#wJ6u&+d;1rFOs+8jgE&c}{{xZ&?%pT_lm8zeX^K?LflYU8 zHgA9PDS?i>m<10T$mx#IJ-bKB{nC@C_-DJxn{}0ANt{rZdXlH++NOZS%a<>IV)TPN zfQ&%`du_vNJ`qI4SGUJ*vse4r3jLXvN;^T?uI4+@kVpwX$A=VgaChGzZq~#|coFxz^!R75@e&w_d>*1nKD& zbygx%WRh!1guc!G_?EKHm;6Hq(zrR~3hr?CY_A`-V3RgkDZi4dxpfNb1p@q%@#nBt z=;K_UL!D?ND$~8k$n!&-3!1|&{Xu8k;FpDxj}Wn!!YHer&%xz%j4@~V;5Uy<815MIbA3*Sw7m?Sfje8_UDATwOA;pi-|} z*!2Q*`F1VH@YHM78?^{NO5^F<`y`v)eY4>C(kA0Q&7%nmGF(~h$o-$@7zz~zlh0Us z58QLb&R8|k+dThBXMyu7Ro27SY-*y+B^?PqSe4I-xoae&>4wi%IVpwAEE&4)-nX!Y zIfIM-H~%G?A*@UPC7K?$ZC`eh@hguy5j7O@KFNw?iV3;n_D?pIya+qWrNEwM9hu!Q zpx^+Qjfd|`)IO$8X{=VyORRLm?ST~TrHBAiI4`h8g>;Q%R3|W1Y1eH5m z-##5OP@j7j&<6Iybx+ZADcjHO z{b?HyfshJ{lPHM4g@*-Q4X~cvn)zN-XTs|l}9QF@xs!slw zOgfHHT_B{WB{}Lk0_nW!fgAvZc;+vqv7A2QofMW$a%Ab%tSA3d8V!a2BaNS&ainVm 
zdq`=}a{MJOec5ztN*HM%hBS|c;?yd>DhjP6-*%!MIIB2hPi8%zao{N+a3(5>KEwhX*2S!n9yGWX@)4Z zPlDFiaxH(L?X@CXTD}oo2WYpth}c~9bKJ5Ncze5?CYk&5$M8s^G=_)T<&R>G@(TsO z+$J{%-%4T#H=Nx^zD37@Izu0CfzzWH=lGt52&D#bc`EaZE2LLi-1M?KB|NmZ+<4Hl zh=0%Eat5rrt|l~@(_Oy?4w1;F|6K__PwSL+9@j%Zm+#u($6#Y6R3G_7&VjG0LPsSl z13(?hXDe9&5(UE;+vl|!rDFMW_oHRO_1}mF`azJFh}x`t4`qWiEcQqR_4#Ye$N+9m z;F^RX!E<1obtmZyBbtWj(|_U4&_Hc~u*+du^uJ+LQbUL3O^J?>FXy412z>=J6B~}@PpgISz!>E(iC6}0rh!`Mt!R10PO$ZWDkoR ztbW3Qh|sX5ZjyEh`z+`~=~_jaeu4epLFZ-foT8J>Ysm5vx`C4WPC^i3aK35rohwkz zrKRB;G>2OXoA8Hl&p~KNFB81@^5{AavosM>>G%rNJU51F+d2RZYLJ zQ&EW*I?nti$5t|0)O>r1g^DYZ-D*Jw8wTtiW%naapJ;ceg1d&PaVR)utUp&>v|a^6 zXX$IRgHB&@S^arcU>*8zv~zem2sR4Cz;r?vwk#ZMkb+s#X30@_SENR0ufqR^Ig`U6 zqH!HdxIWBH_X$>Cnd(!b zb;x`E5V>UPx@;Fx8_`~0>cPAv{k;_w@m?$~MQQ<~pLEX18kMT=Dl$R%GGLPlP2U1H_V7$L z`%}u0cmUf)@DiuQb%z-QD+P6>U}W3v$3?Ujorm$RAhG;rch2~BQO%w>xKJCy`@<&4 zu~8CPN!-VM@({d^9%rz*-?g0@Ps9E*qfY9{nj*&lT1RadtJ!{|k5GP90qctCh7j#GD36{JjDm4-5jX;n1v1Qe!>;i+HxC16}_d zPm4Lr>wzD5C=Vpu5OoAUR^$>jn;4VD zjovNLU%FU5eDyoAN!TMiaHE+*6d>oUD@vuUaemfzK<=Nj`mf=#Fs*cl6CMGp}@W1fq*i&FcCblwDNS%U2QW2bV_`&9=#yTh)E`KT)_2`F(xbT5X%1j|8Zw&a3JxlM#(|jtLz|y zg5X2`wH&**F#rx(mrMF@^(cE)pMq|1S0y= zxjGmbPXL}t3WDALfaj{98ZN@3xy=P7gip}^Ee01uR!6CAzbYpV-q7}zm^FCD=AO@Z zaRZ4>2OukAy@cx8`?q5`e9XVz4RAzRqp`l$9cH-sh<}VHxn*Vo5s>t~d zlI93jWDA+xXvC6iFhZ(v>KI;_{fR@U^*vVoh5C4$*~UAOuO^ue|1c@~yTRXONTx#4 z-))-2WM+2Ld3Y)(`!;}H$L}GX*%n)=iY@gs%Y7p*G2vC56S_@Oowckc{rL?Mj}xDq z&`(d*&mY9d{|QjqCmGP6$Qz-#`{+Gq@mZi^{3S(P#*x0}cT&{ZwpI*OJV(M;l+8X} zUGMPa`y27wz}%u}Q_-$GwA!i#7-1jt?Fu*WQ8vc~+VKF-8Nv=Ux&7uT1Z8>@fc1)p!qgT0Autd^#%6PItWkWN^5b3W)8^}lkcou0ypS^&lcbhD`X8Yq8xOzwVYV`E;ybcI zu7o-SIME=&j`UyA1Xr$gL|2hh7NzIS0T7=HQKk>m?29J}VaazqXc3ob)8o%*5m`%2 z-_L6FP#Sx2GJhW>YSAn%5OcsXMjJ`l+<%j7^6b`Fet46AMvG9OdjHGqAzm3@ zaKp7q+QLhG;$x`7*k5cq_wJ(6^8yap=(p`;Z{53-V3seS=)Sx^vhc^<ac&5=13J>09Nb(9YhLB?JLGryX0r;e4I4%sl{s_FGjLD zKF0Y+7P@y$9gFe6qKM{n19#PWTSB(7{2t8aS;s3s`7DWlJ(hX*Mm|#wGwH#{n6R*< zJ0 
zN_)4x{afRgOV*5S>j6FkxN#+Vgxb7VqgMQxiZteZJg}Q#kGi(NB3_}COop^DmnZvS`~S7`_5Whw2b>UyY&rTZDwrytBDus8kL>I2~%Hk zBEsHVb<<3m!istkiZh@Qf5VoKF&Z(u!O@qF){diM{caJivO*-Q+@HX z-E!njetFtP^~}4f1J@5ccIyt3q>+_3MAy`r5DX-`zHvnEZnKmP;R#;e)L!%k>W`dl z={0ers?%@&_y(({EIHgd`5hMlYNWD*S|TNdGf_jXA34Bs#? zx4L)mIWyDTC;ArAT0iwCqWD>6zi}DCnLNJz&<@HVhhUa(sbcxz??nxI5i`2_MH-TF za|*?7Elsvy1eJskJ3RdjCJ~ z%j2V0!G7-(JW_{+h-42ccoRT4S%ey{gRa>btuasWv%vPG6AR%Q{w?K$7%e5c77Hu_9HotXEZ(jFt2{ggwf2 zRP=f(8At;krOMrrRXvs#30x4r0&M&IovT@;ffEDJ3+X2)<6qDFpYyq%j-{{LZhmV2 zlA>drM5XAx5AD?8nDnP!BWI0n=jF8Qa~{gf>Ff#f{q7?1seQFUwRER<10p;Br0PJ^ zy8KvP|2VzYwmZrluGAmVcqxmtu9%*)m(_P!uaQ~tC7T(wTZg~ymRx|AG3mqzCATlvG-T4 zJQQm|NhManUFF8nT`M^^Ria}LxVJjkGphp7TK9KZ{?5OTD9Q;NteWmfS(+Wkpt_WY~N`5UG< zPq4+yAE{=|dTSnqX{Vn`MDw4CPZscVXlNDQYt7W04?JX7Jwq2+!jckCzFu z{c`lA;Qh)%Cq>#Gjnutj`@WhWV+VAR8a5s_R-cIy{0@zy!#L5U3tQRqS*Fr9G1l3&hkKePhq!%%phK8q;3k=Q z)0efyVS)Y8SS^;x{YMu{UhNrwWliojNj<>`pE}W0{~;aV{AAPTY*zezhn0g))*tH2 zUyjn76K?!#e4o;cGoaSUlbWu-l$-LGWZP-PtYAaaW8U>4sutHA3Qw8SuHpd!v4A3P z0c)Z8riJ6++93ZfZptmxAh+Ds*>v{jWD}LtaQ5AoKEY+zC&_Oa;dp?0axfFzLp#%_ zLgk32#3ChgzTxltIiIn&!y9_a&l<`$!rE5*U_z{l$rpv22OmBiP1`vLI-RyKjk_E$ zGVk%V*c+J9$+D9POFerwPhv5&N1KdbhD!iylAW31mH@isZe}oWoCq&~6q!3n3&I!^BK_Kf{qhy4NK}i6GoNDA`1-dRtGk#vP<2)k9VC?mjkEN? 
ze+wgTIeeARTz?0bZI)rrg$6VihRh%%4;>`DC~G*Y+1V|Tfden`D@&g7&b5@Bh|(~Q z*CCsmY&~5{dZMfY_9BMGw{9GKTjK;0q3FK*Zr5evD!Bfz7`DDnvc|IgS}OJLH2z}cK$^buM)aa#pp`yfz)>}2tn<&tND zHE07zY4S!91kmcoCP&LkK&v39S;k@E^muz1K;B`e7 zcB8t#pi*KN!%*o^l2@&4uSTpolRvq;V;1JS5@|r7><1$3B@n zV?sj|Yl%G!o52pX5_7AypRCF=!YUQKL4-zm6B=b7I7CBeR6x6SvE0dU>noeHJ7Y0Q zYu~0eFY%A=Op8q-~^A-^s1d!naP*Nb6u#awajkJOw=Im0~h(9ml;jolp z>lk2gZIdRt=ZM5c0oBe|Mf5-V#Kw>uKQX#L+zJwfY>=03j)eI+?=7dhR@zXGhl!=I z++iG-4b%Fd+CE3Gj7L{)r z6W4pcRPnU3<<60x_{SIEGM23|mp?SyqAY{HD01JRne?DG*ywiMn}^J&A*wY&KKrjX z@ZFgo&Ot8i=>E-Ca$Ue9@iSG9c~v_$Azz!C`YaqaJw6SP+ZBAe_$2Q$h-Jdn*l+SfqXD1-A^8Hu zR=>xnJMPTZAISW4FAm?x>J`SNf5L3;F|<0^jk>PZ3&WCSwN}S_gMW%)#SU?5)oXI9H~L zopbafq$*`FT_%ysk9&=(^u!r8LdErV1hM1lGzot39yP=X9>|pFveVS9h44aMERF@|Ww8-}rlbhGogk>P?mCpQ_w8Va|7dG9OA%tx?IBbAV{&=N_JkCw(G_xore|(r;*3Eu$#Wf z{zRyDh|QUTX zrY{cXR(62mr!5as^A*sSFpTU!OGtSt=&5$@qfg& z6<=SP!Bo?Gs(*BvR;$VNggAddh#d`h_4kORIeO;svFpi2)2i+#s?qE^AEe=?5O3N0 z-UAZ9*uXo+DJ>O5=_T^ufpY9i zMC**7f-e`IK_3~TAt^m;9s*$9CBPr#dDLy(R&RpA7mFGlTQ3#VOMewr+}7)te>3Q7j5n@fAd%kmAG=$j>1%IKD;{>xW{*TL~hT;;}}x#eT5P`uI-N9;NT8n zi8zlUD#DP`BSc7$Z8u6+U0H(_Vsvm$Df!jrBkj^QO3#MJSba>LKcL6wOY=OWoYDG; zw#SZ`2oN=O?H7oWjV_5^5=HuqLV|2QdQwXJ~XZ@+onb^PZ^6*=M6Lm zF~&?0e8SBg<*TwU(EiDd3AbH4&F6Y3nGV@GFd-+x)naA{NNw-Cpn#M>%30a9{dwUqRc zMQm>*adk;)wmqm#9T%BI?m`~=eb2)mBc&zRHTx3ak}b&j63y^qSu)ae9hZ~gQ9l9= zu@Ia0{U=vg}Xue8xs#QS*AT#c z%Iw>Z+kEf@7qp-T7~x&d4dn5Ub;qfLO7o?_yl_Bnh(^c)Nt#PVQC*=>qWIHs2W7f^ zDr{TN7&X$GQR8o=!u}6OiF&4)(Wk2;bwIr<18Dr2;_@Ko=!?=I&4U;s!cGr#!tXx{ zMJ2aUdI#Aadc(V(*H*J2C)no^QKPy%`=VmWRS?}3w47zZR;na^@-5zA?P6#<>D1}nB^I)$q-eaTlK1=Tn4k#DH*LmUN@^!E~Nl##}@UV z1KeKsjd9V3%e>staXuKze{$8SFd%fDC1rYe z+p(citt2YyX6?!j*`rT^q7UiYYvX~c*ym5VkamlL3_l^!`Jelp8oRkD5dKiaO%@rz zEQvO)o+C7UL<0GOB8{L<6V`w0J1R(=ktc4}o0!9GF-s@93+GK4HDlX9F1I0eQzjvq zk{`~AuBG{z+5F<@sKG@1@&qQ6;k^hK+sWjbq~3>^hWmVXv?{&Im3IdF;ihSwiRExI)ou{F}6mMp$2Lv&|9_Tq(i}Ck|&5(o(aq=~-zpKUyzmM-K2{ zhrDoy90cn~(|00`9HOh`TH#MixTM0i1o{<-Pg`A$M^5JbpQBS(nK_y93vMs$PBNYy 
zLGmqyS{7)1z&wI+mJ}w*=o%56q(Wplzgwz-G6oB$d-gr&QM>@io+&H6hNrs-TaFfv zxf3ZLkqX=Hu`>Ep7ird;(IX&axC==S+h+9!>|>H@E}5!8=96&Y0ug)r%?$t$y(W%o zg+iGEpF>2`7t+lpa zS83|VG*~*@J;S>_!!u$ay%ndMEN(VbSM4G~lhLa{J-qVeG9R=a47;#9t(Caf@9&u zm;B3_G=g8CLl5U=gg*hCs4L+o0%_lIvMY(QV;1Jp;v3u7*P2L?chJPBU7dTuKUM5b zg1U)tC4YHG_Zk9e@jU_Xi^QbhCOJT?_T(QP;Mmk(51(DaKLVU=4U_#M!zrE!4Y1S~ z-EhBmC8tN%ey^zCS&lN9#^FOaDPh8OlKync8*i)HG0iIj<~^7>{u;u#ZbV zWuC>l$H+$!Z7?|H`~v!0^4eVll)v-My_obj3_pPK4b zWTeg1uQPfs^G>=ul3#6?efZQ@R6u7@L^fZErLb%Bx4I}*chNc_Ee&#j+-V&@T##oa z-J7sZhB-OFJ-3OMJ{oivx$VXr|t3> zYHQ!iED)z2+`cy7@!xap^oV*6Z?+Xfs|Kp)&Zzg7L{?_n?omFol&M+lo*S;f3F?^k z(#*Sp^%uph^8iwQV*kEV5Z`?OWA$9mUg>%(x~RDJvFG{!G2zBxB(B?QmsfnAa$qKDsxC+W$TIdph3AcWO;b|&{4^r8 zjOfKvOOCH2WJ>;#4FS$G#8(r_NedT><;%%hyfcSF*TD(Gq~%xRXk;xt+n=^d*(L8h zv?q;F!Qcf(bSHcfW)!2^#+SY*z0(KsPgFr`-?VmwvnU0}Lv zw~l#5Q?ADi3loaoWH!@T;tUkyk|(PLBi(0=&}*C0a;9|HBR3mN5vzJt-WyPi&}*z2 zEnt>rc*gScBNhV)&?D93MkYAp)(yJ&Y35JOt8^3@STeMwrfHV4l#{X3ll#^-PFy4E zAxg&%#Ny4vZq@A3fLs#Ds+J7jKSdp6q9fMWs0|H%CV}%ERr7bC=dsYZCTQv}6kA{J zecp{n{^m*!1RhlUCU_vJ4VN0uLeB4;6g$(G8+cN8<^D+xzX9B$MpemhP-7s?3D)r{ z30JRs(G&`kO^TB5XL>1suZ-LsMA=NY-zSHR0KXkB4{`>*;Yf`qXKI$ZaUW#(je&{Z zzE~Qb?tqoIR1fK%F0|49qFRtNxI~e{`*kmyw8;tF_f!zn=n8x$fv&+lbLuiRf9;HLbBVNL`G22bmE)s*1T+$jP1U_CCsT?bE)#8lUoZjvsTg7_aHIowO9pH*YW|r^=p;8wp$%5 zYl^q9_m!~nNh0y!=f>69>Wk&1=@ef++YTnSBhER1oc|Q`g9df25Ln+ypg+ReL<~)$ zbmkmhR5ZmMDkI@7xT4=LCl}!RVKPFDSH6vps$8eE~V~ z))r%a^db2QthxlY@w7%RHQcF4oJ6=$(D-vqJWE02L+RS8^w=Ku_m z#n8O~#$^%$|58YoGtoMydTtu$Ko3J$P}!K%)J|>yCd8M@@~l7rvg(sb`1SG3e_P#m zGJfT-rBJJ;dEqwfj#IaTRUe}^^D7^u4jUT6*meQB6?l@d3oL!8SPC~uF34!cFEJx; zJh=nVrERfQHla>b6K=%uIM!UNNDWt_Baipfk$;KSQ6dOi*C~MSU=CS<|s^J z;!zQx*Il>&r4A87u_gr{x*HC7>#YfdnW^6HzjjT4Fj%1xpeE?CL`;)$Tm-i;bxclFcPA?gVo1a`H2R-um>@Tm7*eR%e zsPD12E%yBj=Y2j^S;4TNbM0P@srRySY?I_agGKI4gAK-NTA-hb@A0KUa*lB}){oG^ z=g|@gG*Z|xqhK>zE;Ew!__svPY{fI4<{(-@q&dY6xGP=W=-b82hE-g)UHTWYg0y+!M!C6KQya;xM2g)??1B3FAqPk(TU3*E=x4`RIke}3D 
zQ_G~n;3HI!^G%4KRLM&tUDE6nL>BBN%SZU9BAr=!c@WE^*2X%*0DF5W+Lb_zI!=|(?dPZ{W0j`5wu#>7$WsZGhRRXw3LAXr@C~l}<7$l4pp?;Nv$o;y029WWrQ9?KuXteu*r53-~?KlJ$=y z$s3}$^1#|7LKevnVXA<~%~`#ik_|eT=01EhHXQ#c>f9;x3A3-;Eb0Vr2(wQ>mx$%I z_@9~9q3w{?c0aqQUsp^o%ne=|3^8oazbV#LkqiPfNMDGhNsEr_bpPW=;%i9-;@m8Pishjcm-2-}hx^y4@oto{I;e;z7OzqquFrybz8$eD=F`Rj z!T|I&Gqk5s^z6e?UkZU)*~Yb+N=bzZCCrs_Pll5eG0}^l*%``5rl!Q5sy>!B{LgG_ z*0B#aparwex@8DkGt;DOnhglBRDN#8*jkYwUTJqo&9oyA=m!5Mu>BdT19eNocKko% z+WONlKQ^H2j;A(nj6T8PK9SQbobvmbSEF_MQ}bdI=NK6veW&08+PuTk+2#x&k^3l5 z#D{qKdsl{RGC+aa&o+%T&8sD2Ej}JYt)T0Rwq+1g3s+({P7-I5gXh*t9xasAXd{c$ zrBmv_roR3Ekvkb9r#;j+Y!f|sk1x2Wm#Dt$+Fsg>Q0k83BfmtKfJqdV`sK$-hcGzB*dS5L-L2*tqbX+Ta% zPxIylJit~%U?K(Wnla;SY`+|O>~#Ro8a%y69#k^1%#DHg`S@vw3;p=m9g0PaWLM&$ z0@0{}|7|)f3YN92jLu|!b3O4E3=$RFDsK{sAPvupB`vKzwpI^}_ZaOmz|yNLVS0jq&zLLI}C!pkm5h?2w)+OnF z@2|Y2t^RzTf)gb&P6pb@9R|2JR%N-fHD$B6Cb$YKA8S*%1mM=y4hqpze}li43U-!2%W(|AHegCA#0d$GhWfXOWCV{-dvoXa30U z%t0N*ZH8PALP*(x4*AY0}RdUpUZb$3DMyMn=ZC&EV?V=SUl)Pl;-bGNbLhaDloO&#FUj z-BrnR_688|=z>atEVQpr6u&JT;Z7B>O#S#lWKdY?QHYc&nuL-S%Liech+?azU9u{=;7Y6ugf_Utwz{)v^B*vsp8B{3z|sK z{4Etw1Dic6SE2eGiM9l{zY%qmk>&mOqh>w?;TxxQK*NXtj>p=Cg8SOzZD=npM-ykZ z&z7&>d>#^Xlz=9nqvQG)8*Cj~(}OafdUX|4AQ%xWy=iWD(?H@VN)Q0r9Fy^ZTJXGE z?!{`>dPH5;r(C(me{cN02;%0AIQ=O=gS?LVFM6@?O=~k#C$&Zn9U^Te?T%(W(MSTx-hOMIk$xF$T2LIwDpxo+n($Is$?FJpV z-gsc_z8ymJ8n{Q%k2xWL<5JOBeIdpUcbQYTkH>9qgSKZqp>pKjzc+~#pwpkaMm`X) zP$)BIBP%cl>CvrW$0E5R zdKLMcNblwBN|zH}>UCme-C#k}(k4bAZYM79&PozZLxs^1>UvJ3A-jAbPGxfjpGKm_ zJe=j-)I3TWwY<8~n=h@6)5V=lI3c=-fH%$*AK^P_&J*R^m60g*7hK^QBkP;jtNK>ZXAEYQqj3* zYOZUW1TSSKhZOCxzZ^Zn?Az9kJH}VBquZcsU~%T!_eK)R{DDdw&N5ejvd>lkbanfQ z61MM0hJyvo(RMgI>X^~PKY8i#Xr1tw#PwJ0Mb}VueSvb$n%p_<2aF%<6E@5lIndHR zAWyu!jWqTswR9Y(5*pAAxzOtT7Nouf8h&be!A$ltxQ19R?<)iYrgTFYxBZeLuT3)T z1ewJG_DKY%Ps$63YldTCb~`r#Y+s+ox6g>zj1hFN^COhu#QF+@oh($h~lZW-7FC3_uA-mhvrAlX15otcoE}!qQes&Wup%gPg z4n(TT3r8d9*H!o|R7u(F^e3&=*`%S#sUG33-&KPO$?H&6Hr{<8(4jaNh!rg%@iwMZ 
zK*F%)<$tMm$Lah?DFiR!=NmKmFFv}bv8iol@X4R>76N0A&oKbCU7aKmQO&j1h0DR5i5`NYLwBI7b~*!6Ajjy@CVny zHxS8n3H$XJe(zXBuv;O3rW`vGJ?I24FBcF0&R(o>ZRo!IQ$jZ7esjvc2iKOV<6>d!tlmE zBru?Fm;OepxKb#rw1GwVRbcy{dt6CE^LH|LxFZgxsWteR&NC#Q&1~KifBQLkqp>rgV@!cH6e4?oR`U<;o%o-#H zDt5_Q^|+t|s;jXx8qtcuc7)zXg+Mprq5#-iO5$% zYVe)$C6RAUf7XgHeWY^QIoC^Ezc`fr&0Pe;k_8~$sDDt_kbt~`rF)Dm2b7;*kT^`s zlF>D-j5`EK>ul(qjl};AOgF8z7*6zF- z60duBl=IhV$Kxz&m-()hI=1_&+{$Hf)ST&*3766n# z@MUIQ^4y5lZ&qijhT%&@ko6sBxjAOxKpMvcI|)rk_xP zAQj0asRy^wzkWwb8j!q`C9=?@r^O^7Fh5VZVwKMLtbsoKEjVT-^JV-Wv4Sden9L^_ zQgh%uYeGisTkSb`5sw3o*7S#<1J?Z1tw_63c?FV`Y>j)=;BrZOq!#Y~$toW*4{g7| zAS66O)|9d=jQ%0eh@K(5vYzx^?1P|lF5GG_`fm|$0bazL_@A7ztHRs^BPR+HQ$`Sf zqXJJVy7@Vyvu-rUfB?U65pl^MG_gB{SK*oi>nDuKZDP3Cz;SDAPuwO_UbY{Xa<7aD z#G@_f=S=BgShQGiyTKf_Q2ZKfKlR0bM>!W_|CcC-5!^AF|7)BB?2Y_SpwmJv#ouos&uzzOj$}-x zLx3l7Ss?YFK<8NRKY`AK!!hGGFCcIw{1rt&;0n4>JIF~8^FQ&;Q0P18u)^}J#uCR= zC(TLC?=2>O!#}@mn!1VNRN~7${k^hFz}ph!e}?D^rfI= zziS>jkQAO&!=y9*{)E`5AU%=vA%jHmQQ~iAIQ5H!KS^UBgnWIvSCcFfIne_iZfb7e zb`d!k!+Eg7ebE}*-|P49x0qO$5x*o53zAr_l|gA98K|{IVWL73FC3SkXA-a$(z~D2 zTh<23K|fyeAw$e9j{yzQUBhm-XN0%#EQTnOXs}IB*Iy`8#AqLBR5DR6)HqP;WC>Rp z;U9?!h{hZAj7UF~ZU#?R+mihuDb~UI(H6wnNM7+38kP|bxnWow9)q-eK?&?7CJHz- zW~>{jElA;yM5y~#bP|GJ!8wV@yD`W)grcm3KUT3$g|}b@O!HKfGL8?$f|*zq`u<5* z|M-BUQy9>lmJD?dvVCxs6c;Vst`vfJbN5igAtMossw9t)57Kctb2h8MPxlP8oTW{ugmc4XLw80pzim_#7^jv4Z+$VQ~Vd z-{0)+HBj-ZUK>}0ROA9;zY2Y)p^h-VgO+oQdHowO)30_vWS9YG2u3}oB|Q7Za`x^e z;x&5vAM&_x2-2Dyr(Cui(EAy6HyWO&)&T#Tr=D4H5UR$Jxk8vL4pbsIT|Ma!+xv;p z612YS$;_oCQj9Qphaof?I~%ncb=MXDG!6`D|*VXCaU)$@`UA>ZK)iN z7}4}c7A(ejl>btgFk=6!Fe$+&jR+?yzR&-a1QQ^4g0?j)8TWsU!^e6YC4Rud{~v`( z@Ak71OmPU)aDDsn&x<&ZSKvA}E(d#t?-_e^05|CxYseJ7jXV+Leb9UJs4d8|kb5@t z`vV<>HpwIL>rxM|1at~zFjv047xDdOPPrtSc8#HlJrQH8Odb)QKPDJQiznp@E;N}W z-v2*zCefKG{*K&8OlJtYuy#rOe7(h3{LoQGXP)rJI4QWj`p6F*@JC)#sczu+ldfj8R1Os@twHL%+v344&7`C@0{u_QOrwy@&=(IC(G#cs zZ4}6u`$^%gE)_I8@a}0zMTRyKIi7~1f-~c}&t}x&4zd=Q^!o)yF#1mg1c_}}mPUbH zvTPY;P!Vc#P}}8gL^7o^t~aox60N`1@ygi@-}^T+z 
zGRiZNL_^k=UPoU*^t^gFrZg(ay*E&+Di&K4KSKz=+rlcsQ9VV$WY9N!-+H6NR3aCV zr@zm)L2KNML`Q=(ARsF;7?HW7bPPLza*;feiW9x5BdWx|ScT!A3Lmke30*Kk?x(>O zx8NU`d}ppE2d&5eiS8Io-1s#d_CbQ7CMf~49({w!+A$-Q>4vh%GF=%h3HQ7kg`2{u zv6oDW8qtUqgNKaHm!TxltWA3MjszfaMn`CiAbe<%>mwa)#m#V|j1o^rS?xU%K{yuc z)qtg-_w`@NGYh&hqyFVwN{sUoPm2i(A`oPu_r}zL>H^vQ2k$L1Ai`3JVZRx<1UHvA z4G@s3Cf#*EQ5vY~B1UvzP%bpW5ubh@+gd29%!{rOXYP{R5{+L(wIW@?gZEc7(Os_J zNd?Dn-C4N$|A8zdK?mc%e8UjV^h*;MjlMStZu^0xEIw{Te+Sp|D*P8_Vc8N($P|%n z4S;2$bFGFS*4n8I*|{`4E@8xckwdBV9NjbVX97*okvZX^&%R4!$;?QR{vjGJYi1)Q z)Nt{(_F71l4I+f%%wptz7fC~k_AgXcA8YOGnmP^KI{`maOd#!KXkD)X4XXOIu6-wq zecATo1BtD=G*CGuc_mB{uD8yhhCvvw4A5nGjLU< z~D1U5y5?+ zVP??=opjbDG(zr!;v7P3U01iD ze1tl2M{#S-?hOh8gqb8zJ&o682%IwnoXG)gxH|?*9k)PGIjZZXXz5gN{#R$XSpUxXIuQ(fcD=+|=3cmh0Op)a;D}ycj zf@?=UyY*+^t(}9eQZmN^eZ+PGWD)1Ky#l=w?0hwl(?Q|kS0ind4Pnj$56bcFyuzV_ zZGBwjeMIefI%pm*h$co8w15N0wxpW8P;ejrat_Uo2OflqhrqSiHFHgm= zvjoG}9tr^81zh{HuORWp*ORY5B;IgX5+mAF#^9Oc$aaWhWi4sK2oOqJGwtRRi?8&c zzrgM0XgspXFAoXWEYCCg45=Wff3wMFNL0u{aCMEKvyREyv+KP%W<=`gi&F;dvAga^ zET$nOw~`(^XxI=9R1h!yY5%6F3%GmNtG$r4c?slTwVWF3a>!#sk@B8HkFRlo;QlDE z8dnjWa6pJkC%T0p9vYiTose@gO48|gey@qZZ}mGAHE=?}vJ3XRuAOit1i|&7N(Ok}65}D1$eeGdcpR7N#Wh3kS@Gs}qRt z0mi~BEnl}j5(sHw2)#g!MTaYLdgNm`OyNb}_P-rQ4!?!=nY(~rP%}w&P$guME?f+6 zBNt1t;R=7PD_rcm9R^s2*%YJ_F$L+7MLy=T%>*q^S@EJ^r`)m3Yj=NzAfA1XY=ld5 z{CPH0>iDmj7WQsnx2h&r2|mte&EerdlH)80I%CbA`HPT=!H@)1-6@BkUjS0_0gTPS z0hQ<1k5_)LtyLas_D2D?eC|c9P0Xg}(hVgu{2%^<;@`0yl(yN9Dq(yp3-vkSTsqIt%kQ*07k(#^!#oi;%)+xC{kK}ZrU>%A6#|C@1n zr#wfZ0wo=b;4)2lK+s!E490iDqX$Q8>~wb$B2i{XBj{L)g`)b4_|dv?pe2Q;>9aYv zu!Ed~-7}$`&&=fy6P4L-9vDHUe>1b`Im$WN%8jels+CB>nfVihO@~S3NbC{(D^Z9Q z^)7dWTzEWke%_ow#MJK$a|XGu-;xDC5Fr^MWRcfPBIlz6CYPde$iC5J1?>W3+UXF= zT}UB^8|t5!ui%Tc&4-&0sprpxb?C77jqG3v=TbaV zQc`pLTYYrefiZ-)8?~tw)5l@9kQNMZNsSh7vaGOp^%w*P%U*6h+FbJEdxXsiI_RqL zwiOm%B03xXcP_Hs;5a{f2?CP9d+%?eHtNZ@MsO{AoG2pp^QN+7ocYmvD{_YWJNOQ~ zTlj6m@V&n01pl_{*@fwUZ&p_; z!8cj>2SdIRP6(m?;6vtmqVd9nH-$6780HeUt&CqtefUH_M198FgtQBK 
z0B2_NQy}ZPB?xEDhF?%zRUNiMXOyDEkYSfAwIu;^;}pk`vlW3D$iZh9|9c^u?8}BX z5Vda)ai1^-&jpKJoa%)UoGNU{lleC!+suC_A~QtbJa& zbP9W4Dr(BEFX*PF_zx#?b7mVQK=~C~$ zWodiRuI#ttG?G5H*;8znBv znbWNtVAsKx*I=R1vKBMb1|Y#8Ro zyT_o3o8s9ffwW6Fh;ZyWX~}196Ok#IsBuj``)wY}x3jhcq_aV7PP+21IgC?^K}$L_etJ5yPsrt%zx9m;1O6~2*lznm6q8G$wxeaATU_dG~v#W+MBI0XI2zhDaWin<-C&VJ zN$f2mB;t5*E(jamchz(cotMB|^>obH{sn}5kLO=ndkdZ8#LEaCewAM1t(Ri6bRd*O zQ6!|RhU@s1BdPzP@LMU75N8K?Mi`|L5ih6-M`OHCK6T;$#?hy`STJiF10arfXBySkH58WM~^gDvb`b+5Nrh3kRf4y6yh zB(c#3{XFDYZl2W)&4bg+gNkVZlE4yxT(pI&O!ur;b~MU5m}M-GoF6%=pvQwo3bS|!L- zkvk!h5Wx8JxbX3jBykZwhT7VYVk-4q>Eg*otROQC;y+&^E@m<`XPKn7CQctt@TEYQ%AMuZXkPuBZBBXy zE=NDF0b;}n`~#6@cW43q=vDzHEWX{c-O;U*!R%g2EC&X(M9dP9KdV&8-D355-}_!7 zis0~Vu|V7giT7NhKT^sHEg%PWm}5x?#e#oTJ!4*y^5WlhjDVhf2Z^nlCPf=*L?>kY z`Fq2w@t_lVn`p}X!BLR0OGd;#QYIAOkDb$iFqdlJt;vgMZqr+0P&hYlP+YUUWtscK zPx9(-vzhq=+T#Y~S`|UNvFz2aLV05m8L$zf+t@)m3#DBD4zB+-wwA+Bbr7HZmU>G$ zk|yvX9ZlJaB=8qb{mlm5v56y?o+oxQppAgE7d}?ztOLs!@YV+ady2Hvlkg|ukbm}Y zTfVI*R55ROZYG#=gukv~6}785LF`)8YkCInR^kM?Zvj~T7zlxIo*)mvH!=RV*21bz zkn?}R-~0bJ^La26q{PrOx<%r@Y~C_6DN#_7?Wv+!GPSfOzF`G*NzOFJC1=mhf5oj5 z$%v_I{64H({Y-vLqCrnSBv6`2sOcqy86CoHh5ROv1o&)$4`0$EdCM5rA2Bf5)C5-> z^t_+kVuEV~0-lq}OmS%uACg5(aaVx{P9OJoiJd`fVowOh+Ra>t)8NWC&bF^%Ftf4} z0m;lkoKk3>K2`5GkjP86#@F8XEWf}Sr^rL2@t*425R*qHJ(JFREm=(k?_6Eu!bMFm zLB42tm8YR^w~Nsw9sXxCF=r?t*?x>kVxP?71jq!y`Sho(Q?{-6}kLTHRq-p#G%K>uA*-6Q>xtCnIiOv#x= zgTL7FiNaStmrL=y$~3{y#Q0-D#q-F3u-m~CM>395OaEJSd_~;Juco!Lk+k!L`?wNo zDyrqP5@=dFF0u40VVcV`76NM_F@8aZa!|H-mycq{ItXFdP7mix$hJYAShPP2NGJ(e>at1NM3ad-FQQnk&CPGRMlY{xAm zJ8s-hWw@nY^uaqsjeiz^o=2zs<<-Ugs^^Ds>wr(}1GRf=KaXb=wC)Oib(!v>>Tiv4 zY>fQrX!^qo@LpjZYW!g1IC8+2W-Ge5@-fxE_``n{+A~MO-ud;jh)PvQ?_ANWe+`Dj zM)7LR*icn&bG2AG$se)u1>Fo4wol{fe|G)$_`qDY*BI}eeXwZGe#n@(X*Q+BAx{G>D88^{Zd&6Z4;|3J}C;}q&?74HSH z=iG%T7tnq#BN<<}IhG(J*FnE#-Kzc^kXAVKrzD5RaiK{{a$5fi|ekzNr zI;5Un`_yrsS^nw-&muEF;^LXc`1w2khf20TEglW;sD!*;r&=)9iI)~vdp(!kqz56M zbcdPAG5yV6cjQ{YLiJ`l(R!8DFgCJ@R}-o%2+jnqq-TDxFBtTn?rXV;co5EJX}=#u 
z*xU=8S#I}Yubd;!llJ>$a9L3_+g3T({uz=*FTyWrFgf;n>O8u0!ROO!%L{&|>GX<3 zRlWkeael|FW1E^SWw%F@sepS`4{!zxS4508rAyszD0_fjjF!RiQ)uVn2xY(0JDZc^ zuTcSj6&0lGQtj1s`$18ov`W-Isuh3WdCAx5Ty&oFeR4U%lAXl2$v*dj^&}JmT$KSE z1gTfCIAYU&7yKPhO?}pf6Vy({8sBCxz9H05Hx6*I@;aOHRaBBqx81s;T-S5dx-2sn zZhcoq&_A=c&;0l)xn%FGDE2*avCD!chW#8HaMTByq&k#g>XiOt#BFZlD3EIzN1)(V z#%6A_&px;d6E1x8bY> z3rl8_8#s@3Cz*;0xKhN?aEzH3E(m$abK%O@K#E*2ocl)xUS5{(8dl4?b zfRiq=@i^k;<(Z75p1 zK)N(z-_}P$pT{K7YJR9-PVoz+O=4_Dq;0rb;kZN_&fH~e5*-t!CpV)LnS+ypRY`;x6 zpLu1H>6U-@Im^<9zV^@inprRMbm@yZDN}PAbDbOQ314G1ZI7Z#QDJ+I_hxpn*l$j1 z2z79)hXYhbIR#IOc!U1ZG`0%5{Z=^w2o`v{*OZ1CE-XB#+w+ zDMdDO>jL}LXAewi6g$jD<;u7Q3QvAvdgKYcPo~Cv$o?~7iCbmn{k68KySlE9IQyEP?{HrA>-bu;jOS{T=# zn|0*gzqV%E*Z108+~KXOoO@&I(nr%ebI4qHDLvKSWJ58-)6IREqP1@2XJ&!~c7bxm z%^Ev?75(1mgwWq=XBPZD$_)Mo$76GE{^`l?vtHcmaj#S2vC>L60quK-^f0r(Q7B6% zo4}tcYU|ngv$UGN{l4gilnEP_tn`)EI{xYdHVUzs1>c>m-x|4tS@ykIEN|MfeZC_( zJjWVW`^kn}8`Zf;<637e+#=+8&u;f0Vfn({YomMODg%rsHFGcZLMog-w|LpxG@2<4 zJfykT-+ZmC_V8rQ17@%Jcqu%p7JXYsPHk-_Q1i>W_HJp?y%HBRDX7z(Xm)H|hz;$A zRPEbCl(f`O^)C!f4I@lc^eLv*L$&B_^j?(hg%9;THr3TORC-(QH?vinDO|;Vw^FxN;EwbuW4?it4(XIfvWQJ-rV)#%3b!*CO%`w-){C?5ij2=N11hM#m=No16Ci+-+RqjTdXfWj*I;i zFWl*2YJ_`n>10iI_xMh$Q2Bb^5ug@RQGAVmvr30eED*6}59tM6E(Xe~)zOb3? 
zwG<#_BFbDCdoazaT)p&BQ#92*dy><1An%_!t=Ud9T|g24bfBgk-Zx>tdX#$eo(78n zkM67{zvdNpn7BaEc2X?$Y0CKAd2bZ&Y1ozJe*a=u32?*u>heg7qkm@$qb%%)%$?bs z<}U>%&%A`wC?AzFGU~H)9EwrX#o0_6`{TFzL=x5#awaJD8#&SI8oRvR*7mD2yq@Aw3sTvn1`Sp#civ}X549W6yDQeGQUZyr2dCvnr%T;S zb$hmF>5aHU`lA6hkyU9Oxq#D&nm2ROV_W@3cjr<}*ERHKmb9J&f!hZPqv^8?W2mFA z%?Q$c;J0TsdE>J)Rr?JjQT)4NHL<_cqE7dK_x-PPUn+86P6wD`Y|jTy;G?cG24Y zIelGqLj_s#bVzfOAegT&trE7Dm)nuQv?Y*UT&i?6S$A$PIl9<}8z=32( z2i%uHk+KN^+5#);&l(?<$%U@p7)Qw>XK6Q|`@1jPG#<5>o@Q1^6$is7|2|V33yOP~ zCqE622ohUX-LK8QOuxu|-hFoXpg^1k|2C ziZuh0NgmfmxrUdPMRD~r%_^e$uf#+fXq%tU49Yemk&3$Jxs@xN31N;ULq zyFhkU&FAgNK@q^cUV0wq>4V#4g$n;qjR>dbKFd{I7fv0q0QP)prKSg6{B&`UfWTl@ zk)DSi8oN`ccN)p?uT)ReO|l0b#h{1wl%nI0Z5raO_1Q`%d-7%%0s;@?GdBhUGdENK zuCLgRJy`6_KKA16j)dW3EQNjPpFj~?%7$<50?*lS>>nb##JKCsRm8-8pGy73_X?Zm zi@Iy})o#;+PxkhG65<_n1F^4;k5k)wFTw*ro}M}rm(%XzE3s1cQ=M<{W)Jrlw{zV% zLby#lK%HjIV>zjz8SxtaxlMm|a$Cde*?^#LZp{7*GhPE3u)p3| z$Y3(bRCUizd;j{dvWSUi>I_36Ylh+wlqa0oomFJja9z*eQ(#AQy7hx){=H=$CZkIY z@5tw@Mo}XARR(dwbSi*tTu;5+vhVQ&mZBDd$ySSP9UbY<8zrCc0Fmr-NHe}kcCzr zNf+OF1^T1z=dm#I4HEG~JiE(Fvq4dvrm%k|h5=3Ux;k2w$_qwsx-NUWuJ)t$ah^yD zEp7VBNq{?riOap<&0$Ue<3qWPcj2hKl{Leraz`ClmBNQlf?B^|WU> za_7$WvJJBTf@HWL$|!I?YIKx}@arg&W3-k7E%vdZJH(i}O?|DigWP|hTTcIWU@UW9 zw6zi&;?Y3=>m{{$mDg`_GkxNAasy7WYL1J)p`q4MZ$AE(f3G9X;V2|{k33_qr^p0~ zx5D=#A#@~kLDJzPoZyeKoh~(+IYd#x;G!bgVuFZte%Fc6iYW z^XW!DVrfI8psb%JW`!n>lzrvrxSfWWR@(^#RW)wUeXa7*i}m`5YAo>0_2gxUliw`g zJ?wb27WWa>69U|A;yLGc`up+UMu(Igi&~zvLnsXAQK78(RUNBpe(YGQx1C1bQ2Fn> zYYR!@rwlt-+{|WjA<2yn13en?)NDVzM+-&}lsga!m|P)R^ht~BB@wXtRO8MC@n#E2 z=eaYJ=Zz0Zs$?we>`A7pl~v%PQEX3L4kO9uV{X*+o z(BJn`Z4Y}(2fW_)P?U(lrp@&khS`JL_ z@LU9Pf`asRIDGikw&!f}I!j?f1bu3eyKqO~6)_HPbX>(b7!=$=nsEOB>8}fW_|lIJ zJa@cW^4y*9q@JKv2=P(sc$fdNb>0Nt6N5_zlklD;#Sx3^c9^fpUrl9GpcWke0H3Aa+aJp~0+Dl*t+Y!Dc`m{tA5 zCXwdfT|ClncalC=hJLNUjKLrM#-T|GU|~NB1;R))47=!7-f_Lfp}t6|$9nb7s(CHG zblmW#LeQPy!g+rwel51a31ec&v`;Ek7R`s5&#^CqnJ&u-0@$g$%^SRI!v-Z?fpTJd z%pHcP8xt%`d+h|C6dwmf4Y#L4sT)tq*|7mLXI{be>Kv@YdJ`|vdQ!#u5wy58AjIYs 
z3N&(A!VHvg3=(2;#9}nXpk1|M{t%UKGiShE;KPyPQ_Lu0z@84qQ2p5)X~xRfLvYOU z`-?~98CXAUg~TiM!O=kfe_kt3=->vlcrL_$1_O7@_t;kx^p z9_18{jj|Ny7{{-z_c3K$&@pA+RC|w~+x+$&D(^+B&^w$?%u43J`(b~LJD+zjG+dwe zp12FpW_D8XUwyc_`C)V`4iubrWq)r&0~Gf|tAg$1f%}G$WZ;-!U+$xwnn7;bbjaGt z(|OgkZ`n1|QHQO!4bo&QW|Li6+LK+G#HN@Mne-RcfaxAQh~MR{4#pG-c`}KK@8@m*Hf3^a+6k^jJVo&HTqY6$t>uQo36#i?_>~&fm^4}Z z*OlSrukxYCvLcp9h4DdIwqcwR|jb5d@?1`klCfl!=y&+ zVQfsG9uYjz+VyIrs06}K<(2`h&?!2U?kxFgZ@N;BX zGUkri_fMJp$o=c{9p*UHWe1N#SWbIgZ|Q?-?g!^v9y6xE8s3mbgiN;}-bzVdGQ3y< z+G1h%SuVTa(d!(#Gj#9T`AGF12l`sDXdlZ_m%~`FIVX&-1ezL*%$$FHo;wn1!)mx+ zvB5D9d@J4OJlm`Ny)nS|GArl9mJjH%@uZri+iDlZH&E}Ieh?@@vm7#t-bYoVsOWzV!8b@n%eEX|SJeFt*3jz47=hMwhhn_!RWco> zqRdDn%6NL?T`m)X%6`z}wu4w!&ya5*x{X5V*RdXkz3RQySE#iNx?6(mv5*ySA1<-i?5B_PbA2LA~sZE zeT$(OP{v5X(5SA*B9I^lBBnFE#{+u@4+Y)%z92pO$w8=IZQAiuFe<0pZwflG!ts zx5OZx`3_>jD2kDhyX*NvN=0yXy6f#5&$Gx%!>_@qZ|JA1MN^klOR87o2JIny_){AE z+wXkNsWODHo+_EcCTks$L6=6X@Qw_}Dg3`Xv2nVqY<3W;G2lCt#rD(2RFnSAWw-gr zL7MfDSeMg?a29`{?MOHLRsRqj{84An+P)2U&saaS7NyN2eU>Ia`r!+k`4NKr_xFzw zb9P~2pGK5N_&2o5gc6X{kE5#C1Z?LAacmJOW}bJx;{4~(h3+?D>_mh{rvrJ`BeT*y zI4VJi7YzU~_uj5YKCnocwGAer_PFwILpAB0EmIKqC9vNNpk|daqs4tv(co|`%CED> zR8%jk`c8>d>Zuw7RnZQ;@oTj->U5wV!2tO%^7dOGiYD!oHM%6p|=evs6ve|yV_0&SSyVr@M_&z~`Hn~K{ zi_*n)V9ZgMsBrPZ@*398@blw-QNrUri4{JB6(nuM9(3*BKb}K-@!I6WjSeX5BLJQ; z3raxkwwVxe6NzSr>gtL`>n)n5A;!Zkr_X-taf41y{;Wg#2f*OB?q5042!p8gM2|-( z*(?~9YgQE*VugS`LT7X_)ndrLRN)54Xj*eL!Y)ujmcfE)G`L$+C+JN_ok4?em|Xak z2o2|K`8*iI#FDR{LmN+dA}HxAaEv0ug{>({cB3fOZNFTAs8uIh4HU!HJgW4)+*fo0 zp!Y4nJEZL_5VkLLn!^;7|2qx(sHK$3Bm+Dsw_3Xgj%@NL) zkhTS7ZRwnet+_)8j)=L&==7(QB8SfP|MVo@oV>Xco~{m#75680XZ?U`l?b2llovfM z{C{-4cT`jVw#7?A2sNNo=|~6ZU8JfANR{3RARs+-5HOGkN=J}hReJBzNkDq<9YdEQ zL=gdLFaGX5_nv#+dt>lV#$dn>e6#nOYkuZhUW9eqHF^nP4Z$JL&y$AmU7xjd&|7ox zfCOwAf)T*W%ZGA|n~D2BO)0M_VoR&b<}>LH(Q${>0$gY40Y&6Bt~FLWSyN-};H8wI zR@-onVEvYrHiK!fct5aAslOe zbLGe3&Ga_>LhdN>1?*9NeOdg1^qPs~p8NH$q@gP7G)IcvXj2pcwH)m$7kK-;Y!CjL>HL4$tP0r@yghgAe(2jo?V 
zS5NoZh1X24z#S@o_{n#(>sVAmN9o0<#tCuCB%rYo-J<3VMNrZ|M@!7|8Ym?dP$ zSAhIs*7ChEHpZ9F56cAPBUA*q)Cv3?DHe*^UqpfcVxs{sxe*?d*SG}CgsS--a0Q&%R(D_FGl`6O0=L{*+>05q2`Sqf34dZwJOMBRGLO!=&;l z>$n?kjo`Nuasr0b_2%38VR3wb?W9oOBhZTk=X;-(^-Yv3R+^k+yQ$c`7uezlygqhF zT9T(z)98UT#k8pKwFPxTf$Dv2xP_l(HfVU=LsREJFCQ^ZCy&lCn4{7W1>%|p)puz( z5!p5#GRU}4wDvs*6YQ|Yy~BNbQ{~-W_Gg3_LY-ShvIH@MvGDBv&qmBIJT>Y-g0Qs0~L0aMm>a-l}&t&iL@Avj4cZz7Rd!uvCt9b z^2PK$z1j!?M6Jkrx458PTaw-Sz;K#Zj!%3dX*w~tvG3SN#VzQpCkLog=nkHR$jV4R znzFCLcB#&VXF_n$l< z*9^NI6v)<~WrORJ{vxWjAR22R64ex!o_4dLgsOgzE6gL6T^yvXjY4+1--Bnn$QdT? zU9&)%ChV+OP;;@iPhpz`xAysdXDz?NxIZ_A8;D1kn|yfHfI5ChKu5Uq3?}z1Cb#%D z|8}oqNpJ9RnjZSu5cwYahl=vX2np>eASTJ^xX>CE^rYc zENpT4dp`Nk@9#5s!9D1i*_qh%2g2AVa}9vAs4p(U(#yr|DFhzD)^K4t><;IqOGt~@*crMGsz^YELdkYN(E3%BX$NeN-CEF?gL|7MB9EN z#y(8hnckH}62w>1!MT-=R86DTkVCajjjMWsH3bojR}O;ihO*`vKS5lciMquh>T@%;&yJ1FSIwMFIHqA00#tW5Zl6hKyFO#i}XkvEpY# z$E+sdr<5gTV$qC;1K+MZKD3xCI*gF%z}3lTM=n3rr}GFJ3}Eyvtas@DM6uXCwNE%4 za&dsK2^tSe1C@+7ep?OvQvb>)z}H*ZbH4{8O4D4IO5yiT+-W@90sQh6^(SGvU`gT% z$>R!qVtan`%JJjN!^2m0HTHuDyO2GT!a0i~4Yza%ZY(2}V^U-=k zOV#cLWg!)$W1LFMVrqq98}4{J!FcbfT|xYn-bK#`7=lO;_m|QUT9row5N)Z@_ISqs zD%?6=dTA@3GPs%x9YxW^4Y$TTepvMjHCbYxt_;sK(^bvt} zsu}8`j#*~@KTEg&sN7CfOoxU(E%s=g$2ta*#eLm83XCVoN!IIe8(eyCQBOtfj=uu< z>=Z*vL;ANfR|Dywh3rm1_g60)V@Ap03X`DsMHN-(z#>3zlRQA~L6m8i{0s@vXxn@@ z99Oy>|G+(hzhuNu9%C>EK8b@OL&fctp>in&1hT| zA2ce~`X||Ac`{c$xFf)Z-SH2u&#Rb0qGfSK?yX?Cz*YX!m*Vc_^79fR?clJ6FIl(h zUkdJ+r^T;YcbdA-@yHp9$@H~h?q>s317S~J0jX2iE%GP0nuiH;q!5lG5fJT?04UJ4 z-Y-B-Mus#E{b!Em61aDpwpUboK5$@!t(ay|T$%b~E5NmH2 z5M;*AG3f{Fsh*Z5p=qR#t3*K21?5BJkcP=yhc)1&h#Y?60JkiN_8=UrbL3|OJdFB- zS$Cl2t|q@Sziv>tH8_J2;39^tn~sXf_T;)ubYq(zEide$=bx{aZ;NP~KJ))rKsNTa~>DX^v zRG>Q22%avS2|-3+4R9Rk4qw?0_FpitD6K+oah}b&jh>p{p!itOIo4q}QFTYbL!=co zUaUeJ=&8}&PIXJ;%Pkk;GSf`8ufS1+(DOc%16ZSwY`0wMG~v^gN~5$kktxPeFbr)C++C#>0`$2A1cINOmTlq>o8qTnRwVWYpG zu8M_dZfGsSAn^SCLr-k08+mZ)YL9j25N4Wk*XRDVE>JJHD58u0&*O{ zUupTIL6!E1tRUsdDYe9JhwBi-2;6TkY5*C&=cW?Se9^zGQwNr=VRdCAh%{SV)CqN_ 
zlFp&TB68q&UD$ubdW;kBafHsCRD4HlH6^GikCnULpo;22TyN`0v98{T%M-hm``g-~ zS7g3}Dbe;pY4k3y3B0}ZV^owI308Lhx%X7YdpGRgvH0<<50fMOHGJ|*3{$2q$>hr| z#79j^+>Xt}8`>=lgKG}NlH5QZ%7fFM4u4-?= zIR}$>S*F4+_yGbA4$081p7xVg`sD(9p|s1cZq8NQo6w@8I`SSjB z0vc`tszXV5k{{g2{*=y%K^BqD2h>vLEP(elJJZNG&Qo$buaC>i*t^8ZT-IUP@IL?M zfp%_kZFIg_ct;vY=gg9dQ%Um{hT@D9jwhqoioYNES{X)3lLarA2Yj4_SnYq7C{jL+ z`szlBrG5gt8iG!@hyW;~>s?`8e#7|XCu&Q}Tr5(@!<7Uw)QmAufLR5ETM`Ei+zquWr5&&%mrg-Ikqh1qojK;UFYFaP zgJx=!V-s$bAl2m3vF4uX#J6r4=xkC1hIogR3iX{+ZJLly+Rf*L40`fai1&=>jt-9! z#uA`EgA@uzly~*y!Q8j^N8u&T!~uh}JI|XzYKiHUAC*(3Rxs8M*GQM=3J~rrL5m*5 z18NAeA~@k4F;wUF`(R^X-d8LTmf)mTY?I+8g~{Vps2*+$`j|_|)!g&n8y$Cc?y24k zGEdz$pt$~XluVuzd(>$$b-L}VGQntsfA1c)B&9wrcw8~YaaH%QXb~QjL*ZIJWBX1v z5KIdWr|uUXdTeBdw1S9zO1_mt!zUvgZF(*M8M)U(7ff6&gEh%+1{d9y$+hV{$7FGU z#8a5Ib>lAt0J2Pz237%aWQd*e-O4-GH#13zY%g(UOvTg1mW}nQo7%)3!+xk}I#?MNPdrJBEqG zmx7)#ciN~lLeQOkAAmFpA<~M!nA}jZUG1OJ;JIu85qNx!R70HH9o<@pZOnTBRwG5D zlz;ua{UXq6w@Z9Pgrm_Hiaqb+qW$&$KB7)fUIZXTmcNwoKxeOJ&1-ujdT}XU6kf)#VyF_)AstE+Y(2Six+9h;k@05<2z@Aq8A=>^4#2a zJOk%|s(9kau)M#!k5>X6U5AGeE1m9kkMC| zK^WrJg6`D%pxYq==q)f=ZEwQJu(ToxitGT6@)~&2lQn#4LKVgZL1L*k{Wpn60g+}Z za}BP*GlSLX#J67)xftbEQ}G1MdjLg*0M%()kv_OY*GATpP@Un+Ac=rHJ>rbtw6JQ~ zN7E#ndLw1tbvLu%LP1WoBr?0~EP8i<@4dFKe}>MP9n%iK0A!e-!+HtsVY!p=#Ql+Z zNF8&$*|LSZh&snHYw^G4y1!~oG-;fv5UTDPnM8{zCnZjZ4QjzI>rPLWo`E|v)-t** z;U4o)TOL6ERvQ;v3*%#dofPvaYxY7A@~ZZIl#>V-VHS;W^HZEjPv|~TZY^qgk?)80 z{Suqv{n|cNqDcfW{Egx`5cVpq-8JD|uU*keayO)f!y?o{4*9ISOyX9wkk_cb2x@~# zP?BSWnp%I#a6Y~8;Ry#sVtRmGDd21#{uRt9#W=6j+6JU_36D6+a3ml!r_VBGg|Gyu zxB&>w4|coM?(ak#=VIlZ7MhM7Cv*W&@ekP}+K-^0wrnSF3?QBZ3XJek*Bn)pA$}`0zxfj8AP$kk`k*E zk?ghMj2JBx{Om#%fZ0rA`}Vy&*7rSi@+!N!l`}bz`_6V(ltoUAI`6ILG9+$U#pJ9mOL8* zq%uC$g3VH6B?gtf`P(fk#`NN7^J`wy&CLwK8--ERJQxf)R1-t6O`$OhmB3pTi`#VdB;__yr1~j zNw14R^r;JMYcOzz*2JP2KRW(`*j};fgIdiP?#$8X$cX^{7<<9TG3!-|k7MRb!U36c z5k+(#*f>O-fbYi+IK4JZvvh#WZj(3^bbmz26@23s$`Fk z9-RL%v*7$v|IuOlm9Zy`9~+Ld5e!a$VybR-Z}c^W9;W^vl(}nDJV(9x&a+)1tRzDc 
zQFn=O*E+(K^I__?E_8V8x7(rP?v#ej__^A9`O+q7;|a|?JdL5;joC@X1r@b`r_}Cp z9vs%=;?K1>JAOb~*z7@XrmAqYAbVi~{BJSX^9w&Wk?B1f4F}FYD4RfB8YWA)#iwZk ze|=T7TVQc`!V~@P)gZ<+?7fI_>z^)1z?ziOIl_3NTw zB+q>E%kpd8f2;#jXG%6&Q#30qBxB<%&OErDctN-__ztZPFA~9mMveAZu*#y+YJ3ow3&Ko(qP*xGs7P{EljHX z(E^@+D9YSaMTxR?Fd{axS~l0{!SGezRJ-^oc`6IWpsE&YGI=8I(TDx zJU#U|Y=8jdXa!GhJTZmEx#Awxw;uq7$hBGD8$_5^wvB?VY39VT6b=d`vEwA~GK(?P zp_rO-oaa|Qf^XsDvWNNT*1Ym(ZI2Yf1gJikM42TCw7jgrID?9%DhpVHSDzq?gnkGb z%Hk4!{(Ro+c}?%0_#d)QF7ujp{yeqz>aU}*>v)kC2ftw+se&Go^+>g;8y<$22AsdS z6)vP-E&0tHm-V}OgV&p%BdJ9`@?rifoY=vk%D0AG%1H{K<0)DkZU4LTMr^ktuH^xk zByNM|a6zIIx^3g6+(f{XC7rUWe4|zRLgp^^1@tPLn;USg7WzOFts*T^RY|Pdlxu?|?f``3P86$W2CPR( zA#?YL9`tW=Yn=o3XZA%=_7uSSv7FoV16%#U{JQpffGv^((UtgJ?io)d?DGPGqOuK3 z>BbKZHCB{n4hX~Xqh6Zj4!;aaIsI|3X4_a0yg1KJ^xUqxAWRcqaEQq?Ul8CSHoaKHjNs>pln`7dR| zFRzN=rF%!hBr%<@--R;t4gMAm)`%auZhdS-+pe{pJfGDWI2)9!ny(D9Bq3E@v#&BW_?r4hsxX1yi ziTa27w*)&ByK1O>jdOyEPMr%bolMV$Hm9i2Gi|;hu>38*(*(`Gt>k%6R&J#~!oA>Z zXp5q4t;Q%>m?W!Hp@TPz`raSh$U4V5!p`S}vEP5vXk#?fdwry1B_jAP^<~0@a5gnh zLNWD4b-JcwZf+MbC!49TPH4{4bc!n6*A=yP-oz`CqQuN;A)RGTrPSkAn4Xm8SrH7# z_gi0vPnr6RD+?Nh`!=?oWx(q0&-4$q-`_Z4thL3{+KjSF zJbi3w;(JxDS@N!>;Ek#K_RRWDLYzbsZ&?U5z9sXEqby_YqaKRjip$$#T5r@y zUaej+>Q_j9nDGv=vKH7#l&zv>@fWGYW{k`r2k0b@`*N#M{%ic2>fmZV_!|!a1bYty zan2dg(H-iTSI$w|g&1TMZu$&~E2PW!L|1nHXvd$>ITNTAcC`k2KZ%kT%zg2rC?xVG zRRXpW)S`iXTm~-WD{nAqyn2mtY>|0y-&g^_2X9e>aoHT)c~Z{2+A1j~4d zA&X|tBKLPZStI~X;EU8ffXuN+mq8|)f2 z>S>^8bwEV=0JqX~lF?f0jwd=Pq{VY=KD90H{hep#D0KNj)Rq@{$mgExzYQY=QPAiF zzo7fuwTIPzlktw=Us-{cQV_~ za`0FA658hrx}_E+KdE%R!WEe0vi*Yhg~&%EJ;pyveYtLNdR|=FcrSaVC~T8AD;64u z;gD3VAfozw(CORG7}EV#^rq@=zPbdYzF>H59PbNm3)TB*3Jx>B7fZ}m`xoLvVLJua z*846yU(r8+aIIb38m53`3g(OXvF)0}WKl5HAIysj;=8%GVOZ5^7^~)2Ma)Nb3$z~j z%`ZirdfX{FiWV)BJs)ABpH@CX5`^Y?H~ZSev9B{q|GrvejNyIRt)Gfn=r8I?dvdM_ zL6bUj=(*WX5K7TvtUeroB?0BPg111rT4jaiF1=aAw^-oBd1;AwQ(Q8Ji|bNFw1u7z zNrH)WjX+T=w835RHriHxBpRU@pm>VjueAq_T8n)YTL}Hv=$h0nAEQNF=d!UtkkLY5 zQ9G8L*1`$O<<9FIw$h|`E2rx>29r+Lz|Ea|V`!7WORf_t_Sj>@XmaSMNsP(C6=|lGZN_1 
zVn5>dpiZj9T*k4bb^l6Hek3|cD>6kU{!#xs+4T_a(&~&9L;$k9lTDlGz*7n3Jdbea zf9bj+5^=S{57N;D=1qL~amq-b1AB1e7r42S(WD$|W9OqE3;q~42*=&c$m(Z0q6gfa&9`mRvy-*;=B>aO&|Bs5Zk`U(Dqlvjki8iI9WMe$_ zS@T8~7a0D4{5DUQ3~ybnMt{);stpBdC>gjIluXxnMGDN3wmsG)1}`b+vqQ88YJ7gY zQPZqUT&EZfGnd%U(fT~q4)^Q*MlSloLi)d9kGO5)k39zZB^@(3I`iczcn7o-{~DdZ*9 zBQ1G+#eo>IMkouLVZZHm13?T}O4rT^VDDfj+LWFNGZ`-+_}XIsEEH{!-ApDsbL@ zBFzlEE%6KC;JDa}uiSQx6PUmUL^;|@zl7{l@a!wYh%tGUvkNS89ut^MycQ#lxk{JR zSx2-TKCuJHTeDKVT*qXzTw1O00khILJY6&>GXJ)S2%r<~;&@^CeTO96^dRPt1~Pcw zeMRBUPSKc)ZPTKvG^Rx9>kLL+DRg0^5L*<>_PIVzvjqHbb zH&n0q&QCj+&3Ny z4*O{x9wl&#oZf{>XMM~FCERZ;sPS3Un~^`9F_037M#7VpFqdF< zK)oALylF44N!K13=-eWdN8IxpjurmYu1)` zc8uSR3{@PQM6gN)Y`nWj`BQuF3H^pnZd=xEWCE^=N}qe<5f2fvaJk8S#d&i|Vefix z2e(>#^HMilPbUJWP1&90;Ybo+mnn+WK$C2)h1mKcaf{qvAp`P)a}<`%JzAd#a@_D` zsNMmOHxfYKdb%YNQovJhKswzVpkOyq673|C(O;iKod(OLdNoDwKW`#-3P|aSb=-~M zS(iGTgVXpu3|`y@TbBO7NDz?f6+11zRU-`|pA&o}=g#2Pix3(ig1e@0K*=B-WF=7& zuhHe~7AnW~md{#0R~Gr=u>!95>p&vDY0O_9Uk`mNNN#X4^QdoHtgA$C4O|udiE+xz9$VZH+hjko7MaeM_uqXX$m9uev7d!A{K;rrIdwGGDA# zxAJ>q%q=!QO14-vteEPB6bChvqp4SA!Zfp5ADBVj_)rrbD$ltTC{6Bf(Z{O%N0>u4 zOoYz#wp`e+ue%PE(DN)bljbb~LPy8vZ)9Sxcn$(58VHzNjmPdZK0+_hhh4X}E8(ww zDmM+6Kdb|(>BVW|wL=OMK{}evlu3eMR}mG0V5dCdjG(2EQ_rdbQVo0^<#qo+n-D7z z!1~LT%PusW{QgV-C#Av{eMg@UYxUebjk61 z4j*PEM7T%X0`Q3UC)B#Zm~qrlo%ctIW3@HVy~ zu^k#X7V6;ggFMHW7JhZJ&00bdw~K0C1Zpe1c$VhQt~6V(;dVGE|g@Y`>Exlcof8lvfXx5Vd^Z9zdQ= za+RLXjw>Ei5vrj|Bcw~<2odiktEN}B_Em5@qWI4#QR1f=hdf_c0EA_nk7<%H>6ICF z5dw5fJ?0-QC+tbnwwYLZGUn3g%Lg#=nCTv~CP7I+R}RTKp2N97DtgCICZK@CKE7m_ zLKEcyGiNkTtkbd9+?;KGbuq;KuHxr`<=zb)ZYBvXdwr=$s>t|G5wkGn5zMaau?CMy zr=_N%F{N5EBG2$IqjB`2yrRd{$>vct__3 zzTuLOm7HL$R`;)QMd!o+6zqH!Hfr*=OVNaR`W^`#!dJ|r3aL-ktrp7EeQTE=wd30-`d~t_S6sKwCXqhr( zuy1J(Bk^ABWz=Mbag^{GCF)tTUWR(ple4k*D9hCCg2Mc{ucQtFA3k0OEISg9r@rIf zSu4pzo8Cgz%zYOA7=hce(gmUp$gxF6cb-fsjrngv2EP{o{H3WIQ10cgBOKw)Au#N4 zw3)5xZ}@&+1AazS%GbG-4R?_xI(-|lMbmN( zhT4tig{Bi37qS3M)fMsLbE6jn$+Ety!C3IE?>pG-E!s_%ls2}NX>o?8qfMt3)`s3v 
z(3WnBO(G)2EceBBhrkh(gtOG+vGHJc%Oj7pB*v^2LY;Z@2Wo7J@PKkIWMcJ(#)m}h z@2TI7M*RN&iq;0wK-Y}ie~MP^x5ApCo_s0(!a_1}eTPuA_tQiAW6B4_&}Xtz1XBek zr9L{_Vr1u1yJ#xQxmse3exO|v>DVAM)6wxfQypQEIQSp);iAh9PW}n0Gdzqt3I_bj zQ>#F7BgeO9VUJ4#o#d1?BKd@H*2AZJf31hkwhH$aDnd+K1^NG}S8=arl^c_a4=*@F zAN?L?O~YGd?hz7_acIU{>KEj2c8&eFg0=a>e-*4<&RT1g(yk&kNpQSLuT{!hGcW^w zH@v3DDN7{(RIEDP%1FNZnDT%DL%mD%eOPqi=NYZj)Xy{MXFFkk%ha?nsV<$FR%grov5k%j8zq}X;eO*m0V;jc|l4Sg-yylVKY4`(aVkuXt z>Oh}B0?t_y$X8*>F%t4dvE|P*9{=GtRO`1@?ge(YOVL1R@T8~~7Y-yqvOcMWtrTK# z*FmMRc;@HveABmzi((pqW7@G=%*`N1friH>sXp!1R4$|u0lQOlc6Bq+zcYV*j80V< z>zIsgak&l1W*3+n)f(PrQ@ zp(hVc6EexogiT8j?dexm)>ZN-^_H-vm;{W15Ib-@XiJWD%FLZOgTR9>U2{|kMVnG+ zMxuAWBvEwNi&yiw8)o;9?*VDFS5IW_9;7iVJ@S9z1Rx${hxv&@i`ssznBi7*`kZ~> z^>~I(mwz$0?+Y#_%xpGwt+fAos6P+enJ`F?AuZs167t%dHHb zBWtw<48JVJ59m7Y0>K^-@AV<0GY4C}q1f^;3KgXCl5E^J$0r>)UhCwuD;$0mLMqNQOAq7zYqLL)a^+HmnkG1FpZ{{2?qu{ zr}K@?(i~cjOrn~6d{?`GjLgm)^d^($BVwvOw9b|@bo~MUnrEDd=V|)uwKNAlheO;( zdn1^o4$N8c>|hU$`O%r~t5(QKP*edP`*2%khB!D%W4`HY6d+3oGvbFWzsGZs>S7VFGz#^Sn7U6z$U}+n9zKPmKt6ei+rpP4>1jJE-{(+o51;KtCF>iC-+DJ zK#6&&7f0P=4*Hx5|B%bh3#e;WoZsD#r+{8K1FO=*oCAT>Y9_iU_Z2P`wI)CCa@4{c z_*jP({qG3zZ$x*>C!`NDD$&Cl`JwvrNIqT8 zMSVFS=Z`=2@2cJyY9L*k{si3JLZl+Q^6BpC=0!W@pniQ^rybwbjxLNX3R*(K!A`mY zQP!X1z!X^nF`>~xugYN(uwpLL_vIK)u*^Uro|GCsP$p4DOU}f1x8a=__3+kYEF{Xz zQwi^!_*Pf?C^eM7yp(s^r28kKwn(boR1waP-?kHJzQSO{2^M_~_r_|;dmrlnU7Jr? zVGTBdbS*Z28bbs5WeP9K#rb|w%f};-WCOd7#Mk$dN#e}>bm@p^o+Cc#z{;zh z!Tma5=i@qwuhigm!!5I0^uueUCHKySr#1ftG8=D1VqRdJW#a{*^^)m<+1QNKvPcYO z9yK$ar2?`DYIf$cIEM-D?^SBgk>bR)H=Fd(F(F~mt1#nIHJn-E%;x?PHy6PuwjeYP z+;&XqM=TPK<-XW|IA&m~O#}tC%|+b5gUh%f2v$P~>5eKomNW0f{^!RC8^5tsycc{D! 
zFs|`;KXzfdx61sk%YGeeN4!=og{WE%s=7KClhx9 zRn{Bmfqs>AiROd+j6Q{=}T(dKKLY^glqikQU>ecl!`ua!7$<2@43-k16N;d^J z`~qIYKIu=Q5mi%`qVa=R`opX>GhTDcER^nk5;%fQ@Z0&bm<|g}xP%toeS0$3YJTY-mdskJaot*S;N6!&lOdPvXv zQvM5T4>~dn$1M#P3%sgcmbJ%wAF@PRn+Sf`rCY_<|8!&VTlZfGxr8OpeCom3^DROA zuN@^U4y&EJe4~;4XK0c<6_%`g2rak7^F!0&&;K%iE1Aj-g`IXk%C_e@_GhRCZ!r2O z4-&;?_#4wj~6Pd>aRUgr<^?O_0fLfer)1oBt;}miiDw-3*c&@R&FWE(6J+$!SZa$O-;o z0iQB1&vvoGJbsyRftZ7S4K3xF-xmu=fv1yFDfQj+hthwQQp`mhmY^c@N=iynr93K%hLXr_3Je{bqPoO8yDy{ zKz|8R_#Z6r&{tI=qZpRtIHmEqO|H~(`Tf^`l)k@Q;ArbL#+1*{!Wfhgm96Pax{_e*k2gYGE9Kyj_yloXVXiXNjv*{+kO-k>ztBK7CIkB6D0?53d+Qcq?3^ zKyT-7p+QS6D71aeErn%(kB61la-`n95sSKjQ${%dB?ihQmQ@9v2Nx844ZnjUBI-P&F)Cd=v+U~=9 zRJ*+vd%j+~33`>hz0GcPeBQZUd*tM$lFlVJK$@$Y*OLaf@XR_cmEJ5iD60&%wX2|s znAZQr)rLBo8o}Q$vUBoi zUj~p!Hf-`xEQct;=OZO zAKh(1NE5vELotEhMky6mV3UG8C-9kufXmO0E0+DGsfT@;^Jk@R zj%`Kli7r&n&~NN$_Hdye{a1(1(utl`DGtJM!J=R&(C5+rhXi%$iTH02?ga22(3htS z5WiiiDG#jGQyq{){Iq)ZAU@1{l<^VEw8%s-_SN{(P z?ue%b6Ito90K%5EO@yOP>dTpMBGg1MCyFYIn*h#)GY=1x!}}JT!*qtTShqU<1A!a* zfcJt=w1>bc(dHLKyjw8um}9kyb?x%EbKwI|ywNd9gt(ANm~=WQt(g(TY%%7tuIy;g z(1~$X`#Cp!iECZ2JUduN*rkwGe~PmI5_AVV+LC~`&{RY^hNB=WqTn9Y#se-S{_Qr% z$ZSwT%5h3)al|LR5|vF#(@N9e$TUi_oCqXX-wS2>Be5+14C#%b9kJ5PbbkXG{k-&5 z+HCZ8ilW6Nu2n4ZNpW}weQv{W!p0HXx1BboTSY{dM?VIo-sgvW`52w$P5J~Y(`OF; zH}_=F2Cf2=vd_DjZ6o{(!7(@e9|*3^UWP&LKm3!m*4X7Urpg+LOycjZehk1j`9j=R zMb4pA0!|;;?-}n4Y zdY@U~y(TCL@x3W$fakD}+FW+$4gmmdQA?)sB)2OnB58>O(6yQ&yA2y!s()sfehovcFcG@d%42NKi#Z(;G0XTzvX;qxQD2*A zZ$(`Fmj2=AFB1B!2*+pDGDl5h+QJ}(&eTN5Pi|HsMl*E(mQX0PJ#{~-eFbleMlE(- zpBB~rU~N4q;%Qn9hx_U;gkE$9{UUtu{r>qa31j737OuoFfp>zuXSofTvF*1*i;{@r zB2j!3U;ikcrP{o1XoI1@5@RwnALC8YB|QZPNF`*wk4@UUD6BrrVi5ya*ookn+#gm9 zcT(dH&z`1+oDe%UdX zah)64&c>GTy3__s>@%>O$AOZ@BTKN%?R31TBP_D-e%F_ker9w7`~QF$TB)Ly!8t z64YI8l}N}hwaXp~C)Q=HlSf)#R@}6@XpYBAt7#!!$M<%EwhM7#+NUIA6U@QK6ShkK z$WnVGkXB`up#n~o2iYomNe4c~0k`IKVAi>O|H{B_zC|3;kYYc!)GDkd_=C`|Sd?M7 z3pfh+PCMT8_j>+YmCFC4N?X@JBC%-MhFfdk{k}SZAOw@!C22v;gNU%ID>1etuk2#B2S2twV`*7_*R1}lsViy&Dl-~ 
zj3pigOUev}>JL%nDRm)$e)~dv54#%b%-qhX=P|$hF+MbZ^R(Z!o0Nbw4g5{L3v>6n zS7Bgg08(-@!g5M7EijTAMiL}K5cRnIhC_+6N{8|Ac*8USV%#a1Z-MpFy#uF4VWw;x zu%Ia=HG0kL5(5&AxO{RsU+J^`c*g-;qV|8~=m7#5L{zQEr;Dfy!JmqbGFP(Hi!OwG z4B9tBNbu73RGF(H2}hqlLG{Qh8p$xcXy=Shq{;e%$74!~0y`<;sn!-p5|#=5{qq&Y z;{TJU9rb}1;_2HMs(k2WNKu3#nwc_Fj&ONeR^-5ULR_A9ys}IWp5iOQ{gM*u(smPC z261T~Z*N_EFqn1*VF9r6dA0hVy;N2`tRooKM~kjFX4eopeEIq81N&Pc(w{N3^jYB> zR0>)G&4E=4q4-0kC%j2B6l4pQ@>XTT-`hH5|mwAC_TQ;+qz_L+AI(I!s zr}H7rH~*C(COy361OtsPtcsCf(xKkqadt1xXE;vgv?wuvP1hn@)QY*Om_l%wNk^tJ zk2`S8Pc)dSa%I{S#3SN+bd2~(@SWJ|FmNN&-nbOfAHXid5IPm;3iwnp@{Q@DbDKaL zM2Jh7PG{i+=mN)Q>dN4z6oR8YpTEQT<_lU)a};G{PDtDIRJyLO_3=HaBPO5F(%rl2I_A(1VCXCqZI?|8GXCt~9`s~b<`Ealb_*;# zV6%_D4XZy{UrJq!pkUI7D?-}g!}wnUE88*08pyudhdy9Z7E4_BdNsb0VTCZ&UeY{U zs6F4zHXV6`P6emfotD;NXJ#h_ogd%(-(2kT1%!2~{2K~!xSpbFoiB#!R7L&csaGdM zQjpl6m+vs{SM#zgwVo2KTWmp;u+}U09^d4G6GYW9S%^w}B;)@p>-y@=98XA5b^e`J z=Vt0$7?tzY3?QM*1@brV3MHDv!QnmMh6t>fx~JX|?3{Y#?fCnjGDh1!b%u*-=gOwc z?M|L{9;jx7YJcJGOFq9k>iTqy?7Z2U5DKKof9K2IsAlM75Wc;$QG)sPJ>~U%ra!Nfw9HYi13e3_qSu5D z1an{<5dHcx=Hgv}cqW1Uj#XGM5BmS1?5)D0ZoD>JW?<;<4w3HeQ~_yeY3c5+Aw;A> zIs}yN?i{+ibC7PNOW5Q4e)0eI!M?72$PvR2W?1Wqdzm8bwLo+qoqoSlQO`0^ITeyc zP8Vrh9zu{ca?+hcaB8sC3bk~{N>;`wsQCxmm#&iEH7}JIpAABP&O2PO% zUa?Z*XUrMTZ-;obJz5TLa7;)!j)K9DT<7LUUV5mfNEjXpkmC4!SfYwZ)Y3(o4se2( z(eK^o=^SA_H@XpWuF(M+a$hyA^1g3_@>4DLnNk9Yv9lHpICE#D>fA3qChWa*4X@d(p7P(P%%f&5=g6cSD6;|PRh{ye`+JM*ojEh$Y} zK3_yV{gZv`YR%<5_^$Te{|)#?p`PymRv2>OEiU|Zo69?oN(zk7l;vw=ExZ9$cgkx{sz%j;{C=d`U|YOh zb3{B#@iNz)tZIG&e{6&OPc_R=#SU;-#DS&jH!bY-Yw3 zdN9X-KB5@O_*JRWrNk2|CLZV}zBl6M#K+KJ|Hr#?uLEPVr*@wB!TLiN@m)KRbT>UE zka<}WT&TRTdv8fw#z_hCF-RW$oRB%RY>5;7e-md)^sxlpkLyB`SXpj=yF>1d|I;1H z2JIdMYrR$@=Qq%;l)p7&?}>Xc4|9q>4g%tY34Lpj@gLUZ)LS}daK_#hc zKnoF8QHxpvKa+w_DTrAAFZ=EQQ}VBUXB9xw-|uD71X_%GW??*zQ;Z~AWf^z@x^K&i z7FZ!#>T`xY2m>rm5v7nOIDQ9zcf2yKJnT>b3ig^t-E|?HpE7zlL%2pjf@jphe@4#1 zuW&NmX#h3ODntQ0808P0v`9CG|KsKTYvt`s%I~b}54n_YEA)EOuC#?vD62bLL%ny% z|EUrUdte?*wC~uo%$TpR5FL#Yle@#R?-o~KuYH^X?sihVAnBkVc17Bs(Nz2K@SPYo 
zxp}<)Yk7*N1ApKF^(DIEdlVF_E|nsRI5_7aKb+Y-OSu~GJH34q!V);5`(v|pS12LR zge8P_Bo_i-{!l?bKxtQ9h(^1UNj47dZX{Z3+CcjH1r;viqi7B6gsNMQl4&8VN96QR zi%3|=CGfxUXMD#lv3}T!5hQ{jcEI5m5n^0$N0w?Csn?nafWj~RqbcRT9io3LL=6c_ zje)?f2mHSkqEe!3lWr<;e=dH#_9CI^2ZuCTmUc$Jn@%F|*;G8H!%0YV0v&l@{#ANi ziqHv$2n>_F$|4(*00&)sXl>+AfHQv9Lcqu|lWfazDJ&m+&B3~Y%q-V6zxs1Dc!0^o%DPG%k^=Xg$CDZ8^jnJ# z-d7Q8w100FA(II50no8!rQ=P5Ck1L!{ozU)ox66Z@rwGy-R^sf zA2UKAErb7$4J96+hG(Zgg6*Y7ASyO!kdNCnDr4Nj`PYL2*Ih5PU@5;wc$65`K!J5q zJvi;ow(lV^S9Okxok%dhCFe_8@Hv1X4%fJvBXGW5fQn!=@#C%G#^Hz29G|ZK)72!D z3S{?RL(nYUpgbh5-X?nA-E@J`Oju(Tb6&%zgF-4!_ZOC(CpT$kgEn>b7ese7tJT=- zS%g^I&#;40`PAV9-Z_dwhQ_2JOU$({Y~Oj3S$&y>7{W+E_ z#jJ~NuF=lcab!}Y$6JJVO&{jyBm)tTL_D^9TSN+cXGy~tBzJ41C=+(x&e;VrybHw# zBq-*h3l@5zs8WBD0<-~OJyEkaef)y3-^7i(kVE4D zl_@-;<<#)!IPO$o^6i!XY#V~+|CvQAH?;78WWjX0wA4wfGU!TTFQ*dHy_szMQ|137i;*7)1e9pKl;z{ z-{WX>R-kFQDjAId3|}EU_rciU8W%|vD6aN4_Ne9Vn{q)_TaMoKQu~f50r^ZdqF$D; zYitE{icV*sR%DehW+zl2v;2PNQWv3 zZY1O&e@ws-4W|qruv+f^B5-|JejkM2i6%0h$J{HDP!J?HyP5Z*g%IL~vf^37eRqdp)9}hAGmpN{V1}3&&8OYaBZ@L6B1j9z_ zbD?iu$8$H|RJ6qPM}j@wTMYkFAld?StOncvYZCxsT+?The$B|@{%04!_;(kug;SHT z>++thpNA|M_MK23n=c9Yd2jxL5ocx_9V=m&YRrm;3Wxu*3N&xZfKTdKy|x~GHiIM> z!WC~qo0DFDi*SfI3rX;kFU5HIcSjJYxI&vyQGt?5pasmN8d8k5m+J8}iqTrY7&pe% zE^qqp9r@&Fp;o@&oZrFOuw_TD0*1M{3Dky_ua{&r4)kHoUDeSp(P{;_$z5;`p#yLv-XotU*FP0D|bs{;75wrxV+L&h!= zX28R{WmwYmzjp@)+Z5WZ4#xLahBa!@Lt-TU-;rUMocWhoK*ehgq@{$wl@m{vic;Pj z4Uk^-kdmo?-#Udh;z387#SPKAOP?PtoEx9R%ZZ9jx2pxdgLHYPFr4tAZivCQA}Mij zUCy2?*Mm!tfTR<}l5m`A!J--xs!xEYXJ_>yw>hL5Ebh*FHk033@B3$n_J1Kcf1rI3 zWeS@D7$v8JkamC!%I3VOg@3=b2i27|>A#f8lN6Ot2G8af?Z%NP-5 zIRGDvw~49FEh4fV6rA*jQ~*ZFfz$TQ_t_LJ!Wcu0oqar;0?ERIC-xK475|cQ1QGuw z<=7~`)Bx_dU-_Ou^xfGlb9Y|E#JIc~6roI^zcS}}Lha6cK7HEcy7i%|5jJ=m~ zc6znfji7U=uf7M_c)78$;Be15UNMYIV(OUnQIgUkU67$Fu$f2J5+j!nbXc3B8|DEP zPYbsc=J=$7vktp>DRc;*j6svHu;iCDsUW+|EvdeB>%jCy4;a_Gp#{Nnwj`zQO6|Mh zB#*SiKimrIBPLURs1mj+8v!nK#>vlkrFgCy#;wRhd@N2B)SDz<{+`w#n<@g(?ai+4 z#ZJ$euIIm9#mFP&Mb;LxZ|Q*y_iQa$j!AF~ixBC# 
z{vSCR`0+bw3GM&%6)Bu#_g^*Tn8wcK1pPNO=ij!XiPlfWIK+zJ+8=;c)eKl&QAuE? z>o=~hk)`1yv8>ux303FsB6N{xx_%`rrvz?#m9YQw%5{W#4PLHGHsV7}9Lg z+{U6M$bH6*i}D8@J<|<10U>L~D~BcyZ>!lUsHdq60J9#$J7v~oZ!P>f!yKeQx52?5%+S&+~#_(5Npg$^odIQ>QD zXvwA%&40uel?BH+{p(uM>FV6H<-x~R2&-w+O4d8u>w77eg#6Q1%-my_E06t4zW83c zP#BnQ?s3i1k*fLYPaoQ)0+Ag}PDky0@=JJHfPZrEbL-K8XJ1k;{pqRuW4Fdp`fWfg zl~VUNKeB5q!An%~KH9Qx8lA*l(Cun-oqpd$Fn#vjX$1N($s;1!SBF&1vttG0 z$UiIQ&@v?vvU80{B^Y4J3Dc<>s@{RxyD9HC*RGMUSz?zzgy0)gjsVfUbmuS^Ab2Sc z?wZq58wK8_r$}bK<5`ThgA;%MdlAiHCxwz@L>$xKdlMFH!=9_`|7~)zAynq2pb1hB zs-Pl8hNLYitk~w?ww1v?HG2@3jpOx(e3v4{K-n(m4P_%i+Rube=Oh10I3W{)P18^^ z9#6xw{T7%F9dCskTfHD+gHg=}Ml-~yAfjxNpE*Ly_ko>OxO(?xK3Sq^b9En2vDTa` zj7IfOUP19l{Z*G3odYx`$X<7W*8>2W;K#w|E{)}+m`KQmQ=%4QV=VK1V0R*PJ7+EC z7|eMKOY35eGiZ0kmpDiO{n8L$ZJMu@vaL$Fb?W)<(shqZ!R_DM!8IU&zxKgYD~M(5 zvkqbAD^j=3%V3l$RZ^vtJ7~^zg#oQs{zb{B6*fOBsLrgq9h{N<&V2H9R*L30`K;Bs zIXu%|%rYdU-EM<4$k8a-9~wqP`E;>Jd%d$#3b7>Dx8LA$-2<)v`$K!3T*oZiemw8L zg(2p&zde;omZ>a(FAYte>ucPz+jzZ<%8r8`zngNfv)^>oxXxGJoIpo;y+(7VP}ocV z*iP?G6NcZM<969dhqX$zR=6X;X^Ej(3489PUv)~q!Pm%0B=o|d=Y5-1&YjBPh6_=L z6{u|z`n=I5Wf!Tws4!WC@r(S*+_2zy`O*+xu@9>qlqUQJmrHeE!y7ZWoKQf#C)lp~ zby0FoJUpvcFdo-$G}o(wDau=i5qbKOgRegV$r+C!FxkRq#JKZ$t|Exi=#KzL=6w7n zUVVEPoyM$wgN(P$+DPNOc;9;QpKCzWp1Avw_c1Ob0IfMaO6W(3(f1U z6e3E++5VPlFa^^#t_!#uRy4dcay=Maz0{JDEOL%MAVo@f?)`|%FTw?9 znE+0gn@lXSDD1HqIdQzRWA4$bh$3zy5YZXp^q&K}y%EaY9rQ6OjYGSsf71MLL*BSr zi-?wDe(UboiD8XqholgQjhtmFbWH;g0q+pa5Z^+W`pJSP_%SKpuv?w3FOk?O#}VZT zb_UJtIySf8@NtWBby6#Mdgboq*zO3*!H)whTWF!bTruy18$SnhSDz4aDwzOB6)OnZ zPvF6~_yzeWS-<49C(gu}{0OmT_<*z^%W{iUy^3Q)rYy2b9K6YAP2j0CtC+9@yb6VX zk{u5f20JX$eZCxV_o{Hvq$EDMpAAV_Z8g8s%c-iqkq?MZ#G>eq?q@zQVX-xgh(Cy@ zTyA%&WJ=@s(=4bw-Use*PZyo)#GR9vl){IJa!X%AP8ehYzi~yT zbV)0DOIa}9@s(fI^0xmFrv@;bW6txu{&Y$~Cb|X^q^R409=uijLGxPNZ<(@+a0&43 zr~6*M6{75-haa)SRAFNHbSMofouqU1soV zi&_tQeI>4O+K=+>5XVG4lKm5|$v$Bm2Uu5l2`#1`*BB=-gM=(~Gy;AklS$(j1!epT z8^#rCVl|1`$GLpnT2LmHwGiW%1^g_+6Nys%V!By)d&5yxIT)v7&qXjK0hPH2dLPcj 
z7^5XXAG-nM_6PR*s1NQCO2<0q7J=#9DyOz-eg3xym5RejP7S8_#Kxda*AEGvMEsjUV