diff --git a/src/frontends/pytorch/src/op/pad.cpp b/src/frontends/pytorch/src/op/pad.cpp
index 9250fdf6a506d7..d5f2d20e573b01 100644
--- a/src/frontends/pytorch/src/op/pad.cpp
+++ b/src/frontends/pytorch/src/op/pad.cpp
@@ -134,6 +134,14 @@ OutputVector translate_reflection_pad_nd_fx(const NodeContext& context) {
     return translate_pad_common(context, data, paddings, pad_value, "reflect");
 }
 
+OutputVector translate_replicate_pad_nd_fx(const NodeContext& context) {
+    num_inputs_check(context, 2, 2);
+    auto data = context.get_input(0);
+    auto paddings = context.const_input<std::vector<int64_t>>(1);
+    Output<Node> pad_value = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0}));
+    return translate_pad_common(context, data, paddings, pad_value, "replicate");
+}
+
 }  // namespace op
 }  // namespace pytorch
 }  // namespace frontend
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 458ad679d8b444..bb7afdcff13e57 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -305,7 +305,8 @@ OP_CONVERTER(translate_new_zeros_fx);
 OP_CONVERTER(translate_ones_fx);
 OP_CONVERTER(translate_ones_like_fx);
 OP_CONVERTER(translate_reflection_pad_nd_fx);
+OP_CONVERTER(translate_replicate_pad_nd_fx);
 OP_CONVERTER(translate_repeat_fx);
 OP_CONVERTER(translate_rsub_fx);
 OP_CONVERTER(translate_scalar_tensor_fx);
 OP_CONVERTER(translate_scaled_dot_product_attention_fx);
@@ -626,6 +627,9 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
     {"aten::remainder", op::translate_remainder},
     {"aten::repeat", op::translate_1to1_match_2_inputs},
     {"aten::repeat_interleave", op::translate_repeat_interleave},
+    {"aten::replication_pad1d", op::translate_replicate_pad_nd_fx},
+    {"aten::replication_pad2d", op::translate_replicate_pad_nd_fx},
+    {"aten::replication_pad3d", op::translate_replicate_pad_nd_fx},
     {"aten::reshape", op::translate_reshape},
     {"aten::reshape_as", op::translate_reshape_as},
     // TO DO: enable behaviour for resolve_conj and resolve_neg complex tensors,
@@ -945,6 +949,9 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_fx() {
     {"aten.reflection_pad1d.default", op::translate_reflection_pad_nd_fx},
     {"aten.reflection_pad2d.default", op::translate_reflection_pad_nd_fx},
     {"aten.reflection_pad3d.default", op::translate_reflection_pad_nd_fx},
+    {"aten.replication_pad1d.default", op::translate_replicate_pad_nd_fx},
+    {"aten.replication_pad2d.default", op::translate_replicate_pad_nd_fx},
+    {"aten.replication_pad3d.default", op::translate_replicate_pad_nd_fx},
     {"aten.relu.default", op::translate_1to1_match_1_inputs},
     {"aten.relu_.default", op::inplace_op},
     {"aten.repeat.default", op::translate_repeat_fx},
diff --git a/tests/layer_tests/pytorch_tests/test_pad.py b/tests/layer_tests/pytorch_tests/test_pad.py
index adbad1efee71cb..38838ffc0585bb 100644
--- a/tests/layer_tests/pytorch_tests/test_pad.py
+++ b/tests/layer_tests/pytorch_tests/test_pad.py
@@ -219,9 +219,9 @@ def __init__(self, pads):
                 if ndim == 1:
                     self.pad = torch.nn.ReflectionPad1d(pads)
                 elif ndim == 2:
-                    self.pad = torch.nn.ReflectionPad1d(pads)
+                    self.pad = torch.nn.ReflectionPad2d(pads)
                 elif ndim == 3:
-                    self.pad = torch.nn.ReflectionPad1d(pads)
+                    self.pad = torch.nn.ReflectionPad3d(pads)
                 else:
                     raise Exception("Unsupported pads")
 
@@ -244,3 +244,121 @@ def test_reflection_padnd(self, pads, dtype, ie_device, precision, ir_version):
         print(ndim)
         self._test(*self.create_model(pads), ie_device, precision, ir_version,
                    kwargs_to_prepare_input={"ndim": ndim, "dtype": dtype})
+
+class TestReplicatePad1D(PytorchLayerTest):
+    def _prepare_input(self, ndim=4, dtype="float32"):
+        import numpy as np
+        input_5d_shape = [5, 9, 1, 1, 2, 4]
+        return (np.random.randn(*input_5d_shape[:ndim]).astype(dtype),)
+
+    def create_model(self, pads):
+        import torch
+        import torch.nn.functional as F
+
+        class aten_pad(torch.nn.Module):
+            def __init__(self, pads):
+                super().__init__()
+                self.pad = torch.nn.ReplicationPad1d(pads)
+
+            def forward(self, x):
+                return self.pad(x)
+
+        return aten_pad(pads), None, "aten::pad"
+
+    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
+    @pytest.mark.parametrize("pads", [
+        1,
+        2,
+        3,
+        (1, 2),
+        (2, 1),
+        (2, 3),
+        (3, 4),
+    ])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.precommit_torch_export
+    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
+        ndim = 3
+        self._test(*self.create_model(pads), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={"ndim": ndim, "dtype": dtype})
+
+class TestReplicatePad2D(PytorchLayerTest):
+    def _prepare_input(self, ndim=4, dtype="float32"):
+        import numpy as np
+        input_5d_shape = [5, 9, 1, 1, 2, 4]
+        return (np.random.randn(*input_5d_shape[:ndim]).astype(dtype),)
+
+    def create_model(self, pads):
+        import torch
+        import torch.nn.functional as F
+
+        class aten_pad(torch.nn.Module):
+            def __init__(self, pads):
+                super().__init__()
+                self.pad = torch.nn.ReplicationPad2d(pads)
+
+            def forward(self, x):
+                return self.pad(x)
+
+        return aten_pad(pads), None, "aten::pad"
+
+    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
+    @pytest.mark.parametrize("pads", [
+        1,
+        2,
+        3,
+        (1, 2, 2, 1),
+        (2, 1, 3, 4),
+        (2, 3, 1, 2),
+        (3, 4, 5, 6),
+    ])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.precommit_torch_export
+    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
+        ndim = 4
+        self._test(*self.create_model(pads), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={"ndim": ndim, "dtype": dtype})
+
+class TestReplicatePad3D(PytorchLayerTest):
+    def _prepare_input(self, ndim=4, dtype="float32"):
+        import numpy as np
+        input_5d_shape = [5, 9, 1, 1, 2, 4]
+        return (np.random.randn(*input_5d_shape[:ndim]).astype(dtype),)
+
+    def create_model(self, pads):
+        import torch
+        import torch.nn.functional as F
+
+        class aten_pad(torch.nn.Module):
+            def __init__(self, pads):
+                super().__init__()
+                self.pad = torch.nn.ReplicationPad3d(pads)
+
+            def forward(self, x):
+                return self.pad(x)
+
+        return aten_pad(pads), None, "aten::pad"
+
+    @pytest.mark.parametrize("dtype", ["float32", "float64", "int32"])
+    @pytest.mark.parametrize("pads", [
+        1,
+        2,
+        3,
+        (1, 2, 2, 1, 3, 4),
+        (2, 1, 3, 4, 2, 1),
+        (2, 3, 1, 2, 2, 1),
+        (3, 4, 5, 6, 1, 2),
+    ])
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.precommit_torch_export
+    def test_replicate_padnd(self, pads, dtype, ie_device, precision, ir_version):
+        ndim = 5
+        self._test(*self.create_model(pads), ie_device, precision, ir_version,
+                   kwargs_to_prepare_input={"ndim": ndim, "dtype": dtype})
+
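For context, a minimal sketch of what this change is meant to enable end to end. It is not part of the diff; it assumes an OpenVINO build that includes the converter above and uses the standard openvino.convert_model / compile_model Python APIs.

import numpy as np
import torch
import openvino as ov

# torch.nn.ReplicationPad2d lowers to aten::replication_pad2d (TorchScript) /
# aten.replication_pad2d.default (FX); the new converter routes these through
# translate_pad_common with the "replicate" (edge-value) padding mode.
model = torch.nn.Sequential(torch.nn.ReplicationPad2d((1, 2, 2, 1))).eval()
x = np.random.randn(1, 3, 4, 4).astype(np.float32)

# Convert the PyTorch module and compile it for CPU.
ov_model = ov.convert_model(model, example_input=torch.from_numpy(x))
compiled = ov.compile_model(ov_model, "CPU")

# The OpenVINO result should match the PyTorch reference.
expected = model(torch.from_numpy(x)).numpy()
result = compiled(x)[0]
assert np.allclose(expected, result)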