diff --git a/src/frontends/pytorch/src/op/bincount.cpp b/src/frontends/pytorch/src/op/bincount.cpp
new file mode 100644
index 00000000000000..14d0544c265de3
--- /dev/null
+++ b/src/frontends/pytorch/src/op/bincount.cpp
@@ -0,0 +1,67 @@
+// Copyright (C) 2018-2025 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/broadcast.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/convert_like.hpp"
+#include "openvino/op/maximum.hpp"
+#include "openvino/op/reduce_max.hpp"
+#include "openvino/op/scatter_elements_update.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "utils.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_bincount(const NodeContext& context) {
+    // aten::bincount(Tensor input, Tensor? weights=None, int minlength=0) -> Tensor
+    num_inputs_check(context, 3, 3);
+
+    auto input = context.get_input(0);  // 1-D tensor of non-negative integers
+
+    // Convert the input to i32 so it can be used as scatter indices
+    auto input_i32 = context.mark_node(std::make_shared<v0::Convert>(input, element::i32));
+
+    // Determine the output size: max(input) + 1 or minlength, whichever is larger
+    auto axis_zero = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {0}));
+    auto one = context.mark_node(v0::Constant::create(element::i32, Shape{1}, {1}));
+    auto max_val = context.mark_node(std::make_shared<v1::ReduceMax>(input_i32, axis_zero, true));
+    auto max_length = context.mark_node(std::make_shared<v1::Add>(max_val, one));
+    auto minlength = context.mark_node(std::make_shared<v0::Convert>(context.get_input(2), element::i32));
+    auto output_size = context.mark_node(std::make_shared<v1::Maximum>(max_length, minlength));
+
+    // Handle weights: if None, use integer ones so the result matches PyTorch's integer counts
+    Output<Node> weights;
+    if (context.input_is_none(1)) {
+        auto one_const = context.mark_node(v0::Constant::create(element::i64, Shape{}, {1}));
+        auto input_shape = context.mark_node(std::make_shared<v3::ShapeOf>(input_i32, element::i32));
+        weights = context.mark_node(std::make_shared<v3::Broadcast>(one_const, input_shape));
+    } else {
+        weights = context.get_input(1);
+    }
+
+    // Create the initial output tensor: zeros of size `output_size` with the weights' element type
+    auto zero = context.mark_node(v0::Constant::create(element::f32, Shape{}, {0}));
+    auto zero_typed = context.mark_node(std::make_shared<v1::ConvertLike>(zero, weights));
+    auto output = context.mark_node(std::make_shared<v3::Broadcast>(zero_typed, output_size));
+
+    // Scatter-add the weights at the positions given by the input values to obtain the bincount
+    auto scatter_axis = context.mark_node(v0::Constant::create(element::i32, Shape{}, {0}));
+    auto result = context.mark_node(std::make_shared<v12::ScatterElementsUpdate>(
+        output, input_i32, weights, scatter_axis, v12::ScatterElementsUpdate::Reduction::SUM));
+
+    return {result};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 00e3a55b0bc327..9fced36143620f 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -48,6 +48,7 @@ OP_CONVERTER(translate_avg_pool2d);
 OP_CONVERTER(translate_avg_pool3d);
 OP_CONVERTER(translate_bool);
 OP_CONVERTER(translate_batch_norm);
+OP_CONVERTER(translate_bincount);
 OP_CONVERTER(translate_bitwise_and);
 OP_CONVERTER(translate_bitwise_not);
 OP_CONVERTER(translate_bitwise_or);
@@ -408,6 +409,7 @@ const std::unordered_map<std::string, CreatorFunction> get_supported_ops_ts() {
         {"aten::avg_pool3d", op::quantizable_op<op::translate_avg_pool3d>},
         {"aten::baddbmm", op::translate_addmm},
         {"aten::batch_norm", op::translate_batch_norm},
+        {"aten::bincount", op::translate_bincount},
         {"aten::bitwise_and", op::translate_bitwise_and},
         {"aten::bitwise_not", op::translate_bitwise_not},
         {"aten::bitwise_or", op::translate_bitwise_or},
diff --git a/tests/layer_tests/pytorch_tests/test_bincount.py b/tests/layer_tests/pytorch_tests/test_bincount.py
new file mode 100644
index 00000000000000..1f0872b039169f
--- /dev/null
+++ b/tests/layer_tests/pytorch_tests/test_bincount.py
@@ -0,0 +1,151 @@
+# Copyright (C) 2018-2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import pytest
+import numpy as np
+import torch
+from pytorch_layer_test_class import PytorchLayerTest
+
+
+class TestBincount(PytorchLayerTest):
+    def _prepare_input(self, input_data, weights=None, minlength=0, dtype="int32"):
+        """Prepare inputs for bincount testing."""
+        input_data = np.array(input_data).astype(dtype)
+        # The traced model always receives three inputs; when no weights are given,
+        # pass ones, which the model ignores because it calls bincount without weights.
+        if weights is None:
+            weights = np.ones(input_data.shape, dtype=np.float32)
+        else:
+            weights = np.array(weights).astype("float32")
+        return input_data, weights, np.array(minlength, dtype=np.int32)
+
+    def create_model(self, weights_provided, dtype=None):
+        class BincountModel(torch.nn.Module):
+            def __init__(self, weights_provided, dtype=None):
+                super(BincountModel, self).__init__()
+                self.weights_provided = weights_provided
+                self.dtype = dtype
+
+            def forward(self, x, w, minlength):
+                if self.dtype:
+                    # dtype is passed as a string ("int32", "int64"), map it to a torch dtype
+                    x = x.to(getattr(torch, self.dtype))
+                if self.weights_provided:
+                    return torch.bincount(x, weights=w, minlength=int(minlength))
+                return torch.bincount(x, minlength=int(minlength))
+
+        model_class = BincountModel(weights_provided, dtype)
+        ref_net = None
+
+        return model_class, ref_net, "aten::bincount"
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize(
+        "weights_provided",
+        [True, False],
+    )
+    @pytest.mark.parametrize(
+        "dtype",
+        [
+            "int32",  # bincount only accepts integer inputs
+            "int64",
+        ],
+    )
+    @pytest.mark.parametrize(
+        "input_data, weights, minlength",
+        [
+            # Basic cases
+            ([0, 1, 1, 2, 2, 2, 3], [1.0, 0.5, 0.5, 0.2, 0.3, 0.5, 0.1], 5),  # Basic with weights
+            ([0, 1, 1, 2, 2, 2, 3], None, 5),  # Basic without weights
+
+            # Edge cases
+            ([0, 0, 0, 0], None, 0),  # All zeros
+            ([0, 1, 2, 3], None, 10),  # Minlength greater than max element
+            ([10, 20, 30], None, 0),  # Minlength 0 with large values
+            ([], None, 0),  # Empty array
+            ([0, 1, 2, 3], None, 0),  # Edge case with minlength 0
+
+            # Large values
+            ([1000, 1000, 1000, 1000], None, 4),  # Case with large identical values
+            ([1000, 2000, 3000, 4000], None, 5),  # Case with larger range of values
+
+            # Randomized cases
+            (np.random.randint(0, 100, size=(1000,)), np.random.uniform(0, 1, size=(1000,)), 1000),
+        ],
+    )
+    def test_bincount_basic(self, weights_provided, dtype, input_data, weights, minlength, ie_device, precision, ir_version):
+        self._test(
+            *self.create_model(weights_provided, dtype),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={"input_data": input_data, "weights": weights, "minlength": minlength, "dtype": dtype},
+            rtol=1e-5  # Relative tolerance for floating point comparisons
+        )
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    @pytest.mark.parametrize(
+        "weights_provided",
+        [True, False],
+    )
+    @pytest.mark.parametrize(
+        "dtype",
+        [
+            "int32",  # bincount only accepts integer inputs
+        ],
+    )
+    @pytest.mark.parametrize(
+        "shape",
+        [
+            (3,),  # short 1-D array
+            (16,),  # medium 1-D array
+            (100,),  # long 1-D array; bincount only supports 1-D inputs
+        ],
+    )
+    def test_bincount_shapes(self, weights_provided, dtype, shape, ie_device, precision, ir_version):
+        # Generate random input data within a reasonable range for bincount
+        input_data = np.random.randint(0, 5, shape)
+        weights = np.random.uniform(0, 1, shape) if weights_provided else None
+        minlength = 5
+
+        self._test(
+            *self.create_model(weights_provided, dtype),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={"input_data": input_data, "weights": weights, "minlength": minlength, "dtype": dtype},
+            rtol=1e-5
+        )
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_bincount_weighted_minlength(self, ie_device, precision, ir_version):
+        # Weighted bincount on a 1-D input with an explicit minlength (bincount does not support multi-dimensional inputs)
+        input_data = np.array([1, 2, 3], dtype=np.int32)
+        weights = np.array([1.0, 0.5, 0.1], dtype=np.float32)
+
+        self._test(
+            *self.create_model(True, "int32"),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={"input_data": input_data, "weights": weights, "minlength": 5, "dtype": "int32"},
+            rtol=1e-5
+        )
+
+    @pytest.mark.nightly
+    @pytest.mark.precommit
+    def test_bincount_weighted(self, ie_device, precision, ir_version):
+        # Weighted bincount with repeated values
+        input_data = np.array([0, 1, 1, 2, 2, 2, 3])
+        weights = np.array([1.0, 0.5, 0.5, 0.2, 0.3, 0.5, 0.1])
+        self._test(
+            *self.create_model(True, "int32"),
+            ie_device,
+            precision,
+            ir_version,
+            kwargs_to_prepare_input={"input_data": input_data, "weights": weights, "minlength": 5, "dtype": "int32"},
+            rtol=1e-5
+        )
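
For reference, a minimal NumPy sketch of the decomposition the converter above implements: the output length is max(max(input) + 1, minlength), and the weights (or ones, when no weights are given) are scattered into a zero vector with a sum reduction. The helper name bincount_reference and the use of NumPy are illustrative assumptions, not part of the change.

import numpy as np

def bincount_reference(x, weights=None, minlength=0):
    # Mirrors aten::bincount for a 1-D non-negative integer input with optional weights.
    x = np.asarray(x, dtype=np.int64)
    w = np.ones(x.shape, dtype=np.float32) if weights is None else np.asarray(weights, dtype=np.float32)
    size = max(int(x.max()) + 1 if x.size else 0, int(minlength))
    out = np.zeros(size, dtype=w.dtype)
    np.add.at(out, x, w)  # scatter with sum reduction, analogous to ScatterElementsUpdate(Reduction::SUM)
    return out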