Skip to content

Commit

Permalink
[Common]Fix Test for SDPA and Concat Unified Scales Test (#3207)
Browse files Browse the repository at this point in the history
### Changes

Changes the model from using the same input tensor for Q, K, and V to using a
different input tensor for each.

### Reason for changes

The former model was causing an error with OpenVINO.
  • Loading branch information
anzr299 authored Jan 23, 2025
1 parent eab6b46 commit b6f2e75
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 16 deletions.
10 changes: 7 additions & 3 deletions tests/openvino/native/test_unified_scales.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,16 @@

class TestUnifiedScales(TemplateTestUnifiedScales):
def get_backend_specific_model(self, model: torch.nn.Module) -> ov.Model:
input_shape = model.INPUT_SHAPE
q_input_shape = model.Q_INPUT_SHAPE
kv_input_shape = model.KV_INPUT_SHAPE

backend_model = ov.convert_model(
model,
example_input=(
torch.randn(input_shape),
torch.randn(input_shape),
torch.ones(q_input_shape),
torch.ones(q_input_shape),
torch.ones(kv_input_shape),
torch.ones(kv_input_shape),
),
)

Expand Down
12 changes: 7 additions & 5 deletions tests/torch/fx/test_unified_scales.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,21 @@

import torch

from nncf.torch.nncf_network import NNCFNetwork
from tests.cross_fw.test_templates.test_unified_scales import TemplateTestUnifiedScales
from tests.torch.fx.helpers import get_torch_fx_model_q_transformed


class TestUnifiedScales(TemplateTestUnifiedScales):
def get_backend_specific_model(self, model: torch.nn.Module) -> NNCFNetwork:
input_shape = model.INPUT_SHAPE
def get_backend_specific_model(self, model: torch.nn.Module) -> torch.fx.GraphModule:
q_input_shape = model.Q_INPUT_SHAPE
kv_input_shape = model.KV_INPUT_SHAPE
backend_model = get_torch_fx_model_q_transformed(
model,
(
torch.randn(input_shape),
torch.randn(input_shape),
torch.ones(q_input_shape),
torch.ones(q_input_shape),
torch.ones(kv_input_shape),
torch.ones(kv_input_shape),
),
)

Expand Down
9 changes: 6 additions & 3 deletions tests/torch/quantization/test_unified_scales.py
Original file line number Diff line number Diff line change
Expand Up @@ -718,12 +718,15 @@ def test_unified_scales_with_shared_nodes():

class TestUnifiedScales(TemplateTestUnifiedScales):
def get_backend_specific_model(self, model: torch.nn.Module) -> NNCFNetwork:
input_shape = model.INPUT_SHAPE
q_input_shape = model.Q_INPUT_SHAPE
kv_input_shape = model.KV_INPUT_SHAPE
backend_model = wrap_model(
model,
(
torch.randn(input_shape),
torch.randn(input_shape),
torch.ones(q_input_shape),
torch.ones(q_input_shape),
torch.ones(kv_input_shape),
torch.ones(kv_input_shape),
),
trace_parameters=True,
)
Expand Down
11 changes: 6 additions & 5 deletions tests/torch/test_models/synthetic.py
Original file line number Diff line number Diff line change
Expand Up @@ -665,16 +665,17 @@ def forward(self, x):


class ConcatSDPABlock(torch.nn.Module):
    """Synthetic model: concat feeds SDPA's query; key/value are separate inputs.

    The query is built by concatenating two inputs on the last dimension,
    while key and value each come from their own, independent input tensor.
    Per the commit message, feeding the same tensor to Q, K, and V caused an
    OpenVINO conversion error, so each now has a distinct input.
    """

    # Shape of each of the two query halves (concatenated on the last dim,
    # so the effective query shape equals KV_INPUT_SHAPE).
    Q_INPUT_SHAPE = [2, 10, 6]
    # Shape of the key input and of the value input.
    KV_INPUT_SHAPE = [2, 10, 12]

    def __init__(self):
        super().__init__()

    def forward(self, x, y, z, w):
        """Run scaled dot-product attention with Q = cat(x, y), K = z, V = w.

        :param x: first query half, shape ``Q_INPUT_SHAPE``.
        :param y: second query half, shape ``Q_INPUT_SHAPE``.
        :param z: key tensor, shape ``KV_INPUT_SHAPE``.
        :param w: value tensor, shape ``KV_INPUT_SHAPE``.
        :return: attention output, shape ``KV_INPUT_SHAPE``.
        """
        concatenated_input = torch.cat((x, y), dim=-1)
        query = concatenated_input
        key = z
        value = w
        # No dropout here: keeps the op deterministic for the tests.
        attn_output = torch.nn.functional.scaled_dot_product_attention(query, key, value)

        return attn_output

0 comments on commit b6f2e75

Please sign in to comment.