test(helpers): replace assert_close by assert_similar
dacorvo committed Mar 7, 2024
1 parent c956fe3 commit cafe14b
Showing 3 changed files with 9 additions and 17 deletions.
6 changes: 2 additions & 4 deletions test/tensor/ops/test_linear_dispatch.py
@@ -1,6 +1,6 @@
 import pytest
 import torch
-from helpers import assert_close, random_qtensor, random_tensor
+from helpers import assert_similar, random_qtensor, random_tensor


 @pytest.mark.parametrize("batch_size", [1, 10])
@@ -16,6 +16,4 @@ def test_linear(batch_size, tokens, embeddings, use_bias, dtype, weight_axis, device):
     bias = random_tensor((embeddings,), dtype=dtype).to(device) if use_bias else None
     out = torch.nn.functional.linear(qinputs.dequantize(), qweight.dequantize(), bias)
     qout = torch.nn.functional.linear(qinputs, qweight, bias)
-    # We need to increase rtol for float16
-    rtol = {torch.float32: 1e-5, torch.float16: 1e-2}[dtype]
-    assert_close(out, qout, rtol)
+    assert_similar(out, qout)
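
Note: the assert_similar helper itself lives in test/helpers.py and is not part of this diff. As a hedged sketch only, it presumably replaces elementwise atol/rtol bounds with a single global similarity check, along these lines (the name is taken from the import above, but the signature, defaults, and tolerances here are assumptions, not the actual implementation):

import torch


def assert_similar(a, b, atol=None):
    """Assert that two tensors are globally similar (hypothetical sketch).

    Instead of elementwise atol/rtol pairs tuned per dtype at each call
    site, compare the cosine similarity of the flattened tensors against
    a single per-dtype tolerance.
    """
    assert a.shape == b.shape
    if atol is None:
        # Assumed per-dtype defaults; the real helper may use other values.
        atol = {torch.float32: 1e-6, torch.float16: 1e-3}[a.dtype]
    sim = torch.nn.functional.cosine_similarity(
        a.reshape(-1).float(), b.reshape(-1).float(), dim=0
    ).item()
    assert sim > 1.0 - atol, f"Similarity {sim:.8f} is below 1 - {atol}"

Under that assumption, the per-call tolerance dictionaries for float16 can simply be deleted: the dtype-dependent tolerance moves into the helper, and each test reduces to a one-line assertion.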
12 changes: 3 additions & 9 deletions test/tensor/ops/test_mm_dispatch.py
@@ -1,6 +1,6 @@
 import pytest
 import torch
-from helpers import assert_close, random_qtensor, random_tensor
+from helpers import assert_similar, random_qtensor, random_tensor

 from quanto import QTensor

@@ -17,10 +17,7 @@ def test_matmul(dtype, in_features, hidden, out_features, device):
     qmatmul = torch.matmul(qa, qb)
     # The outputs should be almost identical if we use the dequantized inputs
     matmul = torch.matmul(qa.dequantize(), qb.dequantize())
-    # We need to increase atol and rtol for float16
-    atol = {torch.float32: 1e-6, torch.float16: 2e-3}[dtype]
-    rtol = {torch.float32: 1e-5, torch.float16: 1e-2}[dtype]
-    assert_close(matmul, qmatmul, atol=atol, rtol=rtol)
+    assert_similar(matmul, qmatmul)


 @pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=["fp32", "fp16"])
@@ -35,10 +32,7 @@ def test_bmm(dtype, batch_size, a_shape, b_shape, b_axis, device):
     qbmm = torch.bmm(qa, qb)
     # The outputs should be almost identical if we use the dequantized inputs
     bmm = torch.bmm(qa.dequantize(), qb.dequantize())
-    # We need to increase atol and rtol for float16
-    atol = {torch.float32: 1e-6, torch.float16: 2e-3}[dtype]
-    rtol = {torch.float32: 1e-5, torch.float16: 1e-2}[dtype]
-    assert_close(bmm, qbmm, atol=atol, rtol=rtol)
+    assert_similar(bmm, qbmm)


 @pytest.mark.parametrize(
8 changes: 4 additions & 4 deletions test/tensor/ops/test_qtensor_dispatch.py
@@ -1,6 +1,6 @@
 import pytest
 import torch
-from helpers import assert_close, random_qtensor, random_tensor
+from helpers import assert_similar, random_qtensor, random_tensor

 from quanto import QTensor

@@ -23,11 +23,11 @@ def test_mul_scalar(input_shape, scalar, device):
     qprod = qa * scalar
     assert isinstance(qprod, QTensor)
     prod = qa.dequantize() * scalar
-    assert_close(prod, qprod)
+    assert_similar(prod, qprod)
     qprod = scalar * qa
     assert isinstance(qprod, QTensor)
     prod = scalar * qa.dequantize()
-    assert_close(prod, qprod)
+    assert_similar(prod, qprod)


 @pytest.mark.parametrize("batch_size", [1, 10])
@@ -64,7 +64,7 @@ def test_cat(input_shape, device):
     qother = QTensor.quantize(other, qtype=qinputs.qtype, axis=None, group_size=None, scale=qinputs._scale)
     qcat = torch.cat([qinputs, qother])
     assert isinstance(qcat, QTensor)
-    assert_close(torch.cat([qinputs.dequantize(), qother.dequantize()]), qcat)
+    assert_similar(torch.cat([qinputs.dequantize(), qother.dequantize()]), qcat)
     # Now, verify that with different scales, the output is dequantized
     qother = QTensor.quantize(other, qinputs.qtype)
     qcat = torch.cat([qinputs, qother])
