diff --git a/.github/workflows/call_precommit_windows.yml b/.github/workflows/call_precommit_windows.yml
new file mode 100644
index 00000000000..2ece937835d
--- /dev/null
+++ b/.github/workflows/call_precommit_windows.yml
@@ -0,0 +1,188 @@
+name: call-precommit
+permissions: read-all
+
+on:
+  workflow_call:
+    inputs:
+      python_version:
+        description: 'Python version'
+        type: string
+        required: true
+      override_requirements:
+        description: 'Override requirements'
+        default: ''
+        type: string
+        required: false
+
+jobs:
+  common:
+    timeout-minutes: 40
+    runs-on: windows-2019
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: pip install . -r tests/common/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run common precommit test scope
+        run: make test-common
+        env:
+          NUM_WORKERS: 2
+
+  onnx:
+    timeout-minutes: 40
+    runs-on: windows-2019-8-core
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: pip install . -r tests/onnx/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run ONNX precommit test scope
+        run: make test-onnx
+        env:
+          NUM_WORKERS: 4
+
+  openvino:
+    timeout-minutes: 40
+    runs-on: windows-2019-8-core
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: pip install . -r tests/openvino/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run OV precommit test scope
+        run: make test-openvino
+        env:
+          NUM_WORKERS: 4
+
+  pytorch-cpu:
+    timeout-minutes: 100
+    runs-on: windows-2019-8-core
+    defaults:
+      run:
+        shell: bash
+    env:
+      DEBIAN_FRONTEND: noninteractive
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - uses: ilammy/msvc-dev-cmd@ed94116c4d30d2091601b81f339a2eaa1c2ba0a6 # v1.4.1
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: pip install . -r tests/torch/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run PyTorch precommit test scope
+        run: |
+          set +e
+          export LIB="${LIB};$(python -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))")"
+          export LIB="${LIB};$(python -c "import sys; print(sys.prefix + '/libs')")"
+          export INCLUDE="${INCLUDE};$(python -c "import sysconfig; print(sysconfig.get_path('include'))")"
+          make test-torch-cpu
+        env:
+          NUM_WORKERS: 1 # Parallel tests fail when building extensions.
+
+  tensorflow:
+    timeout-minutes: 40
+    runs-on: windows-2019-8-core
+    if: ${{ inputs.python_version != '3.12' }}
+    defaults:
+      run:
+        shell: bash
+    env:
+      DEBIAN_FRONTEND: noninteractive
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: pip install . -r tests/tensorflow/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run TensorFlow precommit test scope
+        run: make test-tensorflow
+        env:
+          NUM_WORKERS: 6
+
+  pytorch2-cpu:
+    timeout-minutes: 40
+    runs-on: windows-2019-8-core
+    defaults:
+      run:
+        shell: bash
+    steps:
+      - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+        with:
+          lfs: true
+      - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
+        with:
+          python-version: ${{ inputs.python_version }}
+      - uses: ilammy/msvc-dev-cmd@ed94116c4d30d2091601b81f339a2eaa1c2ba0a6 # v1.4.1
+      - name: Override constraints
+        if: ${{ inputs.override_requirements != '' }}
+        run: python .github/scripts/override_constraints.py "${{ inputs.override_requirements }}"
+        shell: bash
+      - name: Install NNCF and test requirements
+        run: |
+          pip install . -r tests/torch2/requirements.txt
+      - name: Print installed modules
+        run: pip list
+      - name: Run torch2 precommit test scope
+        run: |
+          set +e
+          export LIB="${LIB};$(python -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))")"
+          export LIB="${LIB};$(python -c "import sys; print(sys.prefix + '/libs')")"
+          export INCLUDE="${INCLUDE};$(python -c "import sysconfig; print(sysconfig.get_path('include'))")"
+          pytest -ra tests/torch2 -m "not cuda"
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 7121d84efb3..763ca44eef6 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -50,3 +50,9 @@ jobs:
   weight-compression:
     if: github.repository_owner == 'openvinotoolkit'
     uses: ./.github/workflows/conformance_weight_compression.yml
+
+  precommit-windows:
+    if: github.repository_owner == 'openvinotoolkit'
+    uses: ./.github/workflows/call_precommit_windows.yml
+    with:
+      python_version: "3.10"
diff --git a/nncf/experimental/torch2/function_hook/graph/build_graph_mode.py b/nncf/experimental/torch2/function_hook/graph/build_graph_mode.py
index a5659c631b1..cfb26681c03 100644
--- a/nncf/experimental/torch2/function_hook/graph/build_graph_mode.py
+++ b/nncf/experimental/torch2/function_hook/graph/build_graph_mode.py
@@ -192,9 +192,11 @@ def process_tensor_attributes(self, output: torch.Tensor, op_meta: OpMeta) -> No
             if output.grad_fn.name() == "TransposeBackward0":
                 fn_name = "transpose"
                 # grad_fn collect arguments as _saved_dim0=18446744073709551614
+                # Use static arguments for .mT
+                # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.mT
                 fn_kwargs = {
-                    "dim0": -(2**64 - output.grad_fn._saved_dim0),  # type: ignore[attr-defined]
-                    "dim1": -(2**64 - output.grad_fn._saved_dim1),  # type: ignore[attr-defined]
+                    "dim0": -2,
+                    "dim1": -1,
                 }
             if output.grad_fn.name() == "PermuteBackward0":
                 fn_name = "permute"
diff --git a/tests/common/utils/test_timer.py b/tests/common/utils/test_timer.py
index ab96ab43e3f..58e1ee9c898 100644
--- a/tests/common/utils/test_timer.py
+++ b/tests/common/utils/test_timer.py
@@ -17,7 +17,7 @@
 
 def test_timer(nncf_caplog):
     with timer() as t:
-        time.sleep(1)
+        time.sleep(1.2)
 
     t()
diff --git a/tests/onnx/quantization/test_classification_models_graph.py b/tests/onnx/quantization/test_classification_models_graph.py
index d4b965d527f..12ed1639d68 100644
--- a/tests/onnx/quantization/test_classification_models_graph.py
+++ b/tests/onnx/quantization/test_classification_models_graph.py
@@ -22,72 +22,56 @@
 from tests.onnx.quantization.common import mock_collect_statistics
 from tests.onnx.weightless_model import load_model_topology_with_zeros_weights
 
+
+def model_builder(model_name):
+    if model_name == "resnet18":
+        return models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
+    if model_name == "resnet50_cpu_spr":
+        return models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
+    if model_name == "mobilenet_v2":
+        return models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
+    if model_name == "mobilenet_v3_small":
+        return models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1)
+    if model_name == "inception_v3":
+        return models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1)
+    if model_name == "googlenet":
+        return models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1)
+    if model_name == "vgg16":
+        return models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1)
+    if model_name == "shufflenet_v2_x1_0":
+        return models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1)
+    if model_name == "squeezenet1_0":
+        return models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1)
+    if model_name == "densenet121":
+        return models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1)
+    if model_name == "mnasnet0_5":
+        return models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1)
+    raise ValueError(f"Unknown model name {model_name}")
+
+
 TORCHVISION_TEST_DATA = [
-    (
-        ModelToTest("resnet18", [1, 3, 224, 224]),
-        models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("resnet50_cpu_spr", [1, 3, 224, 224]),
-        models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1),
-        {"target_device": TargetDevice.CPU_SPR},
-    ),
-    (
-        ModelToTest("mobilenet_v2", [1, 3, 224, 224]),
-        models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]),
-        models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("inception_v3", [1, 3, 224, 224]),
-        models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("googlenet", [1, 3, 224, 224]),
-        models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("vgg16", [1, 3, 224, 224]),
-        models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]),
-        models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("squeezenet1_0", [1, 3, 224, 224]),
-        models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("densenet121", [1, 3, 224, 224]),
-        models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1),
-        {},
-    ),
-    (
-        ModelToTest("mnasnet0_5", [1, 3, 224, 224]),
-        models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1),
-        {},
-    ),
+    (ModelToTest("resnet18", [1, 3, 224, 224]), {}),
+    (ModelToTest("resnet50_cpu_spr", [1, 3, 224, 224]), {"target_device": TargetDevice.CPU_SPR}),
+    (ModelToTest("mobilenet_v2", [1, 3, 224, 224]), {}),
+    (ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]), {}),
+    (ModelToTest("inception_v3", [1, 3, 224, 224]), {}),
+    (ModelToTest("googlenet", [1, 3, 224, 224]), {}),
+    (ModelToTest("vgg16", [1, 3, 224, 224]), {}),
+    (ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]), {}),
+    (ModelToTest("squeezenet1_0", [1, 3, 224, 224]), {}),
+    (ModelToTest("densenet121", [1, 3, 224, 224]), {}),
+    (ModelToTest("mnasnet0_5", [1, 3, 224, 224]), {}),
 ]
 
 
 @pytest.mark.parametrize(
-    ("model_to_test", "model", "quantization_parameters"),
+    ("model_to_test", "quantization_parameters"),
     TORCHVISION_TEST_DATA,
     ids=[model_to_test[0].model_name for model_to_test in TORCHVISION_TEST_DATA],
 )
-def test_min_max_quantization_graph_torchvision_models(tmp_path, mocker, model_to_test, model, quantization_parameters):
+def test_min_max_quantization_graph_torchvision_models(tmp_path, mocker, model_to_test, quantization_parameters):
     mock_collect_statistics(mocker)
+    model = model_builder(model_to_test.model_name)
     onnx_model_path = tmp_path / (model_to_test.model_name + ".onnx")
     x = torch.randn(model_to_test.input_shape, requires_grad=False)
     torch.onnx.export(model, x, onnx_model_path, opset_version=13)
@@ -105,6 +89,7 @@ def test_min_max_quantization_graph_torchvision_models(tmp_path, mocker, model_t
 )
 def test_min_max_quantization_graph_onnx_model(tmp_path, mocker, model_to_test):
     mock_collect_statistics(mocker)
+
     onnx_model_path = ONNX_MODEL_DIR / (model_to_test.model_name + ".onnx")
 
     original_model = load_model_topology_with_zeros_weights(onnx_model_path)
diff --git a/tests/onnx/test_nncf_graph_builder.py b/tests/onnx/test_nncf_graph_builder.py
index 7ccf52cb59d..d81719f73f3 100644
--- a/tests/onnx/test_nncf_graph_builder.py
+++ b/tests/onnx/test_nncf_graph_builder.py
@@ -14,7 +14,6 @@
 import onnx
 import pytest
 import torch
-from torchvision import models
 
 from nncf.onnx.graph.model_transformer import ONNXModelTransformer
 from nncf.onnx.graph.nncf_graph_builder import GraphConverter
@@ -25,6 +24,7 @@
 from tests.onnx.models import OneConvolutionalModel
 from tests.onnx.opset_converter import convert_opset_version
 from tests.onnx.quantization.common import ModelToTest
+from tests.onnx.quantization.test_classification_models_graph import model_builder
 from tests.onnx.weightless_model import load_model_topology_with_zeros_weights
 
 REFERENCE_GRAPHS_DIR = ONNX_TEST_ROOT / "data" / "reference_graphs" / "original_nncf_graph"
@@ -42,55 +42,26 @@ def test_compare_nncf_graph_synthetic_models(model_cls_to_test):
 
 
 CLASSIFICATION_MODEL_DEF_AND_OBJ = [
-    (
-        ModelToTest("resnet18", [1, 3, 224, 224]),
-        models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("mobilenet_v2", [1, 3, 224, 224]),
-        models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]),
-        models.mobilenet_v3_small(weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("inception_v3", [1, 3, 224, 224]),
-        models.inception_v3(weights=models.Inception_V3_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("googlenet", [1, 3, 224, 224]),
-        models.googlenet(weights=models.GoogLeNet_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("vgg16", [1, 3, 224, 224]),
-        models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]),
-        models.shufflenet_v2_x1_0(weights=models.ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("squeezenet1_0", [1, 3, 224, 224]),
-        models.squeezenet1_0(weights=models.SqueezeNet1_0_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("densenet121", [1, 3, 224, 224]),
-        models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1),
-    ),
-    (
-        ModelToTest("mnasnet0_5", [1, 3, 224, 224]),
-        models.mnasnet0_5(weights=models.MNASNet0_5_Weights.IMAGENET1K_V1),
-    ),
+    ModelToTest("resnet18", [1, 3, 224, 224]),
+    ModelToTest("mobilenet_v2", [1, 3, 224, 224]),
+    ModelToTest("mobilenet_v3_small", [1, 3, 224, 224]),
+    ModelToTest("inception_v3", [1, 3, 224, 224]),
+    ModelToTest("googlenet", [1, 3, 224, 224]),
+    ModelToTest("vgg16", [1, 3, 224, 224]),
+    ModelToTest("shufflenet_v2_x1_0", [1, 3, 224, 224]),
+    ModelToTest("squeezenet1_0", [1, 3, 224, 224]),
+    ModelToTest("densenet121", [1, 3, 224, 224]),
+    ModelToTest("mnasnet0_5", [1, 3, 224, 224]),
 ]
 
 
 @pytest.mark.parametrize(
-    ("model_to_test", "model"),
+    ("model_to_test"),
     CLASSIFICATION_MODEL_DEF_AND_OBJ,
-    ids=[x.model_name for x, _ in CLASSIFICATION_MODEL_DEF_AND_OBJ],
+    ids=[x.model_name for x in CLASSIFICATION_MODEL_DEF_AND_OBJ],
 )
-def test_compare_nncf_graph_classification_real_models(tmp_path, model_to_test, model):
+def test_compare_nncf_graph_classification_real_models(tmp_path, model_to_test):
+    model = model_builder(model_to_test.model_name)
     onnx_model_path = tmp_path / (model_to_test.model_name + ".onnx")
     x = torch.randn(model_to_test.input_shape, requires_grad=False)
     torch.onnx.export(model, x, onnx_model_path, opset_version=13)
@@ -115,7 +86,7 @@ def test_compare_nncf_graph_classification_real_models(tmp_path, model_to_test,
 
 
 @pytest.mark.parametrize(("model_to_test"), DETECTION_MODELS, ids=[x.model_name for x in DETECTION_MODELS])
-def test_compare_nncf_graph_detection_real_models(tmp_path, model_to_test):
+def test_compare_nncf_graph_detection_real_models(model_to_test):
     onnx_model_dir = TEST_ROOT / "onnx" / "data" / "models"
     onnx_model_path = onnx_model_dir / (model_to_test.model_name + ".onnx")
     if not os.path.isdir(onnx_model_dir):
diff --git a/tests/onnx/test_weightless_model.py b/tests/onnx/test_weightless_model.py
index b1c7eadca4a..f93da6b36aa 100644
--- a/tests/onnx/test_weightless_model.py
+++ b/tests/onnx/test_weightless_model.py
@@ -12,22 +12,15 @@
 from pathlib import Path
 
 import onnx
-import pytest
 import torch
 from torchvision import models
 
-from tests.onnx.quantization.common import ModelToTest
 from tests.onnx.weightless_model import save_model_without_tensors
 
 
-@pytest.mark.parametrize(
-    ("model_to_test", "model"),
-    [
-        (ModelToTest("resnet18", [1, 3, 224, 224]), models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)),
-    ],
-)
-def test_save_weightless_model(tmp_path, model_to_test, model):
-    onnx_model_path = tmp_path / (model_to_test.model_name + ".onnx")
+def test_save_weightless_model(tmp_path):
+    model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)
+    onnx_model_path = tmp_path / "resnet18.onnx"
     x = torch.randn([1, 3, 224, 224], requires_grad=False)
     torch.onnx.export(model, x, onnx_model_path)
     onnx_model = onnx.load_model(onnx_model_path)
diff --git a/tests/torch/test_pytorch_patch.py b/tests/torch/test_pytorch_patch.py
index 2181ffeaafc..f23a10551e1 100644
--- a/tests/torch/test_pytorch_patch.py
+++ b/tests/torch/test_pytorch_patch.py
@@ -114,7 +114,7 @@ def test_jit_script_exception_preserves_patching():
     run_pytest_case_function_in_separate_process(test_jit_script_exception_preserves_patching_isolated)
 
 
-@pytest.mark.xfail(is_windows(), reason="https://github.com/pytorch/pytorch/issues/122094")
+@pytest.mark.skipif(is_windows(), reason="https://github.com/pytorch/pytorch/issues/122094")
 @pytest.mark.parametrize("compile_forward", [False, True])
 def test_torch_compile(compile_forward):
     # Run test case in a separate process to track patching of torch by NNCF
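Side note (not part of the patch): the hard-coded dim0/dim1 in the build_graph_mode.py hunk rely on two facts that the patch's own comment points at via the PyTorch docs link: x.mT is equivalent to x.transpose(-2, -1), and the value grad_fn saves (_saved_dim0=18446744073709551614) is just the unsigned 64-bit encoding of -2. A minimal sketch verifying both, assuming only stock PyTorch:

import torch

x = torch.randn(3, 4, 5)

# .mT always swaps the last two dimensions of a tensor with rank >= 2,
# so static dim0=-2, dim1=-1 reproduce the TransposeBackward0 arguments.
assert torch.equal(x.mT, x.transpose(-2, -1))

# 2**64 - 18446744073709551614 == 2, so the old decoding -(2**64 - _saved_dim0)
# and the new constant -2 agree for the .mT case.
assert -(2**64 - 18446744073709551614) == -2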