Skip to content

Commit

Permalink
Comments
Browse files Browse the repository at this point in the history
  • Loading branch information
daniil-lyakhov committed Jan 16, 2025
1 parent 93e808b commit 7991166
Show file tree
Hide file tree
Showing 7 changed files with 14 additions and 14 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
from nncf.common.utils.backend import BackendType
from nncf.experimental.quantization.algorithms.post_training.pipeline import experimental_create_ptq_pipeline
from nncf.experimental.quantization.quantizer.quantizer import Quantizer as NNCFQuantizer
from nncf.experimental.quantization.quantizers.quantizer import Quantizer
from nncf.quantization.advanced_parameters import AdvancedBiasCorrectionParameters
from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters
from nncf.quantization.advanced_parameters import RangeEstimatorParameters
Expand All @@ -37,7 +37,7 @@ class ExperimentalPostTrainingQuantization(Algorithm):

def __init__(
self,
quantizer: NNCFQuantizer,
quantizer: Quantizer,
subset_size: int = 300,
fast_bias_correction: Optional[bool] = True,
smooth_quant: bool = False,
Expand All @@ -48,7 +48,7 @@ def __init__(
batchwise_statistics: bool = False,
):
"""
:param quantizer: NNCFQuantizer to use in MiMaxRangeInit algorithm.
:param quantizer: Quantizer to use in MinMaxRangeInit algorithm.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:param fast_bias_correction: Setting this option to `False` enables a different
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from typing import Optional, TypeVar

from nncf.experimental.quantization.algorithms.range_estimator.algorithm import MinMaxRangeEstimator
from nncf.experimental.quantization.quantizer.quantizer import Quantizer as NNCFQuantizer
from nncf.experimental.quantization.quantizers.quantizer import Quantizer
from nncf.quantization.advanced_parameters import AdvancedBiasCorrectionParameters
from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters
from nncf.quantization.advanced_parameters import RangeEstimatorParameters
Expand All @@ -27,7 +27,7 @@


def experimental_create_ptq_pipeline(
quantizer: NNCFQuantizer,
quantizer: Quantizer,
subset_size: int = 300,
fast_bias_correction: Optional[bool] = True,
smooth_quant: bool = False,
Expand All @@ -45,7 +45,7 @@ def experimental_create_ptq_pipeline(
2) MinMaxRangeInit
3) FastBiasCorrection or BiasCorrection
:param quantizer: NNCFQuantizer to use in MiMaxRangeInit algorithm.
:param quantizer: Quantizer to use in MinMaxRangeInit algorithm.
:param subset_size: Size of a subset to calculate activations
statistics used for quantization.
:param fast_bias_correction: Setting this option to `False` enables a different
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@
from nncf.common.graph.graph import NNCFGraph
from nncf.common.tensor_statistics.statistic_point import StatisticPointsContainer
from nncf.common.utils.backend import BackendType
from nncf.experimental.quantization.quantizer.quantizer import Quantizer as NNCFQuantizer
from nncf.experimental.quantization.quantizers.quantizer import Quantizer
from nncf.quantization.algorithms.algorithm import Algorithm
from nncf.quantization.algorithms.min_max.algorithm import MinMaxQuantization
from nncf.quantization.range_estimator import RangeEstimatorParameters
Expand All @@ -26,15 +26,15 @@
class MinMaxRangeEstimator(Algorithm):
def __init__(
self,
quantizer: NNCFQuantizer,
quantizer: Quantizer,
subset_size: int = 300,
inplace_statistics: bool = True,
batchwise_statistics: bool = False,
activations_range_estimator_params: Optional[RangeEstimatorParameters] = None,
weights_range_estimator_params: Optional[RangeEstimatorParameters] = None,
):
"""
:param quantizer: Instance of NNCFQuantizer to retrieve a quantization config
:param quantizer: Instance of Quantizer to retrieve a quantization config
for the given model.
:param subset_size: Size of a subset to calculate activations statistics used
for quantization, defaults to 300.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@

import torch
import torch.fx
from torch.ao.quantization.quantizer import Quantizer
from torch.ao.quantization.quantizer import Quantizer as TorchAOQuantizer
from torch.ao.quantization.quantizer.quantizer import QuantizationSpec
from torch.ao.quantization.quantizer.quantizer import QuantizationSpecBase
from torch.ao.quantization.quantizer.quantizer import SharedQuantizationSpec
Expand All @@ -31,18 +31,18 @@
from nncf.common.quantization.quantizer_setup import WeightQuantizationInsertionPoint
from nncf.common.quantization.structs import QuantizationScheme as QuantizationMode
from nncf.common.quantization.structs import QuantizerConfig
from nncf.experimental.quantization.quantizer.quantizer import Quantizer as NNCFQuantizer
from nncf.experimental.quantization.quantizers.quantizer import Quantizer
from nncf.experimental.torch.fx.nncf_graph_builder import GraphConverter

EdgeOrNode = Union[Tuple[torch.fx.Node, torch.fx.Node]]


class TorchAOQuantizerAdapter(NNCFQuantizer):
class TorchAOQuantizerAdapter(Quantizer):
"""
Implementation of the NNCF Quantizer interface for any given torch.ao quantizer.
"""

def __init__(self, quantizer: Quantizer):
def __init__(self, quantizer: TorchAOQuantizer):
self._quantizer = quantizer

def get_quantization_setup(self, model: torch.fx.GraphModule, nncf_graph: NNCFGraph) -> SingleConfigQuantizerSetup:
Expand Down
2 changes: 1 addition & 1 deletion nncf/experimental/torch/fx/quantization/quantize_pt2e.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
from nncf.common.logging import nncf_logger
from nncf.data import Dataset
from nncf.experimental.quantization.algorithms.post_training.algorithm import ExperimentalPostTrainingQuantization
from nncf.experimental.quantization.quantizer.torch_ao_adapter import TorchAOQuantizerAdapter
from nncf.experimental.quantization.quantizers.torch_ao_adapter import TorchAOQuantizerAdapter
from nncf.experimental.torch.fx.constant_folding import constant_fold
from nncf.experimental.torch.fx.transformations import QUANTIZE_NODE_TARGETS
from nncf.quantization.advanced_parameters import AdvancedBiasCorrectionParameters
Expand Down

0 comments on commit 7991166

Please sign in to comment.