From 5afd341e18d2dc06b2cd781c7379e542edff5e44 Mon Sep 17 00:00:00 2001
From: siddhant-0707
Date: Mon, 26 Feb 2024 20:58:33 +0530
Subject: [PATCH 1/3] Add type hints to accuracy_aware_training/runner.py

---
 nncf/common/accuracy_aware_training/runner.py | 74 ++++++++++---------
 .../accuracy_aware_training/statistics.py     |  2 +-
 2 files changed, 42 insertions(+), 34 deletions(-)

diff --git a/nncf/common/accuracy_aware_training/runner.py b/nncf/common/accuracy_aware_training/runner.py
index 69ebf254ae2..91d74158324 100644
--- a/nncf/common/accuracy_aware_training/runner.py
+++ b/nncf/common/accuracy_aware_training/runner.py
@@ -15,7 +15,7 @@
 import pathlib
 from abc import ABC
 from abc import abstractmethod
-from typing import Callable, Dict, List, Optional, Tuple, TypeVar, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar, Union

 from nncf.api.compression import CompressionAlgorithmController
 from nncf.api.compression import CompressionStage
@@ -127,8 +127,8 @@ def initialize_training_loop_fns(
         validate_fn: Callable[[TModel, Optional[int]], float],
         configure_optimizers_fn: Callable[[], Tuple[OptimizerType, LRSchedulerType]],
         dump_checkpoint_fn: Callable[[TModel, CompressionAlgorithmController, "TrainingRunner", str], None],
-        **kwargs,
-    ):
+        **kwargs: Any,
+    ) -> None:
         """
         Register the user-supplied functions to be used to control the training process.
@@ -146,7 +146,7 @@ def initialize_logging(
         self,
         log_dir: Optional[Union[str, pathlib.Path]] = None,
         tensorboard_writer: Optional[TensorboardWriterType] = None,
-    ):
+    ) -> None:
         """
         Initialize logging related variables
@@ -164,7 +164,7 @@ def load_best_checkpoint(self, model: TModel) -> float:
         """

     @abstractmethod
-    def is_model_fully_compressed(self, compression_controller) -> bool:
+    def is_model_fully_compressed(self, compression_controller: CompressionAlgorithmController) -> bool:
         """
         Check if model is fully compressed
@@ -213,8 +213,8 @@ def __init__(
         self.current_val_metric_value = 0
         self.current_loss = 0

-        self._compressed_training_history = []
-        self._best_checkpoint = None
+        self._compressed_training_history: List[Tuple[float, float]] = []
+        self._best_checkpoint: Optional[Tuple[str, float]] = None

         self._train_epoch_fn = None
         self._validate_fn = None
@@ -224,11 +224,15 @@
         self._early_stopping_fn = None
         self._update_learning_rate_fn = None

-        self._log_dir = None
-        self._checkpoint_save_dir = None
-        self._tensorboard_writer = None
+        self._log_dir: Optional[Union[str, pathlib.Path]] = None
+        self._checkpoint_save_dir: Optional[Union[str, pathlib.Path]] = None
+        self._tensorboard_writer: Optional[TensorboardWriterType] = None

-    def train_epoch(self, model, compression_controller):
+    def train_epoch(
+        self,
+        model: TModel,
+        compression_controller: CompressionAlgorithmController,
+    ) -> None:
         compression_controller.scheduler.epoch_step()
         # assuming that epoch number is only used for logging in train_fn:
         self.current_loss = self._train_epoch_fn(
@@ -241,7 +245,7 @@ def train_epoch(self, model, compression_controller):
         self.training_epoch_count += 1
         self.cumulative_epoch_count += 1

-    def dump_statistics(self, model, compression_controller):
+    def dump_statistics(self, model: TModel, compression_controller: CompressionAlgorithmController) -> None:
         statistics = compression_controller.statistics()

         if self.verbose:
@@ -259,7 +263,7 @@

         self.dump_checkpoint(model, compression_controller)

-    def calculate_minimal_tolerable_accuracy(self, uncompressed_model_accuracy: float):
+    def calculate_minimal_tolerable_accuracy(self, uncompressed_model_accuracy: float) -> None:
         if self.maximal_absolute_accuracy_drop is not None:
             self.minimal_tolerable_accuracy = uncompressed_model_accuracy - self.maximal_absolute_accuracy_drop
         else:
@@ -267,7 +271,11 @@ def calculate_minimal_tolerable_accuracy(self, uncompressed_model_accuracy: floa
                 1 - 0.01 * self.maximal_relative_accuracy_drop
             )

-    def dump_checkpoint(self, model, compression_controller):
+    def dump_checkpoint(
+        self,
+        model: TModel,
+        compression_controller: CompressionAlgorithmController,
+    ) -> None:
         is_best_checkpoint = (
             self.best_val_metric_value == self.current_val_metric_value
             and self.is_model_fully_compressed(compression_controller)
@@ -285,7 +293,7 @@
         if is_best_checkpoint:
             self._save_best_checkpoint(model, compression_controller)

-    def configure_optimizers(self):
+    def configure_optimizers(self) -> None:
         self.optimizer, self.lr_scheduler = self._configure_optimizers_fn()

     def initialize_training_loop_fns(
@@ -297,7 +305,7 @@
         load_checkpoint_fn=None,
         early_stopping_fn=None,
         update_learning_rate_fn=None,
-    ):
+    ) -> None:
         self._train_epoch_fn = train_epoch_fn
         self._validate_fn = validate_fn
         self._configure_optimizers_fn = configure_optimizers_fn
@@ -306,34 +314,34 @@
         self._early_stopping_fn = early_stopping_fn
         self._update_learning_rate_fn = update_learning_rate_fn

-    def initialize_logging(self, log_dir=None, tensorboard_writer=None):
-        self._log_dir = log_dir if log_dir is not None else osp.join(os.getcwd(), "runs")
+    def initialize_logging(
+        self,
+        log_dir: Optional[Union[str, pathlib.Path]] = None,
+        tensorboard_writer: Optional[TensorboardWriterType] = None,
+    ) -> None:
+        self._log_dir = str(log_dir) if log_dir is not None else osp.join(os.getcwd(), "runs")
         self._log_dir = configure_accuracy_aware_paths(self._log_dir)
         self._checkpoint_save_dir = self._log_dir
         self._tensorboard_writer = tensorboard_writer

-    def stop_training(self, compression_controller):
+    def stop_training(self, compression_controller: CompressionAlgorithmController) -> bool:
         if self.is_model_fully_compressed(compression_controller) and self._early_stopping_fn is not None:
             return self._early_stopping_fn(self.current_val_metric_value)
         return False

-    def _save_best_checkpoint(self, model, compression_controller):
+    def _save_best_checkpoint(self, model: TModel, compression_controller: CompressionAlgorithmController) -> None:
         best_path = self._make_checkpoint_path(is_best=True)
         self._best_checkpoint = (best_path, compression_controller.compression_rate)
         self._save_checkpoint(model, compression_controller, best_path)
         nncf_logger.info(f"Saved the best model to {best_path}")

-    def load_best_checkpoint(self, model):
+    def load_best_checkpoint(self, model: TModel) -> float:
         resuming_checkpoint_path, compression_rate = self._best_checkpoint
         nncf_logger.info(f"Loading the best checkpoint found during training: {resuming_checkpoint_path}")
         self._load_checkpoint(model, resuming_checkpoint_path)
         return compression_rate

-    def is_model_fully_compressed(self, compression_controller) -> bool:
+    def is_model_fully_compressed(self, compression_controller: CompressionAlgorithmController) -> bool:
         return compression_controller.compression_stage() == CompressionStage.FULLY_COMPRESSED

     @abstractmethod
-    def add_tensorboard_scalar(self, key, data, step):
+    def add_tensorboard_scalar(self, key: str, data: float, step: int) -> None:
         """
         Add a scalar to tensorboard
@@ -343,7 +351,7 @@ def add_tensorboard_scalar(self, key, data, step):
         """

     @abstractmethod
-    def add_tensorboard_image(self, key, data, step):
+    def add_tensorboard_image(self, key: str, data: Any, step: int) -> None:
         """
         Add an image to tensorboard
@@ -375,7 +383,7 @@ def _load_checkpoint(self, model: TModel, checkpoint_path: str) -> None:
         """

     @abstractmethod
-    def _make_checkpoint_path(self, is_best, compression_rate=None):
+    def _make_checkpoint_path(self, is_best: bool, compression_rate: Optional[float] = None) -> str:
         """
         Make a path to save the checkpoint there
@@ -423,15 +431,15 @@ def __init__(
         self.maximal_compression_rate = maximal_compression_rate

         self._best_checkpoints = {}
-        self._compression_rate_target = None
+        self._compression_rate_target: Optional[float] = None
         self.adaptive_controller = None
         self.was_compression_increased_on_prev_step = None

-    def dump_statistics(self, model, compression_controller):
+    def dump_statistics(self, model: TModel, compression_controller: CompressionAlgorithmController) -> None:
         self.update_training_history(self.compression_rate_target, self.current_val_metric_value)
         super().dump_statistics(model, compression_controller)

-    def _save_best_checkpoint(self, model, compression_controller):
+    def _save_best_checkpoint(self, model: TModel, compression_controller: CompressionAlgorithmController) -> None:
         best_path = self._make_checkpoint_path(is_best=True, compression_rate=self.compression_rate_target)
         accuracy_budget = self.best_val_metric_value - self.minimal_tolerable_accuracy

@@ -445,7 +453,7 @@
         self._save_checkpoint(model, compression_controller, best_path)
         nncf_logger.info(f"Saved the best model to {best_path}")

-    def load_best_checkpoint(self, model):
+    def load_best_checkpoint(self, model: TModel) -> float:
         # load checkpoint with the highest compression rate and positive acc budget
         possible_checkpoint_rates = self.get_compression_rates_with_positive_acc_budget()
         if len(possible_checkpoint_rates) == 0:
@@ -473,16 +481,16 @@
         return best_checkpoint_compression_rate

     @property
-    def compression_rate_target(self):
+    def compression_rate_target(self) -> float:
         if self._compression_rate_target is None:
             return self.adaptive_controller.compression_rate
         return self._compression_rate_target

     @compression_rate_target.setter
-    def compression_rate_target(self, value):
+    def compression_rate_target(self, value: float) -> None:
         self._compression_rate_target = value

-    def update_training_history(self, compression_rate, metric_value):
+    def update_training_history(self, compression_rate: float, metric_value: float) -> None:
         accuracy_budget = metric_value - self.minimal_tolerable_accuracy
         self._compressed_training_history.append((compression_rate, accuracy_budget))

@@ -500,7 +508,7 @@
         plt.close(fig)

     @property
-    def compressed_training_history(self):
+    def compressed_training_history(self) -> Dict[float, float]:
         return dict(self._compressed_training_history)

     def get_compression_rates_with_positive_acc_budget(self) -> List[float]:
diff --git a/nncf/common/accuracy_aware_training/statistics.py b/nncf/common/accuracy_aware_training/statistics.py
index 4a02ba9e352..465bc13c0ba 100644
--- a/nncf/common/accuracy_aware_training/statistics.py
+++ b/nncf/common/accuracy_aware_training/statistics.py
@@ -29,7 +29,7 @@ class TrainingLoopStatistics(Statistics):
     relative_accuracy_degradation: float
     accuracy_budget: float

-    def to_str(self):
+    def to_str(self) -> str:
         stats_str = (
             f"Uncompressed model accuracy: {self.uncompressed_accuracy:.4f}\n"
             f"Compressed model accuracy: {self.compressed_accuracy:.4f}\n"

From 146e35942ee53b7e9540ae1b3128fd915b0f37b1 Mon Sep 17 00:00:00 2001
From: siddhant-0707
Date: Mon, 26 Feb 2024 22:39:18 +0530
Subject: [PATCH 2/3] Add type hints to accuracy_aware_training/training_loop.py

---
 nncf/common/accuracy_aware_training/runner.py | 20 ++--
 .../accuracy_aware_training/training_loop.py  | 98 ++++++++++---------
 2 files changed, 60 insertions(+), 58 deletions(-)

diff --git a/nncf/common/accuracy_aware_training/runner.py b/nncf/common/accuracy_aware_training/runner.py
index 91d74158324..e4192369b94 100644
--- a/nncf/common/accuracy_aware_training/runner.py
+++ b/nncf/common/accuracy_aware_training/runner.py
@@ -194,7 +194,7 @@ def __init__(
         self.maximal_absolute_accuracy_drop = accuracy_aware_training_params.get(
             "maximal_absolute_accuracy_degradation"
         )
-        self.maximal_total_epochs = accuracy_aware_training_params.get("maximal_total_epochs", AA_MAXIMAL_TOTAL_EPOCHS)
+        self.maximal_total_epochs: int = accuracy_aware_training_params.get(
+            "maximal_total_epochs", AA_MAXIMAL_TOTAL_EPOCHS
+        )
         self.verbose = verbose
         self.dump_checkpoints = dump_checkpoints
@@ -298,13 +298,13 @@ def configure_optimizers(self) -> None:
     def initialize_training_loop_fns(
         self,
-        train_epoch_fn,
-        validate_fn,
-        configure_optimizers_fn,
-        dump_checkpoint_fn,
-        load_checkpoint_fn=None,
-        early_stopping_fn=None,
-        update_learning_rate_fn=None,
+        train_epoch_fn: Callable[[TModel, CompressionAlgorithmController], None],
+        validate_fn: Callable[[TModel, Optional[int]], float],
+        configure_optimizers_fn: Callable[[], Tuple[OptimizerType, LRSchedulerType]],
+        dump_checkpoint_fn: Callable[[TModel, CompressionAlgorithmController, "TrainingRunner", str], None],
+        load_checkpoint_fn: Optional[Callable[[TModel, str], None]] = None,
+        early_stopping_fn: Optional[Callable[[float], bool]] = None,
+        update_learning_rate_fn: Optional[Callable[[LRSchedulerType, float, float, float], None]] = None,
     ) -> None:
         self._train_epoch_fn = train_epoch_fn
         self._validate_fn = validate_fn
         self._configure_optimizers_fn = configure_optimizers_fn
@@ -432,8 +432,8 @@ def __init__(
         self._best_checkpoints = {}
         self._compression_rate_target: Optional[float] = None
-        self.adaptive_controller = None
-        self.was_compression_increased_on_prev_step = None
+        self.adaptive_controller: Optional[CompressionAlgorithmController] = None
+        self.was_compression_increased_on_prev_step: Optional[bool] = None

     def dump_statistics(self, model: TModel, compression_controller: CompressionAlgorithmController) -> None:
         self.update_training_history(self.compression_rate_target, self.current_val_metric_value)
         super().dump_statistics(model, compression_controller)
diff --git a/nncf/common/accuracy_aware_training/training_loop.py b/nncf/common/accuracy_aware_training/training_loop.py
index 70bc4a03cfb..2d3298d0a11 100644
--- a/nncf/common/accuracy_aware_training/training_loop.py
+++ b/nncf/common/accuracy_aware_training/training_loop.py
@@ -15,14 +15,14 @@
 from abc import ABC
 from abc import abstractmethod
 from functools import partial
-from typing import Callable, Optional, TypeVar, Union
+from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, Union

 import numpy as np
 from scipy.interpolate import interp1d

 import nncf
 from nncf.api.compression import CompressionAlgorithmController
 from nncf.common.accuracy_aware_training.runner import BaseAccuracyAwareTrainingRunner
+from nncf.common.accuracy_aware_training.runner import TrainingRunner
 from nncf.common.accuracy_aware_training.runner_factory import AdaptiveCompressionLevelTrainingRunnerCreator
 from nncf.common.accuracy_aware_training.runner_factory import EarlyExitTrainingRunnerCreator
 from nncf.common.accuracy_aware_training.statistics import TrainingLoopStatistics
@@ -34,6 +34,8 @@
 from nncf.config.extractors import extract_accuracy_aware_training_params

 TModel = TypeVar("TModel")
+OptimizerType = TypeVar("OptimizerType")
+LRSchedulerType = TypeVar("LRSchedulerType")
 TensorboardWriterType = TypeVar("TensorboardWriterType")
 ADAPTIVE_COMPRESSION_CONTROLLERS = Registry("adaptive_compression_controllers")
@@ -48,16 +50,16 @@ class TrainingLoop(ABC):
     def run(
         self,
         model: TModel,
-        train_epoch_fn: Callable,
-        validate_fn: Callable,
-        configure_optimizers_fn: Callable = None,
-        dump_checkpoint_fn: Callable = None,
-        load_checkpoint_fn: Callable = None,
-        early_stopping_fn: Callable = None,
+        train_epoch_fn: Callable[[TModel, CompressionAlgorithmController], None],
+        validate_fn: Callable[[TModel, Optional[int]], float],
+        configure_optimizers_fn: Optional[Callable[[], Tuple[OptimizerType, LRSchedulerType]]] = None,
+        dump_checkpoint_fn: Optional[
+            Callable[[TModel, CompressionAlgorithmController, "TrainingRunner", str], None]
+        ] = None,
+        load_checkpoint_fn: Optional[Callable[[TModel, str], None]] = None,
+        early_stopping_fn: Optional[Callable[[float], bool]] = None,
         tensorboard_writer: Optional[TensorboardWriterType] = None,
-        log_dir: Union[pathlib.Path, str] = None,
-        update_learning_rate_fn: Callable = None,
-    ):
+        log_dir: Optional[Union[pathlib.Path, str]] = None,
+        update_learning_rate_fn: Optional[Callable[[LRSchedulerType, float, float, float], None]] = None,
+    ) -> TModel:
         """
         Implements the custom logic to run a training loop for model fine-tuning
         by using the provided `train_epoch_fn`, `validate_fn` and `configure_optimizers_fn` methods.
@@ -91,23 +93,23 @@ class BaseEarlyExitCompressionTrainingLoop(TrainingLoop, ABC):
     """

     def __init__(self, compression_controller: CompressionAlgorithmController):
-        self.runner: BaseAccuracyAwareTrainingRunner = None
+        self.runner: Optional[BaseAccuracyAwareTrainingRunner] = None
         self.compression_controller = compression_controller
-        self._current_compression_rate = None
+        self._current_compression_rate: Optional[float] = None

     def run(
         self,
         model: TModel,
-        train_epoch_fn: Callable,
-        validate_fn: Callable,
-        configure_optimizers_fn: Callable = None,
-        dump_checkpoint_fn: Callable = None,
-        load_checkpoint_fn: Callable = None,
-        early_stopping_fn: Callable = None,
+        train_epoch_fn: Callable[[TModel, CompressionAlgorithmController], None],
+        validate_fn: Callable[[TModel, Optional[int]], float],
+        configure_optimizers_fn: Optional[Callable[[], Tuple[OptimizerType, LRSchedulerType]]] = None,
+        dump_checkpoint_fn: Optional[
+            Callable[[TModel, CompressionAlgorithmController, "TrainingRunner", str], None]
+        ] = None,
+        load_checkpoint_fn: Optional[Callable[[TModel, str], None]] = None,
+        early_stopping_fn: Optional[Callable[[float], bool]] = None,
         tensorboard_writer: Optional[TensorboardWriterType] = None,
-        log_dir: Union[pathlib.Path, str] = None,
-        update_learning_rate_fn: Callable = None,
-    ):
+        log_dir: Optional[Union[pathlib.Path, str]] = None,
+        update_learning_rate_fn: Optional[Callable[[LRSchedulerType, float, float, float], None]] = None,
+    ) -> TModel:
         self.runner.initialize_training_loop_fns(
             train_epoch_fn,
             validate_fn,
@@ -120,7 +122,7 @@
         self.runner.initialize_logging(log_dir, tensorboard_writer)
         return self._run_early_exit_training_loop(model)

-    def _run_early_exit_training_loop(self, model):
+    def _run_early_exit_training_loop(self, model: TModel) -> TModel:
         uncompressed_model_accuracy = self.runner.uncompressed_model_accuracy
         self.runner.calculate_minimal_tolerable_accuracy(uncompressed_model_accuracy)
@@ -159,7 +161,7 @@
         self._current_compression_rate = self.runner.load_best_checkpoint(model)
         return model

-    def log_accuracy_statistics(self):
+    def log_accuracy_statistics(self) -> None:
         for log_str in self.statistics.to_str().split("\n"):
             nncf_logger.info(log_str)
@@ -182,15 +184,15 @@ def statistics(self) -> TrainingLoopStatistics:
         return stats

     @staticmethod
-    def _calculate_accuracy_drop(uncompressed_model_accuracy, compressed_model_accuracy):
+    def _calculate_accuracy_drop(uncompressed_model_accuracy: float, compressed_model_accuracy: float) -> float:
         return uncompressed_model_accuracy - compressed_model_accuracy

     @staticmethod
-    def _calculate_accuracy_budget(minimal_tolerable_accuracy, compressed_model_accuracy):
+    def _calculate_accuracy_budget(minimal_tolerable_accuracy: float, compressed_model_accuracy: float) -> float:
         return compressed_model_accuracy - minimal_tolerable_accuracy

     @staticmethod
-    def _calculate_rel_accuracy_drop(uncompressed_model_accuracy, compressed_model_accuracy):
+    def _calculate_rel_accuracy_drop(uncompressed_model_accuracy: float, compressed_model_accuracy: float) -> float:
         try:
             rel_accuracy_drop = 100 * (1.0 - compressed_model_accuracy / uncompressed_model_accuracy)
         except ZeroDivisionError:
@@ -198,7 +200,7 @@ def _calculate_rel_accuracy_drop(uncompressed_model_accuracy, compressed_model_a

         return rel_accuracy_drop

-    def _accuracy_criterion_satisfied(self):
+    def _accuracy_criterion_satisfied(self) -> bool:
         accuracy_budget = self._calculate_accuracy_budget(
             self.runner.minimal_tolerable_accuracy, self.runner.current_val_metric_value
         )
@@ -300,9 +302,9 @@ def __init__(
         self.runner = runner_factory.create_training_loop()
         self.runner.adaptive_controller = self.adaptive_controller

-    def _get_adaptive_compression_ctrl(self, compression_controller):
-        def _adaptive_compression_controllers():
-            def remove_registry_prefix(algo_name):
+    def _get_adaptive_compression_ctrl(
+        self, compression_controller: CompressionAlgorithmController
+    ) -> CompressionAlgorithmController:
+        def _adaptive_compression_controllers() -> Dict[str, CompressionAlgorithmController]:
+            def remove_registry_prefix(algo_name: str) -> str:
                 for prefix in ("pt_", "tf_"):
                     if algo_name.startswith(prefix):
                         return algo_name[len(prefix) :]
@@ -337,16 +339,16 @@ def remove_registry_prefix(algo_name):
     def run(
         self,
         model: TModel,
-        train_epoch_fn: Callable,
-        validate_fn: Callable,
-        configure_optimizers_fn: Callable = None,
-        dump_checkpoint_fn: Callable = None,
-        load_checkpoint_fn: Callable = None,
-        early_stopping_fn: Callable = None,
+        train_epoch_fn: Callable[[TModel, CompressionAlgorithmController], None],
+        validate_fn: Callable[[TModel, Optional[int]], float],
+        configure_optimizers_fn: Optional[Callable[[], Tuple[OptimizerType, LRSchedulerType]]] = None,
+        dump_checkpoint_fn: Optional[
+            Callable[[TModel, CompressionAlgorithmController, "TrainingRunner", str], None]
+        ] = None,
+        load_checkpoint_fn: Optional[Callable[[TModel, str], None]] = None,
+        early_stopping_fn: Optional[Callable[[float], bool]] = None,
         tensorboard_writer: Optional[TensorboardWriterType] = None,
-        log_dir: Union[pathlib.Path, str] = None,
-        update_learning_rate_fn: Callable = None,
-    ):
+        log_dir: Optional[Union[pathlib.Path, str]] = None,
+        update_learning_rate_fn: Optional[Callable[[LRSchedulerType, float, float, float], None]] = None,
+    ) -> TModel:
         self.runner.initialize_training_loop_fns(
             train_epoch_fn,
             validate_fn,
@@ -463,7 +465,7 @@ def run(
         )
         return model

-    def _run_initial_training_phase(self, model):
+    def _run_initial_training_phase(self, model: TModel) -> TModel:
         nncf_logger.info("Initial training phase started...")

         maximal_total_epochs = self.runner.maximal_total_epochs
@@ -474,7 +476,7 @@
         nncf_logger.info("Initial training phase finished.")
         return model

-    def _update_target_compression_rate(self, runner, force_update=False):
+    def _update_target_compression_rate(
+        self, runner: BaseAccuracyAwareTrainingRunner, force_update: bool = False
+    ) -> bool:
         best_accuracy_budget = runner.best_val_metric_value - runner.minimal_tolerable_accuracy
         nncf_logger.info(
             f"Training epoch count: {runner.training_epoch_count}, patience epochs: {runner.patience_epochs}"
@@ -485,7 +487,7 @@
             return True
         return False

-    def _determine_compression_rate_step_value(self, runner, stepping_mode="uniform_decrease", **kwargs):
+    def _determine_compression_rate_step_value(
+        self, runner: BaseAccuracyAwareTrainingRunner, stepping_mode: str = "uniform_decrease", **kwargs: Any
+    ) -> float:
         if stepping_mode == "uniform_decrease":
             compression_step_updater = self._uniform_decrease_compression_step_update
         elif stepping_mode == "interpolate":
@@ -497,7 +499,7 @@
         return compression_step_updater(runner, **kwargs)

     @staticmethod
-    def _uniform_decrease_compression_step_update(runner):
+    def _uniform_decrease_compression_step_update(runner: BaseAccuracyAwareTrainingRunner) -> float:
         best_accuracy_budget = runner.best_val_metric_value - runner.minimal_tolerable_accuracy
         best_accuracy_budget_sign = 1.0 if best_accuracy_budget >= 0.0 else -1.0
         # if we don't fit the accuracy budget now and before we did fit or vice versa, we reduce the compression rate
@@ -515,12 +517,12 @@
     @staticmethod
     def _interpolate_compression_step_update(
-        runner,
-        current_compression_rate,
-        num_curve_pts=1000,
-        full_compression_factor=20,
-        minimal_compression_rate=0.0,
-        maximal_compression_rate=1.0,
-    ):
+        runner: BaseAccuracyAwareTrainingRunner,
+        current_compression_rate: float,
+        num_curve_pts: int = 1000,
+        full_compression_factor: int = 20,
+        minimal_compression_rate: float = 0.0,
+        maximal_compression_rate: float = 1.0,
+    ) -> float:
         training_history = runner.compressed_training_history
         nncf_logger.info(f"Compressed training history: {training_history}")
         training_history[minimal_compression_rate] = runner.maximal_accuracy_drop
@@ -553,7 +555,7 @@ def create_accuracy_aware_training_loop(
     nncf_config: NNCFConfig,
     compression_ctrl: CompressionAlgorithmController,
     uncompressed_model_accuracy: float,
-    **additional_runner_args,
+    **additional_runner_args: Any,
 ) -> BaseEarlyExitCompressionTrainingLoop:
     """
     Creates an accuracy aware training loop corresponding to NNCFConfig and CompressionAlgorithmController.

From e497a96b6bd6e7b52c9af25e4e2d72eca6103ff1 Mon Sep 17 00:00:00 2001
From: siddhant-0707
Date: Tue, 27 Feb 2024 19:11:56 +0530
Subject: [PATCH 3/3] Update .mypy.ini

---
 .mypy.ini | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.mypy.ini b/.mypy.ini
index 42b6329f72c..aca6e9afafc 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -1,5 +1,5 @@
 [mypy]
-files = nncf/common/sparsity
+files = nncf/common/sparsity, nncf/common/accuracy_aware_training
 follow_imports = silent
 strict = True
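
For reference, here is a minimal stand-alone sketch, not part of the patch series, of user callbacks whose shapes satisfy the Callable types that initialize_training_loop_fns now declares. DummyModel, the constant metric value, and the object() placeholders are hypothetical stand-ins for a real model, validation routine, and framework optimizer/scheduler pair.

from typing import Any, Callable, Optional, Tuple


class DummyModel:  # hypothetical stand-in for TModel
    pass


def train_epoch_fn(model: DummyModel, compression_controller: Any) -> None:
    pass  # one fine-tuning epoch of the compressed model would run here


def validate_fn(model: DummyModel, epoch: Optional[int] = None) -> float:
    return 0.75  # placeholder validation metric


def configure_optimizers_fn() -> Tuple[Any, Any]:
    return object(), object()  # real code returns (optimizer, lr_scheduler)


# Assigning the callbacks to the declared callback types lets mypy verify
# that their shapes match the annotations introduced by the patches:
_train: Callable[[DummyModel, Any], None] = train_epoch_fn
_validate: Callable[[DummyModel, Optional[int]], float] = validate_fn
_configure: Callable[[], Tuple[Any, Any]] = configure_optimizers_fn

With the PATCH 3/3 change to the files option, running mypy from the repository root (for example, mypy --config-file .mypy.ini) should then check both the sparsity module and the newly annotated accuracy_aware_training module under strict mode.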