Merge branch 'develop' into align-compression-subgraphs
nikita-savelyevv committed Apr 18, 2024
2 parents 7855ff6 + 573b0c3 commit 4ce510a
Showing 66 changed files with 598 additions and 300 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/nightly.yml
@@ -10,4 +10,4 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- - uses: AlexanderDokuchaev/md-dead-link-check@v0.6
+ - uses: AlexanderDokuchaev/md-dead-link-check@v0.8
2 changes: 1 addition & 1 deletion .github/workflows/pre-commit-linters.yml
@@ -23,4 +23,4 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- - uses: AlexanderDokuchaev/md-dead-link-check@v0.6
+ - uses: AlexanderDokuchaev/md-dead-link-check@v0.8
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -17,7 +17,7 @@ repos:

- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
- rev: v0.1.3
+ rev: v0.3.7
hooks:
- id: ruff

6 changes: 5 additions & 1 deletion README.md
@@ -374,6 +374,10 @@ pip install nncf[torch]

Other viable options besides `[torch]` are `[tf]`, `[onnx]` and `[openvino]`.

+ > [!WARNING]
+ > The way to install the module package with the extra dependency like `pip install nncf[torch]` is deprecated and will be removed in a future release.
+ > Instead, it is recommended to install additional dependencies separately using the pip install command (e.g., `pip install torch`) or by explicitly specifying the dependency in your requirements file.
+
NNCF is also available via [conda](https://anaconda.org/conda-forge/nncf):

```bash
@@ -383,7 +387,7 @@
conda install -c conda-forge nncf
### System requirements

- Ubuntu\* 18.04 or later (64-bit)
- - Python\* 3.7 or later
+ - Python\* 3.8 or later
- Supported frameworks:
- PyTorch\* >=2.1, <2.3
- TensorFlow\* >=2.8.4, <=2.12.1
24 changes: 1 addition & 23 deletions docs/Installation.md
@@ -12,14 +12,6 @@ NNCF can be installed as a regular PyPI package via pip:
pip install nncf
```

- If you want to install both NNCF and the supported PyTorch version in one line, you can do this by simply running:
-
- ```bash
- pip install nncf[torch]
- ```
-
- Other viable options besides `[torch]` are `[tf]`, `[onnx]` and `[openvino]`.

## As a package built from a checked-out repository

Install the package and its dependencies by running the following command in the repository root directory:
@@ -28,20 +20,6 @@ Install the package and its dependencies by running the following command in the
pip install .
```

- Use the same `pip install` syntax as above to install NNCF along with the backend package version in one go:
-
- ```bash
- pip install .[<BACKEND>]
- ```
-
- List of supported backends: `torch`, `tf`, `onnx` and `openvino`.
-
- For development purposes install extra packages by
-
- ```bash
- pip install .[dev,tests]
- ```

_NB_: For launching example scripts in this repository, we recommend setting the `PYTHONPATH` variable to the root of the checked-out repository once the installation is completed.

NNCF is also available via [conda](https://anaconda.org/conda-forge/nncf):
@@ -65,7 +43,7 @@ as well as the supported versions of Python:

| NNCF | OpenVINO | PyTorch | ONNX | TensorFlow | Python |
|-----------|------------|----------|----------|------------|--------|
- | `develop` | `2024.4.0` | `2.2.1` | `1.13.1` | `2.12.0` | `3.8` |
+ | `develop` | `2024.4.0` | `2.2.1` | `1.16.0` | `2.12.0` | `3.8` |
| `2.9.0` | `2024.4.0` | `2.1.2` | `1.13.1` | `2.12.0` | `3.8` |
| `2.8.1` | `2023.3.0` | `2.1.2` | `1.13.1` | `2.12.0` | `3.8` |
| `2.8.0` | `2023.3.0` | `2.1.2` | `1.13.1` | `2.12.0` | `3.8` |
@@ -195,20 +195,17 @@ def validation_ac(
preset=nncf.QuantizationPreset.MIXED,
ignored_scope=nncf.IgnoredScope(
types=["Mul", "Sub", "Sigmoid"], # ignore operations
- names=[
- "/model.22/dfl/conv/Conv", # in the post-processing subgraph
- "/model.22/Add",
- "/model.22/Add_1",
- "/model.22/Add_2",
- "/model.22/Add_3",
- "/model.22/Add_4",
- "/model.22/Add_5",
- "/model.22/Add_6",
- "/model.22/Add_7",
- "/model.22/Add_8",
- "/model.22/Add_9",
- "/model.22/Add_10",
- "/model.22/Add_11",
+ subgraphs=[
+ nncf.Subgraph(
+ inputs=[
+ "/model.22/Concat_3",
+ "/model.22/Concat_6",
+ "/model.22/Concat_24",
+ "/model.22/Concat_5",
+ "/model.22/Concat_4",
+ ],
+ outputs=["/model.22/Concat_29"],
+ )
],
),
)
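For reference, a minimal sketch of the ignored-scope pattern these YOLOv8 examples are being migrated to: instead of enumerating every node of the post-processing subgraph by name, the subgraph is excluded by its boundary nodes with `nncf.Subgraph`. Node names are copied from the hunks in this commit; `ov_model` and `calibration_dataset` are assumed placeholders.

```python
import nncf

# Sketch, assuming `ov_model` and `calibration_dataset` are already prepared.
ignored_scope = nncf.IgnoredScope(
    types=["Multiply", "Subtract", "Sigmoid"],  # ignore all operations of these types
    subgraphs=[
        # Exclude every node lying between these boundary nodes.
        nncf.Subgraph(
            inputs=["/model.22/Concat", "/model.22/Concat_1", "/model.22/Concat_2"],
            outputs=["output0/sink_port_0"],
        )
    ],
)
quantized_model = nncf.quantize(
    ov_model,
    calibration_dataset,
    preset=nncf.QuantizationPreset.MIXED,
    ignored_scope=ignored_scope,
)
```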
20 changes: 6 additions & 14 deletions examples/post_training_quantization/openvino/yolov8/main.py
@@ -122,20 +122,12 @@ def transform_fn(data_item: Dict):
quantization_dataset,
preset=nncf.QuantizationPreset.MIXED,
ignored_scope=nncf.IgnoredScope(
types=["Multiply", "Subtract", "Sigmoid"], # ignore operations
names=[
"/model.22/dfl/conv/Conv", # in the post-processing subgraph
"/model.22/Add",
"/model.22/Add_1",
"/model.22/Add_2",
"/model.22/Add_3",
"/model.22/Add_4",
"/model.22/Add_5",
"/model.22/Add_6",
"/model.22/Add_7",
"/model.22/Add_8",
"/model.22/Add_9",
"/model.22/Add_10",
types=["Multiply", "Subtract", "Sigmoid"],
subgraphs=[
nncf.Subgraph(
inputs=["/model.22/Concat", "/model.22/Concat_1", "/model.22/Concat_2"],
outputs=["output0/sink_port_0"],
)
],
),
)
@@ -186,20 +186,17 @@ def validation_ac(
preset=nncf.QuantizationPreset.MIXED,
ignored_scope=nncf.IgnoredScope(
types=["Multiply", "Subtract", "Sigmoid"], # ignore operations
- names=[
- "/model.22/dfl/conv/Conv", # in the post-processing subgraph
- "/model.22/Add",
- "/model.22/Add_1",
- "/model.22/Add_2",
- "/model.22/Add_3",
- "/model.22/Add_4",
- "/model.22/Add_5",
- "/model.22/Add_6",
- "/model.22/Add_7",
- "/model.22/Add_8",
- "/model.22/Add_9",
- "/model.22/Add_10",
- "/model.22/Add_11",
+ subgraphs=[
+ nncf.Subgraph(
+ inputs=[
+ "/model.22/Concat_3",
+ "/model.22/Concat_6",
+ "/model.22/Concat_24",
+ "/model.22/Concat_5",
+ "/model.22/Concat_4",
+ ],
+ outputs=["output0"],
+ )
],
),
)
@@ -70,9 +70,9 @@ def run_benchmark(model_path: str, shape=None, verbose: bool = True) -> float:

class COCO128Dataset(torch.utils.data.Dataset):
category_mapping = [
- 1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,
- 34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,
- 61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+ 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90
] # fmt: skip

def __init__(self, data_path: str, transform: Callable):
@@ -305,7 +305,7 @@ def _mobilenet_v3_model(
):
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if pretrained:
- if model_urls.get(arch, None) is None:
+ if model_urls.get(arch) is None:
raise ValueError("No checkpoint is available for model type {}".format(arch))
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
2 changes: 2 additions & 0 deletions nncf/__init__.py
@@ -45,7 +45,9 @@
from nncf.quantization.advanced_parameters import (
AdvancedAccuracyRestorerParameters as AdvancedAccuracyRestorerParameters,
)
+ from nncf.quantization.advanced_parameters import AdvancedBiasCorrectionParameters as AdvancedBiasCorrectionParameters
from nncf.quantization.advanced_parameters import AdvancedQuantizationParameters as AdvancedQuantizationParameters
+ from nncf.quantization.advanced_parameters import AdvancedSmoothQuantParameters as AdvancedSmoothQuantParameters
from nncf.quantization.advanced_parameters import OverflowFix as OverflowFix
from nncf.scopes import IgnoredScope as IgnoredScope
from nncf.scopes import Subgraph as Subgraph
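With these re-exports, both advanced-parameter classes become importable from the package root. A hedged sketch of how they might be passed together (the field names `overflow_fix` and `smooth_quant_alphas` are assumptions about the dataclass API, not shown in this diff):

```python
import nncf

# Assumed field names; see nncf.quantization.advanced_parameters for the
# actual dataclass signatures.
advanced_params = nncf.AdvancedQuantizationParameters(
    overflow_fix=nncf.OverflowFix.FIRST_LAYER,
    smooth_quant_alphas=nncf.AdvancedSmoothQuantParameters(matmul=0.5),
)
```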
21 changes: 11 additions & 10 deletions nncf/common/graph/operator_metatypes.py
@@ -79,11 +79,12 @@ def __init__(self, name: str):
super().__init__(name)
self._op_name_to_op_meta_dict: Dict[str, Type[OperatorMetatype]] = {}

- def register(self, name: Optional[str] = None) -> Callable[..., Type[OperatorMetatype]]:
+ def register(self, name: Optional[str] = None, is_subtype: bool = False) -> Callable[..., Type[OperatorMetatype]]:
"""
Decorator for registering operator metatypes.
:param name: The registration name.
+ :param is_subtype: Whether the decorated metatype is a subtype of another registered operator.
:return: The inner function for registering operator metatypes.
"""
name_ = name
@@ -100,15 +101,15 @@ def wrap(obj: Type[OperatorMetatype]) -> Type[OperatorMetatype]:
if cls_name is None:
cls_name = obj.__name__
super_register(obj, cls_name)
- op_names = obj.get_all_aliases()
- for name in op_names:
- if name in self._op_name_to_op_meta_dict and not obj.subtype_check(self._op_name_to_op_meta_dict[name]):
- raise nncf.InternalError(
- "Inconsistent operator metatype registry - single patched "
- "op name maps to multiple metatypes!"
- )
-
- self._op_name_to_op_meta_dict[name] = obj
+ if not is_subtype:
+ op_names = obj.get_all_aliases()
+ for name in op_names:
+ if name in self._op_name_to_op_meta_dict:
+ raise nncf.InternalError(
+ "Inconsistent operator metatype registry - single patched "
+ f"op name `{name}` maps to multiple metatypes!"
+ )
+ self._op_name_to_op_meta_dict[name] = obj
return obj

return wrap
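A short sketch of what the new `is_subtype` flag enables: a subtype may legitimately share an alias with its parent metatype, so it is registered without claiming the alias in the name-to-metatype mapping. The metatype names below are hypothetical.

```python
from typing import List

from nncf.common.graph.operator_metatypes import OperatorMetatype
from nncf.common.graph.operator_metatypes import OperatorMetatypeRegistry

EXAMPLE_METATYPES = OperatorMetatypeRegistry("example_metatypes")


@EXAMPLE_METATYPES.register("ExampleConvMetatype")
class ExampleConvMetatype(OperatorMetatype):
    name = "ExampleConvMetatype"

    @classmethod
    def get_all_aliases(cls) -> List[str]:
        return ["Conv"]


# Shares the "Conv" alias with its parent; registered with is_subtype=True so
# the registry does not treat the duplicate alias as an inconsistency.
@EXAMPLE_METATYPES.register("ExampleDepthwiseConvMetatype", is_subtype=True)
class ExampleDepthwiseConvMetatype(ExampleConvMetatype):
    name = "ExampleDepthwiseConvMetatype"
```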
2 changes: 1 addition & 1 deletion nncf/common/sparsity/schedulers.py
@@ -133,7 +133,7 @@ def __init__(self, controller: SparsityController, params: Dict[str, Any]):
self._update_per_optimizer_step = params.get(
"update_per_optimizer_step", SPARSITY_SCHEDULER_UPDATE_PER_OPTIMIZER_STEP
)
- self._steps_per_epoch = params.get("steps_per_epoch", None)
+ self._steps_per_epoch = params.get("steps_per_epoch")
self._should_skip = False

def step(self, next_step: Optional[int] = None) -> None:
24 changes: 24 additions & 0 deletions nncf/config/schemata/algo/filter_pruning.py
@@ -27,7 +27,11 @@
from nncf.config.schemata.defaults import PRUNING_INTERLAYER_RANKING_TYPE
from nncf.config.schemata.defaults import PRUNING_LEGR_GENERATIONS
from nncf.config.schemata.defaults import PRUNING_LEGR_MAX_PRUNING
+ from nncf.config.schemata.defaults import PRUNING_LEGR_MUTATE_PERCENT
+ from nncf.config.schemata.defaults import PRUNING_LEGR_NUM_SAMPLES
+ from nncf.config.schemata.defaults import PRUNING_LEGR_POPULATION_SIZE
from nncf.config.schemata.defaults import PRUNING_LEGR_RANDOM_SEED
+ from nncf.config.schemata.defaults import PRUNING_LEGR_SIGMA_SCALE
from nncf.config.schemata.defaults import PRUNING_LEGR_TRAIN_STEPS
from nncf.config.schemata.defaults import PRUNING_NUM_INIT_STEPS
from nncf.config.schemata.defaults import PRUNING_SCHEDULE
@@ -162,6 +166,26 @@
description="Random seed for LeGR coefficients generation.",
default=PRUNING_LEGR_RANDOM_SEED,
),
"population_size": with_attributes(
NUMBER,
description="Size of population for the evolution algorithm.",
default=PRUNING_LEGR_POPULATION_SIZE,
),
"num_samples": with_attributes(
NUMBER,
description="Number of samples for the evolution algorithm.",
default=PRUNING_LEGR_NUM_SAMPLES,
),
"mutate_percent": with_attributes(
NUMBER,
description="Percent of mutate for the evolution algorithm.",
default=PRUNING_LEGR_MUTATE_PERCENT,
),
"scale_sigma": with_attributes(
NUMBER,
description="Scale sigma for the evolution algorithm.",
default=PRUNING_LEGR_SIGMA_SCALE,
),
},
"additionalProperties": False,
},
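A sketch of the config fragment these schema entries describe, using the defaults added in nncf/config/schemata/defaults.py below. The keys sit alongside `random_seed` in the schema shown above; exactly where this block nests inside a full filter_pruning config is left out here.

```python
# Newly documented LeGR evolution parameters with their schema defaults
# (values match nncf/config/schemata/defaults.py in this commit).
legr_evolution_params = {
    "population_size": 64,  # PRUNING_LEGR_POPULATION_SIZE
    "num_samples": 16,      # PRUNING_LEGR_NUM_SAMPLES
    "mutate_percent": 0.1,  # PRUNING_LEGR_MUTATE_PERCENT
    "scale_sigma": 1,       # PRUNING_LEGR_SIGMA_SCALE
}
```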
4 changes: 4 additions & 0 deletions nncf/config/schemata/defaults.py
@@ -59,6 +59,10 @@
PRUNING_LEGR_TRAIN_STEPS = 200
PRUNING_LEGR_MAX_PRUNING = 0.8
PRUNING_LEGR_RANDOM_SEED = 42
+ PRUNING_LEGR_POPULATION_SIZE = 64
+ PRUNING_LEGR_NUM_SAMPLES = 16
+ PRUNING_LEGR_MUTATE_PERCENT = 0.1
+ PRUNING_LEGR_SIGMA_SCALE = 1

SPARSITY_INIT = 0.0
MAGNITUDE_SPARSITY_WEIGHT_IMPORTANCE = "normed_abs"
8 changes: 6 additions & 2 deletions nncf/experimental/common/tensor_statistics/collectors.py
@@ -461,7 +461,9 @@ def __init__(self, tensor_collectors: List[TensorCollector]) -> None:
self._aggregators[key] = unique_aggregator


- ##################################################Reducers##################################################
+ ##################################################
+ # Reducers
+ ##################################################


class NoopReducer(TensorReducerBase):
@@ -578,7 +580,9 @@ def __hash__(self) -> int:
return hash((self.__class__.__name__, self.inplace, self._reduction_axes, self._channel_axis))


- ##################################################Aggregators##################################################
+ ##################################################
+ # Aggregators
+ ##################################################


class NoopAggregator(AggregatorBase):
4 changes: 1 addition & 3 deletions nncf/experimental/torch/nas/bootstrapNAS/search/search.py
@@ -710,17 +710,15 @@ def _evaluate(self, x: List[float], out: Dict[str, Any], *args, **kargs) -> NoRe

result = [sample]

- eval_idx = 0
bn_adaption_executed = False
- for evaluator_handler in self._evaluator_handlers:
+ for eval_idx, evaluator_handler in enumerate(self._evaluator_handlers):
in_cache, value = evaluator_handler.retrieve_from_cache(tuple(x_i))
if not in_cache:
if not bn_adaption_executed and self._search.bn_adaptation is not None:
self._search.bn_adaptation.run(self._model)
bn_adaption_executed = True
value = evaluator_handler.evaluate_and_add_to_cache_from_pymoo(tuple(x_i))
evaluators_arr[eval_idx].append(value)
- eval_idx += 1

result.append(evaluator_handler.name)
result.append(value)
@@ -191,12 +191,10 @@ def get_current_stage_desc(self) -> Tuple[Optional[StageDescriptor], int]:
:return: current stage descriptor and its index in the list of all descriptors
"""
partial_epochs = 0
- stage_desc_idx = 0
- for stage_desc in self.list_stage_descriptors:
+ for stage_desc_idx, stage_desc in enumerate(self.list_stage_descriptors):
partial_epochs += stage_desc.epochs
if self.current_epoch < partial_epochs:
return stage_desc, stage_desc_idx
- stage_desc_idx += 1
return None, -1

def get_total_training_epochs(self) -> int:
10 changes: 5 additions & 5 deletions nncf/experimental/torch/sparsity/movement/scheduler.py
@@ -97,16 +97,16 @@ def from_dict(cls, params: Dict[str, Any]) -> "MovementSchedulerParams":
:param params: A dict that specifies the parameters of movement sparsity scheduler.
:return: A `MovementSchedulerParams` object that stores the parameters from `params`.
"""
- warmup_start_epoch: int = params.get("warmup_start_epoch", None)
- warmup_end_epoch: int = params.get("warmup_end_epoch", None)
- importance_regularization_factor: float = params.get("importance_regularization_factor", None)
+ warmup_start_epoch: int = params.get("warmup_start_epoch")
+ warmup_end_epoch: int = params.get("warmup_end_epoch")
+ importance_regularization_factor: float = params.get("importance_regularization_factor")
enable_structured_masking: bool = params.get("enable_structured_masking", MOVEMENT_ENABLE_STRUCTURED_MASKING)
- init_importance_threshold: Optional[float] = params.get("init_importance_threshold", None)
+ init_importance_threshold: Optional[float] = params.get("init_importance_threshold")
final_importance_threshold: float = params.get(
"final_importance_threshold", MOVEMENT_FINAL_IMPORTANCE_THRESHOLD
)
power: float = params.get("power", MOVEMENT_POWER)
- steps_per_epoch: Optional[int] = params.get("steps_per_epoch", None)
+ steps_per_epoch: Optional[int] = params.get("steps_per_epoch")

if None in [warmup_start_epoch, warmup_end_epoch, importance_regularization_factor]:
raise ValueError(
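A usage sketch of `from_dict` after this change: `warmup_start_epoch`, `warmup_end_epoch` and `importance_regularization_factor` have no defaults and must be supplied, otherwise the `ValueError` at the end of the hunk is raised. The values below are illustrative.

```python
from nncf.experimental.torch.sparsity.movement.scheduler import MovementSchedulerParams

# Illustrative values; the first three keys are required because params.get()
# returns None for them and None triggers the ValueError above.
params = MovementSchedulerParams.from_dict(
    {
        "warmup_start_epoch": 1,
        "warmup_end_epoch": 4,
        "importance_regularization_factor": 0.02,
        "steps_per_epoch": 1000,
    }
)
```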