Dl/challen alignment #21

Closed · daniil-lyakhov wants to merge 28 commits into develop from dl/challen_alignment
28 commits
5e8886d - Init commit (daniil-lyakhov, Jun 2, 2023)
6e73710 - Pattern matching integrated to CA algorithm (daniil-lyakhov, Jun 23, 2023)
350c3a5 - Channel Alignment algo tests (daniil-lyakhov, Jun 23, 2023)
048227e - Useless code removed (daniil-lyakhov, Jun 27, 2023)
6a22667 - Fix convs metatypes (daniil-lyakhov, Jun 27, 2023)
b693001 - Merge branch 'develop' into dl/challen_alignment (daniil-lyakhov, Jun 27, 2023)
dadfae0 - Pylint (daniil-lyakhov, Jun 28, 2023)
e5e0b31 - Revert graph matching refactoring as node order bug is present (daniil-lyakhov, Jun 28, 2023)
40ce658 - Merge remote-tracking branch 'origin/develop' into dl/challen_alignment (daniil-lyakhov, Jun 28, 2023)
2e5eb59 - Comments (daniil-lyakhov, Jun 28, 2023)
075e428 - Pattern updated (daniil-lyakhov, Jun 28, 2023)
9b26f2f - Fix producer check (daniil-lyakhov, Jun 29, 2023)
43dd31b - Merge remote-tracking branch 'origin/develop' into dl/challen_alignment (daniil-lyakhov, Jun 29, 2023)
7d2e2cc - Fix tests (daniil-lyakhov, Jun 29, 2023)
e223bd7 - Insert null biases inside algo (daniil-lyakhov, Jun 30, 2023)
077e46f - Null biases pass (daniil-lyakhov, Jun 30, 2023)
8b414a7 - Test get_dims_description (daniil-lyakhov, Jun 30, 2023)
53323ce - Docstrings (daniil-lyakhov, Jul 3, 2023)
878a720 - Null biases insertion reverted (daniil-lyakhov, Jul 3, 2023)
89a4b80 - Test find_matching_subgraphs (daniil-lyakhov, Jul 3, 2023)
434ef02 - Merge remote-tracking branch 'origin/develop' into dl/challen_alignment (daniil-lyakhov, Jul 3, 2023)
09231a3 - Disable CA algo by default, enable all biases insertion (daniil-lyakhov, Jul 4, 2023)
20e9149 - Comments (daniil-lyakhov, Jul 4, 2023)
73d9262 - Comments (daniil-lyakhov, Jul 4, 2023)
7f0b365 - Fix tests and docstrings (daniil-lyakhov, Jul 4, 2023)
b40146b - Comments (daniil-lyakhov, Jul 4, 2023)
ea0d235 - Fix pre-commit (daniil-lyakhov, Jul 5, 2023)
bfe9161 - Merge remote-tracking branch 'origin/develop' into dl/challen_alignment (daniil-lyakhov, Jul 5, 2023)
11 changes: 7 additions & 4 deletions nncf/common/graph/graph.py
@@ -711,17 +711,20 @@ def remove_nodes_from(self, nodes: List[NNCFNode]) -> None:
         for node_key, node in self._nx_graph.nodes.items():
             self._node_id_to_key_dict[node["id"]] = node_key

-    def find_matching_nodes(self, patterns: GraphPattern) -> List[NNCFNode]:
+    def find_matching_subgraphs(self, patterns: GraphPattern) -> List[List[NNCFNode]]:
         """
-        Returns nodes of matched pattern in patterns.
+        Returns subgraphs matching the given patterns.

         :param patterns: Instance of GraphPattern containing all patterns.
-        :return: Nodes that are matched patterns.
+        :return: List of subgraphs found by pattern matching.
+            Each subgraph is an ordered list of nodes of the matched subgraph.
+            The node order relies on the DiGraphMatcher isomorphic subgraph matching logic of the NetworkX package;
+            DiGraphMatcher does not guarantee a specific order for returning isomorphic subgraphs.
         """
         output = []
         for matched_subgraph in find_subgraphs_matching_pattern(self._nx_graph, patterns):
+            subgraph_list = []
             for node_key in matched_subgraph:
-                output.append(self.get_node_by_key(node_key))
+                subgraph_list.append(self.get_node_by_key(node_key))
+            output.append(subgraph_list)
         return output
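A minimal sketch of how the new return shape is consumed (hedged: `nncf_graph` and `pattern` are assumed to be an `NNCFGraph` and a `GraphPattern` built elsewhere, not objects from this PR):

```python
# Each element of the result is now one matched subgraph: an ordered
# List[NNCFNode], rather than the previous flat list of nodes.
for subgraph in nncf_graph.find_matching_subgraphs(pattern):
    first, last = subgraph[0], subgraph[-1]
    print(f"matched span: {first.node_name} -> {last.node_name}")
```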
6 changes: 3 additions & 3 deletions nncf/common/graph/graph_matching.py
@@ -43,7 +43,7 @@ def _sort_patterns_by_len(pattern: nx.DiGraph) -> int:
     non_pattern_nodes = [
         node_id
         for node_id, node_data in pattern.nodes(data=True)
-        if GraphPattern.NON_PATTERN_NODE_TYPE in node_data[GraphPattern.METATYPE_ATTR]
+        if GraphPattern.NON_PATTERN_NODE_TYPE in node_data.get(GraphPattern.METATYPE_ATTR, [])
     ]
     return len(pattern) - len(non_pattern_nodes)

@@ -84,7 +84,7 @@ def _is_subgraph_matching_strict(graph: nx.DiGraph, pattern: nx.DiGraph, subgraph: Dict[str, str]) -> bool:
             last_nodes.append(node)

     for node_from_graph, node_from_pattern in subgraph.items():
-        if GraphPattern.NON_PATTERN_NODE_TYPE in pattern.nodes[node_from_pattern].get(GraphPattern.METATYPE_ATTR):
+        if GraphPattern.NON_PATTERN_NODE_TYPE in pattern.nodes[node_from_pattern].get(GraphPattern.METATYPE_ATTR, []):
             continue
         predecessors_keys = graph.pred[node_from_graph].keys()
         successor_keys = graph.succ[node_from_graph].keys()

@@ -112,7 +112,7 @@ def _copy_subgraph_excluding_non_pattern_node(subgraph: Dict[str, str], pattern_graph: GraphPattern) -> Dict[str, str]:
     output = {}
     for node_from_graph, node_from_pattern in subgraph.items():
         pattern_node = pattern_graph.graph.nodes[node_from_pattern]
-        pattern_node_types = pattern_node.get(GraphPattern.METATYPE_ATTR)
+        pattern_node_types = pattern_node.get(GraphPattern.METATYPE_ATTR, [])
         if GraphPattern.NON_PATTERN_NODE_TYPE not in pattern_node_types:
             output[node_from_graph] = node_from_pattern
     return output
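All three edits share one motivation: a pattern node that never had a metatype attribute used to break the membership check. A standalone illustration with plain dicts (not NNCF objects):

```python
node_data = {}  # a pattern node with no METATYPE_ATTR set

# dict[key] raises KeyError; dict.get(key) returns None, and an `in`
# test against None raises TypeError - either way the match check crashed.
try:
    "non_pattern_node" in node_data.get("metatype")
except TypeError as err:
    print(err)  # argument of type 'NoneType' is not iterable

# With an empty-list default the check degrades gracefully to False.
print("non_pattern_node" in node_data.get("metatype", []))
```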
55 changes: 7 additions & 48 deletions nncf/common/graph/layer_attributes.py
@@ -8,6 +8,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 from abc import ABC
 from abc import abstractmethod
 from dataclasses import dataclass
@@ -26,6 +27,9 @@ class BaseLayerAttributes(ABC):
     of modules/layers.
     """

+    def __eq__(self, __o: object) -> bool:
+        return isinstance(__o, self.__class__) and self.__dict__ == __o.__dict__
+

 class MultipleInputLayerAttributes(BaseLayerAttributes):
     def __init__(self, axis: int):
@@ -35,9 +39,6 @@ def __init__(self, axis: int):
         """
         self.axis = axis

-    def __eq__(self, other: Any):
-        return isinstance(other, MultipleInputLayerAttributes) and self.axis == other.axis
-

 class MultipleOutputLayerAttributes(BaseLayerAttributes):
     def __init__(self, chunks: Union[int, List], axis: int):
@@ -49,11 +50,6 @@ def __init__(self, chunks: Union[int, List], axis: int):
         self.chunks = chunks
         self.axis = axis

-    def __eq__(self, other: Any):
-        return (
-            isinstance(other, MultipleOutputLayerAttributes) and self.chunks == other.chunks and self.axis == other.axis
-        )
-

 class WeightedLayerAttributes(BaseLayerAttributes):
     def __init__(self, weight_requires_grad: bool, dtype: Dtype = Dtype.FLOAT):
@@ -66,9 +62,6 @@ def __init__(self, weight_requires_grad: bool, dtype: Dtype = Dtype.FLOAT):
         self.weight_requires_grad = weight_requires_grad
         self.dtype = dtype

-    def __eq__(self, other: Any):
-        return isinstance(other, WeightedLayerAttributes) and self.weight_requires_grad == other.weight_requires_grad
-
     @abstractmethod
     def get_weight_shape(self) -> List[int]:
         pass
@@ -140,6 +133,7 @@ def __init__(
         out_channels: int,
         kernel_size: Tuple[int, ...],
         stride: Tuple[int, ...],
+        dilations: Tuple[int, ...],
         groups: int,
         transpose: bool,
         padding_values: Tuple[int, ...],
@@ -161,22 +155,11 @@ def __init__(
         self.out_channels = out_channels
         self.kernel_size = kernel_size
         self.stride = stride
+        self.dilations = dilations
         self.groups = groups
         self.transpose = transpose
         self.padding_values = padding_values

-    def __eq__(self, other: Any):
-        return (
-            isinstance(other, ConvolutionLayerAttributes)
-            and super().__eq__(other)
-            and self.in_channels == other.in_channels
-            and self.out_channels == other.out_channels
-            and self.kernel_size == other.kernel_size
-            and self.stride == other.stride
-            and self.groups == other.groups
-            and self.transpose == other.transpose
-        )
-
     def get_weight_shape(self) -> List[int]:
         if not self.transpose:
             return [self.out_channels, self.in_channels // self.groups, *self.kernel_size]
@@ -202,14 +185,6 @@ def __init__(self, weight_requires_grad: bool, num_channels: int, num_groups: int):
         self.num_channels = num_channels
         self.num_groups = num_groups

-    def __eq__(self, other: Any):
-        return (
-            isinstance(other, GroupNormLayerAttributes)
-            and super().__eq__(other)
-            and self.num_channels == other.num_channels
-            and self.num_groups == other.num_groups
-        )
-
     def get_weight_shape(self) -> List[int]:
         return [self.num_channels]

@@ -238,30 +213,14 @@ class TransposeLayerAttributes(BaseLayerAttributes):
     dim0: int
     dim1: int

-    def __eq__(self, other: Any) -> bool:
-        return (
-            isinstance(other, TransposeLayerAttributes)
-            and super().__eq__(other)
-            and self.dim0 == other.dim0
-            and self.dim1 == other.dim1
-        )
-

 @dataclass
 class PermuteLayerAttributes(BaseLayerAttributes):
     """
     :param permutation: the desired ordering of dimensions.
     """

-    permutation: List[int]
-
-    def __eq__(self, other: Any) -> bool:
-        return (
-            isinstance(other, PermuteLayerAttributes)
-            and super().__eq__(other)
-            and len(self.permutation) == len(other.permutation)
-            and (l == r for l, r in zip(self.permutation, other.permutation))
-        )
+    permutation: Tuple[int, ...]


 @dataclass
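The per-class `__eq__` overrides deleted above are subsumed by the single `__dict__`-based comparison on `BaseLayerAttributes`; incidentally, this also fixes a latent bug in the removed `PermuteLayerAttributes.__eq__`, whose final clause compared a generator expression, which is always truthy. A quick sketch of the resulting semantics:

```python
from nncf.common.graph.layer_attributes import MultipleInputLayerAttributes

a = MultipleInputLayerAttributes(axis=1)
b = MultipleInputLayerAttributes(axis=1)
c = MultipleInputLayerAttributes(axis=2)

assert a == b          # same class, identical __dict__
assert a != c          # attribute values differ
assert a != object()   # the isinstance(__o, self.__class__) guard fails first
```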
2 changes: 1 addition & 1 deletion nncf/common/quantization/quantizer_removal.py
@@ -116,7 +116,7 @@ def revert_operations_to_floating_point_precision(
                 command_creator.create_command_to_update_bias(node, original_bias, quantized_model_graph)
             )

-        if node.layer_attributes and node.layer_attributes.const_attrs is not None:
+        if node.layer_attributes and node.layer_attributes.constant_attributes is not None:
             weight_port_ids = node.layer_attributes.get_const_port_ids()
             for port_id in weight_port_ids:
                 original_weight = node.data.get(f"original_weight.{port_id}", None)
4 changes: 2 additions & 2 deletions nncf/experimental/common/tensor_statistics/collectors.py
@@ -442,11 +442,11 @@ class QuantileReducerBase(TensorReducerBase):
     def __init__(
         self,
         reduction_shape: Optional[ReductionShape] = None,
-        quantile: Optional[Union[float, List[float]]] = None,
+        quantile: Optional[Union[float, Tuple[float]]] = None,
         inplace: bool = False,
     ):
         super().__init__(reduction_shape, False)
-        self._quantile = [0.01, 0.99] if quantile is None else quantile
+        self._quantile = (0.01, 0.99) if quantile is None else quantile

     def __eq__(self, __o: object) -> bool:
         return super().__eq__(__o) and self._quantile == __o._quantile
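A plausible motivation for the tuple default, inferred rather than stated in the PR: reducers participate in equality checks and deduplication of statistic collectors, and an immutable, hashable quantile value is safer there. (As a typing nit, `Tuple[float]` denotes a one-element tuple; `Tuple[float, ...]` would be the variadic spelling.)

```python
# Tuples of floats are hashable and safe to share as a default; lists are not.
default = (0.01, 0.99)
print(hash(default))  # works

try:
    hash([0.01, 0.99])
except TypeError as err:
    print(err)  # unhashable type: 'list'
```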
129 changes: 129 additions & 0 deletions nncf/openvino/graph/layer_attributes.py
@@ -0,0 +1,129 @@
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, List, Optional

import openvino.runtime as ov

from nncf.common.graph.layer_attributes import BaseLayerAttributes
from nncf.common.graph.layer_attributes import ConvolutionLayerAttributes
from nncf.common.graph.layer_attributes import GenericWeightedLayerAttributes
from nncf.common.graph.layer_attributes import WeightedLayerAttributes
from nncf.openvino.graph.metatypes.openvino_metatypes import OVConvolutionBackpropDataMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVConvolutionMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVDepthwiseConvolutionMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVGroupConvolutionBackpropDataMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVGroupConvolutionMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVOpMetatype


class OVLayerAttributes(BaseLayerAttributes):
    """
    This class stores additional information about nodes that needs to be processed during compression.
    """

    def __init__(
        self,
        constant_attributes: Dict[int, Any],
        layer_attributes: Optional[Dict[int, BaseLayerAttributes]] = None,
        inputs_attributes: Optional[Dict[Any, Any]] = None,
    ):
        """
        :param constant_attributes: Map of weights port ID to corresponding const attributes.
        :param layer_attributes: Map of weights port ID to corresponding common layer attributes.
        :param inputs_attributes: Activation attributes.
        """
        self._constant_attributes = constant_attributes
        self._layer_attributes = layer_attributes
        self._inputs_attributes = inputs_attributes

    @property
    def constant_attributes(self) -> Dict[int, Any]:
        return self._constant_attributes

    @property
    def layer_attributes(self) -> Optional[Dict[int, BaseLayerAttributes]]:
        return self._layer_attributes

    @property
    def input_attributes(self) -> Optional[Dict[Any, Any]]:
        return self._inputs_attributes

    def get_const_port_ids(self) -> List[int]:
        """
        Returns indices of input ports corresponding to the constant nodes.

        :return: List of input port indices with constants.
        """
        if self._constant_attributes is not None:
            return list(self._constant_attributes.keys())
        return []
def get_weighted_layer_attributes(
    ov_node: ov.Node, ov_metatype: OVOpMetatype, constant_attributes: Dict[str, Any]
) -> Dict[str, WeightedLayerAttributes]:
    """
    Retrieves common layer attributes from the given node.

    :param ov_node: Target OpenVINO graph node instance.
    :param ov_metatype: NNCF OpenVINO metatype of the given node.
    :param constant_attributes: Constant attributes collected for the given node.
    :return: Map of weight port IDs to the weighted layer attributes of the given node.
    """
    retval = {}
    for port_id, attrs in constant_attributes.items():
        if ov_metatype in [
            OVConvolutionMetatype,
            OVDepthwiseConvolutionMetatype,
            OVGroupConvolutionMetatype,
            OVConvolutionBackpropDataMetatype,
            OVGroupConvolutionBackpropDataMetatype,
        ]:
            node_attrs = ov_node.get_attributes()
            kwargs = {
                "weight_requires_grad": False,
                "stride": tuple(node_attrs["strides"]),
                "dilations": node_attrs["dilations"],
                "transpose": ov_metatype in [OVConvolutionBackpropDataMetatype, OVGroupConvolutionBackpropDataMetatype],
                # TODO: ticket 114378: unify pad attribute
                "padding_values": tuple(node_attrs["pads_begin"] + node_attrs["pads_end"]),
            }

            const_shape = attrs["shape"]
            if ov_metatype in [OVConvolutionMetatype, OVConvolutionBackpropDataMetatype]:
                kwargs.update(
                    {
                        "in_channels": const_shape[1],
                        "out_channels": const_shape[0],
                        "kernel_size": tuple(const_shape[2:]),
                        "groups": 1,
                    }
                )
            else:
                kwargs.update(
                    {
                        "in_channels": const_shape[2],
                        "out_channels": const_shape[1],
                        "kernel_size": tuple(const_shape[3:]),
                        "groups": const_shape[0],
                    }
                )
            if kwargs["transpose"]:
                kwargs["in_channels"], kwargs["out_channels"] = kwargs["out_channels"], kwargs["in_channels"]

            common_layer_attr = ConvolutionLayerAttributes(**kwargs)
        else:
            common_layer_attr = GenericWeightedLayerAttributes(
                weight_requires_grad=False, weight_shape=attrs.get("shape", None)
            )
        retval[port_id] = common_layer_attr
    return retval