Added TensorFlow support to nncf.Tensor #3106

Open · wants to merge 5 commits into develop
3 changes: 3 additions & 0 deletions docs/api/source/conf.py
@@ -145,6 +145,9 @@ def collect_api_entities() -> APIInfo:
"nncf.tensor.functions.torch_linalg",
"nncf.tensor.functions.torch_io",
"nncf.tensor.functions.numpy_io",
"nncf.tensor.functions.tf_numeric",
"nncf.tensor.functions.tf_io",
"nncf.tensor.functions.tf_linalg",
]

with mock(mock_modules):
1 change: 1 addition & 0 deletions nncf/tensor/definitions.py
@@ -20,6 +20,7 @@ class TensorBackend(Enum):
"""

numpy = auto()
tf = auto()
torch = auto()


5 changes: 5 additions & 0 deletions nncf/tensor/functions/__init__.py
@@ -74,6 +74,11 @@ def _initialize_backends():
import nncf.tensor.functions.numpy_linalg
import nncf.tensor.functions.numpy_numeric

with contextlib.suppress(ImportError):
import nncf.tensor.functions.tf_io
import nncf.tensor.functions.tf_linalg
import nncf.tensor.functions.tf_numeric

with contextlib.suppress(ImportError):
import nncf.tensor.functions.torch_io
import nncf.tensor.functions.torch_linalg
8 changes: 8 additions & 0 deletions nncf/tensor/functions/dispatcher.py
@@ -97,6 +97,10 @@ def get_numeric_backend_fn(fn_name: str, backend: TensorBackend) -> Callable:
from nncf.tensor.functions import torch_numeric

return getattr(torch_numeric, fn_name)
if backend == TensorBackend.tf:
from nncf.tensor.functions import tf_numeric

return getattr(tf_numeric, fn_name)


def get_io_backend_fn(fn_name: str, backend: TensorBackend) -> Callable:
@@ -111,6 +115,10 @@ def get_io_backend_fn(fn_name: str, backend: TensorBackend) -> Callable:
from nncf.tensor.functions import numpy_io

return getattr(numpy_io, fn_name)
if backend == TensorBackend.tf:
from nncf.tensor.functions import tf_io

return getattr(tf_io, fn_name)
if backend == TensorBackend.torch:
from nncf.tensor.functions import torch_io

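For reference, a minimal sketch of how the new TensorBackend.tf branches resolve an implementation (illustrative only; it assumes the modules import as in this PR and uses the load_file function added in tf_io below):

from nncf.tensor.definitions import TensorBackend
from nncf.tensor.functions.dispatcher import get_io_backend_fn

# Resolves to nncf.tensor.functions.tf_io.load_file via the new tf branch.
tf_load = get_io_backend_fn("load_file", TensorBackend.tf)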
28 changes: 28 additions & 0 deletions nncf/tensor/functions/tf_io.py
@@ -0,0 +1,28 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict, Optional

import tensorflow as tf
from safetensors.tensorflow import load_file as tf_load_file
from safetensors.tensorflow import save_file as tf_save_file

from nncf.tensor import TensorDeviceType
from nncf.tensor.functions import io as io


def load_file(file_path: str, *, device: Optional[TensorDeviceType] = None) -> Dict[str, tf.Tensor]:
return tf_load_file(file_path)


@io.save_file.register(tf.Tensor)
def _(data: Dict[str, tf.Tensor], file_path: str) -> None:
return tf_save_file(data, file_path)
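For context, a possible round trip with these helpers (a sketch only; it assumes tensorflow and safetensors are installed and uses an arbitrary file name):

import tensorflow as tf
from safetensors.tensorflow import save_file as tf_save_file
from nncf.tensor.functions.tf_io import load_file

# Save a dict of tf.Tensor with safetensors, then read it back with the new helper.
tensors = {"weight": tf.constant([[1.0, 2.0], [3.0, 4.0]])}
tf_save_file(tensors, "weights.safetensors")
restored = load_file("weights.safetensors")
assert bool(tf.reduce_all(restored["weight"] == tensors["weight"]))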
129 changes: 129 additions & 0 deletions nncf/tensor/functions/tf_linalg.py
@@ -0,0 +1,129 @@
# Copyright (c) 2024 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import Optional, Tuple, Union

import tensorflow as tf

from nncf.tensor.functions import linalg


@linalg.norm.register(tf.Tensor)
def _(
a: tf.Tensor,
ord: Optional[Union[str, float, int]] = None,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> tf.Tensor:
if ord is None:
ord = "euclidean"
rank = tf.rank(a)
if rank == 2 and axis is None:
axis = (0, 1)

with tf.device(a.device):
if ord == "nuc" and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord='nuc' is only supported for 2D tensors")
s = tf.linalg.svd(a, compute_uv=False)
return tf.reduce_sum(s, axis=-1)

if ord == -1 and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=-1 is only supported for 2D tensors")
return tf.reduce_min(tf.reduce_sum(tf.abs(a), axis=axis[0]), keepdims=keepdims)

if ord == 1 and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=1 is only supported for 2D tensors")
return tf.reduce_max(tf.reduce_sum(tf.abs(a), axis=axis[0]), keepdims=keepdims)

if ord == -2 and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=-2 is only supported for 2D tensors")
s = tf.linalg.svd(a, compute_uv=False)
return tf.reduce_min(s, axis=-1)

if ord == 2 and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=2 is only supported for 2D tensors")
s = tf.linalg.svd(a, compute_uv=False)
return tf.reduce_max(s, axis=-1)

if ord == float("inf") and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=inf is only supported for 2D tensors")
return tf.reduce_max(tf.reduce_sum(tf.abs(a), axis=axis[1]), keepdims=keepdims)

if ord == -float("inf") and isinstance(axis, tuple) and len(axis) != 1:
if rank != 2:
raise ValueError("ord=-inf is only supported for 2D tensors")
return tf.reduce_min(tf.reduce_sum(tf.abs(a), axis=axis[1]), keepdims=keepdims)

return tf.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims)


@linalg.cholesky.register(tf.Tensor)
def _(a: tf.Tensor, upper: bool = False) -> tf.Tensor:
with tf.device(a.device):
cholesky = tf.linalg.cholesky(a)
if upper:
perm = list(range(tf.rank(a)))
perm[-1], perm[-2] = perm[-2], perm[-1]
cholesky = tf.transpose(cholesky, perm=perm)
return cholesky


@linalg.cholesky_inverse.register(tf.Tensor)
def _(a: tf.Tensor, upper: bool = False) -> tf.Tensor:
with tf.device(a.device):
if upper:
perm = list(range(tf.rank(a)))
perm[-1], perm[-2] = perm[-2], perm[-1]
a = tf.transpose(a, perm=perm)

eye = tf.eye(a.shape[0], dtype=a.dtype)
return tf.linalg.cholesky_solve(a, eye)


@linalg.inv.register(tf.Tensor)
def _(a: tf.Tensor) -> tf.Tensor:
with tf.device(a.device):
return tf.linalg.inv(a)


@linalg.pinv.register(tf.Tensor)
def _(a: tf.Tensor) -> tf.Tensor:
with tf.device(a.device):
return tf.linalg.pinv(a)


@linalg.lstsq.register(tf.Tensor)
def _(a: tf.Tensor, b: tf.Tensor, driver: Optional[str] = None) -> tf.Tensor:
with tf.device(a.device):
if driver is not None:
warnings.warn("Driver specifying is not supported in TensorFlow lstsq method")
if tf.rank(b) == 1:
b = tf.expand_dims(b, axis=0)
perm = list(range(tf.rank(b)))
perm[-1], perm[-2] = perm[-2], perm[-1]
b = tf.transpose(b, perm=perm)

return tf.linalg.lstsq(a, b)
Comment on lines +112 to +121
Contributor:
Suggested change (current code):
with tf.device(a.device):
if driver is not None:
warnings.warn("Driver specifying is not supported in TensorFlow lstsq method")
if tf.rank(b) == 1:
b = tf.expand_dims(b, axis=0)
perm = list(range(tf.rank(b)))
perm[-1], perm[-2] = perm[-2], perm[-1]
b = tf.transpose(b, perm=perm)
return tf.linalg.lstsq(a, b)
Suggested change (proposed code):
with tf.device(a.device):
if driver is not None:
warnings.warn("Driver specifying is not supported in TensorFlow lstsq method")
if tf.rank(b) == 1:
b = tf.expand_dims(b, axis=1)
return tf.linalg.lstsq(a, b)

Please extend test cases for this function.

Author:
Existing test cases are failing without the transpose. Could you please clarify what tests you want to add?
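One possible test along these lines (a sketch only, not part of the PR): a 1-D right-hand side checked against NumPy's reference least-squares solution.

import numpy as np
import tensorflow as tf

# Over-determined system with an exact solution x = [3, 3].
a = tf.constant([[1.0, 1.0], [1.0, 2.0], [1.0, 3.0]], dtype=tf.float64)
b = tf.constant([6.0, 9.0, 12.0], dtype=tf.float64)

# tf.linalg.lstsq expects a 2-D right-hand side, hence the expand_dims.
x_tf = tf.linalg.lstsq(a, tf.expand_dims(b, axis=1))

x_np, *_ = np.linalg.lstsq(a.numpy(), b.numpy(), rcond=None)
assert np.allclose(x_tf.numpy().ravel(), x_np)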



@linalg.svd.register(tf.Tensor)
def _(a: tf.Tensor, full_matrices: Optional[bool] = True) -> tf.Tensor:
with tf.device(a.device):
s, u, v = tf.linalg.svd(a, full_matrices=full_matrices)

return u, s, tf.transpose(v, conjugate=True)
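Note that tf.linalg.svd returns (s, u, v) while NumPy returns (u, s, vh), which is why the result is reordered and v is conjugate-transposed here. A quick parity check might look like this (a sketch, assuming float64 input; singular vectors are only defined up to sign, so the reconstruction is compared instead):

import numpy as np
import tensorflow as tf

a = tf.constant(np.random.rand(4, 3))
s_tf, u_tf, v_tf = tf.linalg.svd(a, full_matrices=False)
u_np, s_np, vh_np = np.linalg.svd(a.numpy(), full_matrices=False)

# Singular values match directly; vectors are compared via reconstruction.
assert np.allclose(s_tf.numpy(), s_np)
recon = (u_tf * s_tf) @ tf.transpose(v_tf, conjugate=True)
assert np.allclose(recon.numpy(), a.numpy())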