🎨 Format Python code with psf/black

ndem0 committed Apr 2, 2024
1 parent 7668564 commit 14dfa62
Showing 5 changed files with 40 additions and 29 deletions.
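Context for the hunks below: every change in this commit is mechanical restyling produced by the black formatter (typically invoked as black pina/); the code's behavior is untouched. As a minimal illustrative sketch — not part of the commit, assuming only that the black package is installed — the same restyling can be previewed programmatically:

import black

# One of the badly spaced lines touched below, before formatting.
src = "coeff = torch.einsum('...dr,...d->...r', psi,x)\n"

# format_str applies the same rules as the CLI: single quotes become
# double quotes, and a space is inserted after each comma.
print(black.format_str(src, mode=black.Mode()), end="")
# coeff = torch.einsum("...dr,...d->...r", psi, x)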
2 changes: 1 addition & 1 deletion pina/label_tensor.py
@@ -93,7 +93,7 @@ def __deepcopy__(self, __):
labels = self.labels
copy_tensor = deepcopy(self.tensor)
return LabelTensor(copy_tensor, labels)

@property
def labels(self):
"""Property decorator for labels
2 changes: 1 addition & 1 deletion pina/model/__init__.py
@@ -8,7 +8,7 @@
"FourierIntegralKernel",
"KernelNeuralOperator",
"AveragingNeuralOperator",
"LowRankNeuralOperator"
"LowRankNeuralOperator",
]

from .feed_forward import FeedForward, ResidualFeedForward
2 changes: 1 addition & 1 deletion pina/model/layers/__init__.py
@@ -27,4 +27,4 @@
from .embedding import PeriodicBoundaryEmbedding
from .avno_layer import AVNOBlock
from .lowrank_layer import LowRankBlock
-from .adaptive_func import AdaptiveActivationFunction
\ No newline at end of file
+from .adaptive_func import AdaptiveActivationFunction
42 changes: 24 additions & 18 deletions pina/model/layers/lowrank_layer.py
@@ -3,7 +3,7 @@
import torch

from pina.utils import check_consistency
-import pina.model as pm # avoid circular import
+import pina.model as pm  # avoid circular import


class LowRankBlock(torch.nn.Module):
@@ -42,14 +42,16 @@ class LowRankBlock(torch.nn.Module):
"""

-def __init__(self,
-             input_dimensions,
-             embedding_dimenion,
-             rank,
-             inner_size=20,
-             n_layers=2,
-             func=torch.nn.Tanh,
-             bias=True):
+def __init__(
+    self,
+    input_dimensions,
+    embedding_dimenion,
+    rank,
+    inner_size=20,
+    n_layers=2,
+    func=torch.nn.Tanh,
+    bias=True,
+):
"""
:param int input_dimensions: The number of input components of the
model.
@@ -78,10 +80,14 @@ def __init__(self,
super().__init__()

# Assignment (check consistency inside FeedForward)
-self._basis = pm.FeedForward(input_dimensions=input_dimensions,
-                             output_dimensions=2*rank*embedding_dimenion,
-                             inner_size=inner_size, n_layers=n_layers,
-                             func=func, bias=bias)
+self._basis = pm.FeedForward(
+    input_dimensions=input_dimensions,
+    output_dimensions=2 * rank * embedding_dimenion,
+    inner_size=inner_size,
+    n_layers=n_layers,
+    func=func,
+    bias=bias,
+)
self._nn = torch.nn.Linear(embedding_dimenion, embedding_dimenion)

check_consistency(rank, int)
@@ -115,15 +121,15 @@ def forward(self, x, coords):
# extract basis
basis = self._basis(coords)
# reshape [B, N, D, 2*rank]
-shape = list(basis.shape[:-1]) + [-1, 2*self.rank]
+shape = list(basis.shape[:-1]) + [-1, 2 * self.rank]
basis = basis.reshape(shape)
# divide
-psi = basis[..., :self.rank]
-phi = basis[..., self.rank:]
+psi = basis[..., : self.rank]
+phi = basis[..., self.rank :]
# compute dot product
-coeff = torch.einsum('...dr,...d->...r', psi,x)
+coeff = torch.einsum("...dr,...d->...r", psi, x)
# expand the basis
-expansion = torch.einsum('...r,...dr->...d', coeff,phi)
+expansion = torch.einsum("...r,...dr->...d", coeff, phi)
# apply linear layer and return
return self._func(self._nn(x) + expansion)
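An aside on the two einsum contractions just reformatted: together they apply a rank-r linear map at every point — coeff projects x onto the r basis vectors held in psi, and expansion recombines them with phi, which equals multiplying x by phi @ psi^T pointwise. A self-contained sketch with arbitrary sizes (plain torch, independent of PINA):

import torch

B, N, D, r = 2, 5, 16, 4  # arbitrary batch, points, embedding dim, rank
psi = torch.rand(B, N, D, r)
phi = torch.rand(B, N, D, r)
x = torch.rand(B, N, D)

coeff = torch.einsum("...dr,...d->...r", psi, x)          # [B, N, r]
expansion = torch.einsum("...r,...dr->...d", coeff, phi)  # [B, N, D]

# Same result as the explicit pointwise rank-r map phi @ psi^T.
direct = torch.einsum("...dr,...er,...e->...d", phi, psi, x)
assert torch.allclose(expansion, direct, atol=1e-5)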

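A usage sketch for the block as a whole — the sizes are made up, the call signature is read off the code above (forward takes the embedded field x and the coordinates), and the misspelled keyword embedding_dimenion matches the source:

import torch
from pina.model.layers import LowRankBlock

# Hypothetical sizes: batch of 8, 100 points, 2D coordinates, 16-dim embedding.
block = LowRankBlock(input_dimensions=2, embedding_dimenion=16, rank=4)

coords = torch.rand(8, 100, 2)  # fed to the internal basis network
x = torch.rand(8, 100, 16)      # embedded field values
out = block(x, coords)          # shape [8, 100, 16], same as x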
21 changes: 13 additions & 8 deletions pina/model/lno.py
@@ -41,7 +41,7 @@ def __init__(
inner_size=20,
n_layers=2,
func=torch.nn.Tanh,
-bias=True
+bias=True,
):
"""
:param torch.nn.Module lifting_net: The neural network for lifting
@@ -105,13 +105,18 @@ def __init__(
self.coordinates_indices = coordinates_indices
self.field_indices = field_indices
integral_net = nn.Sequential(
-*[LowRankBlock(input_dimensions=len(coordinates_indices),
-               embedding_dimenion=output_lifting_net,
-               rank=rank,
-               inner_size=inner_size,
-               n_layers=n_layers,
-               func=func,
-               bias=bias) for _ in range(n_kernel_layers)]
+*[
+    LowRankBlock(
+        input_dimensions=len(coordinates_indices),
+        embedding_dimenion=output_lifting_net,
+        rank=rank,
+        inner_size=inner_size,
+        n_layers=n_layers,
+        func=func,
+        bias=bias,
+    )
+    for _ in range(n_kernel_layers)
+]
)
super().__init__(lifting_net, integral_net, projecting_net)

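One last note on the shape of these hunks: the one-argument-per-line layout is black's standard wrapping for calls that exceed the line length, and the trailing commas it adds (its "magic trailing comma") keep the exploded form stable on later runs. A sketch of the effect, again assuming only that black is installed:

import black

src = "f(aaaa, bbbb,)\n"  # a pre-existing trailing comma forces the multi-line form
print(black.format_str(src, mode=black.Mode()), end="")
# f(
#     aaaa,
#     bbbb,
# )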
