Implement SK Module in Glasses #283

Open · wants to merge 6 commits into develop
64 changes: 64 additions & 0 deletions glasses/nn/att/SK.py
@@ -0,0 +1,64 @@
from typing import List, Optional, Union

import torch
from einops.layers.torch import Rearrange, Reduce
from torch import nn

from glasses.nn.att.utils import make_divisible
from glasses.nn.blocks import ConvBnAct

class SKAtt(nn.Module):
    """Selective Kernel attention: the input is convolved by multiple
    branches with different receptive fields, and a learned soft attention
    weights and merges the branches."""

    def __init__(self,
                 in_features: int,
                 out_features: Optional[int] = None,
                 kernel_size: Union[List[int], int] = [3, 5],
                 stride: int = 1,
                 groups: int = 1,
                 reduction: int = 16,
                 reduction_divisor: int = 8,
                 reduced_features: Optional[int] = None,
                 keep_3x3: bool = True,
                 ):
        super().__init__()
        out_features = out_features or in_features
        mid_features = reduced_features or make_divisible(out_features // reduction, divisor=reduction_divisor)
        if not isinstance(kernel_size, list):
            # a single int means two branches with the same kernel size
            kernel_size = [kernel_size] * 2
        if keep_3x3:
            # swap every k x k kernel for a dilated 3x3 with the same
            # receptive field: a 3x3 dilated by (k - 1) // 2 covers k x k
            dilation = [(k - 1) // 2 for k in kernel_size]
            kernel_size = [3] * len(kernel_size)
        else:
            dilation = [1] * len(kernel_size)
        groups = min(out_features, groups)
        self.num_paths = len(kernel_size)

        # split: one convolutional branch per kernel size; padding scales
        # with the dilation so every branch keeps the spatial resolution
        self.split = nn.ModuleList([
            ConvBnAct(in_features=in_features,
                      out_features=out_features,
                      mode="same",
                      stride=stride,
                      kernel_size=k,
                      dilation=d,
                      padding=d * (k // 2),
                      groups=groups)
            for k, d in zip(kernel_size, dilation)
        ])

        # fuse: sum the branches, global average pool, squeeze the channels
        self.fuse = nn.Sequential(
            Reduce('b s c h w -> b c h w', reduction='sum', s=self.num_paths),
            Reduce('b c h w -> b c 1 1', reduction='mean'),
            ConvBnAct(out_features, mid_features, kernel_size=1, bias=False)
        )

        # select: one attention map per branch, softmax-normalized across
        # branches so the weights for each channel sum to one. The weighted
        # sum over branches happens in forward; summing here after the
        # softmax would collapse the weights to all ones.
        self.select = nn.Sequential(
            nn.Conv2d(mid_features, self.num_paths * out_features, kernel_size=1, bias=False),
            Rearrange('b (s c) h w -> b s c h w', s=self.num_paths, c=out_features),
            nn.Softmax(dim=1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        splitted = [path(x) for path in self.split]
        splitted = torch.stack(splitted, dim=1)  # b s c h w
        x_attn = self.fuse(splitted)             # b mid 1 1
        x_attn = self.select(x_attn)             # b s c 1 1
        # weight each branch by its attention and collapse the branch axis
        x = (splitted * x_attn).sum(dim=1)
        return x
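
A minimal usage sketch (not part of the diff), assuming SKAtt is exported as in the glasses/nn/att/__init__.py change below; batch size must be greater than 1 in training mode because fuse applies BatchNorm to 1x1 maps:

import torch
from glasses.nn.att import SKAtt

att = SKAtt(in_features=48)   # two branches by default: 3x3 and a dilated 3x3
x = torch.rand(2, 48, 8, 8)   # batch > 1, see the review comment below
out = att(x)
assert out.shape == x.shape   # SK attention preserves the input shape
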
3 changes: 3 additions & 0 deletions glasses/nn/att/__init__.py
@@ -1,3 +1,4 @@
from .SK import SKAtt
from .se import SpatialSE, LegacySpatialSE, ChannelSE, SpatialChannelSE
from .ECA import ECA
from .CBAM import CBAM
@@ -9,6 +10,8 @@
"SpatialChannelSE",
"SpatialSE",
"CBAM",
"SKAtt",
"WithAtt",
"ChannelSE",
"LegacySpatialSE",
]
9 changes: 9 additions & 0 deletions glasses/nn/att/utils.py
@@ -23,3 +23,12 @@ def __call__(
b = self.block(in_features, out_features, *args, **kwargs)
b.block.add_module("se", self.att(out_features))
return b


def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """Round v to the nearest multiple of divisor, staying at or above
    min_value and never dropping below round_limit * v."""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < round_limit * v:
new_v += divisor
return new_v
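
A few worked values for the rounding behaviour, as a sketch with the default divisor of 8 (matching the SKAtt defaults reduction=16, reduction_divisor=8):

assert make_divisible(48 // 16) == 8  # 3 is clamped up to min_value (= divisor)
assert make_divisible(30) == 32       # rounds to the nearest multiple of 8
assert make_divisible(10) == 16       # 8 undershoots 10 by more than 10%, so bump up
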
10 changes: 7 additions & 3 deletions test/test_att.py
@@ -1,9 +1,9 @@
import torch
-from glasses.nn.att import ChannelSE, ECA, SpatialChannelSE, SpatialSE, CBAM, LegacySpatialSE
+from glasses.nn.att import ChannelSE, ECA, SpatialChannelSE, SpatialSE, CBAM, LegacySpatialSE, SKAtt


def test_att():
-    x = torch.rand(1, 48, 8, 8)
+    x = torch.rand(2, 48, 8, 8)
Contributor Author:
Because there is a BatchNorm2d in SelectiveKernel, this errors out with batch=1.

Reply:
yes! put the module in .eval() mode
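
For context, a minimal reproduction of the failure mode (a standalone snippet, not part of the diff): after the global average pool in fuse, the maps are 1x1, so with batch=1 BatchNorm2d sees a single value per channel and cannot compute batch statistics in training mode:

import torch
from torch import nn

bn = nn.BatchNorm2d(8)
x = torch.rand(1, 8, 1, 1)  # batch=1 on a 1x1 map: one value per channel
try:
    bn(x)   # training mode raises "Expected more than 1 value per channel"
except ValueError as err:
    print(err)
bn.eval()      # eval mode normalizes with running statistics instead
out = bn(x)    # works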


se = LegacySpatialSE(x.shape[1])
res = se(x)
@@ -40,4 +40,8 @@ def test_att():

res = cbam(x)
assert res.shape == x.shape


sk = SKAtt(x.shape[1])

res = sk(x)
assert res.shape == x.shape