rcan unshuffle options, docs, schema
the-database committed Jan 9, 2025
1 parent 2b56162 commit 07e88d0
Showing 11 changed files with 18 additions and 99 deletions.
1 change: 1 addition & 0 deletions docs/source/arch_reference.md
@@ -839,6 +839,7 @@ kernel_size: 3
reduction: 16
res_scale: 1
act_mode: relu
unshuffle_mod: false
```
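For readers unfamiliar with the option: `unshuffle_mod` refers to a pixel-unshuffle (space-to-depth) front end for 1x and 2x models. The sketch below only illustrates the underlying idea and is not the traiNNer-redux implementation; the names and shapes are assumptions for demonstration.

```python
# Illustration only (not the actual RCAN code): a pixel-unshuffle front end
# trades spatial resolution for channels, which is why unshuffle_mod only
# matters for 1x and 2x models.
import torch
import torch.nn as nn

lr = torch.randn(1, 3, 64, 64)      # (B, C, H, W) low-resolution input
unshuffle = nn.PixelUnshuffle(2)    # space-to-depth by a factor of 2
body_in = unshuffle(lr)             # (1, 12, 32, 32): 4x channels, half the spatial size
print(body_in.shape)

# A 2x model with this front end upsamples by 4 internally (2 for the requested
# scale, 2 to undo the unshuffle), so the output is still (1, 3, 128, 128)
# while the residual groups run on 32x32 feature maps.
```

Because the speed and VRAM savings come from running the body at half resolution, the option presumably does nothing for scales above 2, which matches the comment added to the option templates below.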
### RGT
#### rgt
1 change: 1 addition & 0 deletions options/onnx/RCAN/RCAN.yml
@@ -12,6 +12,7 @@ num_gpu: auto
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

############################
# Pretrain and Resume Paths
1 change: 1 addition & 0 deletions options/test/RCAN/RCAN.yml
@@ -30,6 +30,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

############################
# Pretrain and Resume Paths
1 change: 1 addition & 0 deletions (file name not shown)
@@ -110,6 +110,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

#########################################################################################
# Pretrain and Resume Paths
1 change: 1 addition & 0 deletions options/train/RCAN/RCAN_OTF_finetune.yml
@@ -118,6 +118,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

# Discriminator model settings
network_d:
1 change: 1 addition & 0 deletions options/train/RCAN/RCAN_OTF_fromscratch.yml
@@ -118,6 +118,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

#########################################################################################
# Pretrain and Resume Paths
1 change: 1 addition & 0 deletions options/train/RCAN/RCAN_finetune.yml
@@ -63,6 +63,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

# Discriminator model settings
network_d:
1 change: 1 addition & 0 deletions options/train/RCAN/RCAN_fromscratch.yml
@@ -63,6 +63,7 @@ datasets:
# Generator model settings
network_g:
type: rcan
unshuffle_mod: true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality.

#########################################################################################
# Pretrain and Resume Paths
2 changes: 1 addition & 1 deletion schemas/redux-config.schema.json

Large diffs are not rendered by default.

9 changes: 7 additions & 2 deletions scripts/options/generate_default_options.py
@@ -229,7 +229,6 @@ def final_template(
"upsampler": "pixelshuffle # pixelshuffle, nearest+conv, dysample (best on even number scales, does not support dynamic ONNX)"
},
},
{"names": ["RCAN"], "scales": ALL_SCALES},
{"names": ["Swin2SR_L", "Swin2SR_M", "Swin2SR_S"], "scales": ALL_SCALES},
{
"names": ["MoESR2"],
@@ -239,7 +238,13 @@
"upsampler": "pixelshuffledirect # conv, pixelshuffledirect, pixelshuffle, nearest+conv, dysample (best on even number scales, does not support dynamic ONNX)",
},
},
{"names": ["RCAN"], "scales": ALL_SCALES},
{
"names": ["RCAN"],
"scales": ALL_SCALES,
"extras": {
"unshuffle_mod": "true # Has no effect on scales larger than 2. For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality."
},
},
{"names": ["RTMoSR", "RTMoSR_L", "RTMoSR_UL"], "scales": ALL_SCALES},
{
"names": ["GRL_B", "GRL_S", "GRL_T"],
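The new `extras` entry is what injects the `unshuffle_mod` line (with the comment above) into every generated RCAN option file. Below is a rough sketch of how such an entry could be rendered into a `network_g` block; the helper function and `ALL_SCALES` value are assumptions for illustration, not the actual internals of `generate_default_options.py`.

```python
# Hypothetical rendering of an arch entry's "extras" into a network_g block.
# The render function and ALL_SCALES value below are assumed for illustration;
# the real template logic in generate_default_options.py may differ.
ALL_SCALES = [1, 2, 3, 4]  # stand-in value

rcan_entry = {
    "names": ["RCAN"],
    "scales": ALL_SCALES,
    "extras": {
        "unshuffle_mod": "true # Has no effect on scales larger than 2. "
        "For scales 1 and 2, setting to true speeds up the model and reduces "
        "VRAM usage significantly, but reduces quality."
    },
}

def render_network_g(name: str, extras: dict[str, str]) -> str:
    # One "key: value" line per extras item, matching the YAML hunks above.
    lines = ["network_g:", f"  type: {name.lower()}"]
    lines += [f"  {key}: {value}" for key, value in extras.items()]
    return "\n".join(lines)

print(render_network_g(rcan_entry["names"][0], rcan_entry["extras"]))
```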
98 changes: 2 additions & 96 deletions traiNNer/archs/rcan_arch.py
Expand Up @@ -257,7 +257,7 @@ def __init__(
reduction: int = 16,
res_scale: float = 1,
act_mode: str = "relu",
unshuffle_mod: bool = False,
unshuffle_mod: bool = True,
conv: Callable[..., nn.Conv2d] = default_conv,
) -> None:
super().__init__()
@@ -334,98 +334,4 @@ def forward(self, x: Tensor) -> Tensor:

x = self.tail(res)
x = self.add_mean(x)
out = (x / self.rgb_range)[:, :, : h * self.scale, : w * self.scale]
return out


# @ARCH_REGISTRY.register()
# def rcan_rg20(
# scale: int = 4,
# n_resgroups: int = 20,
# n_resblocks: int = 20,
# n_feats: int = 64,
# n_colors: int = 3,
# rgb_range: int = 255,
# norm: bool = False,
# kernel_size: int = 3,
# reduction: int = 16,
# res_scale: float = 1,
# act_mode: str = "relu",
# conv: Callable[..., nn.Conv2d] = default_conv,
# ) -> RCAN:
# return RCAN(
# scale=scale,
# n_resgroups=n_resgroups,
# n_resblocks=n_resblocks,
# n_feats=n_feats,
# n_colors=n_colors,
# rgb_range=rgb_range,
# norm=norm,
# kernel_size=kernel_size,
# reduction=reduction,
# res_scale=res_scale,
# act_mode=act_mode,
# conv=conv,
# )


# @ARCH_REGISTRY.register()
# def rcan_rb40(
# scale: int = 4,
# n_resgroups: int = 10,
# n_resblocks: int = 40,
# n_feats: int = 64,
# n_colors: int = 3,
# rgb_range: int = 255,
# norm: bool = False,
# kernel_size: int = 3,
# reduction: int = 16,
# res_scale: float = 1,
# act_mode: str = "relu",
# conv: Callable[..., nn.Conv2d] = default_conv,
# ) -> RCAN:
# return RCAN(
# scale=scale,
# n_resgroups=n_resgroups,
# n_resblocks=n_resblocks,
# n_feats=n_feats,
# n_colors=n_colors,
# rgb_range=rgb_range,
# norm=norm,
# kernel_size=kernel_size,
# reduction=reduction,
# res_scale=res_scale,
# act_mode=act_mode,
# conv=conv,
# )


# @ARCH_REGISTRY.register()
# def rcan_nf128(
# scale: int = 4,
# n_resgroups: int = 10,
# n_resblocks: int = 20,
# n_feats: int = 128,
# n_colors: int = 3,
# rgb_range: int = 255,
# norm: bool = False,
# kernel_size: int = 3,
# reduction: int = 16,
# res_scale: float = 1,
# act_mode: str = "relu",
# conv: Callable[..., nn.Conv2d] = default_conv,
# ) -> RCAN:
# return RCAN(
# scale=scale,
# n_resgroups=n_resgroups,
# n_resblocks=n_resblocks,
# n_feats=n_feats,
# n_colors=n_colors,
# rgb_range=rgb_range,
# norm=norm,
# kernel_size=kernel_size,
# reduction=reduction,
# res_scale=res_scale,
# act_mode=act_mode,
# conv=conv,
# )
return (x / self.rgb_range)[:, :, : h * self.scale, : w * self.scale]
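With the default flipped to `unshuffle_mod: bool = True` and `forward` now returning the cropped tensor directly, a quick shape check is a reasonable sanity test. The import path and keyword arguments below are read off this diff and the commented-out helpers it deletes; treat them as assumptions if your checkout differs.

```python
# Minimal smoke test (a sketch; the import path and constructor kwargs are
# assumed from this diff, not verified against every version of the repo).
import torch
from traiNNer.archs.rcan_arch import RCAN

model = RCAN(scale=2, unshuffle_mod=True).eval()
lr = torch.randn(1, 3, 48, 48)
with torch.no_grad():
    sr = model(lr)

# forward() divides by rgb_range and crops to exactly h * scale by w * scale,
# so a 48x48 input should come back as 96x96 regardless of internal padding.
print(sr.shape)  # expected: torch.Size([1, 3, 96, 96])
```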
