From 60fe42118d8317ff38a0e0c60b2c3da456c02339 Mon Sep 17 00:00:00 2001 From: the-database <25811902+the-database@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:47:27 -0500 Subject: [PATCH 1/4] Update logger.py --- traiNNer/utils/logger.py | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/traiNNer/utils/logger.py b/traiNNer/utils/logger.py index 450f68cb..20f9e582 100644 --- a/traiNNer/utils/logger.py +++ b/traiNNer/utils/logger.py @@ -112,8 +112,8 @@ def __call__(self, log_vars: dict[str, Any]) -> None: lrs = log_vars.pop("lrs") # Construct the base message with epoch, iteration, and learning rates - message = f"[ epoch:{epoch:4,d}, iter:{current_iter:8,d}, lr:(" - message += ", ".join([f"{v:.3e}" for v in lrs]) + ") ] " + message = f"[epoch:{epoch:4,d}, iter:{current_iter:8,d}, lr:(" + message += ", ".join([f"{v:.3e}" for v in lrs]) + ")] " # performance, eta if "time" in log_vars.keys(): @@ -125,11 +125,11 @@ def __call__(self, log_vars: dict[str, Any]) -> None: eta_sec = time_sec_avg * (self.max_iters - current_iter - 1) eta_str = str(datetime.timedelta(seconds=int(eta_sec))) - message += f"[ performance: {iter_time:.3f} it/s ] [ eta: {eta_str} ] " + message += f"[performance: {iter_time:.3f} it/s] [eta: {eta_str}] " # peak VRAM message += ( - f"[ peak VRAM: {torch.cuda.max_memory_allocated() / (1024**3):.2f} GB ] " + f"[peak VRAM: {torch.cuda.max_memory_allocated() / (1024**3):.2f} GB] " ) # Log any additional variables (typically losses) @@ -141,8 +141,7 @@ def __call__(self, log_vars: dict[str, Any]) -> None: self.tb_logger.add_scalar(label, value, current_iter) # Log the final constructed message - - self.logger.info(message) + self.logger.info(message, extra={"markup": False}) @master_only @@ -243,18 +242,7 @@ def get_env_info() -> str: device_info = torch.cuda.get_device_properties(torch.cuda.current_device()) # from traiNNer.version import __version__ - msg = r""" - __ _ _ ___ __ __ - / /__________ _(_) | / / 
| / /__ _____ ________ ____/ /_ ___ __ - / __/ ___/ __ `/ / |/ / |/ / _ \/ ___/_____/ ___/ _ \/ __ / / / / |/_/ -/ /_/ / / /_/ / / /| / /| / __/ / /_____/ / / __/ /_/ / /_/ /> < -\__/_/ \__,_/_/_/ |_/_/ |_/\___/_/ /_/ \___/\__,_/\__,_/_/|_| - ______ __ __ __ __ - / ____/____ ____ ____/ / / / __ __ _____ / /__ / / - / / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / / - / /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/ - \____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_) - """ + msg = r"[italic red]:rocket: traiNNer-redux: good luck! :rocket:[/]" msg += ( "\nSystem Information: " f"\n\tCurrent GPU: " From 2b56162be360ea0ebb9960c06dad6942acd9c049 Mon Sep 17 00:00:00 2001 From: the-database <25811902+the-database@users.noreply.github.com> Date: Wed, 8 Jan 2025 18:48:11 -0500 Subject: [PATCH 2/4] rcan pixel unshuffle progress --- traiNNer/archs/arch_info.py | 2 +- traiNNer/archs/rcan_arch.py | 28 ++++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 3 deletions(-) diff --git a/traiNNer/archs/arch_info.py b/traiNNer/archs/arch_info.py index 17c55918..ddf5a5c1 100644 --- a/traiNNer/archs/arch_info.py +++ b/traiNNer/archs/arch_info.py @@ -334,7 +334,7 @@ "df2k_ssim": 0.8140, }, }, - "rcan": { + "rcan unshuffle_mod=False": { 2: { "div2k_psnr": 33.34, "div2k_ssim": 0.9384, diff --git a/traiNNer/archs/rcan_arch.py b/traiNNer/archs/rcan_arch.py index 54f4b280..52e6bcaa 100644 --- a/traiNNer/archs/rcan_arch.py +++ b/traiNNer/archs/rcan_arch.py @@ -2,6 +2,7 @@ from collections.abc import Callable import torch +from spandrel.architectures.__arch_helpers.padding import pad_to_multiple from spandrel.util import store_hyperparameters from torch import Tensor, nn @@ -256,10 +257,13 @@ def __init__( reduction: int = 16, res_scale: float = 1, act_mode: str = "relu", + unshuffle_mod: bool = False, conv: Callable[..., nn.Conv2d] = default_conv, ) -> None: super().__init__() + self.scale = scale + if norm: # RGB mean for DIV2K self.rgb_range = rgb_range @@ 
-273,7 +277,21 @@ def __init__( self.add_mean = nn.Identity() # define head module - modules_head = [conv(n_colors, n_feats, kernel_size)] + unshuffle_mod = unshuffle_mod and scale < 4 + self.downscale_factor = 1 + if unshuffle_mod: + self.downscale_factor = 4 // scale + scale = 4 + modules_head = [ + nn.PixelUnshuffle(self.downscale_factor), + conv( + n_colors * self.downscale_factor * self.downscale_factor, + n_feats, + kernel_size, + ), + ] + else: + modules_head = [conv(n_colors, n_feats, kernel_size)] # define body module modules_body: list[nn.Module] = [ @@ -301,7 +319,12 @@ def __init__( self.body = nn.Sequential(*modules_body) self.tail = nn.Sequential(*modules_tail) + def check_img_size(self, x: Tensor) -> Tensor: + return pad_to_multiple(x, self.downscale_factor, mode="reflect") + def forward(self, x: Tensor) -> Tensor: + _b, _c, h, w = x.shape + x = self.check_img_size(x) x *= self.rgb_range x = self.sub_mean(x) x = self.head(x) @@ -311,7 +334,8 @@ def forward(self, x: Tensor) -> Tensor: x = self.tail(res) x = self.add_mean(x) - return x / self.rgb_range + out = (x / self.rgb_range)[:, :, : h * self.scale, : w * self.scale] + return out # @ARCH_REGISTRY.register() From 07e88d0d13ee0da119dacdf29eb5c5ff299f7ce3 Mon Sep 17 00:00:00 2001 From: the-database <25811902+the-database@users.noreply.github.com> Date: Wed, 8 Jan 2025 22:08:01 -0500 Subject: [PATCH 3/4] rcan unshuffle options, docs, schema --- docs/source/arch_reference.md | 1 + options/onnx/RCAN/RCAN.yml | 1 + options/test/RCAN/RCAN.yml | 1 + ...CAN_OTF_bicubic_ms_ssim_l1_fromscratch.yml | 1 + options/train/RCAN/RCAN_OTF_finetune.yml | 1 + options/train/RCAN/RCAN_OTF_fromscratch.yml | 1 + options/train/RCAN/RCAN_finetune.yml | 1 + options/train/RCAN/RCAN_fromscratch.yml | 1 + schemas/redux-config.schema.json | 2 +- scripts/options/generate_default_options.py | 9 +- traiNNer/archs/rcan_arch.py | 98 +------------------ 11 files changed, 18 insertions(+), 99 deletions(-) diff --git 
a/docs/source/arch_reference.md b/docs/source/arch_reference.md index 54be1b31..393ab81d 100644 --- a/docs/source/arch_reference.md +++ b/docs/source/arch_reference.md @@ -839,6 +839,7 @@ kernel_size: 3 reduction: 16 res_scale: 1 act_mode: relu +unshuffle_mod: false ``` ### RGT #### rgt diff --git a/options/onnx/RCAN/RCAN.yml b/options/onnx/RCAN/RCAN.yml index c092ec8c..53e6fed1 100644 --- a/options/onnx/RCAN/RCAN.yml +++ b/options/onnx/RCAN/RCAN.yml @@ -12,6 +12,7 @@ num_gpu: auto # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. ############################ # Pretrain and Resume Paths diff --git a/options/test/RCAN/RCAN.yml b/options/test/RCAN/RCAN.yml index 795f4b25..9026ed81 100644 --- a/options/test/RCAN/RCAN.yml +++ b/options/test/RCAN/RCAN.yml @@ -30,6 +30,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. ############################ # Pretrain and Resume Paths diff --git a/options/train/RCAN/RCAN_OTF_bicubic_ms_ssim_l1_fromscratch.yml b/options/train/RCAN/RCAN_OTF_bicubic_ms_ssim_l1_fromscratch.yml index 7089ef45..f29a654a 100644 --- a/options/train/RCAN/RCAN_OTF_bicubic_ms_ssim_l1_fromscratch.yml +++ b/options/train/RCAN/RCAN_OTF_bicubic_ms_ssim_l1_fromscratch.yml @@ -110,6 +110,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. 
######################################################################################### # Pretrain and Resume Paths diff --git a/options/train/RCAN/RCAN_OTF_finetune.yml b/options/train/RCAN/RCAN_OTF_finetune.yml index 61ca84f5..fbca55e6 100644 --- a/options/train/RCAN/RCAN_OTF_finetune.yml +++ b/options/train/RCAN/RCAN_OTF_finetune.yml @@ -118,6 +118,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. # Discriminator model settings network_d: diff --git a/options/train/RCAN/RCAN_OTF_fromscratch.yml b/options/train/RCAN/RCAN_OTF_fromscratch.yml index 5b9e04ea..be627577 100644 --- a/options/train/RCAN/RCAN_OTF_fromscratch.yml +++ b/options/train/RCAN/RCAN_OTF_fromscratch.yml @@ -118,6 +118,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. ######################################################################################### # Pretrain and Resume Paths diff --git a/options/train/RCAN/RCAN_finetune.yml b/options/train/RCAN/RCAN_finetune.yml index 1288030b..a13d3ed1 100644 --- a/options/train/RCAN/RCAN_finetune.yml +++ b/options/train/RCAN/RCAN_finetune.yml @@ -63,6 +63,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. 
# Discriminator model settings network_d: diff --git a/options/train/RCAN/RCAN_fromscratch.yml b/options/train/RCAN/RCAN_fromscratch.yml index 571fb696..047dac54 100644 --- a/options/train/RCAN/RCAN_fromscratch.yml +++ b/options/train/RCAN/RCAN_fromscratch.yml @@ -63,6 +63,7 @@ datasets: # Generator model settings network_g: type: rcan + unshuffle_mod: true # has no effect on scales larger than 2. for scales 1 and 2, setting to true speeds up the model and reduces vram usage significantly, but reduces quality. ######################################################################################### # Pretrain and Resume Paths diff --git a/schemas/redux-config.schema.json b/schemas/redux-config.schema.json index 8e3987bb..5330874e 100644 --- a/schemas/redux-config.schema.json +++ b/schemas/redux-config.schema.json @@ -1 +1 @@ -{"$ref":"#/$defs/ReduxOptions","$defs":{"ReduxOptions":{"title":"ReduxOptions","type":"object","properties":{"name":{"description":"Name of the experiment. It should be a unique name if you want to run a new experiment. If you enable auto resume, the experiment with this name will be resumed instead of starting a new training run.","type":"string"},"scale":{"description":"Scale of the model. Most architectures support a scale of 1, 2, 3, 4, or 8. A scale of 1 can be used for restoration models that don't change the resolution of the input image. 
A scale of 2 means the width and height of the input image are doubled, so a 640x480 input will be upscaled to 1280x960.","type":"integer"},"num_gpu":{"description":"The number of GPUs to use for training, if using multiple GPUs.","anyOf":[{"enum":["auto"]},{"type":"integer"}]},"path":{"$ref":"#/$defs/PathOptions"},"network_g":{"anyOf":[{"$ref":"#/$defs/artcnn"},{"$ref":"#/$defs/artcnn_r16f96"},{"$ref":"#/$defs/artcnn_r8f64"},{"$ref":"#/$defs/artcnn_r8f48"},{"$ref":"#/$defs/vggstylediscriminator"},{"$ref":"#/$defs/unetdiscriminatorsn"},{"$ref":"#/$defs/dunet"},{"$ref":"#/$defs/dwt"},{"$ref":"#/$defs/dwt_s"},{"$ref":"#/$defs/eimn"},{"$ref":"#/$defs/eimn_l"},{"$ref":"#/$defs/eimn_a"},{"$ref":"#/$defs/elan"},{"$ref":"#/$defs/elan_light"},{"$ref":"#/$defs/emt"},{"$ref":"#/$defs/flexnet"},{"$ref":"#/$defs/metaflexnet"},{"$ref":"#/$defs/hit_sir"},{"$ref":"#/$defs/hit_sng"},{"$ref":"#/$defs/hit_srf"},{"$ref":"#/$defs/lmlt"},{"$ref":"#/$defs/lmlt_base"},{"$ref":"#/$defs/lmlt_large"},{"$ref":"#/$defs/lmlt_tiny"},{"$ref":"#/$defs/man"},{"$ref":"#/$defs/man_tiny"},{"$ref":"#/$defs/man_light"},{"$ref":"#/$defs/metagan2"},{"$ref":"#/$defs/moesr2"},{"$ref":"#/$defs/mosr"},{"$ref":"#/$defs/mosr_t"},{"$ref":"#/$defs/rcan"},{"$ref":"#/$defs/realplksr"},{"$ref":"#/$defs/realplksr_tiny"},{"$ref":"#/$defs/rtmosr"},{"$ref":"#/$defs/rtmosr_l"},{"$ref":"#/$defs/rtmosr_ul"},{"$ref":"#/$defs/scunet_aaf6aa"},{"$ref":"#/$defs/sebica"},{"$ref":"#/$defs/sebica_mini"},{"$ref":"#/$defs/spanplus"},{"$ref":"#/$defs/spanplus_sts"},{"$ref":"#/$defs/spanplus_s"},{"$ref":"#/$defs/spanplus_st"},{"$ref":"#/$defs/compact"},{"$ref":"#/$defs/ultracompact"},{"$ref":"#/$defs/superultracompact"},{"$ref":"#/$defs/tscunet"},{"$ref":"#/$defs/atd"},{"$ref":"#/$defs/atd_light"},{"$ref":"#/$defs/dat"},{"$ref":"#/$defs/dat_s"},{"$ref":"#/$defs/dat_2"},{"$ref":"#/$defs/dat_light"},{"$ref":"#/$defs/dctlsa"},{"$ref":"#/$defs/ditn_real"},{"$ref":"#/$defs/drct"},{"$ref":"#/$defs/drct_l"},{"$ref":"#/$defs/drct_xl"},{"$ref"
:"#/$defs/grl_b"},{"$ref":"#/$defs/grl_s"},{"$ref":"#/$defs/grl_t"},{"$ref":"#/$defs/hat_l"},{"$ref":"#/$defs/hat_m"},{"$ref":"#/$defs/hat_s"},{"$ref":"#/$defs/omnisr"},{"$ref":"#/$defs/plksr"},{"$ref":"#/$defs/plksr_tiny"},{"$ref":"#/$defs/realcugan"},{"$ref":"#/$defs/rgt"},{"$ref":"#/$defs/rgt_s"},{"$ref":"#/$defs/esrgan"},{"$ref":"#/$defs/esrgan_lite"},{"$ref":"#/$defs/safmn"},{"$ref":"#/$defs/safmn_l"},{"$ref":"#/$defs/seemore_t"},{"$ref":"#/$defs/span"},{"$ref":"#/$defs/srformer"},{"$ref":"#/$defs/srformer_light"},{"$ref":"#/$defs/swin2sr_l"},{"$ref":"#/$defs/swin2sr_m"},{"$ref":"#/$defs/swin2sr_s"},{"$ref":"#/$defs/swinir_l"},{"$ref":"#/$defs/swinir_m"},{"$ref":"#/$defs/swinir_s"}],"discriminator":{"propertyName":"type","mapping":{"artcnn":"#/$defs/artcnn","artcnn_r16f96":"#/$defs/artcnn_r16f96","artcnn_r8f64":"#/$defs/artcnn_r8f64","artcnn_r8f48":"#/$defs/artcnn_r8f48","vggstylediscriminator":"#/$defs/vggstylediscriminator","unetdiscriminatorsn":"#/$defs/unetdiscriminatorsn","dunet":"#/$defs/dunet","dwt":"#/$defs/dwt","dwt_s":"#/$defs/dwt_s","eimn":"#/$defs/eimn","eimn_l":"#/$defs/eimn_l","eimn_a":"#/$defs/eimn_a","elan":"#/$defs/elan","elan_light":"#/$defs/elan_light","emt":"#/$defs/emt","flexnet":"#/$defs/flexnet","metaflexnet":"#/$defs/metaflexnet","hit_sir":"#/$defs/hit_sir","hit_sng":"#/$defs/hit_sng","hit_srf":"#/$defs/hit_srf","lmlt":"#/$defs/lmlt","lmlt_base":"#/$defs/lmlt_base","lmlt_large":"#/$defs/lmlt_large","lmlt_tiny":"#/$defs/lmlt_tiny","man":"#/$defs/man","man_tiny":"#/$defs/man_tiny","man_light":"#/$defs/man_light","metagan2":"#/$defs/metagan2","moesr2":"#/$defs/moesr2","mosr":"#/$defs/mosr","mosr_t":"#/$defs/mosr_t","rcan":"#/$defs/rcan","realplksr":"#/$defs/realplksr","realplksr_tiny":"#/$defs/realplksr_tiny","rtmosr":"#/$defs/rtmosr","rtmosr_l":"#/$defs/rtmosr_l","rtmosr_ul":"#/$defs/rtmosr_ul","scunet_aaf6aa":"#/$defs/scunet_aaf6aa","sebica":"#/$defs/sebica","sebica_mini":"#/$defs/sebica_mini","spanplus":"#/$defs/spanplus","spanplus_sts":
"#/$defs/spanplus_sts","spanplus_s":"#/$defs/spanplus_s","spanplus_st":"#/$defs/spanplus_st","compact":"#/$defs/compact","ultracompact":"#/$defs/ultracompact","superultracompact":"#/$defs/superultracompact","tscunet":"#/$defs/tscunet","atd":"#/$defs/atd","atd_light":"#/$defs/atd_light","dat":"#/$defs/dat","dat_s":"#/$defs/dat_s","dat_2":"#/$defs/dat_2","dat_light":"#/$defs/dat_light","dctlsa":"#/$defs/dctlsa","ditn_real":"#/$defs/ditn_real","drct":"#/$defs/drct","drct_l":"#/$defs/drct_l","drct_xl":"#/$defs/drct_xl","grl_b":"#/$defs/grl_b","grl_s":"#/$defs/grl_s","grl_t":"#/$defs/grl_t","hat_l":"#/$defs/hat_l","hat_m":"#/$defs/hat_m","hat_s":"#/$defs/hat_s","omnisr":"#/$defs/omnisr","plksr":"#/$defs/plksr","plksr_tiny":"#/$defs/plksr_tiny","realcugan":"#/$defs/realcugan","rgt":"#/$defs/rgt","rgt_s":"#/$defs/rgt_s","esrgan":"#/$defs/esrgan","esrgan_lite":"#/$defs/esrgan_lite","safmn":"#/$defs/safmn","safmn_l":"#/$defs/safmn_l","seemore_t":"#/$defs/seemore_t","span":"#/$defs/span","srformer":"#/$defs/srformer","srformer_light":"#/$defs/srformer_light","swin2sr_l":"#/$defs/swin2sr_l","swin2sr_m":"#/$defs/swin2sr_m","swin2sr_s":"#/$defs/swin2sr_s","swinir_l":"#/$defs/swinir_l","swinir_m":"#/$defs/swinir_m","swinir_s":"#/$defs/swinir_s"}}},"network_d":{"description":"The options for the discriminator model.","anyOf":[{"type":"object"},{"type":"null"}],"default":null},"manual_seed":{"description":"Deterministic mode, slows down training. 
Only use for reproducible experiments.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"deterministic":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"dist":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"launcher":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"rank":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"world_size":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"auto_resume":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"resume":{"type":"integer","default":0},"is_train":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"root_path":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"use_amp":{"description":"Speed up training and reduce VRAM usage. NVIDIA only.","type":"boolean","default":false},"amp_bf16":{"description":"Use bf16 instead of fp16 for AMP, RTX 3000 series or newer only. Only recommended if fp16 doesn't work.","type":"boolean","default":false},"use_channels_last":{"description":"Enable channels last memory format while using AMP. Reduces VRAM and speeds up training for most architectures, but some architectures are slower with channels last.","type":"boolean","default":true},"fast_matmul":{"description":"Trade precision for performance.","type":"boolean","default":false},"use_compile":{"description":"Enable torch.compile for the generator model, which takes time on startup to compile the model, but can speed up training after the model is compiled. However, compilation must be redone when starting training each time, as the compiled model is not saved, so for models that take too long to compile it may not worth it.","type":"boolean","default":false},"detect_anomaly":{"description":"Whether or not to enable anomaly detection, which can be useful for debugging NaNs that occur during training. 
Has a significant performance hit and should be disabled when not debugging.","type":"boolean","default":false},"high_order_degradation":{"description":"Whether or not to enable OTF (on the fly) degradations, which generates LRs on the fly.","type":"boolean","default":false},"high_order_degradations_debug":{"description":"Whether or not to enable debugging for OTF, which saves the OTF generated LR images so they can be inspected to view the effect of different OTF settings.","type":"boolean","default":false},"high_order_degradations_debug_limit":{"description":"The maximum number of OTF images to save when debugging is enabled.","type":"integer","default":100},"dataroot_lq_prob":{"description":"Probability of using paired LR data instead of OTF LR data.","type":"number","default":0},"lq_usm":{"description":"Whether to enable unsharp mask on the LQ image.","type":"boolean","default":false},"lq_usm_radius_range":{"description":"For the unsharp mask of the LQ image, use a radius randomly selected from this range.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[1,25]},"blur_prob":{"description":"Probability of applying the first blur to the LQ, between 0 and 1.","type":"number","default":0},"resize_prob":{"description":"List of 3 probabilities for the first resize which should add up to 1: the probability of upscaling, the probability of downscaling, and the probability of no resize.","type":"array","items":{"type":"number"}},"resize_mode_list":{"description":"List of possible resize modes to use for the first resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob":{"description":"List of probabilities for the first resize of selecting the corresponding resize mode in `resize_mode_list`.","type":"array","items":{"type":"number"}},"resize_range":{"description":"The resize range for the first resize, in the format `[min_resize, 
max_resize]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.4,1.5]},"gaussian_noise_prob":{"description":"The probability of applying the first gaussian noise to the LQ, between 0 and 1.","type":"number","default":0},"noise_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"poisson_scale_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"gray_noise_prob":{"type":"number","default":0},"jpeg_prob":{"description":"The probability of applying the first JPEG degradation to the LQ, between 0 and 1.","type":"number","default":1},"jpeg_range":{"description":"The range of JPEG quality to apply for the first JPEG degradation, in the format `[min_quality, max_quality]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[75,95]},"blur_prob2":{"description":"Probability of applying the second blur to the LQ, between 0 and 1.","type":"number","default":0},"resize_prob2":{"description":"List of 3 probabilities for the second resize which should add up to 1: the probability of upscaling, the probability of downscaling, and the probability of no resize.","type":"array","items":{"type":"number"}},"resize_mode_list2":{"description":"List of possible resize modes to use for the second resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob2":{"description":"List of probabilities for the second resize of selecting the corresponding resize mode in `resize_mode_list2`.","type":"array","items":{"type":"number"}},"resize_range2":{"description":"The resize range for the second resize, in the format `[min_resize, 
max_resize]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.6,1.2]},"gaussian_noise_prob2":{"description":"The probability of applying the second gaussian noise to the LQ, between 0 and 1.","type":"number","default":0},"noise_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"poisson_scale_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"gray_noise_prob2":{"type":"number","default":0},"jpeg_prob2":{"description":"The probability of applying the second JPEG degradation to the LQ, between 0 and 1.","type":"number","default":1},"jpeg_range2":{"description":"The range of JPEG quality to apply for the second JPEG degradation, in the format `[min_quality, max_quality]`.","type":"array","items":{"type":"number"}},"resize_mode_list3":{"description":"List of possible resize modes to use for the final resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob3":{"description":"List of probabilities for the final resize of selecting the corresponding resize mode in `resize_mode_list3`.","type":"array","items":{"type":"number"}},"queue_size":{"description":"Queue size for OTF processing, must be a multiple of 
`batch_size_per_gpu`.","type":"integer","default":120},"datasets":{"type":"object","additionalProperties":{"$ref":"#/$defs/DatasetOptions"},"default":{}},"train":{"$ref":"#/$defs/TrainOptions","default":null},"val":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/ValOptions"}],"default":null},"logger":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/LogOptions"}],"default":null},"dist_params":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"onnx":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/OnnxOptions"}],"default":null},"find_unused_parameters":{"type":"boolean","default":false}},"required":["name","scale","num_gpu","path","network_g"]},"PathOptions":{"title":"PathOptions","type":"object","properties":{"experiments_root":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"models":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"resume_models":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"training_states":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"log":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"results_root":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g":{"description":"Path to the pretrain model for the generator. `pth` and `safetensors` formats are supported.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g_path":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"param_key_g":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"strict_load_g":{"description":"Whether to load the pretrain model for the generator in strict mode. 
It should be enabled in most cases, unless you want to partially load a pretrain of a different scale or with slightly different hyperparameters.","type":"boolean","default":true},"resume_state":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g_ema":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_d":{"description":"Path to the pretrain model for the discriminator. `pth` and `safetensors` formats are supported.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"param_key_d":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"strict_load_d":{"description":"Whether to load the pretrain model for the discriminator in strict mode. It should be enabled in most cases.","type":"boolean","default":true},"ignore_resume_networks":{"anyOf":[{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null}},"required":[],"additionalProperties":false},"artcnn":{"title":"artcnn","type":"object","properties":{"type":{"enum":["artcnn"]},"in_ch":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"filters":{"type":"integer","default":96},"n_block":{"type":"integer","default":16},"kernel_size":{"type":"integer","default":3}},"required":["type"]},"artcnn_r16f96":{"title":"artcnn_r16f96","type":"object","properties":{"type":{"enum":["artcnn_r16f96"]}},"required":["type"]},"artcnn_r8f64":{"title":"artcnn_r8f64","type":"object","properties":{"type":{"enum":["artcnn_r8f64"]}},"required":["type"]},"artcnn_r8f48":{"title":"artcnn_r8f48","type":"object","properties":{"type":{"enum":["artcnn_r8f48"]}},"required":["type"]},"vggstylediscriminator":{"title":"vggstylediscriminator","type":"object","properties":{"type":{"enum":["vggstylediscriminator"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64},"input_size":{"type":"integer","default":128}},"required":["type"]},"unetdiscriminatorsn":{"title":"unetdiscriminatorsn","type":"object","properties":{"ty
pe":{"enum":["unetdiscriminatorsn"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64},"skip_connection":{"type":"boolean","default":true}},"required":["type"]},"dunet":{"title":"dunet","type":"object","properties":{"type":{"enum":["dunet"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64}},"required":["type"]},"dwt":{"title":"dwt","type":"object","properties":{"type":{"enum":["dwt"]}},"required":["type"]},"dwt_s":{"title":"dwt_s","type":"object","properties":{"type":{"enum":["dwt_s"]}},"required":["type"]},"eimn":{"title":"eimn","type":"object","properties":{"type":{"enum":["eimn"]},"embed_dims":{"type":"integer","default":64},"scale":{"type":"integer","default":2},"depths":{"type":"integer","default":1},"mlp_ratios":{"type":"number","default":2.66},"drop_rate":{"type":"number","default":0.0},"drop_path_rate":{"type":"number","default":0.0},"num_stages":{"type":"integer","default":16},"freeze_param":{"type":"boolean","default":false}},"required":["type"]},"eimn_l":{"title":"eimn_l","type":"object","properties":{"type":{"enum":["eimn_l"]}},"required":["type"]},"eimn_a":{"title":"eimn_a","type":"object","properties":{"type":{"enum":["eimn_a"]}},"required":["type"]},"elan":{"title":"elan","type":"object","properties":{"type":{"enum":["elan"]},"scale":{"type":"integer","default":4},"colors":{"type":"integer","default":3},"rgb_range":{"type":"integer","default":255},"norm":{"type":"boolean","default":false},"window_sizes":{"anyOf":[{"type":"array","items":{"type":"integer"}},{"type":"null"}],"default":null},"m_elan":{"type":"integer","default":36},"c_elan":{"type":"integer","default":180},"n_share":{"type":"integer","default":0},"r_expand":{"type":"integer","default":2}},"required":["type"]},"elan_light":{"title":"elan_light","type":"object","properties":{"type":{"enum":["elan_light"]}},"required":["type"]},"emt":{"title":"emt","type":"object","properties":{"type":{"enum":["emt"]},"scale":{"type":"
integer","default":4},"num_in_ch":{"type":"integer","default":3},"num_out_ch":{"type":"integer","default":3},"upsampler":{"enum":["pixelshuffle","pixelshuffledirect"],"default":"pixelshuffle"},"dim":{"type":"integer","default":60},"n_blocks":{"type":"integer","default":6},"n_layers":{"type":"integer","default":6},"num_heads":{"type":"integer","default":3},"mlp_ratio":{"type":"integer","default":2},"n_GTLs":{"type":"integer","default":2},"window_list":{"type":"array","default":[[32,8],[8,32]]},"shift_list":{"type":"array","default":[[16,4],[4,16]]}},"required":["type"]},"flexnet":{"title":"flexnet","type":"object","properties":{"type":{"enum":["flexnet"]},"inp_channels":{"type":"integer","default":3},"out_channels":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"dim":{"type":"integer","default":64},"num_blocks":{"type":"array","items":{"type":"integer"},"default":[6,6,6,6,6,6]},"window_size":{"type":"integer","default":8},"hidden_rate":{"type":"integer","default":4},"channel_norm":{"type":"boolean","default":false},"attn_drop":{"type":"number","default":0.0},"proj_drop":{"type":"number","default":0.0},"pipeline_type":{"enum":["linear","meta"],"default":"linear"},"upsampler":{"enum":["dysample","nearest+conv","pixelshuffle"],"default":"pixelshuffle"}},"required":["type"]},"metaflexnet":{"title":"metaflexnet","type":"object","properties":{"type":{"enum":["metaflexnet"]}},"required":["type"]},"hit_sir":{"title":"hit_sir","type":"object","properties":{"type":{"enum":["hit_sir"]}},"required":["type"]},"hit_sng":{"title":"hit_sng","type":"object","properties":{"type":{"enum":["hit_sng"]}},"required":["type"]},"hit_srf":{"title":"hit_srf","type":"object","properties":{"type":{"enum":["hit_srf"]}},"required":["type"]},"lmlt":{"title":"lmlt","type":"object","properties":{"type":{"enum":["lmlt"]},"dim":{"type":"integer"},"n_blocks":{"type":"integer","default":8},"ffn_scale":{"type":"number","default":2.0},"scale":{"type":"integer","default":4},"drop_rate
":{"type":"number","default":0.0},"attn_drop_rate":{"type":"number","default":0.0},"drop_path_rate":{"type":"number","default":0.0}},"required":["type","dim"]},"lmlt_base":{"title":"lmlt_base","type":"object","properties":{"type":{"enum":["lmlt_base"]}},"required":["type"]},"lmlt_large":{"title":"lmlt_large","type":"object","properties":{"type":{"enum":["lmlt_large"]}},"required":["type"]},"lmlt_tiny":{"title":"lmlt_tiny","type":"object","properties":{"type":{"enum":["lmlt_tiny"]}},"required":["type"]},"man":{"title":"man","type":"object","properties":{"type":{"enum":["man"]},"n_resblocks":{"type":"integer","default":36},"n_resgroups":{"type":"integer","default":1},"n_colors":{"type":"integer","default":3},"n_feats":{"type":"integer","default":180},"scale":{"type":"integer","default":2}},"required":["type"]},"man_tiny":{"title":"man_tiny","type":"object","properties":{"type":{"enum":["man_tiny"]}},"required":["type"]},"man_light":{"title":"man_light","type":"object","properties":{"type":{"enum":["man_light"]}},"required":["type"]},"metagan2":{"title":"metagan2","type":"object","properties":{"type":{"enum":["metagan2"]},"in_ch":{"type":"integer","default":3},"n_class":{"type":"integer","default":1},"dims":{"type":"array","items":{"type":"integer"},"default":[48,96,192,288]},"blocks":{"type":"array","items":{"type":"integer"},"default":[3,3,9,3]},"downs":{"type":"array","items":{"type":"integer"},"default":[4,4,2,2]},"drop_path":{"type":"number","default":0.2},"end_drop":{"type":"number","default":0.4}},"required":["type"]},"moesr2":{"title":"moesr2","type":"object","properties":{"type":{"enum":["moesr2"]},"in_ch":{"type":"integer","default":3},"out_ch":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"dim":{"type":"integer","default":64},"n_blocks":{"type":"integer","default":9},"n_block":{"type":"integer","default":4},"expansion_factor":{"type":"integer","default":2.6666666666666665},"expansion_msg":{"type":"integer","default":1.5},"upsampler":{
"enum":["conv","dysample","nearest+conv","pixelshuffle","pixelshuffledirect"],"default":"pixelshuffledirect"},"upsample_dim":{"type":"integer","default":64}},"required":["type"]},"mosr":{"title":"mosr","type":"object","properties":{"type":{"enum":["mosr"]}},"required":["type"]},"mosr_t":{"title":"mosr_t","type":"object","properties":{"type":{"enum":["mosr_t"]}},"required":["type"]},"rcan":{"title":"rcan","type":"object","properties":{"type":{"enum":["rcan"]},"scale":{"type":"integer","default":4},"n_resgroups":{"type":"integer","default":10},"n_resblocks":{"type":"integer","default":20},"n_feats":{"type":"integer","default":64},"n_colors":{"type":"integer","default":3},"rgb_range":{"type":"integer","default":255},"norm":{"type":"boolean","default":false},"kernel_size":{"type":"integer","default":3},"reduction":{"type":"integer","default":16},"res_scale":{"type":"number","default":1},"act_mode":{"type":"string","default":"relu"}},"required":["type"]},"realplksr":{"title":"realplksr","type":"object","properties":{"type":{"enum":["realplksr"]}},"required":["type"]},"realplksr_tiny":{"title":"realplksr_tiny","type":"object","properties":{"type":{"enum":["realplksr_tiny"]}},"required":["type"]},"rtmosr":{"title":"rtmosr","type":"object","properties":{"type":{"enum":["rtmosr"]},"scale":{"type":"integer","default":2},"dim":{"type":"integer","default":32},"ffn_expansion":{"type":"number","default":2},"n_blocks":{"type":"integer","default":2},"unshuffle_mod":{"type":"boolean","default":false},"dccm":{"type":"boolean","default":true},"se":{"type":"boolean","default":true}},"required":["type"]},"rtmosr_l":{"title":"rtmosr_l","type":"object","properties":{"type":{"enum":["rtmosr_l"]}},"required":["type"]},"rtmosr_ul":{"title":"rtmosr_ul","type":"object","properties":{"type":{"enum":["rtmosr_ul"]}},"required":["type"]},"scunet_aaf6aa":{"title":"scunet_aaf6aa","type":"object","properties":{"type":{"enum":["scunet_aaf6aa"]}},"required":["type"]},"sebica":{"title":"sebica","type":"
object","properties":{"type":{"enum":["sebica"]},"scale":{"type":"integer","default":4},"N":{"type":"integer","default":16}},"required":["type"]},"sebica_mini":{"title":"sebica_mini","type":"object","properties":{"type":{"enum":["sebica_mini"]}},"required":["type"]},"spanplus":{"title":"spanplus","type":"object","properties":{"type":{"enum":["spanplus"]}},"required":["type"]},"spanplus_sts":{"title":"spanplus_sts","type":"object","properties":{"type":{"enum":["spanplus_sts"]}},"required":["type"]},"spanplus_s":{"title":"spanplus_s","type":"object","properties":{"type":{"enum":["spanplus_s"]}},"required":["type"]},"spanplus_st":{"title":"spanplus_st","type":"object","properties":{"type":{"enum":["spanplus_st"]}},"required":["type"]},"compact":{"title":"compact","type":"object","properties":{"type":{"enum":["compact"]}},"required":["type"]},"ultracompact":{"title":"ultracompact","type":"object","properties":{"type":{"enum":["ultracompact"]}},"required":["type"]},"superultracompact":{"title":"superultracompact","type":"object","properties":{"type":{"enum":["superultracompact"]}},"required":["type"]},"tscunet":{"title":"tscunet","type":"object","properties":{"type":{"enum":["tscunet"]}},"required":["type"]},"atd":{"title":"atd","type":"object","properties":{"type":{"enum":["atd"]}},"required":["type"]},"atd_light":{"title":"atd_light","type":"object","properties":{"type":{"enum":["atd_light"]}},"required":["type"]},"dat":{"title":"dat","type":"object","properties":{"type":{"enum":["dat"]}},"required":["type"]},"dat_s":{"title":"dat_s","type":"object","properties":{"type":{"enum":["dat_s"]}},"required":["type"]},"dat_2":{"title":"dat_2","type":"object","properties":{"type":{"enum":["dat_2"]}},"required":["type"]},"dat_light":{"title":"dat_light","type":"object","properties":{"type":{"enum":["dat_light"]}},"required":["type"]},"dctlsa":{"title":"dctlsa","type":"object","properties":{"type":{"enum":["dctlsa"]}},"required":["type"]},"ditn_real":{"title":"ditn_real","type":"
object","properties":{"type":{"enum":["ditn_real"]}},"required":["type"]},"drct":{"title":"drct","type":"object","properties":{"type":{"enum":["drct"]}},"required":["type"]},"drct_l":{"title":"drct_l","type":"object","properties":{"type":{"enum":["drct_l"]}},"required":["type"]},"drct_xl":{"title":"drct_xl","type":"object","properties":{"type":{"enum":["drct_xl"]}},"required":["type"]},"grl_b":{"title":"grl_b","type":"object","properties":{"type":{"enum":["grl_b"]}},"required":["type"]},"grl_s":{"title":"grl_s","type":"object","properties":{"type":{"enum":["grl_s"]}},"required":["type"]},"grl_t":{"title":"grl_t","type":"object","properties":{"type":{"enum":["grl_t"]}},"required":["type"]},"hat_l":{"title":"hat_l","type":"object","properties":{"type":{"enum":["hat_l"]}},"required":["type"]},"hat_m":{"title":"hat_m","type":"object","properties":{"type":{"enum":["hat_m"]}},"required":["type"]},"hat_s":{"title":"hat_s","type":"object","properties":{"type":{"enum":["hat_s"]}},"required":["type"]},"omnisr":{"title":"omnisr","type":"object","properties":{"type":{"enum":["omnisr"]}},"required":["type"]},"plksr":{"title":"plksr","type":"object","properties":{"type":{"enum":["plksr"]}},"required":["type"]},"plksr_tiny":{"title":"plksr_tiny","type":"object","properties":{"type":{"enum":["plksr_tiny"]}},"required":["type"]},"realcugan":{"title":"realcugan","type":"object","properties":{"type":{"enum":["realcugan"]}},"required":["type"]},"rgt":{"title":"rgt","type":"object","properties":{"type":{"enum":["rgt"]}},"required":["type"]},"rgt_s":{"title":"rgt_s","type":"object","properties":{"type":{"enum":["rgt_s"]}},"required":["type"]},"esrgan":{"title":"esrgan","type":"object","properties":{"type":{"enum":["esrgan"]}},"required":["type"]},"esrgan_lite":{"title":"esrgan_lite","type":"object","properties":{"type":{"enum":["esrgan_lite"]}},"required":["type"]},"safmn":{"title":"safmn","type":"object","properties":{"type":{"enum":["safmn"]}},"required":["type"]},"safmn_l":{"title":"s
afmn_l","type":"object","properties":{"type":{"enum":["safmn_l"]}},"required":["type"]},"seemore_t":{"title":"seemore_t","type":"object","properties":{"type":{"enum":["seemore_t"]}},"required":["type"]},"span":{"title":"span","type":"object","properties":{"type":{"enum":["span"]}},"required":["type"]},"srformer":{"title":"srformer","type":"object","properties":{"type":{"enum":["srformer"]}},"required":["type"]},"srformer_light":{"title":"srformer_light","type":"object","properties":{"type":{"enum":["srformer_light"]}},"required":["type"]},"swin2sr_l":{"title":"swin2sr_l","type":"object","properties":{"type":{"enum":["swin2sr_l"]}},"required":["type"]},"swin2sr_m":{"title":"swin2sr_m","type":"object","properties":{"type":{"enum":["swin2sr_m"]}},"required":["type"]},"swin2sr_s":{"title":"swin2sr_s","type":"object","properties":{"type":{"enum":["swin2sr_s"]}},"required":["type"]},"swinir_l":{"title":"swinir_l","type":"object","properties":{"type":{"enum":["swinir_l"]}},"required":["type"]},"swinir_m":{"title":"swinir_m","type":"object","properties":{"type":{"enum":["swinir_m"]}},"required":["type"]},"swinir_s":{"title":"swinir_s","type":"object","properties":{"type":{"enum":["swinir_s"]}},"required":["type"]},"DatasetOptions":{"title":"DatasetOptions","type":"object","properties":{"name":{"description":"Name of the dataset. 
It should be unique compared to other datasets in this config, but the exact name isn't very important.","type":"string"},"type":{"description":"The type of dataset to use.","enum":["basedataset","oldpairedimagedataset","oldrealesrgandataset_traiNNer","pairedimagedataset","pairedvideodataset","realesrgandataset_traiNNer","realesrganpaireddataset_traiNNer","singleimagedataset"]},"io_backend":{"type":"object"},"num_worker_per_gpu":{"description":"Number of subprocesses to use for data loading with PyTorch dataloader.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"batch_size_per_gpu":{"description":"Increasing stabilizes training but going too high can cause issues. Use multiple of 8 for best performance with AMP. A higher batch size, like 32 or 64 is more important when training from scratch, while smaller batches like 8 can be used when training with a quality pretrain model.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"accum_iter":{"description":"Using values larger than 1 simulates higher batch size by trading performance for reduced VRAM usage. If accum_iter = 4 and batch_size_per_gpu = 6 then effective batch size = 4 * 6 = 24 but performance may be as much as 4 times as slow.","type":"integer","default":1},"use_hflip":{"description":"Randomly flip the images horizontally.","type":"boolean","default":true},"use_rot":{"description":"Randomly rotate the images.","type":"boolean","default":true},"mean":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"std":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"gt_size":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"lq_size":{"description":"During training, a square of this size is cropped from LR images. Larger is usually better but uses more VRAM. Previously gt_size, use lq_size = gt_size / scale to convert. 
Use multiple of 8 for best performance with AMP.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"color":{"anyOf":[{"enum":["y"]},{"type":"null"}],"default":null},"phase":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"scale":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"dataset_enlarge_ratio":{"description":"Increase if the dataset is less than 1000 images to avoid slowdowns. Auto will automatically enlarge small datasets only.","anyOf":[{"enum":["auto"]},{"type":"integer"}],"default":"auto"},"prefetch_mode":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pin_memory":{"type":"boolean","default":true},"persistent_workers":{"type":"boolean","default":true},"num_prefetch_queue":{"type":"integer","default":1},"prefetch_factor":{"type":"integer","default":2},"clip_size":{"description":"Number of frames per clip in `PairedVideoDataset`. Must match the `clip_size` option for video generator networks such as `tscunet`.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"dataroot_gt":{"description":"Path to the HR (high res) images in your training dataset. Specify one or multiple folders, separated by commas.","anyOf":[{"type":"string"},{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null},"dataroot_lq":{"description":"Path to the LR (low res) images in your training dataset. Specify one or multiple folders, separated by commas.","anyOf":[{"type":"string"},{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null},"meta_info":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"filename_tmpl":{"description":"Filename template to use for LR images. Commonly used values might be `{}x2` or `{}x4`, which should be used if the LR dataset filename is in the format filename.png while the LR dataset filename is in the format `filename_x2.png` or `filename_x4.png`. 
This is common on some research datasets such as DIV2K or DF2K.","type":"string","default":"{}"},"blur_kernel_size":{"type":"integer","default":12},"kernel_list":{"type":"array","items":{"type":"string"}},"kernel_prob":{"type":"array","items":{"type":"number"}},"kernel_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]},"sinc_prob":{"type":"number","default":0},"blur_sigma":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.2,2]},"betag_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.5,4]},"betap_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[1,2]},"blur_kernel_size2":{"type":"integer","default":12},"kernel_list2":{"type":"array","items":{"type":"string"}},"kernel_prob2":{"type":"array","items":{"type":"number"}},"kernel_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]},"sinc_prob2":{"type":"number","default":0},"blur_sigma2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.2,1]},"betag_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.5,4]},"betap_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[1,2]},"final_sinc_prob":{"type":"number","default":0},"final_kernel_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]}},"required":["name","type"]},"TrainOptions":{"title":"TrainOptions","type":"object","properties":{"total_iter":{"description":"The total number of iterations 
to train.","type":"integer"},"optim_g":{"description":"The optimizer to use for the generator model.","type":"object"},"ema_decay":{"description":"The decay factor to use for EMA (exponential moving average). Set to 0 to disable EMA.","type":"number","default":0},"grad_clip":{"description":"Whether or not to enable gradient clipping, which can improve stability when using higher learning rates, but can also cause issues in some situations.","type":"boolean","default":false},"warmup_iter":{"description":"Gradually ramp up learning rates until this iteration, to stabilize early training. Use -1 to disable.","type":"integer","default":-1},"scheduler":{"description":"Options for the optimizer scheduler. If there are multiple optimizers, both will use the same scheduler options.","anyOf":[{"type":"null"},{"$ref":"#/$defs/SchedulerOptions"}],"default":null},"optim_d":{"description":"The optimizer to use for the discriminator model.","anyOf":[{"type":"object"},{"type":"null"}],"default":null},"losses":{"description":"The list of loss functions to 
optimize.","type":"array","items":{"anyOf":[{"$ref":"#/$defs/ganloss"},{"$ref":"#/$defs/multiscaleganloss"},{"$ref":"#/$defs/adistsloss"},{"$ref":"#/$defs/l1loss"},{"$ref":"#/$defs/mseloss"},{"$ref":"#/$defs/charbonnierloss"},{"$ref":"#/$defs/colorloss"},{"$ref":"#/$defs/averageloss"},{"$ref":"#/$defs/bicubicloss"},{"$ref":"#/$defs/lumaloss"},{"$ref":"#/$defs/hsluvloss"},{"$ref":"#/$defs/contextualloss"},{"$ref":"#/$defs/distsloss"},{"$ref":"#/$defs/ffloss"},{"$ref":"#/$defs/ldlloss"},{"$ref":"#/$defs/mssimloss"},{"$ref":"#/$defs/msssiml1loss"},{"$ref":"#/$defs/nccloss"},{"$ref":"#/$defs/perceptualfp16loss"},{"$ref":"#/$defs/perceptualloss"}],"discriminator":{"propertyName":"type","mapping":{"ganloss":"#/$defs/ganloss","multiscaleganloss":"#/$defs/multiscaleganloss","adistsloss":"#/$defs/adistsloss","l1loss":"#/$defs/l1loss","mseloss":"#/$defs/mseloss","charbonnierloss":"#/$defs/charbonnierloss","colorloss":"#/$defs/colorloss","averageloss":"#/$defs/averageloss","bicubicloss":"#/$defs/bicubicloss","lumaloss":"#/$defs/lumaloss","hsluvloss":"#/$defs/hsluvloss","contextualloss":"#/$defs/contextualloss","distsloss":"#/$defs/distsloss","ffloss":"#/$defs/ffloss","ldlloss":"#/$defs/ldlloss","mssimloss":"#/$defs/mssimloss","msssiml1loss":"#/$defs/msssiml1loss","nccloss":"#/$defs/nccloss","perceptualfp16loss":"#/$defs/perceptualfp16loss","perceptualloss":"#/$defs/perceptualloss"}}},"default":null},"pixel_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"mssim_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"ms_ssim_l1_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"perceptual_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"contextual_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"dists_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"hr_inversion_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"dinov2_opt":{"anyOf":[{"type":"object"},{"type":"null"
}],"default":null},"topiq_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"pd_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"fd_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"ldl_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"hsluv_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"gan_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"color_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"luma_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"avg_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"bicubic_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"use_moa":{"description":"Whether to enable mixture of augmentations, which augments the dataset on the fly to create more variety and help the model generalize.","type":"boolean","default":false},"moa_augs":{"description":"The list of augmentations to choose from, only one is selected per iteration.","type":"array","items":{"type":"string"}},"moa_probs":{"description":"The probability each augmentation in moa_augs will be applied. Total should add up to 1.","type":"array","items":{"type":"number"}},"moa_debug":{"description":"Save images before and after augment to debug/moa folder inside of the root training directory.","type":"boolean","default":false},"moa_debug_limit":{"description":"The max number of iterations to save augmentation images for.","type":"integer","default":100}},"required":["total_iter","optim_g"]},"SchedulerOptions":{"title":"SchedulerOptions","type":"object","properties":{"type":{"description":"Name of the optimizer scheduler to use for all optimizers. 
For a list of scheduler names, see the [PyTorch documentation](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate).","type":"string"},"milestones":{"description":"List of milestones, iterations where the learning rate is reduced.","type":"array","items":{"type":"integer"}},"gamma":{"description":"At each milestone, the learning rate is multiplied by this number, so a gamma of 0.5 cuts the learning rate in half at each milestone.","type":"number"}},"required":["type","milestones","gamma"],"additionalProperties":false},"ganloss":{"title":"ganloss","type":"object","properties":{"type":{"enum":["ganloss"]},"loss_weight":{"type":"number"},"gan_type":{"type":"string","default":"vanilla"},"real_label_val":{"type":"number","default":1.0},"fake_label_val":{"type":"number","default":0.0}},"required":["type","loss_weight"]},"multiscaleganloss":{"title":"multiscaleganloss","type":"object","properties":{"type":{"enum":["multiscaleganloss"]},"loss_weight":{"type":"number"},"gan_type":{"type":"string"},"real_label_val":{"type":"number","default":1.0},"fake_label_val":{"type":"number","default":0.0}},"required":["type","loss_weight","gan_type"]},"adistsloss":{"title":"adistsloss","type":"object","properties":{"type":{"enum":["adistsloss"]},"loss_weight":{"type":"number"},"window_size":{"type":"integer","default":21},"resize_input":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"l1loss":{"title":"l1loss","type":"object","properties":{"type":{"enum":["l1loss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string","default":"mean"}},"required":["type","loss_weight"]},"mseloss":{"title":"mseloss","type":"object","properties":{"type":{"enum":["mseloss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string","default":"mean"}},"required":["type","loss_weight"]},"charbonnierloss":{"title":"charbonnierloss","type":"object","properties":{"type":{"enum":["charbonnierloss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string"
,"default":"mean"},"eps":{"type":"number","default":1e-12}},"required":["type","loss_weight"]},"colorloss":{"title":"colorloss","type":"object","properties":{"type":{"enum":["colorloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"averageloss":{"title":"averageloss","type":"object","properties":{"type":{"enum":["averageloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"bicubicloss":{"title":"bicubicloss","type":"object","properties":{"type":{"enum":["bicubicloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"lumaloss":{"title":"lumaloss","type":"object","properties":{"type":{"enum":["lumaloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"hsluvloss":{"title":"hsluvloss","type":"object","properties":{"type":{"enum":["hsluvloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"contextualloss":{"title":"contextualloss","type":"object","properties":{"type":{"enum":["contextualloss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"crop_quarter":{"type":"boolean","default":false},"max_1d_size":{"type":"integer","default":100},"distance_type":{"type":"string","default":"cosine"},"b":{"type":"number","default":1.0},"band_width":{"type":"number","default":0.5},"use_vgg":{"type":"boolean","default":true},"net":{"type":"string","default":"vgg19"},"calc_type":{"type":"string","default":"regular"},"z_norm":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"distsloss":{"title":"distsloss","type":"obj
ect","properties":{"type":{"enum":["distsloss"]},"loss_weight":{"type":"number"},"as_loss":{"type":"boolean","default":true},"load_weights":{"type":"boolean","default":true},"use_input_norm":{"type":"boolean","default":true},"clip_min":{"type":"integer","default":0}},"required":["type","loss_weight"]},"ffloss":{"title":"ffloss","type":"object","properties":{"type":{"enum":["ffloss"]},"loss_weight":{"type":"number"},"alpha":{"type":"number","default":1.0},"patch_factor":{"type":"integer","default":1},"ave_spectrum":{"type":"boolean","default":true},"log_matrix":{"type":"boolean","default":false},"batch_matrix":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"ldlloss":{"title":"ldlloss","type":"object","properties":{"type":{"enum":["ldlloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"mssimloss":{"title":"mssimloss","type":"object","properties":{"type":{"enum":["mssimloss"]},"loss_weight":{"type":"number"},"window_size":{"type":"integer","default":11},"in_channels":{"type":"integer","default":3},"sigma":{"type":"number","default":1.5},"k1":{"type":"number","default":0.01},"k2":{"type":"number","default":0.03},"l":{"type":"integer","default":1},"padding":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"cosim":{"type":"boolean","default":true},"cosim_lambda":{"type":"integer","default":5}},"required":["type","loss_weight"]},"msssiml1loss":{"title":"msssiml1loss","type":"object","properties":{"type":{"enum":["msssiml1loss"]},"loss_weight":{"type":"number"},"gaussian_sigmas":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"data_range":{"type":"number","default":1.0},"k":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.01,0.03]},"alpha":{"type":"number","default":0.1},"cuda_dev":{"type":"integer","default":0}},"required":["type","loss_weight"]},"nccloss":{"t
itle":"nccloss","type":"object","properties":{"type":{"enum":["nccloss"]},"loss_weight":{"type":"number"}},"required":["type","loss_weight"]},"perceptualfp16loss":{"title":"perceptualfp16loss","type":"object","properties":{"type":{"enum":["perceptualfp16loss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"w_lambda":{"type":"number","default":0.01},"alpha":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"criterion":{"enum":["charbonnier","fd","fd+l1pd","l1","pd+l1"],"default":"pd+l1"},"num_proj_fd":{"type":"integer","default":256},"phase_weight_fd":{"type":"number","default":1.0},"stride_fd":{"type":"integer","default":1}},"required":["type","loss_weight"]},"perceptualloss":{"title":"perceptualloss","type":"object","properties":{"type":{"enum":["perceptualloss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"w_lambda":{"type":"number","default":0.01},"alpha":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"criterion":{"enum":["charbonnier","fd","fd+l1","l1","pd","pd+l1"],"default":"pd+l1"},"num_proj_fd":{"type":"integer","default":256},"phase_weight_fd":{"type":"number","default":1.0},"stride_fd":{"type":"integer","default":1}},"required":["type","loss_weight"]},"ValOptions":{"title":"ValOptions","type":"object","properties":{"val_enabled":{"description":"Whether to enable validations. 
If disabled, all validation settings below are ignored.","type":"boolean"},"save_img":{"description":"Whether to save the validation images during validation, in the experiments//visualization folder.","type":"boolean"},"val_freq":{"description":"How often to run validations, in iterations.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"suffix":{"description":"Optional suffix to append to saved filenames.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"metrics_enabled":{"description":"Whether to run metrics calculations during validation.","type":"boolean","default":false},"metrics":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"pbar":{"type":"boolean","default":true}},"required":["val_enabled","save_img"],"additionalProperties":false},"LogOptions":{"title":"LogOptions","type":"object","properties":{"print_freq":{"description":"How often to print logs to the console, in iterations.","type":"integer"},"save_checkpoint_freq":{"description":"How often to save model checkpoints and training states, in iterations.","type":"integer"},"use_tb_logger":{"description":"Whether or not to enable TensorBoard logging.","type":"boolean"},"save_checkpoint_format":{"description":"Format to save model 
checkpoints.","enum":["pth","safetensors"],"default":"safetensors"},"wandb":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/WandbOptions"}],"default":null}},"required":["print_freq","save_checkpoint_freq","use_tb_logger"],"additionalProperties":false},"WandbOptions":{"title":"WandbOptions","type":"object","properties":{"resume_id":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"project":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null}},"required":[],"additionalProperties":false},"OnnxOptions":{"title":"OnnxOptions","type":"object","properties":{"dynamo":{"type":"boolean","default":false},"opset":{"type":"integer","default":20},"use_static_shapes":{"type":"boolean","default":false},"shape":{"type":"string","default":"3x256x256"},"verify":{"type":"boolean","default":true},"fp16":{"type":"boolean","default":false},"optimize":{"type":"boolean","default":true}},"required":[],"additionalProperties":false}}} \ No newline at end of file +{"$ref":"#/$defs/ReduxOptions","$defs":{"ReduxOptions":{"title":"ReduxOptions","type":"object","properties":{"name":{"description":"Name of the experiment. It should be a unique name if you want to run a new experiment. If you enable auto resume, the experiment with this name will be resumed instead of starting a new training run.","type":"string"},"scale":{"description":"Scale of the model. Most architectures support a scale of 1, 2, 3, 4, or 8. A scale of 1 can be used for restoration models that don't change the resolution of the input image. 
A scale of 2 means the width and height of the input image are doubled, so a 640x480 input will be upscaled to 1280x960.","type":"integer"},"num_gpu":{"description":"The number of GPUs to use for training, if using multiple GPUs.","anyOf":[{"enum":["auto"]},{"type":"integer"}]},"path":{"$ref":"#/$defs/PathOptions"},"network_g":{"anyOf":[{"$ref":"#/$defs/artcnn"},{"$ref":"#/$defs/artcnn_r16f96"},{"$ref":"#/$defs/artcnn_r8f64"},{"$ref":"#/$defs/artcnn_r8f48"},{"$ref":"#/$defs/vggstylediscriminator"},{"$ref":"#/$defs/unetdiscriminatorsn"},{"$ref":"#/$defs/dunet"},{"$ref":"#/$defs/dwt"},{"$ref":"#/$defs/dwt_s"},{"$ref":"#/$defs/eimn"},{"$ref":"#/$defs/eimn_l"},{"$ref":"#/$defs/eimn_a"},{"$ref":"#/$defs/elan"},{"$ref":"#/$defs/elan_light"},{"$ref":"#/$defs/emt"},{"$ref":"#/$defs/flexnet"},{"$ref":"#/$defs/metaflexnet"},{"$ref":"#/$defs/hit_sir"},{"$ref":"#/$defs/hit_sng"},{"$ref":"#/$defs/hit_srf"},{"$ref":"#/$defs/lmlt"},{"$ref":"#/$defs/lmlt_base"},{"$ref":"#/$defs/lmlt_large"},{"$ref":"#/$defs/lmlt_tiny"},{"$ref":"#/$defs/man"},{"$ref":"#/$defs/man_tiny"},{"$ref":"#/$defs/man_light"},{"$ref":"#/$defs/metagan2"},{"$ref":"#/$defs/moesr2"},{"$ref":"#/$defs/mosr"},{"$ref":"#/$defs/mosr_t"},{"$ref":"#/$defs/rcan"},{"$ref":"#/$defs/realplksr"},{"$ref":"#/$defs/realplksr_tiny"},{"$ref":"#/$defs/rtmosr"},{"$ref":"#/$defs/rtmosr_l"},{"$ref":"#/$defs/rtmosr_ul"},{"$ref":"#/$defs/scunet_aaf6aa"},{"$ref":"#/$defs/sebica"},{"$ref":"#/$defs/sebica_mini"},{"$ref":"#/$defs/spanplus"},{"$ref":"#/$defs/spanplus_sts"},{"$ref":"#/$defs/spanplus_s"},{"$ref":"#/$defs/spanplus_st"},{"$ref":"#/$defs/compact"},{"$ref":"#/$defs/ultracompact"},{"$ref":"#/$defs/superultracompact"},{"$ref":"#/$defs/tscunet"},{"$ref":"#/$defs/atd"},{"$ref":"#/$defs/atd_light"},{"$ref":"#/$defs/dat"},{"$ref":"#/$defs/dat_s"},{"$ref":"#/$defs/dat_2"},{"$ref":"#/$defs/dat_light"},{"$ref":"#/$defs/dctlsa"},{"$ref":"#/$defs/ditn_real"},{"$ref":"#/$defs/drct"},{"$ref":"#/$defs/drct_l"},{"$ref":"#/$defs/drct_xl"},{"$ref"
:"#/$defs/grl_b"},{"$ref":"#/$defs/grl_s"},{"$ref":"#/$defs/grl_t"},{"$ref":"#/$defs/hat_l"},{"$ref":"#/$defs/hat_m"},{"$ref":"#/$defs/hat_s"},{"$ref":"#/$defs/omnisr"},{"$ref":"#/$defs/plksr"},{"$ref":"#/$defs/plksr_tiny"},{"$ref":"#/$defs/realcugan"},{"$ref":"#/$defs/rgt"},{"$ref":"#/$defs/rgt_s"},{"$ref":"#/$defs/esrgan"},{"$ref":"#/$defs/esrgan_lite"},{"$ref":"#/$defs/safmn"},{"$ref":"#/$defs/safmn_l"},{"$ref":"#/$defs/seemore_t"},{"$ref":"#/$defs/span"},{"$ref":"#/$defs/srformer"},{"$ref":"#/$defs/srformer_light"},{"$ref":"#/$defs/swin2sr_l"},{"$ref":"#/$defs/swin2sr_m"},{"$ref":"#/$defs/swin2sr_s"},{"$ref":"#/$defs/swinir_l"},{"$ref":"#/$defs/swinir_m"},{"$ref":"#/$defs/swinir_s"}],"discriminator":{"propertyName":"type","mapping":{"artcnn":"#/$defs/artcnn","artcnn_r16f96":"#/$defs/artcnn_r16f96","artcnn_r8f64":"#/$defs/artcnn_r8f64","artcnn_r8f48":"#/$defs/artcnn_r8f48","vggstylediscriminator":"#/$defs/vggstylediscriminator","unetdiscriminatorsn":"#/$defs/unetdiscriminatorsn","dunet":"#/$defs/dunet","dwt":"#/$defs/dwt","dwt_s":"#/$defs/dwt_s","eimn":"#/$defs/eimn","eimn_l":"#/$defs/eimn_l","eimn_a":"#/$defs/eimn_a","elan":"#/$defs/elan","elan_light":"#/$defs/elan_light","emt":"#/$defs/emt","flexnet":"#/$defs/flexnet","metaflexnet":"#/$defs/metaflexnet","hit_sir":"#/$defs/hit_sir","hit_sng":"#/$defs/hit_sng","hit_srf":"#/$defs/hit_srf","lmlt":"#/$defs/lmlt","lmlt_base":"#/$defs/lmlt_base","lmlt_large":"#/$defs/lmlt_large","lmlt_tiny":"#/$defs/lmlt_tiny","man":"#/$defs/man","man_tiny":"#/$defs/man_tiny","man_light":"#/$defs/man_light","metagan2":"#/$defs/metagan2","moesr2":"#/$defs/moesr2","mosr":"#/$defs/mosr","mosr_t":"#/$defs/mosr_t","rcan":"#/$defs/rcan","realplksr":"#/$defs/realplksr","realplksr_tiny":"#/$defs/realplksr_tiny","rtmosr":"#/$defs/rtmosr","rtmosr_l":"#/$defs/rtmosr_l","rtmosr_ul":"#/$defs/rtmosr_ul","scunet_aaf6aa":"#/$defs/scunet_aaf6aa","sebica":"#/$defs/sebica","sebica_mini":"#/$defs/sebica_mini","spanplus":"#/$defs/spanplus","spanplus_sts":
"#/$defs/spanplus_sts","spanplus_s":"#/$defs/spanplus_s","spanplus_st":"#/$defs/spanplus_st","compact":"#/$defs/compact","ultracompact":"#/$defs/ultracompact","superultracompact":"#/$defs/superultracompact","tscunet":"#/$defs/tscunet","atd":"#/$defs/atd","atd_light":"#/$defs/atd_light","dat":"#/$defs/dat","dat_s":"#/$defs/dat_s","dat_2":"#/$defs/dat_2","dat_light":"#/$defs/dat_light","dctlsa":"#/$defs/dctlsa","ditn_real":"#/$defs/ditn_real","drct":"#/$defs/drct","drct_l":"#/$defs/drct_l","drct_xl":"#/$defs/drct_xl","grl_b":"#/$defs/grl_b","grl_s":"#/$defs/grl_s","grl_t":"#/$defs/grl_t","hat_l":"#/$defs/hat_l","hat_m":"#/$defs/hat_m","hat_s":"#/$defs/hat_s","omnisr":"#/$defs/omnisr","plksr":"#/$defs/plksr","plksr_tiny":"#/$defs/plksr_tiny","realcugan":"#/$defs/realcugan","rgt":"#/$defs/rgt","rgt_s":"#/$defs/rgt_s","esrgan":"#/$defs/esrgan","esrgan_lite":"#/$defs/esrgan_lite","safmn":"#/$defs/safmn","safmn_l":"#/$defs/safmn_l","seemore_t":"#/$defs/seemore_t","span":"#/$defs/span","srformer":"#/$defs/srformer","srformer_light":"#/$defs/srformer_light","swin2sr_l":"#/$defs/swin2sr_l","swin2sr_m":"#/$defs/swin2sr_m","swin2sr_s":"#/$defs/swin2sr_s","swinir_l":"#/$defs/swinir_l","swinir_m":"#/$defs/swinir_m","swinir_s":"#/$defs/swinir_s"}}},"network_d":{"description":"The options for the discriminator model.","anyOf":[{"type":"object"},{"type":"null"}],"default":null},"manual_seed":{"description":"Deterministic mode, slows down training. 
Only use for reproducible experiments.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"deterministic":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"dist":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"launcher":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"rank":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"world_size":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"auto_resume":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"resume":{"type":"integer","default":0},"is_train":{"anyOf":[{"type":"boolean"},{"type":"null"}],"default":null},"root_path":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"use_amp":{"description":"Speed up training and reduce VRAM usage. NVIDIA only.","type":"boolean","default":false},"amp_bf16":{"description":"Use bf16 instead of fp16 for AMP, RTX 3000 series or newer only. Only recommended if fp16 doesn't work.","type":"boolean","default":false},"use_channels_last":{"description":"Enable channels last memory format while using AMP. Reduces VRAM and speeds up training for most architectures, but some architectures are slower with channels last.","type":"boolean","default":true},"fast_matmul":{"description":"Trade precision for performance.","type":"boolean","default":false},"use_compile":{"description":"Enable torch.compile for the generator model, which takes time on startup to compile the model, but can speed up training after the model is compiled. However, compilation must be redone when starting training each time, as the compiled model is not saved, so for models that take too long to compile it may not be worth it.","type":"boolean","default":false},"detect_anomaly":{"description":"Whether or not to enable anomaly detection, which can be useful for debugging NaNs that occur during training. 
Has a significant performance hit and should be disabled when not debugging.","type":"boolean","default":false},"high_order_degradation":{"description":"Whether or not to enable OTF (on the fly) degradations, which generates LRs on the fly.","type":"boolean","default":false},"high_order_degradations_debug":{"description":"Whether or not to enable debugging for OTF, which saves the OTF generated LR images so they can be inspected to view the effect of different OTF settings.","type":"boolean","default":false},"high_order_degradations_debug_limit":{"description":"The maximum number of OTF images to save when debugging is enabled.","type":"integer","default":100},"dataroot_lq_prob":{"description":"Probability of using paired LR data instead of OTF LR data.","type":"number","default":0},"lq_usm":{"description":"Whether to enable unsharp mask on the LQ image.","type":"boolean","default":false},"lq_usm_radius_range":{"description":"For the unsharp mask of the LQ image, use a radius randomly selected from this range.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[1,25]},"blur_prob":{"description":"Probability of applying the first blur to the LQ, between 0 and 1.","type":"number","default":0},"resize_prob":{"description":"List of 3 probabilities for the first resize which should add up to 1: the probability of upscaling, the probability of downscaling, and the probability of no resize.","type":"array","items":{"type":"number"}},"resize_mode_list":{"description":"List of possible resize modes to use for the first resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob":{"description":"List of probabilities for the first resize of selecting the corresponding resize mode in `resize_mode_list`.","type":"array","items":{"type":"number"}},"resize_range":{"description":"The resize range for the first resize, in the format `[min_resize, 
max_resize]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.4,1.5]},"gaussian_noise_prob":{"description":"The probability of applying the first gaussian noise to the LQ, between 0 and 1.","type":"number","default":0},"noise_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"poisson_scale_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"gray_noise_prob":{"type":"number","default":0},"jpeg_prob":{"description":"The probability of applying the first JPEG degradation to the LQ, between 0 and 1.","type":"number","default":1},"jpeg_range":{"description":"The range of JPEG quality to apply for the first JPEG degradation, in the format `[min_quality, max_quality]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[75,95]},"blur_prob2":{"description":"Probability of applying the second blur to the LQ, between 0 and 1.","type":"number","default":0},"resize_prob2":{"description":"List of 3 probabilities for the second resize which should add up to 1: the probability of upscaling, the probability of downscaling, and the probability of no resize.","type":"array","items":{"type":"number"}},"resize_mode_list2":{"description":"List of possible resize modes to use for the second resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob2":{"description":"List of probabilities for the second resize of selecting the corresponding resize mode in `resize_mode_list2`.","type":"array","items":{"type":"number"}},"resize_range2":{"description":"The resize range for the second resize, in the format `[min_resize, 
max_resize]`.","type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.6,1.2]},"gaussian_noise_prob2":{"description":"The probability of applying the second gaussian noise to the LQ, between 0 and 1.","type":"number","default":0},"noise_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"poisson_scale_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0,0]},"gray_noise_prob2":{"type":"number","default":0},"jpeg_prob2":{"description":"The probability of applying the second JPEG degradation to the LQ, between 0 and 1.","type":"number","default":1},"jpeg_range2":{"description":"The range of JPEG quality to apply for the second JPEG degradation, in the format `[min_quality, max_quality]`.","type":"array","items":{"type":"number"}},"resize_mode_list3":{"description":"List of possible resize modes to use for the final resize.","type":"array","items":{"enum":["bicubic","bilinear","lanczos","nearest-exact"]}},"resize_mode_prob3":{"description":"List of probabilities for the final resize of selecting the corresponding resize mode in `resize_mode_list3`.","type":"array","items":{"type":"number"}},"queue_size":{"description":"Queue size for OTF processing, must be a multiple of 
`batch_size_per_gpu`.","type":"integer","default":120},"datasets":{"type":"object","additionalProperties":{"$ref":"#/$defs/DatasetOptions"},"default":{}},"train":{"$ref":"#/$defs/TrainOptions","default":null},"val":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/ValOptions"}],"default":null},"logger":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/LogOptions"}],"default":null},"dist_params":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"onnx":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/OnnxOptions"}],"default":null},"find_unused_parameters":{"type":"boolean","default":false}},"required":["name","scale","num_gpu","path","network_g"]},"PathOptions":{"title":"PathOptions","type":"object","properties":{"experiments_root":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"models":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"resume_models":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"training_states":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"log":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"visualization":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"results_root":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g":{"description":"Path to the pretrain model for the generator. `pth` and `safetensors` formats are supported.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g_path":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"param_key_g":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"strict_load_g":{"description":"Whether to load the pretrain model for the generator in strict mode. 
It should be enabled in most cases, unless you want to partially load a pretrain of a different scale or with slightly different hyperparameters.","type":"boolean","default":true},"resume_state":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_g_ema":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pretrain_network_d":{"description":"Path to the pretrain model for the discriminator. `pth` and `safetensors` formats are supported.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"param_key_d":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"strict_load_d":{"description":"Whether to load the pretrain model for the discriminator in strict mode. It should be enabled in most cases.","type":"boolean","default":true},"ignore_resume_networks":{"anyOf":[{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null}},"required":[],"additionalProperties":false},"artcnn":{"title":"artcnn","type":"object","properties":{"type":{"enum":["artcnn"]},"in_ch":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"filters":{"type":"integer","default":96},"n_block":{"type":"integer","default":16},"kernel_size":{"type":"integer","default":3}},"required":["type"]},"artcnn_r16f96":{"title":"artcnn_r16f96","type":"object","properties":{"type":{"enum":["artcnn_r16f96"]}},"required":["type"]},"artcnn_r8f64":{"title":"artcnn_r8f64","type":"object","properties":{"type":{"enum":["artcnn_r8f64"]}},"required":["type"]},"artcnn_r8f48":{"title":"artcnn_r8f48","type":"object","properties":{"type":{"enum":["artcnn_r8f48"]}},"required":["type"]},"vggstylediscriminator":{"title":"vggstylediscriminator","type":"object","properties":{"type":{"enum":["vggstylediscriminator"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64},"input_size":{"type":"integer","default":128}},"required":["type"]},"unetdiscriminatorsn":{"title":"unetdiscriminatorsn","type":"object","properties":{"ty
pe":{"enum":["unetdiscriminatorsn"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64},"skip_connection":{"type":"boolean","default":true}},"required":["type"]},"dunet":{"title":"dunet","type":"object","properties":{"type":{"enum":["dunet"]},"num_in_ch":{"type":"integer","default":3},"num_feat":{"type":"integer","default":64}},"required":["type"]},"dwt":{"title":"dwt","type":"object","properties":{"type":{"enum":["dwt"]}},"required":["type"]},"dwt_s":{"title":"dwt_s","type":"object","properties":{"type":{"enum":["dwt_s"]}},"required":["type"]},"eimn":{"title":"eimn","type":"object","properties":{"type":{"enum":["eimn"]},"embed_dims":{"type":"integer","default":64},"scale":{"type":"integer","default":2},"depths":{"type":"integer","default":1},"mlp_ratios":{"type":"number","default":2.66},"drop_rate":{"type":"number","default":0.0},"drop_path_rate":{"type":"number","default":0.0},"num_stages":{"type":"integer","default":16},"freeze_param":{"type":"boolean","default":false}},"required":["type"]},"eimn_l":{"title":"eimn_l","type":"object","properties":{"type":{"enum":["eimn_l"]}},"required":["type"]},"eimn_a":{"title":"eimn_a","type":"object","properties":{"type":{"enum":["eimn_a"]}},"required":["type"]},"elan":{"title":"elan","type":"object","properties":{"type":{"enum":["elan"]},"scale":{"type":"integer","default":4},"colors":{"type":"integer","default":3},"rgb_range":{"type":"integer","default":255},"norm":{"type":"boolean","default":false},"window_sizes":{"anyOf":[{"type":"array","items":{"type":"integer"}},{"type":"null"}],"default":null},"m_elan":{"type":"integer","default":36},"c_elan":{"type":"integer","default":180},"n_share":{"type":"integer","default":0},"r_expand":{"type":"integer","default":2}},"required":["type"]},"elan_light":{"title":"elan_light","type":"object","properties":{"type":{"enum":["elan_light"]}},"required":["type"]},"emt":{"title":"emt","type":"object","properties":{"type":{"enum":["emt"]},"scale":{"type":"
integer","default":4},"num_in_ch":{"type":"integer","default":3},"num_out_ch":{"type":"integer","default":3},"upsampler":{"enum":["pixelshuffle","pixelshuffledirect"],"default":"pixelshuffle"},"dim":{"type":"integer","default":60},"n_blocks":{"type":"integer","default":6},"n_layers":{"type":"integer","default":6},"num_heads":{"type":"integer","default":3},"mlp_ratio":{"type":"integer","default":2},"n_GTLs":{"type":"integer","default":2},"window_list":{"type":"array","default":[[32,8],[8,32]]},"shift_list":{"type":"array","default":[[16,4],[4,16]]}},"required":["type"]},"flexnet":{"title":"flexnet","type":"object","properties":{"type":{"enum":["flexnet"]},"inp_channels":{"type":"integer","default":3},"out_channels":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"dim":{"type":"integer","default":64},"num_blocks":{"type":"array","items":{"type":"integer"},"default":[6,6,6,6,6,6]},"window_size":{"type":"integer","default":8},"hidden_rate":{"type":"integer","default":4},"channel_norm":{"type":"boolean","default":false},"attn_drop":{"type":"number","default":0.0},"proj_drop":{"type":"number","default":0.0},"pipeline_type":{"enum":["linear","meta"],"default":"linear"},"upsampler":{"enum":["dysample","nearest+conv","pixelshuffle"],"default":"pixelshuffle"}},"required":["type"]},"metaflexnet":{"title":"metaflexnet","type":"object","properties":{"type":{"enum":["metaflexnet"]}},"required":["type"]},"hit_sir":{"title":"hit_sir","type":"object","properties":{"type":{"enum":["hit_sir"]}},"required":["type"]},"hit_sng":{"title":"hit_sng","type":"object","properties":{"type":{"enum":["hit_sng"]}},"required":["type"]},"hit_srf":{"title":"hit_srf","type":"object","properties":{"type":{"enum":["hit_srf"]}},"required":["type"]},"lmlt":{"title":"lmlt","type":"object","properties":{"type":{"enum":["lmlt"]},"dim":{"type":"integer"},"n_blocks":{"type":"integer","default":8},"ffn_scale":{"type":"number","default":2.0},"scale":{"type":"integer","default":4},"drop_rate
":{"type":"number","default":0.0},"attn_drop_rate":{"type":"number","default":0.0},"drop_path_rate":{"type":"number","default":0.0}},"required":["type","dim"]},"lmlt_base":{"title":"lmlt_base","type":"object","properties":{"type":{"enum":["lmlt_base"]}},"required":["type"]},"lmlt_large":{"title":"lmlt_large","type":"object","properties":{"type":{"enum":["lmlt_large"]}},"required":["type"]},"lmlt_tiny":{"title":"lmlt_tiny","type":"object","properties":{"type":{"enum":["lmlt_tiny"]}},"required":["type"]},"man":{"title":"man","type":"object","properties":{"type":{"enum":["man"]},"n_resblocks":{"type":"integer","default":36},"n_resgroups":{"type":"integer","default":1},"n_colors":{"type":"integer","default":3},"n_feats":{"type":"integer","default":180},"scale":{"type":"integer","default":2}},"required":["type"]},"man_tiny":{"title":"man_tiny","type":"object","properties":{"type":{"enum":["man_tiny"]}},"required":["type"]},"man_light":{"title":"man_light","type":"object","properties":{"type":{"enum":["man_light"]}},"required":["type"]},"metagan2":{"title":"metagan2","type":"object","properties":{"type":{"enum":["metagan2"]},"in_ch":{"type":"integer","default":3},"n_class":{"type":"integer","default":1},"dims":{"type":"array","items":{"type":"integer"},"default":[48,96,192,288]},"blocks":{"type":"array","items":{"type":"integer"},"default":[3,3,9,3]},"downs":{"type":"array","items":{"type":"integer"},"default":[4,4,2,2]},"drop_path":{"type":"number","default":0.2},"end_drop":{"type":"number","default":0.4}},"required":["type"]},"moesr2":{"title":"moesr2","type":"object","properties":{"type":{"enum":["moesr2"]},"in_ch":{"type":"integer","default":3},"out_ch":{"type":"integer","default":3},"scale":{"type":"integer","default":4},"dim":{"type":"integer","default":64},"n_blocks":{"type":"integer","default":9},"n_block":{"type":"integer","default":4},"expansion_factor":{"type":"integer","default":2.6666666666666665},"expansion_msg":{"type":"integer","default":1.5},"upsampler":{
"enum":["conv","dysample","nearest+conv","pixelshuffle","pixelshuffledirect"],"default":"pixelshuffledirect"},"upsample_dim":{"type":"integer","default":64}},"required":["type"]},"mosr":{"title":"mosr","type":"object","properties":{"type":{"enum":["mosr"]}},"required":["type"]},"mosr_t":{"title":"mosr_t","type":"object","properties":{"type":{"enum":["mosr_t"]}},"required":["type"]},"rcan":{"title":"rcan","type":"object","properties":{"type":{"enum":["rcan"]},"scale":{"type":"integer","default":4},"n_resgroups":{"type":"integer","default":10},"n_resblocks":{"type":"integer","default":20},"n_feats":{"type":"integer","default":64},"n_colors":{"type":"integer","default":3},"rgb_range":{"type":"integer","default":255},"norm":{"type":"boolean","default":false},"kernel_size":{"type":"integer","default":3},"reduction":{"type":"integer","default":16},"res_scale":{"type":"number","default":1},"act_mode":{"type":"string","default":"relu"},"unshuffle_mod":{"type":"boolean","default":false}},"required":["type"]},"realplksr":{"title":"realplksr","type":"object","properties":{"type":{"enum":["realplksr"]}},"required":["type"]},"realplksr_tiny":{"title":"realplksr_tiny","type":"object","properties":{"type":{"enum":["realplksr_tiny"]}},"required":["type"]},"rtmosr":{"title":"rtmosr","type":"object","properties":{"type":{"enum":["rtmosr"]},"scale":{"type":"integer","default":2},"dim":{"type":"integer","default":32},"ffn_expansion":{"type":"number","default":2},"n_blocks":{"type":"integer","default":2},"unshuffle_mod":{"type":"boolean","default":false},"dccm":{"type":"boolean","default":true},"se":{"type":"boolean","default":true}},"required":["type"]},"rtmosr_l":{"title":"rtmosr_l","type":"object","properties":{"type":{"enum":["rtmosr_l"]}},"required":["type"]},"rtmosr_ul":{"title":"rtmosr_ul","type":"object","properties":{"type":{"enum":["rtmosr_ul"]}},"required":["type"]},"scunet_aaf6aa":{"title":"scunet_aaf6aa","type":"object","properties":{"type":{"enum":["scunet_aaf6aa"]}},"requ
ired":["type"]},"sebica":{"title":"sebica","type":"object","properties":{"type":{"enum":["sebica"]},"scale":{"type":"integer","default":4},"N":{"type":"integer","default":16}},"required":["type"]},"sebica_mini":{"title":"sebica_mini","type":"object","properties":{"type":{"enum":["sebica_mini"]}},"required":["type"]},"spanplus":{"title":"spanplus","type":"object","properties":{"type":{"enum":["spanplus"]}},"required":["type"]},"spanplus_sts":{"title":"spanplus_sts","type":"object","properties":{"type":{"enum":["spanplus_sts"]}},"required":["type"]},"spanplus_s":{"title":"spanplus_s","type":"object","properties":{"type":{"enum":["spanplus_s"]}},"required":["type"]},"spanplus_st":{"title":"spanplus_st","type":"object","properties":{"type":{"enum":["spanplus_st"]}},"required":["type"]},"compact":{"title":"compact","type":"object","properties":{"type":{"enum":["compact"]}},"required":["type"]},"ultracompact":{"title":"ultracompact","type":"object","properties":{"type":{"enum":["ultracompact"]}},"required":["type"]},"superultracompact":{"title":"superultracompact","type":"object","properties":{"type":{"enum":["superultracompact"]}},"required":["type"]},"tscunet":{"title":"tscunet","type":"object","properties":{"type":{"enum":["tscunet"]}},"required":["type"]},"atd":{"title":"atd","type":"object","properties":{"type":{"enum":["atd"]}},"required":["type"]},"atd_light":{"title":"atd_light","type":"object","properties":{"type":{"enum":["atd_light"]}},"required":["type"]},"dat":{"title":"dat","type":"object","properties":{"type":{"enum":["dat"]}},"required":["type"]},"dat_s":{"title":"dat_s","type":"object","properties":{"type":{"enum":["dat_s"]}},"required":["type"]},"dat_2":{"title":"dat_2","type":"object","properties":{"type":{"enum":["dat_2"]}},"required":["type"]},"dat_light":{"title":"dat_light","type":"object","properties":{"type":{"enum":["dat_light"]}},"required":["type"]},"dctlsa":{"title":"dctlsa","type":"object","properties":{"type":{"enum":["dctlsa"]}},"required":
["type"]},"ditn_real":{"title":"ditn_real","type":"object","properties":{"type":{"enum":["ditn_real"]}},"required":["type"]},"drct":{"title":"drct","type":"object","properties":{"type":{"enum":["drct"]}},"required":["type"]},"drct_l":{"title":"drct_l","type":"object","properties":{"type":{"enum":["drct_l"]}},"required":["type"]},"drct_xl":{"title":"drct_xl","type":"object","properties":{"type":{"enum":["drct_xl"]}},"required":["type"]},"grl_b":{"title":"grl_b","type":"object","properties":{"type":{"enum":["grl_b"]}},"required":["type"]},"grl_s":{"title":"grl_s","type":"object","properties":{"type":{"enum":["grl_s"]}},"required":["type"]},"grl_t":{"title":"grl_t","type":"object","properties":{"type":{"enum":["grl_t"]}},"required":["type"]},"hat_l":{"title":"hat_l","type":"object","properties":{"type":{"enum":["hat_l"]}},"required":["type"]},"hat_m":{"title":"hat_m","type":"object","properties":{"type":{"enum":["hat_m"]}},"required":["type"]},"hat_s":{"title":"hat_s","type":"object","properties":{"type":{"enum":["hat_s"]}},"required":["type"]},"omnisr":{"title":"omnisr","type":"object","properties":{"type":{"enum":["omnisr"]}},"required":["type"]},"plksr":{"title":"plksr","type":"object","properties":{"type":{"enum":["plksr"]}},"required":["type"]},"plksr_tiny":{"title":"plksr_tiny","type":"object","properties":{"type":{"enum":["plksr_tiny"]}},"required":["type"]},"realcugan":{"title":"realcugan","type":"object","properties":{"type":{"enum":["realcugan"]}},"required":["type"]},"rgt":{"title":"rgt","type":"object","properties":{"type":{"enum":["rgt"]}},"required":["type"]},"rgt_s":{"title":"rgt_s","type":"object","properties":{"type":{"enum":["rgt_s"]}},"required":["type"]},"esrgan":{"title":"esrgan","type":"object","properties":{"type":{"enum":["esrgan"]}},"required":["type"]},"esrgan_lite":{"title":"esrgan_lite","type":"object","properties":{"type":{"enum":["esrgan_lite"]}},"required":["type"]},"safmn":{"title":"safmn","type":"object","properties":{"type":{"enum":["s
afmn"]}},"required":["type"]},"safmn_l":{"title":"safmn_l","type":"object","properties":{"type":{"enum":["safmn_l"]}},"required":["type"]},"seemore_t":{"title":"seemore_t","type":"object","properties":{"type":{"enum":["seemore_t"]}},"required":["type"]},"span":{"title":"span","type":"object","properties":{"type":{"enum":["span"]}},"required":["type"]},"srformer":{"title":"srformer","type":"object","properties":{"type":{"enum":["srformer"]}},"required":["type"]},"srformer_light":{"title":"srformer_light","type":"object","properties":{"type":{"enum":["srformer_light"]}},"required":["type"]},"swin2sr_l":{"title":"swin2sr_l","type":"object","properties":{"type":{"enum":["swin2sr_l"]}},"required":["type"]},"swin2sr_m":{"title":"swin2sr_m","type":"object","properties":{"type":{"enum":["swin2sr_m"]}},"required":["type"]},"swin2sr_s":{"title":"swin2sr_s","type":"object","properties":{"type":{"enum":["swin2sr_s"]}},"required":["type"]},"swinir_l":{"title":"swinir_l","type":"object","properties":{"type":{"enum":["swinir_l"]}},"required":["type"]},"swinir_m":{"title":"swinir_m","type":"object","properties":{"type":{"enum":["swinir_m"]}},"required":["type"]},"swinir_s":{"title":"swinir_s","type":"object","properties":{"type":{"enum":["swinir_s"]}},"required":["type"]},"DatasetOptions":{"title":"DatasetOptions","type":"object","properties":{"name":{"description":"Name of the dataset. 
It should be unique compared to other datasets in this config, but the exact name isn't very important.","type":"string"},"type":{"description":"The type of dataset to use.","enum":["basedataset","oldpairedimagedataset","oldrealesrgandataset_traiNNer","pairedimagedataset","pairedvideodataset","realesrgandataset_traiNNer","realesrganpaireddataset_traiNNer","singleimagedataset"]},"io_backend":{"type":"object"},"num_worker_per_gpu":{"description":"Number of subprocesses to use for data loading with PyTorch dataloader.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"batch_size_per_gpu":{"description":"Increasing stabilizes training but going too high can cause issues. Use multiple of 8 for best performance with AMP. A higher batch size, like 32 or 64 is more important when training from scratch, while smaller batches like 8 can be used when training with a quality pretrain model.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"accum_iter":{"description":"Using values larger than 1 simulates higher batch size by trading performance for reduced VRAM usage. If accum_iter = 4 and batch_size_per_gpu = 6 then effective batch size = 4 * 6 = 24 but performance may be as much as 4 times as slow.","type":"integer","default":1},"use_hflip":{"description":"Randomly flip the images horizontally.","type":"boolean","default":true},"use_rot":{"description":"Randomly rotate the images.","type":"boolean","default":true},"mean":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"std":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"gt_size":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"lq_size":{"description":"During training, a square of this size is cropped from LR images. Larger is usually better but uses more VRAM. Previously gt_size, use lq_size = gt_size / scale to convert. 
Use multiple of 8 for best performance with AMP.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"color":{"anyOf":[{"enum":["y"]},{"type":"null"}],"default":null},"phase":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"scale":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"dataset_enlarge_ratio":{"description":"Increase if the dataset is less than 1000 images to avoid slowdowns. Auto will automatically enlarge small datasets only.","anyOf":[{"enum":["auto"]},{"type":"integer"}],"default":"auto"},"prefetch_mode":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"pin_memory":{"type":"boolean","default":true},"persistent_workers":{"type":"boolean","default":true},"num_prefetch_queue":{"type":"integer","default":1},"prefetch_factor":{"type":"integer","default":2},"clip_size":{"description":"Number of frames per clip in `PairedVideoDataset`. Must match the `clip_size` option for video generator networks such as `tscunet`.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"dataroot_gt":{"description":"Path to the HR (high res) images in your training dataset. Specify one or multiple folders, separated by commas.","anyOf":[{"type":"string"},{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null},"dataroot_lq":{"description":"Path to the LR (low res) images in your training dataset. Specify one or multiple folders, separated by commas.","anyOf":[{"type":"string"},{"type":"array","items":{"type":"string"}},{"type":"null"}],"default":null},"meta_info":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"filename_tmpl":{"description":"Filename template to use for LR images. Commonly used values might be `{}x2` or `{}x4`, which should be used if the HR dataset filename is in the format filename.png while the LR dataset filename is in the format `filename_x2.png` or `filename_x4.png`. 
This is common on some research datasets such as DIV2K or DF2K.","type":"string","default":"{}"},"blur_kernel_size":{"type":"integer","default":12},"kernel_list":{"type":"array","items":{"type":"string"}},"kernel_prob":{"type":"array","items":{"type":"number"}},"kernel_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]},"sinc_prob":{"type":"number","default":0},"blur_sigma":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.2,2]},"betag_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.5,4]},"betap_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[1,2]},"blur_kernel_size2":{"type":"integer","default":12},"kernel_list2":{"type":"array","items":{"type":"string"}},"kernel_prob2":{"type":"array","items":{"type":"number"}},"kernel_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]},"sinc_prob2":{"type":"number","default":0},"blur_sigma2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.2,1]},"betag_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.5,4]},"betap_range2":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[1,2]},"final_sinc_prob":{"type":"number","default":0},"final_kernel_range":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"integer"},{"type":"integer"}],"items":false,"default":[5,17]}},"required":["name","type"]},"TrainOptions":{"title":"TrainOptions","type":"object","properties":{"total_iter":{"description":"The total number of iterations 
to train.","type":"integer"},"optim_g":{"description":"The optimizer to use for the generator model.","type":"object"},"ema_decay":{"description":"The decay factor to use for EMA (exponential moving average). Set to 0 to disable EMA.","type":"number","default":0},"grad_clip":{"description":"Whether or not to enable gradient clipping, which can improve stability when using higher learning rates, but can also cause issues in some situations.","type":"boolean","default":false},"warmup_iter":{"description":"Gradually ramp up learning rates until this iteration, to stabilize early training. Use -1 to disable.","type":"integer","default":-1},"scheduler":{"description":"Options for the optimizer scheduler. If there are multiple optimizers, both will use the same scheduler options.","anyOf":[{"type":"null"},{"$ref":"#/$defs/SchedulerOptions"}],"default":null},"optim_d":{"description":"The optimizer to use for the discriminator model.","anyOf":[{"type":"object"},{"type":"null"}],"default":null},"losses":{"description":"The list of loss functions to 
optimize.","type":"array","items":{"anyOf":[{"$ref":"#/$defs/ganloss"},{"$ref":"#/$defs/multiscaleganloss"},{"$ref":"#/$defs/adistsloss"},{"$ref":"#/$defs/l1loss"},{"$ref":"#/$defs/mseloss"},{"$ref":"#/$defs/charbonnierloss"},{"$ref":"#/$defs/colorloss"},{"$ref":"#/$defs/averageloss"},{"$ref":"#/$defs/bicubicloss"},{"$ref":"#/$defs/lumaloss"},{"$ref":"#/$defs/hsluvloss"},{"$ref":"#/$defs/contextualloss"},{"$ref":"#/$defs/distsloss"},{"$ref":"#/$defs/ffloss"},{"$ref":"#/$defs/ldlloss"},{"$ref":"#/$defs/mssimloss"},{"$ref":"#/$defs/msssiml1loss"},{"$ref":"#/$defs/nccloss"},{"$ref":"#/$defs/perceptualfp16loss"},{"$ref":"#/$defs/perceptualloss"}],"discriminator":{"propertyName":"type","mapping":{"ganloss":"#/$defs/ganloss","multiscaleganloss":"#/$defs/multiscaleganloss","adistsloss":"#/$defs/adistsloss","l1loss":"#/$defs/l1loss","mseloss":"#/$defs/mseloss","charbonnierloss":"#/$defs/charbonnierloss","colorloss":"#/$defs/colorloss","averageloss":"#/$defs/averageloss","bicubicloss":"#/$defs/bicubicloss","lumaloss":"#/$defs/lumaloss","hsluvloss":"#/$defs/hsluvloss","contextualloss":"#/$defs/contextualloss","distsloss":"#/$defs/distsloss","ffloss":"#/$defs/ffloss","ldlloss":"#/$defs/ldlloss","mssimloss":"#/$defs/mssimloss","msssiml1loss":"#/$defs/msssiml1loss","nccloss":"#/$defs/nccloss","perceptualfp16loss":"#/$defs/perceptualfp16loss","perceptualloss":"#/$defs/perceptualloss"}}},"default":null},"pixel_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"mssim_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"ms_ssim_l1_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"perceptual_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"contextual_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"dists_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"hr_inversion_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"dinov2_opt":{"anyOf":[{"type":"object"},{"type":"null"
}],"default":null},"topiq_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"pd_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"fd_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"ldl_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"hsluv_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"gan_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"color_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"luma_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"avg_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"bicubic_opt":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"use_moa":{"description":"Whether to enable mixture of augmentations, which augments the dataset on the fly to create more variety and help the model generalize.","type":"boolean","default":false},"moa_augs":{"description":"The list of augmentations to choose from, only one is selected per iteration.","type":"array","items":{"type":"string"}},"moa_probs":{"description":"The probability each augmentation in moa_augs will be applied. Total should add up to 1.","type":"array","items":{"type":"number"}},"moa_debug":{"description":"Save images before and after augment to debug/moa folder inside of the root training directory.","type":"boolean","default":false},"moa_debug_limit":{"description":"The max number of iterations to save augmentation images for.","type":"integer","default":100}},"required":["total_iter","optim_g"]},"SchedulerOptions":{"title":"SchedulerOptions","type":"object","properties":{"type":{"description":"Name of the optimizer scheduler to use for all optimizers. 
For a list of scheduler names, see the [PyTorch documentation](https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate).","type":"string"},"milestones":{"description":"List of milestones, iterations where the learning rate is reduced.","type":"array","items":{"type":"integer"}},"gamma":{"description":"At each milestone, the learning rate is multiplied by this number, so a gamma of 0.5 cuts the learning rate in half at each milestone.","type":"number"}},"required":["type","milestones","gamma"],"additionalProperties":false},"ganloss":{"title":"ganloss","type":"object","properties":{"type":{"enum":["ganloss"]},"loss_weight":{"type":"number"},"gan_type":{"type":"string","default":"vanilla"},"real_label_val":{"type":"number","default":1.0},"fake_label_val":{"type":"number","default":0.0}},"required":["type","loss_weight"]},"multiscaleganloss":{"title":"multiscaleganloss","type":"object","properties":{"type":{"enum":["multiscaleganloss"]},"loss_weight":{"type":"number"},"gan_type":{"type":"string"},"real_label_val":{"type":"number","default":1.0},"fake_label_val":{"type":"number","default":0.0}},"required":["type","loss_weight","gan_type"]},"adistsloss":{"title":"adistsloss","type":"object","properties":{"type":{"enum":["adistsloss"]},"loss_weight":{"type":"number"},"window_size":{"type":"integer","default":21},"resize_input":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"l1loss":{"title":"l1loss","type":"object","properties":{"type":{"enum":["l1loss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string","default":"mean"}},"required":["type","loss_weight"]},"mseloss":{"title":"mseloss","type":"object","properties":{"type":{"enum":["mseloss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string","default":"mean"}},"required":["type","loss_weight"]},"charbonnierloss":{"title":"charbonnierloss","type":"object","properties":{"type":{"enum":["charbonnierloss"]},"loss_weight":{"type":"number"},"reduction":{"type":"string"
,"default":"mean"},"eps":{"type":"number","default":1e-12}},"required":["type","loss_weight"]},"colorloss":{"title":"colorloss","type":"object","properties":{"type":{"enum":["colorloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"averageloss":{"title":"averageloss","type":"object","properties":{"type":{"enum":["averageloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"bicubicloss":{"title":"bicubicloss","type":"object","properties":{"type":{"enum":["bicubicloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"},"scale":{"type":"integer","default":4}},"required":["type","loss_weight"]},"lumaloss":{"title":"lumaloss","type":"object","properties":{"type":{"enum":["lumaloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"hsluvloss":{"title":"hsluvloss","type":"object","properties":{"type":{"enum":["hsluvloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"contextualloss":{"title":"contextualloss","type":"object","properties":{"type":{"enum":["contextualloss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"crop_quarter":{"type":"boolean","default":false},"max_1d_size":{"type":"integer","default":100},"distance_type":{"type":"string","default":"cosine"},"b":{"type":"number","default":1.0},"band_width":{"type":"number","default":0.5},"use_vgg":{"type":"boolean","default":true},"net":{"type":"string","default":"vgg19"},"calc_type":{"type":"string","default":"regular"},"z_norm":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"distsloss":{"title":"distsloss","type":"obj
ect","properties":{"type":{"enum":["distsloss"]},"loss_weight":{"type":"number"},"as_loss":{"type":"boolean","default":true},"load_weights":{"type":"boolean","default":true},"use_input_norm":{"type":"boolean","default":true},"clip_min":{"type":"integer","default":0}},"required":["type","loss_weight"]},"ffloss":{"title":"ffloss","type":"object","properties":{"type":{"enum":["ffloss"]},"loss_weight":{"type":"number"},"alpha":{"type":"number","default":1.0},"patch_factor":{"type":"integer","default":1},"ave_spectrum":{"type":"boolean","default":true},"log_matrix":{"type":"boolean","default":false},"batch_matrix":{"type":"boolean","default":false}},"required":["type","loss_weight"]},"ldlloss":{"title":"ldlloss","type":"object","properties":{"type":{"enum":["ldlloss"]},"loss_weight":{"type":"number"},"criterion":{"type":"string","default":"l1"}},"required":["type","loss_weight"]},"mssimloss":{"title":"mssimloss","type":"object","properties":{"type":{"enum":["mssimloss"]},"loss_weight":{"type":"number"},"window_size":{"type":"integer","default":11},"in_channels":{"type":"integer","default":3},"sigma":{"type":"number","default":1.5},"k1":{"type":"number","default":0.01},"k2":{"type":"number","default":0.03},"l":{"type":"integer","default":1},"padding":{"anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"cosim":{"type":"boolean","default":true},"cosim_lambda":{"type":"integer","default":5}},"required":["type","loss_weight"]},"msssiml1loss":{"title":"msssiml1loss","type":"object","properties":{"type":{"enum":["msssiml1loss"]},"loss_weight":{"type":"number"},"gaussian_sigmas":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"data_range":{"type":"number","default":1.0},"k":{"type":"array","minItems":2,"maxItems":2,"prefixItems":[{"type":"number"},{"type":"number"}],"items":false,"default":[0.01,0.03]},"alpha":{"type":"number","default":0.1},"cuda_dev":{"type":"integer","default":0}},"required":["type","loss_weight"]},"nccloss":{"t
itle":"nccloss","type":"object","properties":{"type":{"enum":["nccloss"]},"loss_weight":{"type":"number"}},"required":["type","loss_weight"]},"perceptualfp16loss":{"title":"perceptualfp16loss","type":"object","properties":{"type":{"enum":["perceptualfp16loss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"w_lambda":{"type":"number","default":0.01},"alpha":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"criterion":{"enum":["charbonnier","fd","fd+l1pd","l1","pd+l1"],"default":"pd+l1"},"num_proj_fd":{"type":"integer","default":256},"phase_weight_fd":{"type":"number","default":1.0},"stride_fd":{"type":"integer","default":1}},"required":["type","loss_weight"]},"perceptualloss":{"title":"perceptualloss","type":"object","properties":{"type":{"enum":["perceptualloss"]},"loss_weight":{"type":"number"},"layer_weights":{"anyOf":[{"type":"object","additionalProperties":{"type":"number"}},{"type":"null"}],"default":null},"w_lambda":{"type":"number","default":0.01},"alpha":{"anyOf":[{"type":"array","items":{"type":"number"}},{"type":"null"}],"default":null},"criterion":{"enum":["charbonnier","fd","fd+l1","l1","pd","pd+l1"],"default":"pd+l1"},"num_proj_fd":{"type":"integer","default":256},"phase_weight_fd":{"type":"number","default":1.0},"stride_fd":{"type":"integer","default":1}},"required":["type","loss_weight"]},"ValOptions":{"title":"ValOptions","type":"object","properties":{"val_enabled":{"description":"Whether to enable validations. 
If disabled, all validation settings below are ignored.","type":"boolean"},"save_img":{"description":"Whether to save the validation images during validation, in the experiments//visualization folder.","type":"boolean"},"val_freq":{"description":"How often to run validations, in iterations.","anyOf":[{"type":"integer"},{"type":"null"}],"default":null},"suffix":{"description":"Optional suffix to append to saved filenames.","anyOf":[{"type":"string"},{"type":"null"}],"default":null},"metrics_enabled":{"description":"Whether to run metrics calculations during validation.","type":"boolean","default":false},"metrics":{"anyOf":[{"type":"object"},{"type":"null"}],"default":null},"pbar":{"type":"boolean","default":true}},"required":["val_enabled","save_img"],"additionalProperties":false},"LogOptions":{"title":"LogOptions","type":"object","properties":{"print_freq":{"description":"How often to print logs to the console, in iterations.","type":"integer"},"save_checkpoint_freq":{"description":"How often to save model checkpoints and training states, in iterations.","type":"integer"},"use_tb_logger":{"description":"Whether or not to enable TensorBoard logging.","type":"boolean"},"save_checkpoint_format":{"description":"Format to save model 
checkpoints.","enum":["pth","safetensors"],"default":"safetensors"},"wandb":{"anyOf":[{"type":"null"},{"$ref":"#/$defs/WandbOptions"}],"default":null}},"required":["print_freq","save_checkpoint_freq","use_tb_logger"],"additionalProperties":false},"WandbOptions":{"title":"WandbOptions","type":"object","properties":{"resume_id":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null},"project":{"anyOf":[{"type":"string"},{"type":"null"}],"default":null}},"required":[],"additionalProperties":false},"OnnxOptions":{"title":"OnnxOptions","type":"object","properties":{"dynamo":{"type":"boolean","default":false},"opset":{"type":"integer","default":20},"use_static_shapes":{"type":"boolean","default":false},"shape":{"type":"string","default":"3x256x256"},"verify":{"type":"boolean","default":true},"fp16":{"type":"boolean","default":false},"optimize":{"type":"boolean","default":true}},"required":[],"additionalProperties":false}}} \ No newline at end of file diff --git a/scripts/options/generate_default_options.py b/scripts/options/generate_default_options.py index ebc4fe27..5073aa32 100644 --- a/scripts/options/generate_default_options.py +++ b/scripts/options/generate_default_options.py @@ -229,7 +229,6 @@ def final_template( "upsampler": "pixelshuffle # pixelshuffle, nearest+conv, dysample (best on even number scales, does not support dynamic ONNX)" }, }, - {"names": ["RCAN"], "scales": ALL_SCALES}, {"names": ["Swin2SR_L", "Swin2SR_M", "Swin2SR_S"], "scales": ALL_SCALES}, { "names": ["MoESR2"], @@ -239,7 +238,13 @@ def final_template( "upsampler": "pixelshuffledirect # conv, pixelshuffledirect, pixelshuffle, nearest+conv, dysample (best on even number scales, does not support dynamic ONNX)", }, }, - {"names": ["RCAN"], "scales": ALL_SCALES}, + { + "names": ["RCAN"], + "scales": ALL_SCALES, + "extras": { + "unshuffle_mod": "true # Has no effect on scales larger than 2. 
For scales 1 and 2, setting to true speeds up the model and reduces VRAM usage significantly, but reduces quality." + }, + }, {"names": ["RTMoSR", "RTMoSR_L", "RTMoSR_UL"], "scales": ALL_SCALES}, { "names": ["GRL_B", "GRL_S", "GRL_T"], diff --git a/traiNNer/archs/rcan_arch.py b/traiNNer/archs/rcan_arch.py index 52e6bcaa..97733d93 100644 --- a/traiNNer/archs/rcan_arch.py +++ b/traiNNer/archs/rcan_arch.py @@ -257,7 +257,7 @@ def __init__( reduction: int = 16, res_scale: float = 1, act_mode: str = "relu", - unshuffle_mod: bool = False, + unshuffle_mod: bool = True, conv: Callable[..., nn.Conv2d] = default_conv, ) -> None: super().__init__() @@ -334,98 +334,4 @@ def forward(self, x: Tensor) -> Tensor: x = self.tail(res) x = self.add_mean(x) - out = (x / self.rgb_range)[:, :, : h * self.scale, : w * self.scale] - return out - - -# @ARCH_REGISTRY.register() -# def rcan_rg20( -# scale: int = 4, -# n_resgroups: int = 20, -# n_resblocks: int = 20, -# n_feats: int = 64, -# n_colors: int = 3, -# rgb_range: int = 255, -# norm: bool = False, -# kernel_size: int = 3, -# reduction: int = 16, -# res_scale: float = 1, -# act_mode: str = "relu", -# conv: Callable[..., nn.Conv2d] = default_conv, -# ) -> RCAN: -# return RCAN( -# scale=scale, -# n_resgroups=n_resgroups, -# n_resblocks=n_resblocks, -# n_feats=n_feats, -# n_colors=n_colors, -# rgb_range=rgb_range, -# norm=norm, -# kernel_size=kernel_size, -# reduction=reduction, -# res_scale=res_scale, -# act_mode=act_mode, -# conv=conv, -# ) - - -# @ARCH_REGISTRY.register() -# def rcan_rb40( -# scale: int = 4, -# n_resgroups: int = 10, -# n_resblocks: int = 40, -# n_feats: int = 64, -# n_colors: int = 3, -# rgb_range: int = 255, -# norm: bool = False, -# kernel_size: int = 3, -# reduction: int = 16, -# res_scale: float = 1, -# act_mode: str = "relu", -# conv: Callable[..., nn.Conv2d] = default_conv, -# ) -> RCAN: -# return RCAN( -# scale=scale, -# n_resgroups=n_resgroups, -# n_resblocks=n_resblocks, -# n_feats=n_feats, -# 
n_colors=n_colors, -# rgb_range=rgb_range, -# norm=norm, -# kernel_size=kernel_size, -# reduction=reduction, -# res_scale=res_scale, -# act_mode=act_mode, -# conv=conv, -# ) - - -# @ARCH_REGISTRY.register() -# def rcan_nf128( -# scale: int = 4, -# n_resgroups: int = 10, -# n_resblocks: int = 20, -# n_feats: int = 128, -# n_colors: int = 3, -# rgb_range: int = 255, -# norm: bool = False, -# kernel_size: int = 3, -# reduction: int = 16, -# res_scale: float = 1, -# act_mode: str = "relu", -# conv: Callable[..., nn.Conv2d] = default_conv, -# ) -> RCAN: -# return RCAN( -# scale=scale, -# n_resgroups=n_resgroups, -# n_resblocks=n_resblocks, -# n_feats=n_feats, -# n_colors=n_colors, -# rgb_range=rgb_range, -# norm=norm, -# kernel_size=kernel_size, -# reduction=reduction, -# res_scale=res_scale, -# act_mode=act_mode, -# conv=conv, -# ) + return (x / self.rgb_range)[:, :, : h * self.scale, : w * self.scale] From bddaa7d7bd642032ff878da5be0ae3ed43cddf82 Mon Sep 17 00:00:00 2001 From: the-database <25811902+the-database@users.noreply.github.com> Date: Wed, 8 Jan 2025 22:13:13 -0500 Subject: [PATCH 4/4] rcan unshuffle fix --- tests/test_archs/test_archs.py | 5 +++++ traiNNer/archs/rcan_arch.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/test_archs/test_archs.py b/tests/test_archs/test_archs.py index e5298ac6..135e4ba6 100644 --- a/tests/test_archs/test_archs.py +++ b/tests/test_archs/test_archs.py @@ -56,6 +56,11 @@ {"upsampler": "geoensemblepixelshuffle"}, ] +EXTRA_ARCH_PARAMS["rcan"] = [ + {"unshuffle_mod": True}, + {"unshuffle_mod": False}, +] + # A list of tuples in the format of (name, arch, scale, extra_params). 
FILTERED_REGISTRIES_SCALES_PARAMS = [ (name, arch, scale, extra_params) diff --git a/traiNNer/archs/rcan_arch.py b/traiNNer/archs/rcan_arch.py index 97733d93..8e8a85cd 100644 --- a/traiNNer/archs/rcan_arch.py +++ b/traiNNer/archs/rcan_arch.py @@ -277,7 +277,7 @@ def __init__( self.add_mean = nn.Identity() # define head module - unshuffle_mod = unshuffle_mod and scale < 4 + unshuffle_mod = unshuffle_mod and scale <= 2 self.downscale_factor = 1 if unshuffle_mod: self.downscale_factor = 4 // scale