# @package _global_

# Hydra experiment config: VQ-GAN training (VQ-VAE network + VQGAN loss).
# Overrides the base config groups, then sets per-experiment hyperparameters.
defaults:
  - override /network: vqvae
  - override /criterion: vqgan_loss
  - override /model: lit_vqgan
  - override /callbacks: wandb_vae
  # Optimizers/schedulers from the base groups are disabled; the generator
  # and discriminator get their own explicit configs below.
  - override /optimizers: null
  - override /lr_schedulers: null

datamodule:
  batch_size: 8

# One cosine-annealing schedule per optimizer; T_max matches trainer.max_epochs.
lr_schedulers:
  generator:
    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
    T_max: 256
    eta_min: 0.0
    last_epoch: -1
    interval: epoch
    monitor: val/loss
  discriminator:
    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
    T_max: 256
    eta_min: 0.0
    last_epoch: -1
    interval: epoch
    monitor: val/loss

# Separate MADGRAD optimizers for the generator network and the
# discriminator inside the loss function; `parameters` is the attribute
# path (on the Lightning module) whose parameters each optimizer updates.
optimizers:
  generator:
    _target_: madgrad.MADGRAD
    lr: 2.0e-5
    momentum: 0.5
    weight_decay: 0
    eps: 1.0e-6
    parameters: network
  discriminator:
    _target_: madgrad.MADGRAD
    lr: 2.0e-5
    momentum: 0.5
    weight_decay: 0
    eps: 1.0e-6
    parameters: loss_fn.discriminator

trainer:
  max_epochs: 256
  # gradient_clip_val: 0.25

summary: null