# @package _global_
# Hydra experiment config: trains a VQ-GAN (VQ-VAE network + VQGAN loss)
# with cosine-annealed learning rates for both generator and discriminator.

defaults:
  - override /network: vqvae
  - override /criterion: vqgan_loss
  - override /model: lit_vqgan
  - override /callbacks: wandb_vae
  # Disable the base lr_schedulers group; schedulers are defined inline below.
  - override /lr_schedulers: null

datamodule:
  batch_size: 8

# Lightning-style scheduler settings, one entry per optimizer.
# T_max matches trainer.max_epochs so the cosine cycle spans the full run.
lr_schedulers:
  - generator:
      T_max: 256
      eta_min: 0.0
      last_epoch: -1
      interval: epoch
      monitor: val/loss
  - discriminator:
      T_max: 256
      eta_min: 0.0
      last_epoch: -1
      interval: epoch
      monitor: val/loss

# NOTE(review): each `optimizer` entry's _target_ is an LR *scheduler*
# (torch.optim.lr_scheduler.CosineAnnealingLR), not an optimizer, and it
# repeats the scheduler keys from `lr_schedulers` above — this looks like a
# copy-paste artifact. Confirm against the lit_vqgan model's
# configure_optimizers before relying on it.
# `parameters` presumably names which module attribute each optimizer
# updates (generator → network, discriminator → loss_fn) — verify in the
# consuming LightningModule.
optimizer:
  - generator:
      _target_: torch.optim.lr_scheduler.CosineAnnealingLR
      T_max: 256
      eta_min: 0.0
      last_epoch: -1
      interval: epoch
      monitor: val/loss
      parameters: network
  - discriminator:
      _target_: torch.optim.lr_scheduler.CosineAnnealingLR
      T_max: 256
      eta_min: 0.0
      last_epoch: -1
      interval: epoch
      monitor: val/loss
      parameters: loss_fn

trainer:
  max_epochs: 256
  # gradient_clip_val: 0.25

# Disable the model summary callback/printout.
summary: null