# @package _global_

defaults:
  - override /criterion: cross_entropy
  - override /callbacks: htr
  - override /datamodule: iam_extended_paragraphs
  - override /network: conv_transformer
  - override /model: lit_transformer
  - override /lr_scheduler: null
  - override /optimizer: null

tags: [paragraphs]

epochs: &epochs 600
num_classes: &num_classes 58
ignore_index: &ignore_index 3
max_output_len: &max_output_len 682
summary: [[1, 1, 576, 640], [1, 682]]

logger:
  wandb:
    tags: ${tags}

criterion:
  ignore_index: *ignore_index
  label_smoothing: 0.05

callbacks:
  stochastic_weight_averaging:
    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
    swa_epoch_start: 0.75
    swa_lrs: 1.0e-5
    annealing_epochs: 10
    annealing_strategy: cos
    device: null

optimizer:
  _target_: torch.optim.RAdam
  lr: 3.0e-4
  betas: [0.9, 0.999]
  weight_decay: 0
  eps: 1.0e-8

lr_scheduler:
  _target_: torch.optim.lr_scheduler.ReduceLROnPlateau
  mode: min
  factor: 0.8
  patience: 10
  threshold: 1.0e-4
  threshold_mode: rel
  cooldown: 0
  min_lr: 1.0e-5
  eps: 1.0e-8
  verbose: false
  interval: epoch
  monitor: val/cer

datamodule:
  batch_size: 8
  train_fraction: 0.95

network:
  input_dims: [1, 1, 576, 640]
  num_classes: *num_classes
  pad_index: *ignore_index
  encoder:
    depth: 4
  decoder:
    depth: 6
  pixel_embedding:
    shape: [18, 79]

model:
  max_output_len: *max_output_len

trainer:
  gradient_clip_val: 1.0
  max_epochs: *epochs
  accumulate_grad_batches: 1
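
# Notes (assumptions about the surrounding repo layout, not verified here):
# - This is a Hydra experiment config; it is typically selected from the CLI
#   with an `experiment=<name>` override against the repo's training entry
#   point, with the exact group name depending on how the config tree is laid out.
# - The YAML anchors (&epochs, &num_classes, &ignore_index, &max_output_len)
#   keep the trainer, network, criterion, and model sections in sync, so the
#   scalar values at the top are the single place to change them.
# - `summary` mirrors `network.input_dims` ([1, 1, 576, 640]) and
#   `max_output_len` ([1, 682]); it is presumably consumed by a model-summary
#   utility rather than by the network itself.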