# @package _global_

# Hydra experiment config: conv_transformer network + Lightning transformer
# model trained on the IAM extended paragraphs datamodule for handwritten
# text recognition (HTR).
defaults:
  - override /criterion: cross_entropy
  - override /callbacks: htr
  - override /datamodule: iam_extended_paragraphs
  - override /network: conv_transformer
  - override /model: lit_transformer
  - override /lr_schedulers: null
  - override /optimizers: null

# Anchors reused throughout the config.
epochs: &epochs 600
num_classes: &num_classes 58
ignore_index: &ignore_index 3  # padding token index, masked out of the loss
max_output_len: &max_output_len 682
summary: [[1, 1, 576, 640], [1, 682]]  # input shapes for the model summary

criterion:
  ignore_index: *ignore_index
  label_smoothing: 0.05

callbacks:
  stochastic_weight_averaging:
    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
    swa_epoch_start: 0.75  # start averaging at 75% of training (epoch 450 of 600)
    swa_lrs: 1.0e-5
    annealing_epochs: 10
    annealing_strategy: cos
    device: null

optimizers:
  _target_: torch.optim.RAdam
  lr: 3.0e-4
  betas: [0.9, 0.999]
  weight_decay: 0
  eps: 1.0e-8
  parameters: network  # repo-specific key (not a torch.optim argument): selects which submodule's parameters this optimizer updates

lr_schedulers:
  _target_: torch.optim.lr_scheduler.OneCycleLR
  max_lr: 3.0e-4
  total_steps: null  # derived as epochs * steps_per_epoch when null
  epochs: *epochs
  steps_per_epoch: 3201  # must match the number of batches per epoch
  pct_start: 0.3
  anneal_strategy: cos
  cycle_momentum: true
  base_momentum: 0.85
  max_momentum: 0.95
  div_factor: 25.0  # initial_lr = max_lr / div_factor
  final_div_factor: 10000.0  # min_lr = initial_lr / final_div_factor
  three_phase: true
  last_epoch: -1
  verbose: false
  interval: step  # Lightning scheduler config: step once per batch, not per epoch
  monitor: val/cer

datamodule:
  batch_size: 6
  train_fraction: 0.95

network:
  input_dims: [1, 1, 576, 640]
  num_classes: *num_classes
  pad_index: *ignore_index
  encoder:
    depth: 5
  decoder:
    depth: 6
  pixel_embedding:
    shape: [18, 78]

model:
  max_output_len: *max_output_len

trainer:
  gradient_clip_val: 0.5
  max_epochs: *epochs
  accumulate_grad_batches: 1
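
# ---------------------------------------------------------------------------
# Budget sketch (assumptions derived from the values above, not verified
# against the actual IAM extended paragraphs split): with total_steps null,
# OneCycleLR runs for epochs * steps_per_epoch = 600 * 3201 = 1,920,600
# optimizer steps. Because the scheduler is stepped per batch
# (interval: step), steps_per_epoch must equal len(train_dataloader); with
# batch_size 6 and accumulate_grad_batches 1 that implies about
# 3201 * 6 = 19,206 training samples per epoch. Boundary learning rates
# follow from PyTorch's OneCycleLR formulas:
#   initial_lr = max_lr / div_factor        = 3.0e-4 / 25    = 1.2e-5
#   min_lr     = initial_lr / final_div_factor = 1.2e-5 / 1.0e4 = 1.2e-9
# ---------------------------------------------------------------------------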