# @package _global_

defaults:
  - override /mapping: null
  - override /criterion: null
  - override /callbacks: htr
  - override /datamodule: iam_extended_paragraphs
  - override /network: null
  - override /model: null
  - override /lr_schedulers: null
  - override /optimizers: null

# Shared scalars, anchored for reuse below.
epochs: &epochs 720
ignore_index: &ignore_index 3
num_classes: &num_classes 58
max_output_len: &max_output_len 682
summary: [[1, 1, 576, 640], [1, 682]]

# Cross-entropy over characters; the pad/ignore index is masked out of the loss.
criterion:
  _target_: torch.nn.CrossEntropyLoss
  ignore_index: *ignore_index

# EMNIST character mapping with newline added; merged into the datamodule
# and model below via the &mapping anchor.
mapping: &mapping
  mapping:
    _target_: text_recognizer.data.mappings.emnist.EmnistMapping
    extra_symbols: ["\n"]

# Stochastic weight averaging over the final 25% of training.
callbacks:
  stochastic_weight_averaging:
    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
    swa_epoch_start: 0.75
    swa_lrs: 1.0e-5
    annealing_epochs: 10
    annealing_strategy: cos
    device: null

# MADGRAD optimizer applied to the network's parameters.
optimizers:
  madgrad:
    _target_: madgrad.MADGRAD
    lr: 1.0e-4
    momentum: 0.9
    weight_decay: 5.0e-6
    eps: 1.0e-6
    parameters: network

# OneCycleLR, stepped every batch and monitored on validation loss.
lr_schedulers:
  network:
    _target_: torch.optim.lr_scheduler.OneCycleLR
    max_lr: 1.0e-4
    total_steps: null
    epochs: *epochs
    steps_per_epoch: 211
    pct_start: 0.03
    anneal_strategy: cos
    cycle_momentum: true
    base_momentum: 0.85
    max_momentum: 0.95
    div_factor: 25
    final_div_factor: 1.0e2
    three_phase: false
    last_epoch: -1
    verbose: false
    interval: step
    monitor: val/loss

datamodule:
  _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
  batch_size: 6
  num_workers: 12
  train_fraction: 0.8
  pin_memory: true
  <<: *mapping

# ConvTransformer: EfficientNet-B0 encoder feeding a transformer decoder.
network:
  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
  input_dims: [1, 576, 640]
  hidden_dim: &hidden_dim 192
  encoder_dim: 1280
  dropout_rate: 0.1
  num_classes: *num_classes
  pad_index: *ignore_index
  encoder:
    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
    arch: b0
    out_channels: 1280
    stochastic_dropout_rate: 0.2
    bn_momentum: 0.99
    bn_eps: 1.0e-3
  decoder:
    _target_: text_recognizer.networks.transformer.layers.Decoder
    dim: *hidden_dim
    depth: 3
    num_heads: 4
    attn_fn: text_recognizer.networks.transformer.attention.Attention
    attn_kwargs:
      dim_head: 32
      dropout_rate: 0.05
    local_attn_fn: text_recognizer.networks.transformer.local_attention.LocalAttention
    local_attn_kwargs:
      dim_head: 32
      dropout_rate: 0.05
      window_size: 11
      look_back: 2
      depth: 2
    norm_fn: text_recognizer.networks.transformer.norm.ScaleNorm
    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
    ff_kwargs:
      dim_out: null
      expansion_factor: 4
      glu: true
      dropout_rate: 0.05
    cross_attend: true
    pre_norm: true
    rotary_emb:
      _target_: text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
      dim: 32
  # Positional embedding over the encoder's output grid (576/32 x 640/32).
  pixel_pos_embedding:
    _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
    dim: *hidden_dim
    shape: [18, 20]
  token_pos_embedding:
    _target_: text_recognizer.networks.transformer.embeddings.fourier.PositionalEncoding
    hidden_dim: *hidden_dim
    dropout_rate: 0.05
    max_len: *max_output_len

# Lightning module wrapping the network for training and inference.
model:
  _target_: text_recognizer.models.transformer.TransformerLitModel
  <<: *mapping
  max_output_len: *max_output_len
  start_token:
  end_token:
  pad_token:

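# PyTorch Lightning trainer. With batch_size: 6 and accumulate_grad_batches: 16,
# the effective batch size is 96. stochastic_weight_avg: true enables
# Lightning's built-in SWA, which is likely redundant here given the explicit
# StochasticWeightAveraging callback configured above.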
trainer:
  _target_: pytorch_lightning.Trainer
  stochastic_weight_avg: true
  auto_scale_batch_size: binsearch
  auto_lr_find: false
  gradient_clip_val: 0.5
  fast_dev_run: false
  gpus: 1
  precision: 16
  max_epochs: *epochs
  terminate_on_nan: true
  weights_summary: null
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  resume_from_checkpoint: null
  accumulate_grad_batches: 16
  overfit_batches: 0
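# Usage sketch, not confirmed by this file: assuming it lives under a Hydra
# experiment group (e.g. conf/experiment/) and the repo exposes a Hydra
# entrypoint (called main.py here purely for illustration), a run would look
# something like:
#
#   python main.py +experiment=conv_transformer_paragraphs
#
# Hydra applies the `defaults` overrides first, then merges this file's keys
# on top; each `_target_` node is then built with Hydra's instantiate, e.g.:
#
#   from hydra.utils import instantiate
#   network = instantiate(cfg.network)  # -> ConvTransformer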