# @package _global_

defaults:
  - override /mapping: null
  - override /criterion: null
  - override /datamodule: null
  - override /network: null
  - override /model: null
  - override /lr_schedulers: null
  - override /optimizers: null

criterion:
  _target_: torch.nn.CrossEntropyLoss
  ignore_index: 3 # index of the pad token; padded positions do not contribute to the loss

mapping:
  _target_: text_recognizer.data.emnist_mapping.EmnistMapping
  # extra_symbols: [ "\n" ]

optimizers:
  madgrad:
    _target_: madgrad.MADGRAD
    lr: 1.0e-4
    momentum: 0.9
    weight_decay: 0
    eps: 1.0e-6
    parameters: network # optimize the parameters of the `network` submodule

lr_schedulers:
  network:
    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
    T_max: 1024 # one cosine cycle over the full run (matches trainer.max_epochs)
    eta_min: 4.5e-6
    last_epoch: -1
    interval: epoch
    monitor: val/loss

datamodule:
  _target_: text_recognizer.data.iam_lines.IAMLines
  batch_size: 24
  num_workers: 12
  train_fraction: 0.8
  augment: true
  pin_memory: false

network:
  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
  input_dims: [1, 56, 1024] # [channels, height, width] of a line image
  hidden_dim: 128
  encoder_dim: 1280
  dropout_rate: 0.2
  num_classes: 58
  pad_index: 3
  encoder:
    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
    arch: b0
    out_channels: 1280
    stochastic_dropout_rate: 0.2
    bn_momentum: 0.99
    bn_eps: 1.0e-3
  decoder:
    _target_: text_recognizer.networks.transformer.Decoder
    dim: 128
    depth: 3
    num_heads: 4
    attn_fn: text_recognizer.networks.transformer.attention.Attention
    attn_kwargs:
      dim_head: 32
      dropout_rate: 0.2
    norm_fn: torch.nn.LayerNorm
    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
    ff_kwargs:
      dim_out: null
      expansion_factor: 4
      glu: true
      dropout_rate: 0.2
    cross_attend: true # decoder attends over the encoder output
    pre_norm: true
    rotary_emb: null

model:
  _target_: text_recognizer.models.transformer.TransformerLitModel
  max_output_len: 89
  start_token: <s>
  end_token: <e>
  pad_token: <p> # maps to index 3, matching criterion.ignore_index and network.pad_index
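# A minimal sketch of how the `_target_` nodes above are resolved at runtime.
# `hydra.utils.instantiate` is the standard Hydra API; the `cfg` variable and
# the surrounding training script are assumptions, not part of this config:
#
#   from hydra.utils import instantiate
#   criterion = instantiate(cfg.criterion)  # torch.nn.CrossEntropyLoss(ignore_index=3)
#   network = instantiate(cfg.network)      # ConvTransformer; nested _target_ nodes
#                                           # (encoder/decoder) are built recursively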

trainer:
  _target_: pytorch_lightning.Trainer
  stochastic_weight_avg: false
  auto_scale_batch_size: binsearch
  auto_lr_find: false
  gradient_clip_val: 0
  fast_dev_run: false
  gpus: 1
  precision: 16 # mixed-precision (AMP) training
  max_epochs: 1024
  terminate_on_nan: true
  weights_summary: top
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  resume_from_checkpoint: null
  accumulate_grad_batches: 4 # effective batch size: 24 * 4 = 96
  overfit_batches: 0.0

# summary: [[1, 1, 56, 1024], [1, 89]]
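# Usage sketch, assuming this file sits in a Hydra `experiment` config group;
# the entry-point script and experiment name below are assumptions about the
# surrounding repo layout:
#
#   python main.py +experiment=conv_transformer_lines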