# @package _global_
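#
# Experiment: ConvTransformer trained on IAM handwriting lines.
# An EfficientNet-B0 encoder extracts features from 56x1024 line images and a
# three-layer transformer decoder emits characters autoregressively. The
# overrides below null out several base config groups (mapping, network,
# model, optimizers, lr_schedulers) so that this file defines them in full.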

defaults:
  - override /mapping: null
  - override /criterion: cross_entropy
  - override /callbacks: htr
  - override /datamodule: iam_lines
  - override /network: null
  - override /model: null
  - override /lr_schedulers: null
  - override /optimizers: null

epochs: &epochs 300
ignore_index: &ignore_index 3
num_classes: &num_classes 57
max_output_len: &max_output_len 89
summary: [[1, 1, 56, 1024], [1, 89]]
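
# Shared anchors: ignore_index (3) doubles as the padding token id, so the
# cross-entropy loss skips padded positions and the network uses the same id
# as pad_index. `summary` holds the (image, target) shapes for a model
# summary pass: one 1x56x1024 grayscale line image and an 89-token target
# sequence (presumably fed to a torchinfo-style summary; the consumer is not
# visible in this file).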

criterion:
  ignore_index: *ignore_index

mapping: &mapping
  mapping:
    _target_: text_recognizer.data.mappings.emnist.EmnistMapping
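# ^ EmnistMapping defines the character vocabulary. The &mapping anchor is
#   merged into both the datamodule and the Lightning model below, so the
#   data pipeline and decoding share one alphabet.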

callbacks:
  stochastic_weight_averaging:
    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
    swa_epoch_start: 0.75
    swa_lrs: 1.0e-4
    annealing_epochs: 10
    annealing_strategy: cos
    device: null
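# Stochastic weight averaging: starts at 75% of training (around epoch 225 of
# 300), anneals the learning rate to 1e-4 over 10 epochs with a cosine
# schedule, and swaps the averaged weights in at the end of fit. The trainer's
# stochastic_weight_avg flag below should then be redundant: in
# pytorch_lightning 1.x it only adds a default SWA callback when none is
# configured (behaviour worth verifying against the installed version).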

optimizers:
  madgrad:
    _target_: madgrad.MADGRAD
    lr: 3.0e-4
    momentum: 0.9
    weight_decay: 0
    eps: 1.0e-6
    parameters: network
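# MADGRAD (Facebook Research's momentumized, adaptive, dual-averaged gradient
# method) as the single optimizer. `parameters: network` presumably tells the
# LightningModule which attribute's parameters to optimize; that key is a
# repo convention, not a MADGRAD argument.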

lr_schedulers:
  network:
    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
    T_max: *epochs
    eta_min: 1.0e-4
    last_epoch: -1
    interval: epoch
    monitor: val/loss
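# Cosine-anneals the learning rate from the optimizer's 3e-4 down to eta_min
# 1e-4 over all 300 epochs, stepped once per epoch. `interval` and `monitor`
# are Lightning scheduler-dict fields (presumably consumed by
# configure_optimizers), not CosineAnnealingLR arguments.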

datamodule:
  batch_size: 16
  num_workers: 12
  train_fraction: 0.9
  pin_memory: true
  <<: *mapping
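# ^ The `<<` merge key splices the anchored block in, i.e. the datamodule
#   effectively receives:
#     mapping:
#       _target_: text_recognizer.data.mappings.emnist.EmnistMapping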

rotary_embedding: &rotary_embedding
  rotary_embedding:
    _target_: text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
    dim: 64
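# ^ Rotary position embedding, applied only to the decoder's causal
#   self-attention below; its dim (64) matches dim_head so each head's
#   query/key vectors are rotated in full.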

attn: &attn
  dim: &hidden_dim 256
  num_heads: 6
  dim_head: 64
  dropout_rate: &dropout_rate 0.5
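# ^ Attention hyperparameters shared by the self- and cross-attention layers.
#   Note num_heads * dim_head = 6 * 64 = 384, not the 256-d model width; the
#   Attention module presumably projects between the two internally (an
#   assumption based on common implementations).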

network:
  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
  input_dims: [1, 56, 1024]
  hidden_dim: *hidden_dim
  num_classes: *num_classes
  pad_index: *ignore_index
  encoder:
    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
    arch: b0
    stochastic_dropout_rate: 0.2
    bn_momentum: 0.99
    bn_eps: 1.0e-3
  decoder:
    depth: 3
    _target_: text_recognizer.networks.transformer.layers.Decoder
    self_attn:
      _target_: text_recognizer.networks.transformer.attention.Attention
      <<: [*attn, *rotary_embedding]
      causal: true
    cross_attn:
      _target_: text_recognizer.networks.transformer.attention.Attention
      <<: *attn
      causal: false
    norm:
      _target_: text_recognizer.networks.transformer.norm.ScaleNorm
      normalized_shape: *hidden_dim
    ff:
      _target_: text_recognizer.networks.transformer.mlp.FeedForward
      dim: *hidden_dim
      dim_out: null
      expansion_factor: 4
      glu: true
      dropout_rate: *dropout_rate
    pre_norm: true
  pixel_pos_embedding:
    _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
    dim: *hidden_dim
    shape: [3, 64]
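# ^ ConvTransformer: EfficientNet-B0 features are projected to the 256-d
#   hidden size and tagged with an axial positional embedding over a 3x64
#   grid (which should correspond to the encoder's output feature map for a
#   56x1024 input); the decoder is pre-norm with ScaleNorm and a GLU
#   feed-forward with 4x expansion.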

model:
  _target_: text_recognizer.models.transformer.TransformerLitModel
  <<: *mapping
  max_output_len: *max_output_len
  start_token: <s>
  end_token: <e>
  pad_token: <p>
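# ^ The LightningModule wraps the network for autoregressive inference:
#   generation runs up to max_output_len (89) tokens, bracketed by the <s>
#   and <e> special tokens and padded with <p>, all resolved through the
#   shared EmnistMapping.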

trainer:
  _target_: pytorch_lightning.Trainer
  stochastic_weight_avg: true
  auto_scale_batch_size: binsearch
  auto_lr_find: false
  gradient_clip_val: 0.5
  fast_dev_run: false
  gpus: 1
  precision: 16
  max_epochs: *epochs
  terminate_on_nan: true
  weights_summary: null
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  resume_from_checkpoint: null
  accumulate_grad_batches: 1
  overfit_batches: 0
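
# These Trainer flags target the pytorch_lightning 1.x API (gpus,
# weights_summary and terminate_on_nan are gone in 2.x, and
# stochastic_weight_avg was dropped even earlier). A typical Hydra launch,
# assuming a main.py entrypoint under training/ (the script name is an
# assumption, not confirmed by this file):
#
#   python main.py experiment=conv_transformer_lines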