# @package _global_
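# Hydra experiment config: training/conf/experiment/conv_transformer_lines.yaml
# Convolutional encoder + transformer decoder for handwritten text recognition
# on IAM line images. The defaults list below swaps in the line-level
# datamodule, network, and model, and nulls the optimizer/lr_scheduler groups,
# presumably so the inline definitions further down take effect instead.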

defaults:
  - override /criterion: cross_entropy
  - override /callbacks: htr
  - override /datamodule: iam_lines
  - override /network: conv_transformer
  - override /model: lit_transformer
  - override /lr_scheduler: null
  - override /optimizer: null

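# Globals reused below through YAML anchors (&name / *name). ignore_index is
# also wired into network.pad_index, so index 3 is presumably the padding
# token in the 57-class vocabulary; `summary` holds the input/target shapes,
# presumably fed to a model-summary printout.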
epochs: &epochs 512
ignore_index: &ignore_index 3
num_classes: &num_classes 57
max_output_len: &max_output_len 89
summary: [[1, 1, 56, 1024], [1, 89]]

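# Cross-entropy with light label smoothing; target positions equal to the
# pad index (3) are excluded from the loss.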
criterion:
  ignore_index: *ignore_index
  label_smoothing: 0.05

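# Stochastic Weight Averaging: from 75% of max_epochs onward, Lightning
# averages the weights while cosine-annealing the learning rate down to 1e-5
# over 10 epochs.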
callbacks:
  stochastic_weight_averaging:
    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
    swa_epoch_start: 0.75
    swa_lrs: 1.0e-5
    annealing_epochs: 10
    annealing_strategy: cos
    device: null

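# RAdam, peaking at the scheduler's max_lr. Note that `parameters: network`
# is not a torch.optim.RAdam argument; it is presumably a repo-level key
# selecting which submodule's parameters the optimizer receives.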
optimizer:
  _target_: torch.optim.RAdam
  lr: 3.0e-4
  betas: [0.9, 0.999]
  weight_decay: 0
  eps: 1.0e-8
  parameters: network

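# Three-phase one-cycle schedule: the LR warms from max_lr/div_factor
# (1.2e-5) up to 3e-4 over the first 30% of steps, anneals back down
# symmetrically, then decays toward initial_lr/final_div_factor (1.2e-9);
# cycle_momentum moves beta1 inversely, between 0.95 and 0.85. For the cycle
# to span all 512 epochs, steps_per_epoch must equal the real number of
# optimizer steps per epoch (1354, presumably the train loader length at
# batch size 8). `interval` and `monitor` are consumed by Lightning's
# scheduler dict, not by OneCycleLR itself.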
lr_scheduler:
  _target_: torch.optim.lr_scheduler.OneCycleLR
  max_lr: 3.0e-4
  total_steps: null
  epochs: *epochs
  steps_per_epoch: 1354
  pct_start: 0.3
  anneal_strategy: cos
  cycle_momentum: true
  base_momentum: 0.85
  max_momentum: 0.95
  div_factor: 25.0
  final_div_factor: 10000.0
  three_phase: true
  last_epoch: -1
  verbose: false
  interval: step
  monitor: val/cer

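# IAM line crops at batch size 8; 95% of the training data is used for
# training, with the remainder presumably held out for validation.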
datamodule:
  batch_size: 8
  train_fraction: 0.95

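# Same NCHW input shape as `summary`: a batch of single-channel 56x1024 line
# images, with 5 encoder and 6 decoder layers. pixel_embedding.shape is
# presumably the (rows, cols) grid of the encoder feature map used for 2-D
# positional embeddings.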
network:
  input_dims: [1, 1, 56, 1024]
  num_classes: *num_classes
  pad_index: *ignore_index
  encoder:
    depth: 5
  decoder:
    depth: 6
  pixel_embedding:
    shape: [3, 64]

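# Autoregressive decoding stops after at most 89 tokens, matching the
# [1, 89] target shape in `summary`.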
model:
  max_output_len: *max_output_len

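# Gradient clipping at 0.5 (Lightning clips by norm unless
# gradient_clip_algorithm says otherwise); accumulate_grad_batches: 1 keeps
# the effective batch size at 8.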
trainer:
  gradient_clip_val: 0.5
  max_epochs: *epochs
  accumulate_grad_batches: 1
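
# A minimal usage sketch, assuming a standard Hydra entry point; the script
# name `run.py` and the `+experiment=` override syntax are assumptions, not
# taken from this file:
#
#   python run.py +experiment=conv_transformer_lines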