# @package _global_

defaults:
  - override /mapping: null
  - override /criterion: null
  - override /datamodule: null
  - override /network: null
  - override /model: null
  - override /lr_schedulers: null
  - override /optimizers: null
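# The null overrides above clear the base config groups so that this
# experiment file defines criterion, mapping, datamodule, network, model,
# optimizer, and LR scheduler in full below.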


criterion:
  _target_: torch.nn.CrossEntropyLoss
  ignore_index: 3
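# ignore_index 3 should match pad_index and the <p> pad token below, so that
# padded positions in the target sequence do not contribute to the loss.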
    
mapping:
  _target_: text_recognizer.data.emnist_mapping.EmnistMapping
  # extra_symbols: [ "\n" ]
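# EmnistMapping provides the character-to-index vocabulary and special
# tokens; extra_symbols would presumably append "\n" for paragraph-level
# targets, which line-level training does not need.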

optimizers:
  madgrad:
    _target_: madgrad.MADGRAD
    lr: 1.0e-4
    momentum: 0.9
    weight_decay: 0
    eps: 1.0e-6

    parameters: network
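# "parameters: network" is not a MADGRAD argument; it presumably tells the
# training code which submodule's parameters this optimizer should receive.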

lr_schedulers:
  network:
    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
    T_max: 1024
    eta_min: 4.5e-6
    last_epoch: -1
    interval: epoch
    monitor: val/loss
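# Cosine annealing decays the LR from 1.0e-4 towards eta_min over T_max=1024
# epochs, matching max_epochs in the trainer below. "interval" and "monitor"
# appear to be Lightning scheduler-dict keys consumed by the training code
# (monitor is unused by CosineAnnealingLR itself).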

datamodule:
  _target_: text_recognizer.data.iam_lines.IAMLines
  batch_size: 24
  num_workers: 12
  train_fraction: 0.8
  augment: true
  pin_memory: false
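# With accumulate_grad_batches: 4 in the trainer below, the effective batch
# size is 24 * 4 = 96 line images per optimizer step.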

network:
  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
  input_dims: [1, 56, 1024]
  hidden_dim: 128
  encoder_dim: 1280
  dropout_rate: 0.2
  num_classes: 58
  pad_index: 3
  encoder:
    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
    arch: b0
    out_channels: 1280
    stochastic_dropout_rate: 0.2
    bn_momentum: 0.99
    bn_eps: 1.0e-3
  decoder:
    _target_: text_recognizer.networks.transformer.Decoder
    dim: 128
    depth: 3
    num_heads: 4
    attn_fn: text_recognizer.networks.transformer.attention.Attention
    attn_kwargs:
      dim_head: 32
      dropout_rate: 0.2
    norm_fn: torch.nn.LayerNorm
    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
    ff_kwargs:
      dim_out: null
      expansion_factor: 4
      glu: true
      dropout_rate: 0.2
    cross_attend: true
    pre_norm: true
    rotary_emb: null
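# Architecture sketch: an EfficientNet-B0 encoder produces 1280-channel
# feature maps that are projected to the 128-dim decoder space; a 3-layer,
# 4-head transformer decoder with GLU feed-forward, pre-norm, and
# cross-attention over the image features then emits characters.
#
# A minimal sketch of how Hydra would materialize this block (assuming the
# training code uses the standard hydra.utils helpers):
#   from hydra.utils import instantiate
#   network = instantiate(cfg.network)  # recursively builds encoder/decoder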

model:
  _target_: text_recognizer.models.transformer.TransformerLitModel
  max_output_len: 89
  start_token: <s>
  end_token: <e>
  pad_token: <p>
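# At inference the Lightning model decodes autoregressively, starting from
# <s> and stopping at <e> or after max_output_len = 89 tokens; <p> presumably
# maps to index 3, consistent with ignore_index and pad_index above.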

trainer:
  _target_: pytorch_lightning.Trainer
  stochastic_weight_avg: false
  auto_scale_batch_size: binsearch
  auto_lr_find: false
  gradient_clip_val: 0
  fast_dev_run: false
  gpus: 1
  precision: 16
  max_epochs: 1024
  terminate_on_nan: true
  weights_summary: top
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  resume_from_checkpoint: null
  accumulate_grad_batches: 4
  overfit_batches: 0.0
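# Trains with 16-bit mixed precision on a single GPU for up to 1024 epochs.
# Note that auto_scale_batch_size: binsearch only takes effect if the tuner
# is invoked (e.g. via trainer.tune()); otherwise batch_size: 24 above is
# used as-is.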

# summary: [[1, 1, 56, 1024], [1, 89]]