Diffstat (limited to 'training/conf')
-rw-r--r--  training/conf/experiment/conformer_lines.yaml              |  43
-rw-r--r--  training/conf/experiment/conv_transformer_lines.yaml       |  16
-rw-r--r--  training/conf/experiment/conv_transformer_paragraphs.yaml  | 103
-rw-r--r--  training/conf/mapping/characters.yaml                      |   2
-rw-r--r--  training/conf/mapping/word_piece.yaml                      |   8
-rw-r--r--  training/conf/model/lit_transformer.yaml                   |   5
-rw-r--r--  training/conf/network/conv_transformer.yaml                |  60
-rw-r--r--  training/conf/trainer/default.yaml                         |  13
8 files changed, 76 insertions, 174 deletions
diff --git a/training/conf/experiment/conformer_lines.yaml b/training/conf/experiment/conformer_lines.yaml
index c3f4ea5..06e761e 100644
--- a/training/conf/experiment/conformer_lines.yaml
+++ b/training/conf/experiment/conformer_lines.yaml
@@ -5,15 +5,15 @@ defaults:
- override /criterion: ctc
- override /callbacks: htr
- override /datamodule: iam_lines
- - override /network: null
+ - override /network: conformer
- override /model: null
- override /lr_schedulers: null
- override /optimizers: null
-epochs: &epochs 200
+epochs: &epochs 999
num_classes: &num_classes 57
-max_output_len: &max_output_len 762
-summary: [[1, 57, 1024]]
+max_output_len: &max_output_len 89
+summary: [[1, 56, 1024]]
mapping: &mapping
mapping:
@@ -59,41 +59,6 @@ datamodule:
pin_memory: true
<< : *mapping
-network:
- _target_: text_recognizer.networks.conformer.Conformer
- depth: 16
- num_classes: *num_classes
- dim: &dim 128
- block:
- _target_: text_recognizer.networks.conformer.ConformerBlock
- dim: *dim
- attn:
- _target_: text_recognizer.networks.conformer.Attention
- dim: *dim
- heads: 8
- dim_head: 64
- mult: 4
- ff:
- _target_: text_recognizer.networks.conformer.Feedforward
- dim: *dim
- expansion_factor: 4
- dropout: 0.1
- conv:
- _target_: text_recognizer.networks.conformer.ConformerConv
- dim: *dim
- expansion_factor: 2
- kernel_size: 31
- dropout: 0.1
- subsampler:
- _target_: text_recognizer.networks.conformer.Subsampler
- pixel_pos_embedding:
- _target_: text_recognizer.networks.transformer.AxialPositionalEmbedding
- dim: *dim
- shape: [6, 127]
- channels: *dim
- depth: 3
- dropout: 0.1
-
model:
_target_: text_recognizer.models.conformer.LitConformer
<<: *mapping
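
The inline network block deleted above moves into its own config group, selected by the new `- override /network: conformer` default. A minimal sketch of the entry point that composes and instantiates such a config (hydra.main and hydra.utils.instantiate are standard Hydra API; the script name and root config name are assumptions, not part of this diff):

    # train.py -- illustrative Hydra entry point, not part of this commit
    import hydra
    from hydra.utils import instantiate
    from omegaconf import DictConfig

    @hydra.main(config_path="training/conf", config_name="config", version_base=None)
    def main(cfg: DictConfig) -> None:
        # Recursively turns every _target_ node into a constructor call,
        # here text_recognizer.networks.conformer.Conformer.
        network = instantiate(cfg.network)
        print(network)

    if __name__ == "__main__":
        main()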
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 259e4ea..55dc896 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -93,12 +93,12 @@ decoder: &decoder
_target_: text_recognizer.networks.transformer.decoder.DecoderBlock
self_attn:
_target_: text_recognizer.networks.transformer.attention.Attention
- << : *attn
+ <<: *attn
causal: true
- << : *rotary_embedding
+ <<: *rotary_embedding
cross_attn:
_target_: text_recognizer.networks.transformer.attention.Attention
- << : *attn
+ <<: *attn
causal: false
norm:
_target_: text_recognizer.networks.transformer.norm.RMSNorm
@@ -124,15 +124,15 @@ network:
num_classes: *num_classes
pad_index: *ignore_index
encoder:
- << : *encoder
+ <<: *encoder
decoder:
- << : *decoder
+ <<: *decoder
pixel_pos_embedding:
- << : *pixel_pos_embedding
+ <<: *pixel_pos_embedding
model:
- _target_: text_recognizer.models.transformer.TransformerLitModel
- << : *mapping
+ _target_: text_recognizer.models.transformer.LitTransformer
+ <<: *mapping
max_output_len: *max_output_len
start_token: <s>
end_token: <e>
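
The whitespace changes here (`<< : *attn` to `<<: *attn`) are purely stylistic: both spellings are the same YAML merge key, which splices the anchored mapping into the current node before local keys override it. The `_target_` change renames the Lightning model class from `TransformerLitModel` to `LitTransformer`. A quick sanity check of the merge-key semantics with PyYAML (illustrative, not project code):

    import yaml

    doc = """
    attn: &attn
      dim: 512
      num_heads: 4
    self_attn:
      <<: *attn      # splice the anchored mapping...
      causal: true   # ...then add or override keys locally
    """
    print(yaml.safe_load(doc)["self_attn"])
    # -> {'dim': 512, 'num_heads': 4, 'causal': True}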
diff --git a/training/conf/experiment/conv_transformer_paragraphs.yaml b/training/conf/experiment/conv_transformer_paragraphs.yaml
index afa1785..cc81645 100644
--- a/training/conf/experiment/conv_transformer_paragraphs.yaml
+++ b/training/conf/experiment/conv_transformer_paragraphs.yaml
@@ -1,31 +1,23 @@
----
# @package _global_
defaults:
- - override /mapping: null
+ - override /mapping: emnist
- override /criterion: cross_entropy
- override /callbacks: htr
- override /datamodule: iam_extended_paragraphs
- - override /network: null
- - override /model: null
+ - override /network: conv_transformer
+ - override /model: lit_transformer
- override /lr_schedulers: null
- override /optimizers: null
epochs: &epochs 200
ignore_index: &ignore_index 3
-num_classes: &num_classes 58
-max_output_len: &max_output_len 682
summary: [[1, 1, 576, 640], [1, 682]]
criterion:
ignore_index: *ignore_index
# label_smoothing: 0.1
-mapping: &mapping
- mapping:
- _target_: text_recognizer.data.mappings.emnist.EmnistMapping
- extra_symbols: ["\n"]
-
callbacks:
stochastic_weight_averaging:
_target_: pytorch_lightning.callbacks.StochasticWeightAveraging
@@ -67,94 +59,5 @@ datamodule:
pin_memory: true
<< : *mapping
-encoder: &encoder
- _target_: text_recognizer.networks.efficientnet.efficientnet.EfficientNet
- arch: b0
- stochastic_dropout_rate: 0.2
- bn_momentum: 0.99
- bn_eps: 1.0e-3
- depth: 7
-
-rotary_embedding: &rotary_embedding
- rotary_embedding:
- _target_: >
- text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
- dim: 64
-
-attn: &attn
- dim: &hidden_dim 512
- num_heads: 4
- dim_head: 64
- dropout_rate: &dropout_rate 0.4
-
-decoder: &decoder
- _target_: text_recognizer.networks.transformer.decoder.Decoder
- depth: 6
- has_pos_emb: true
- block:
- _target_: text_recognizer.networks.transformer.decoder.DecoderBlock
- self_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- << : *attn
- causal: true
- << : *rotary_embedding
- cross_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- << : *attn
- causal: false
- norm:
- _target_: text_recognizer.networks.transformer.norm.RMSNorm
- dim: *hidden_dim
- ff:
- _target_: text_recognizer.networks.transformer.mlp.FeedForward
- dim: *hidden_dim
- dim_out: null
- expansion_factor: 2
- glu: true
- dropout_rate: *dropout_rate
-
-pixel_pos_embedding: &pixel_pos_embedding
- _target_: >
- text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
- dim: *hidden_dim
- shape: &shape [18, 20]
-
-network:
- _target_: text_recognizer.networks.conv_transformer.ConvTransformer
- input_dims: [1, 1, 576, 640]
- hidden_dim: *hidden_dim
- num_classes: *num_classes
- pad_index: *ignore_index
- encoder:
- << : *encoder
- decoder:
- << : *decoder
- pixel_pos_embedding:
- << : *pixel_pos_embedding
-
-model:
- _target_: text_recognizer.models.transformer.TransformerLitModel
- << : *mapping
- max_output_len: *max_output_len
- start_token: <s>
- end_token: <e>
- pad_token: <p>
-
trainer:
- _target_: pytorch_lightning.Trainer
- stochastic_weight_avg: true
- auto_scale_batch_size: binsearch
- auto_lr_find: false
- gradient_clip_val: 0.5
- fast_dev_run: false
- gpus: 1
- precision: 16
max_epochs: *epochs
- terminate_on_nan: true
- weights_summary: null
- limit_train_batches: 1.0
- limit_val_batches: 1.0
- limit_test_batches: 1.0
- resume_from_checkpoint: null
- accumulate_grad_batches: 2
- overfit_batches: 0
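
With the inline network, model, and trainer blocks removed, this experiment only keeps what differs from the shared `network/conv_transformer.yaml`, `model/lit_transformer.yaml`, and `trainer/default.yaml` groups. One way to inspect the composed result without launching training (compose and initialize are standard Hydra API; the root config name "config" and the `+experiment=` override syntax are assumptions about the repo's defaults list):

    from hydra import compose, initialize

    with initialize(config_path="training/conf", version_base=None):
        cfg = compose(
            config_name="config",
            overrides=["+experiment=conv_transformer_paragraphs"],
        )
    print(cfg.network._target_)    # text_recognizer.networks.ConvTransformer
    print(cfg.trainer.max_epochs)  # 200, via the epochs anchor above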
diff --git a/training/conf/mapping/characters.yaml b/training/conf/mapping/characters.yaml
index d91c9e5..8cbd55d 100644
--- a/training/conf/mapping/characters.yaml
+++ b/training/conf/mapping/characters.yaml
@@ -1,2 +1,2 @@
-_target_: text_recognizer.data.mappings.emnist.EmnistMapping
+_target_: text_recognizer.data.mappings.EmnistMapping
extra_symbols: [ "\n" ]
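
The shortened `_target_` implies `EmnistMapping` is now re-exported from the `text_recognizer.data.mappings` package root rather than addressed via its submodule. A sketch of the re-export this config relies on (assumed; the diff only shows the config side):

    # text_recognizer/data/mappings/__init__.py -- assumed re-export
    from text_recognizer.data.mappings.emnist import EmnistMapping

    __all__ = ["EmnistMapping"]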
diff --git a/training/conf/mapping/word_piece.yaml b/training/conf/mapping/word_piece.yaml
deleted file mode 100644
index 6b4dc07..0000000
--- a/training/conf/mapping/word_piece.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-_target_: text_recognizer.data.mappings.word_piece.WordPieceMapping
-num_features: 1000
-tokens: iamdb_1kwp_tokens_1000.txt
-lexicon: iamdb_1kwp_lex_1000.txt
-use_words: false
-prepend_wordsep: false
-special_tokens: [ <s>, <e>, <p> ]
-extra_symbols: [ "\n" ]
diff --git a/training/conf/model/lit_transformer.yaml b/training/conf/model/lit_transformer.yaml
index 5172533..c1491ec 100644
--- a/training/conf/model/lit_transformer.yaml
+++ b/training/conf/model/lit_transformer.yaml
@@ -1,5 +1,8 @@
-_target_: text_recognizer.models.transformer.TransformerLitModel
+_target_: text_recognizer.models.LitTransformer
max_output_len: 682
start_token: <s>
end_token: <e>
pad_token: <p>
+mapping:
+ _target_: text_recognizer.data.mappings.EmnistMapping
+ extra_symbols: ["\n"]
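
Nesting the mapping under the model config means Hydra builds it recursively: each `_target_` node becomes a constructor call, and the resulting `EmnistMapping` is passed to the model as its `mapping` argument. Roughly (instantiate is real Hydra API; running this requires the text_recognizer package on the path):

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("training/conf/model/lit_transformer.yaml")
    # Builds EmnistMapping first, then calls
    # LitTransformer(mapping=..., max_output_len=682, start_token="<s>", ...)
    model = instantiate(cfg)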
diff --git a/training/conf/network/conv_transformer.yaml b/training/conf/network/conv_transformer.yaml
index 1d61129..54eb028 100644
--- a/training/conf/network/conv_transformer.yaml
+++ b/training/conf/network/conv_transformer.yaml
@@ -1,11 +1,49 @@
-defaults:
- - encoder: efficientnet
- - decoder: transformer_decoder
-
-_target_: text_recognizer.networks.conv_transformer.ConvTransformer
-input_dims: [1, 576, 640]
-hidden_dim: 128
-encoder_dim: 1280
-dropout_rate: 0.2
-num_classes: 1006
-pad_index: 1002
+_target_: text_recognizer.networks.ConvTransformer
+input_dims: [1, 1, 576, 640]
+hidden_dim: &hidden_dim 144
+num_classes: 58
+pad_index: 3
+encoder:
+ _target_: text_recognizer.networks.EfficientNet
+ arch: b0
+ stochastic_dropout_rate: 0.2
+ bn_momentum: 0.99
+ bn_eps: 1.0e-3
+ depth: 3
+ out_channels: 128
+decoder:
+ _target_: text_recognizer.networks.transformer.Decoder
+ depth: 6
+ block:
+ _target_: text_recognizer.networks.transformer.DecoderBlock
+ self_attn:
+ _target_: text_recognizer.networks.transformer.Attention
+ dim: *hidden_dim
+ num_heads: 8
+ dim_head: 64
+ dropout_rate: &dropout_rate 0.4
+ causal: true
+ rotary_embedding:
+ _target_: text_recognizer.networks.transformer.RotaryEmbedding
+ dim: 64
+ cross_attn:
+ _target_: text_recognizer.networks.transformer.Attention
+ dim: *hidden_dim
+ num_heads: 8
+ dim_head: 64
+ dropout_rate: *dropout_rate
+ causal: false
+ norm:
+ _target_: text_recognizer.networks.transformer.RMSNorm
+ dim: *hidden_dim
+ ff:
+ _target_: text_recognizer.networks.transformer.FeedForward
+ dim: *hidden_dim
+ dim_out: null
+ expansion_factor: 2
+ glu: true
+ dropout_rate: *dropout_rate
+pixel_embedding:
+ _target_: text_recognizer.networks.transformer.AxialPositionalEmbedding
+ dim: *hidden_dim
+ shape: [72, 80]
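
The new `shape: [72, 80]` is consistent with the 576x640 input and the truncated encoder, assuming the depth-3 EfficientNet-b0 downsamples by a cumulative stride of 8 (stem stride 2 plus two stride-2 stages), while the full depth-7 encoder in the deleted paragraphs config above downsamples by 32 (hence `[18, 20]` there). A quick arithmetic check:

    # Axial positional-embedding shapes, assuming cumulative encoder
    # strides of 8 (depth 3) and 32 (depth 7) for EfficientNet-b0.
    h, w = 576, 640
    assert (h // 8, w // 8) == (72, 80)    # depth 3 -> shape [72, 80]
    assert (h // 32, w // 32) == (18, 20)  # depth 7 -> shape [18, 20]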
diff --git a/training/conf/trainer/default.yaml b/training/conf/trainer/default.yaml
index ef5b018..d4ffcdc 100644
--- a/training/conf/trainer/default.yaml
+++ b/training/conf/trainer/default.yaml
@@ -1,16 +1,17 @@
_target_: pytorch_lightning.Trainer
-stochastic_weight_avg: false
+stochastic_weight_avg: true
auto_scale_batch_size: binsearch
auto_lr_find: false
-gradient_clip_val: 0
+gradient_clip_val: 0.5
fast_dev_run: false
gpus: 1
precision: 16
-max_epochs: 512
+max_epochs: 256
terminate_on_nan: true
-weights_summary: top
-limit_train_batches: 1.0
+weights_summary: null
+limit_train_batches: 1.0
limit_val_batches: 1.0
limit_test_batches: 1.0
resume_from_checkpoint: null
-accumulate_grad_batches: 1
+accumulate_grad_batches: 2
+overfit_batches: 0
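
The trainer defaults now bake in the SWA, gradient-clipping, and gradient-accumulation settings that the paragraphs experiment previously carried inline. Instantiating the Trainer straight from this file looks roughly like the sketch below; note that flags such as gpus, stochastic_weight_avg, terminate_on_nan, and weights_summary only exist on older pytorch_lightning releases, so this assumes the version pinned by the repo:

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("training/conf/trainer/default.yaml")
    trainer = instantiate(cfg)  # -> pytorch_lightning.Trainer(...)
    # trainer.fit(model, datamodule=datamodule)  # usual next step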