author    Gustaf Rydholm <gustaf.rydholm@gmail.com>    2022-06-11 23:10:56 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>    2022-06-11 23:10:56 +0200
commit    8fe4b36bf22281c84c4afee811b3435f3b50686d (patch)
tree      ab378f98c03ee63943a6a026dcbf869d7a39df23
parent    3fa2733b697bfe1c1f8a6031ae7f1f66343d5a5b (diff)
Update configs
-rw-r--r--  training/conf/datamodule/iam_extended_paragraphs.yaml      5
-rw-r--r--  training/conf/datamodule/iam_lines.yaml                    4
-rw-r--r--  training/conf/experiment/conv_transformer_lines.yaml      99
-rw-r--r--  training/conf/experiment/conv_transformer_paragraphs.yaml 10
-rw-r--r--  training/conf/network/decoder/transformer_decoder.yaml    70
5 files changed, 46 insertions, 142 deletions
diff --git a/training/conf/datamodule/iam_extended_paragraphs.yaml b/training/conf/datamodule/iam_extended_paragraphs.yaml
index f53e5b6..0921b25 100644
--- a/training/conf/datamodule/iam_extended_paragraphs.yaml
+++ b/training/conf/datamodule/iam_extended_paragraphs.yaml
@@ -2,7 +2,10 @@ _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
batch_size: 4
num_workers: 12
train_fraction: 0.8
-pin_memory: false
+pin_memory: true
transform: transform/paragraphs.yaml
test_transform: test_transform/paragraphs.yaml
target_transform: target_transform/pad.yaml
+mapping:
+ _target_: text_recognizer.data.mappings.EmnistMapping
+ extra_symbols: ["\n"]
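
Note: the new mapping node follows Hydra's instantiation convention, where _target_ names the class to construct and the sibling keys become constructor arguments, so the datamodule now owns its mapping instead of relying on a global config group. A minimal sketch of how such a node resolves, assuming EmnistMapping accepts an extra_symbols keyword as the YAML implies:

# Sketch of Hydra-style instantiation for the mapping node above.
# Assumes text_recognizer is importable and that EmnistMapping takes
# an extra_symbols keyword, as the config implies.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "_target_": "text_recognizer.data.mappings.EmnistMapping",
        "extra_symbols": ["\n"],
    }
)
mapping = instantiate(cfg)  # roughly EmnistMapping(extra_symbols=["\n"])

The extra "\n" symbol matters for paragraphs: line breaks are part of the target text, so the character set must include them.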
diff --git a/training/conf/datamodule/iam_lines.yaml b/training/conf/datamodule/iam_lines.yaml
index 19c72f8..85a023b 100644
--- a/training/conf/datamodule/iam_lines.yaml
+++ b/training/conf/datamodule/iam_lines.yaml
@@ -1,7 +1,9 @@
_target_: text_recognizer.data.iam_lines.IAMLines
batch_size: 8
num_workers: 12
-train_fraction: 0.8
+train_fraction: 0.9
pin_memory: true
transform: transform/lines.yaml
test_transform: test_transform/lines.yaml
+mapping:
+ _target_: text_recognizer.data.mappings.EmnistMapping
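
Note: iam_lines gets the same datamodule-level mapping (without extra symbols, since line-level targets contain no newlines) plus a larger training split. A trivial sketch of what raising train_fraction from 0.8 to 0.9 means, with a made-up sample count; the real split happens inside IAMLines:

# Illustrative split arithmetic only; num_samples is hypothetical.
num_samples = 10_000
train_fraction = 0.9

num_train = int(train_fraction * num_samples)  # 9000 training samples
num_val = num_samples - num_train              # 1000 validation samples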
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 55dc896..8404cd1 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -1,17 +1,16 @@
----
# @package _global_
defaults:
- - override /mapping: null
+ - override /mapping: characters
- override /criterion: cross_entropy
- override /callbacks: htr
- override /datamodule: iam_lines
- - override /network: null
- - override /model: null
+ - override /network: conv_transformer
+ - override /model: lit_transformer
- override /lr_schedulers: null
- override /optimizers: null
-epochs: &epochs 200
+epochs: &epochs 512
ignore_index: &ignore_index 3
num_classes: &num_classes 57
max_output_len: &max_output_len 89
@@ -21,10 +20,6 @@ criterion:
ignore_index: *ignore_index
# label_smoothing: 0.1
-mapping: &mapping
- mapping:
- _target_: text_recognizer.data.mappings.emnist.EmnistMapping
-
callbacks:
stochastic_weight_averaging:
_target_: pytorch_lightning.callbacks.StochasticWeightAveraging
@@ -60,99 +55,19 @@ lr_schedulers:
datamodule:
batch_size: 16
- num_workers: 12
- train_fraction: 0.9
- pin_memory: true
- << : *mapping
-
-encoder: &encoder
- _target_: text_recognizer.networks.efficientnet.efficientnet.EfficientNet
- arch: b0
- stochastic_dropout_rate: 0.2
- bn_momentum: 0.99
- bn_eps: 1.0e-3
- depth: 5
-
-rotary_embedding: &rotary_embedding
- rotary_embedding:
- _target_: >
- text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
- dim: 64
-
-attn: &attn
- dim: &hidden_dim 512
- num_heads: 4
- dim_head: 64
- dropout_rate: &dropout_rate 0.4
-
-decoder: &decoder
- _target_: text_recognizer.networks.transformer.decoder.Decoder
- depth: 6
- has_pos_emb: true
- block:
- _target_: text_recognizer.networks.transformer.decoder.DecoderBlock
- self_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- <<: *attn
- causal: true
- <<: *rotary_embedding
- cross_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- <<: *attn
- causal: false
- norm:
- _target_: text_recognizer.networks.transformer.norm.RMSNorm
- dim: *hidden_dim
- ff:
- _target_: text_recognizer.networks.transformer.mlp.FeedForward
- dim: *hidden_dim
- dim_out: null
- expansion_factor: 2
- glu: true
- dropout_rate: *dropout_rate
-
-pixel_pos_embedding: &pixel_pos_embedding
- _target_: >
- text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
- dim: *hidden_dim
- shape: &shape [3, 64]
network:
- _target_: text_recognizer.networks.conv_transformer.ConvTransformer
input_dims: [1, 1, 56, 1024]
- hidden_dim: *hidden_dim
num_classes: *num_classes
pad_index: *ignore_index
- encoder:
- <<: *encoder
decoder:
- <<: *decoder
- pixel_pos_embedding:
- <<: *pixel_pos_embedding
+ depth: 10
+ pixel_embedding:
+ shape: [7, 128]
model:
- _target_: text_recognizer.models.transformer.LitTransformer
- <<: *mapping
max_output_len: *max_output_len
- start_token: <s>
- end_token: <e>
- pad_token: <p>
trainer:
- _target_: pytorch_lightning.Trainer
- stochastic_weight_avg: true
- auto_scale_batch_size: binsearch
- auto_lr_find: false
gradient_clip_val: 0.5
- fast_dev_run: false
- gpus: 1
- precision: 16
max_epochs: *epochs
- terminate_on_nan: true
- weights_summary: null
- limit_train_batches: 1.0
- limit_val_batches: 1.0
- limit_test_batches: 1.0
- resume_from_checkpoint: null
- accumulate_grad_batches: 1
- overfit_batches: 0
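
Note: this hunk is the bulk of the cleanup. The experiment no longer redefines the encoder, decoder, and model inline with YAML anchors (&attn, <<: *attn, and friends); it composes the shared /network: conv_transformer and /model: lit_transformer groups and overrides only leaf values such as decoder.depth and pixel_embedding.shape. A sketch of that merge semantics with OmegaConf, using an abbreviated, hypothetical config tree rather than the project's real one:

# The experiment layer carries only deltas; OmegaConf merges them onto
# the base group. Abbreviated, hypothetical config tree.
from omegaconf import OmegaConf

base = OmegaConf.create({"network": {"decoder": {"depth": 4, "dim": 512}}})
experiment = OmegaConf.create({"network": {"decoder": {"depth": 10}}})

merged = OmegaConf.merge(base, experiment)
assert merged.network.decoder.depth == 10  # experiment override wins
assert merged.network.decoder.dim == 512   # inherited from the base group

The trainer block gets the same treatment: the hand-rolled pytorch_lightning.Trainer arguments disappear in favor of the shared trainer config, with only gradient_clip_val and max_epochs kept as experiment-level overrides.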
diff --git a/training/conf/experiment/conv_transformer_paragraphs.yaml b/training/conf/experiment/conv_transformer_paragraphs.yaml
index cc81645..1465e62 100644
--- a/training/conf/experiment/conv_transformer_paragraphs.yaml
+++ b/training/conf/experiment/conv_transformer_paragraphs.yaml
@@ -1,7 +1,7 @@
# @package _global_
defaults:
- - override /mapping: emnist
+ - override /mapping: characters
- override /criterion: cross_entropy
- override /callbacks: htr
- override /datamodule: iam_extended_paragraphs
@@ -10,7 +10,7 @@ defaults:
- override /lr_schedulers: null
- override /optimizers: null
-epochs: &epochs 200
+epochs: &epochs 629
ignore_index: &ignore_index 3
summary: [[1, 1, 576, 640], [1, 682]]
@@ -52,12 +52,8 @@ lr_schedulers:
monitor: val/loss
datamodule:
- _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
batch_size: 6
- num_workers: 12
- train_fraction: 0.8
- pin_memory: true
- << : *mapping
+ train_fraction: 0.9
trainer:
max_epochs: *epochs
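
Note: the paragraphs experiment mirrors the lines cleanup; the mapping moves to the characters group, the inline datamodule redefinition shrinks to two overrides, and the epoch count is threaded through a YAML anchor. A small sketch of how the &epochs anchor and *epochs alias used in these experiment files resolve at parse time:

# Anchors and aliases resolve to the same value when the YAML is
# parsed; PyYAML handles this natively.
import yaml

doc = """
epochs: &epochs 629
trainer:
  max_epochs: *epochs
"""
cfg = yaml.safe_load(doc)
assert cfg["trainer"]["max_epochs"] == cfg["epochs"] == 629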
diff --git a/training/conf/network/decoder/transformer_decoder.yaml b/training/conf/network/decoder/transformer_decoder.yaml
index 7dced16..4588ee9 100644
--- a/training/conf/network/decoder/transformer_decoder.yaml
+++ b/training/conf/network/decoder/transformer_decoder.yaml
@@ -1,42 +1,30 @@
-_target_: text_recognizer.networks.transformer.Decoder
+_target_: text_recognizer.networks.transformer.decoder.Decoder
depth: 4
-pre_norm: true
-local_depth: 2
-has_pos_emb: true
-self_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- dim: 64
- num_heads: 4
- dim_head: 64
- dropout_rate: 0.05
- causal: true
- rotary_embedding:
- _target_: text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
- dim: 128
-local_self_attn:
- _target_: text_recognizer.networks.transformer.local_attention.LocalAttention
- dim: 64
- num_heads: 4
- dim_head: 64
- dropout_rate: 0.05
- window_size: 22
- look_back: 1
- rotary_embedding:
- _target_: text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
- dim: 128
-cross_attn:
- _target_: text_recognizer.networks.transformer.attention.Attention
- dim: 64
- num_heads: 4
- dim_head: 64
- dropout_rate: 0.05
- causal: false
-norm:
- _target_: text_recognizer.networks.transformer.norm.ScaleNorm
- normalized_shape: 192
-ff:
- _target_: text_recognizer.networks.transformer.mlp.FeedForward
- dim_out: null
- expansion_factor: 4
- glu: true
- dropout_rate: 0.2
+block:
+ _target_: text_recognizer.networks.transformer.decoder.DecoderBlock
+ self_attn:
+ _target_: text_recognizer.networks.transformer.attention.Attention
+ dim: 64
+ num_heads: 4
+ dim_head: 64
+ dropout_rate: 0.05
+ causal: true
+ rotary_embedding:
+ _target_: text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
+ dim: 128
+ cross_attn:
+ _target_: text_recognizer.networks.transformer.attention.Attention
+ dim: 64
+ num_heads: 4
+ dim_head: 64
+ dropout_rate: 0.05
+ causal: false
+ norm:
+ _target_: text_recognizer.networks.transformer.norm.RMSNorm
+ normalized_shape: 192
+ ff:
+ _target_: text_recognizer.networks.transformer.mlp.FeedForward
+ dim_out: null
+ expansion_factor: 4
+ glu: true
+ dropout_rate: 0.2
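
Note: the decoder config is restructured around a single block template. Instead of flat self_attn/local_self_attn/cross_attn keys consumed directly by the Decoder, one DecoderBlock is described and, presumably, replicated depth times. A sketch of that pattern under that assumption; this is not the project's actual Decoder implementation:

# Assumed block-per-depth pattern: one template module, deep-copied
# `depth` times so every layer gets independent parameters.
from copy import deepcopy

from torch import nn


class Decoder(nn.Module):
    def __init__(self, block: nn.Module, depth: int) -> None:
        super().__init__()
        self.layers = nn.ModuleList([deepcopy(block) for _ in range(depth)])

    def forward(self, x, context=None):
        for layer in self.layers:
            x = layer(x, context)
        return x

The norm target also changes from ScaleNorm to RMSNorm here, matching the RMSNorm used in the (now removed) inline decoder of the lines experiment.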