Diffstat (limited to 'training/conf/experiment/conv_transformer_lines.yaml')
-rw-r--r--  training/conf/experiment/conv_transformer_lines.yaml | 26 +++++++++++++-------------
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 11646ca..20e369e 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -10,7 +10,7 @@ defaults:
   - override /lr_schedulers: null
   - override /optimizers: null

-epochs: &epochs 620
+epochs: &epochs 300
 ignore_index: &ignore_index 3
 num_classes: &num_classes 57
 max_output_len: &max_output_len 89
@@ -27,7 +27,7 @@ callbacks:
   stochastic_weight_averaging:
     _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
     swa_epoch_start: 0.75
-    swa_lrs: 1.0e-5
+    swa_lrs: 1.0e-4
     annealing_epochs: 10
     annealing_strategy: cos
     device: null
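For reference, a minimal sketch of the callback this block resolves to, built directly with the PyTorch Lightning API; the Trainer wiring here is illustrative and not part of this config:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import StochasticWeightAveraging

swa = StochasticWeightAveraging(
    swa_lrs=1.0e-4,        # raised from 1.0e-5 in this commit
    swa_epoch_start=0.75,  # begin averaging after 75% of the epochs
    annealing_epochs=10,
    annealing_strategy="cos",
)
trainer = pl.Trainer(max_epochs=300, callbacks=[swa])  # Trainer setup assumed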
@@ -43,15 +43,15 @@ optimizers:

 lr_schedulers:
   network:
-    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
-    T_max: *epochs
-    eta_min: 1.0e-5
-    last_epoch: -1
-    interval: epoch
-    monitor: val/loss
+    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
+    T_max: *epochs
+    eta_min: 1.0e-4
+    last_epoch: -1
+    interval: epoch
+    monitor: val/loss

 datamodule:
-  batch_size: 32
+  batch_size: 16
   num_workers: 12
   train_fraction: 0.9
   pin_memory: true
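A minimal sketch of the scheduler this entry resolves to, assuming an optimizer is already configured; note that `interval` and `monitor` are consumed by Lightning's lr-scheduler dict, not passed to CosineAnnealingLR itself:

import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingLR

network = nn.Linear(8, 8)  # stand-in for the conv-transformer network
optimizer = torch.optim.Adam(network.parameters())
scheduler = CosineAnnealingLR(
    optimizer,
    T_max=300,       # resolves the *epochs anchor after this commit
    eta_min=1.0e-4,  # floor raised from 1.0e-5, matching the new swa_lrs
    last_epoch=-1,
)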
@@ -64,7 +64,7 @@ rotary_embedding: &rotary_embedding

 attn: &attn
   dim: &hidden_dim 256
-  num_heads: 4
+  num_heads: 6
   dim_head: 64
   dropout_rate: &dropout_rate 0.5

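With the head count raised to 6, the attention inner width (num_heads * dim_head = 384) no longer equals the model dim of 256, whereas the old 4 * 64 did. A shape sketch assuming an Attention module that projects dim to num_heads * dim_head and back out; the actual text_recognizer Attention may differ:

import torch
from torch import nn

dim, num_heads, dim_head = 256, 6, 64
inner_dim = num_heads * dim_head                  # 384 (was 4 * 64 = 256)
to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
to_out = nn.Linear(inner_dim, dim)                # projects back to the model dim

x = torch.randn(1, 89, dim)                       # (batch, max_output_len, dim)
q, k, v = to_qkv(x).chunk(3, dim=-1)              # each (1, 89, 384)
q = q.view(1, 89, num_heads, dim_head).transpose(1, 2)  # (1, 6, 89, 64)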
@@ -76,12 +76,12 @@ network:
   pad_index: *ignore_index
   encoder:
     _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
-    arch: b3
+    arch: b0
     stochastic_dropout_rate: 0.2
     bn_momentum: 0.99
     bn_eps: 1.0e-3
   decoder:
-    depth: 6
+    depth: 3
     _target_: text_recognizer.networks.transformer.layers.Decoder
     self_attn:
       _target_: text_recognizer.networks.transformer.attention.Attention
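A sketch of how Hydra turns the `encoder` block above into an object; it assumes EfficientNet's constructor accepts keywords mirroring the config keys, which the `_target_` convention implies:

from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    """
    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
    arch: b0
    stochastic_dropout_rate: 0.2
    bn_momentum: 0.99
    bn_eps: 1.0e-3
    """
)
encoder = instantiate(cfg)  # requires text_recognizer on the import path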
@@ -106,7 +106,7 @@ network:
   pixel_pos_embedding:
     _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
     dim: *hidden_dim
-    shape: [1, 32]
+    shape: [3, 64]

 model:
   _target_: text_recognizer.models.transformer.TransformerLitModel
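The axial embedding's `shape` must cover the encoder's output feature-map grid, so the change to [3, 64] presumably tracks the b0 encoder's height-by-width grid for line images. A sketch of the axial idea, with the grid size read off the config and the broadcasting scheme an assumption about the AxialPositionalEmbedding implementation:

import torch
from torch import nn

dim, (h, w) = 256, (3, 64)              # shape: [3, 64] after this commit
row_emb = nn.Parameter(torch.randn(h, 1, dim))  # one embedding per row
col_emb = nn.Parameter(torch.randn(1, w, dim))  # one embedding per column

feats = torch.randn(1, h, w, dim)       # encoder output, channels-last
feats = feats + row_emb + col_emb       # broadcasting sums the two axes
pixels = feats.flatten(1, 2)            # (1, h * w, dim) fed to the decoder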