Diffstat (limited to 'training')
-rw-r--r--  training/conf/datamodule/transform/lines.yaml         2
-rw-r--r--  training/conf/experiment/conv_transformer_lines.yaml  8
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/training/conf/datamodule/transform/lines.yaml b/training/conf/datamodule/transform/lines.yaml
index 6949a15..34816e1 100644
--- a/training/conf/datamodule/transform/lines.yaml
+++ b/training/conf/datamodule/transform/lines.yaml
@@ -15,7 +15,7 @@ random_affine:
 random_perspective:
   _target_: torchvision.transforms.RandomPerspective
-  distortion_scale: 0.2
+  distortion_scale: 0.15
   p: 0.5
   fill: 0
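
For reference, Hydra's _target_ key resolves this block to a torchvision transform. A minimal Python sketch of what the updated block instantiates (only the gentler distortion_scale changes; the other values come straight from the config):

# Sketch of the transform the updated lines.yaml block builds via Hydra.
# Lowering distortion_scale from 0.2 to 0.15 makes the perspective warp milder.
from torchvision import transforms

random_perspective = transforms.RandomPerspective(
    distortion_scale=0.15,  # new, weaker value from this change
    p=0.5,                  # warp roughly half of the samples
    fill=0,                 # fill exposed corners with black
)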
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 6ba4535..11646ca 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -10,7 +10,7 @@ defaults:
   - override /lr_schedulers: null
   - override /optimizers: null
 
-epochs: &epochs 256
+epochs: &epochs 620
 ignore_index: &ignore_index 3
 num_classes: &num_classes 57
 max_output_len: &max_output_len 89
@@ -44,7 +44,7 @@ optimizers:
 lr_schedulers:
   network:
     _target_: torch.optim.lr_scheduler.CosineAnnealingLR
-    T_max: 256
+    T_max: *epochs
     eta_min: 1.0e-5
     last_epoch: -1
     interval: epoch
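
The switch from a hard-coded T_max to the *epochs alias ties the cosine schedule to the epoch count defined above, so raising epochs from 256 to 620 automatically stretches the annealing period to match. A sketch of the equivalent PyTorch call (the model and optimizer below are placeholders, not part of this diff):

# Sketch of the scheduler the updated config resolves to.
import torch

model = torch.nn.Linear(8, 8)  # stand-in model; the real network comes from the config
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)  # placeholder optimizer
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer,
    T_max=620,       # *epochs: follows the new epoch count instead of a stale 256
    eta_min=1.0e-5,  # floor of the cosine-decayed learning rate
    last_epoch=-1,
)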
@@ -64,7 +64,7 @@ rotary_embedding: &rotary_embedding
 attn: &attn
   dim: &hidden_dim 256
-  num_heads: 8
+  num_heads: 4
   dim_head: 64
   dropout_rate: &dropout_rate 0.5
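
Assuming the attention module uses the usual inner_dim = num_heads * dim_head projection layout (the text_recognizer attention code is not shown in this diff), halving the head count brings the inner projection width down to the hidden size:

# Back-of-the-envelope check of the head change (layout is an assumption,
# not confirmed by this diff).
hidden_dim = 256
dim_head = 64

old_inner_dim = 8 * dim_head  # 512: projections were twice as wide as hidden_dim
new_inner_dim = 4 * dim_head  # 256: now matches hidden_dim exactly
print(old_inner_dim, new_inner_dim, new_inner_dim == hidden_dim)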
@@ -76,7 +76,7 @@ network:
   pad_index: *ignore_index
   encoder:
     _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
-    arch: b2
+    arch: b3
     stochastic_dropout_rate: 0.2
     bn_momentum: 0.99
     bn_eps: 1.0e-3
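
Hydra builds the encoder from this block at runtime; bumping arch from b2 to b3 selects a wider and deeper EfficientNet variant without touching the rest of the network config. A hedged sketch of the instantiation, assuming the repo's EfficientNet constructor accepts exactly the keys listed here:

# Sketch of how Hydra would build the encoder from the updated block.
# EfficientNet lives in this repo's text_recognizer package; its constructor
# signature is assumed to mirror the config keys.
from hydra.utils import instantiate
from omegaconf import OmegaConf

encoder_cfg = OmegaConf.create({
    "_target_": "text_recognizer.networks.encoders.efficientnet.EfficientNet",
    "arch": "b3",                    # bumped from b2: larger width/depth scaling
    "stochastic_dropout_rate": 0.2,
    "bn_momentum": 0.99,
    "bn_eps": 1.0e-3,
})
encoder = instantiate(encoder_cfg)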