author     Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-11-27 12:37:18 +0100
committer  Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-11-27 12:37:18 +0100
commit     e87e3c5ac01ac3d154dca8496c5e73783a358742 (patch)
tree       141c7f4b2050f4866b917b7dfd8af73dd783e6f0 /training
parent     05847d70dab99b14e0b9ee8102d403a0a4266535 (diff)
Update lines config with no axial encoder
Diffstat (limited to 'training')
-rw-r--r--  training/conf/experiment/conv_transformer_lines.yaml | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 3af2c37..38b72c4 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -64,7 +64,7 @@ rotary_embedding: &rotary_embedding
   dim: 64
 
 attn: &attn
-  dim: &hidden_dim 256
+  dim: &hidden_dim 128
   num_heads: 4
   dim_head: 64
   dropout_rate: &dropout_rate 0.2
@@ -76,7 +76,7 @@ network:
   num_classes: *num_classes
   pad_index: *ignore_index
   encoder:
-    _target_: text_recognizer.networks.efficientnet.EfficientNet
+    _target_: text_recognizer.networks.efficientnet.efficientnet.EfficientNet
     arch: b0
     depth: 5
     stochastic_dropout_rate: 0.2
@@ -109,14 +109,14 @@ network:
     _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
     dim: *hidden_dim
     shape: &shape [3, 64]
-  axial_encoder:
-    _target_: text_recognizer.networks.transformer.axial_attention.encoder.AxialEncoder
-    dim: *hidden_dim
-    heads: 4
-    shape: *shape
-    depth: 2
-    dim_head: 64
-    dim_index: 1
+  axial_encoder: null
+  # _target_: text_recognizer.networks.transformer.axial_attention.encoder.AxialEncoder
+  # dim: *hidden_dim
+  # heads: 4
+  # shape: *shape
+  # depth: 2
+  # dim_head: 64
+  # dim_index: 1
 
 model:
   _target_: text_recognizer.models.transformer.TransformerLitModel
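
The hidden-dim change in the first hunk touches a single line because the value is declared once as a YAML anchor (&hidden_dim) and reused via aliases (*hidden_dim), e.g. by the axial positional embedding further down. A minimal sketch of that propagation, assuming PyYAML is installed; the snippet is a trimmed-down stand-in for the experiment file, not the file itself:

# Demonstrates that editing the &hidden_dim anchor once updates every
# *hidden_dim alias that refers to it.
import yaml

snippet = """
attn: &attn
  dim: &hidden_dim 128   # 256 -> 128 in this commit
  num_heads: 4
pixel_embedding:
  dim: *hidden_dim       # follows the anchor automatically
"""

cfg = yaml.safe_load(snippet)
assert cfg["attn"]["dim"] == cfg["pixel_embedding"]["dim"] == 128
print(cfg)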
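
The encoder fix in the second hunk matters because Hydra treats _target_ as a full dotted import path, <package>.<module>.<ClassName>; the added segment suggests the EfficientNet class is defined in efficientnet.py inside the efficientnet package. A minimal sketch of how that instantiation works, assuming hydra-core is installed; datetime.date stands in for the EfficientNet class so the example stays self-contained:

# Hydra imports the dotted _target_ path and calls it with the remaining keys
# as keyword arguments; a wrong path only fails when instantiate() runs,
# i.e. when the network is actually built, not when the config is loaded.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    {
        "_target_": "datetime.date",  # stand-in for the EfficientNet target path
        "year": 2021,
        "month": 11,
        "day": 27,
    }
)

obj = instantiate(cfg)
print(obj)  # 2021-11-27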