From bc199a6ae36486c0c98e4808e344c90a6dd452a7 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Sun, 5 Dec 2021 20:24:12 +0100
Subject: Update config

---
 .../conf/experiment/conv_transformer_paragraphs.yaml | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/training/conf/experiment/conv_transformer_paragraphs.yaml b/training/conf/experiment/conv_transformer_paragraphs.yaml
index 859117f..7a72a1a 100644
--- a/training/conf/experiment/conv_transformer_paragraphs.yaml
+++ b/training/conf/experiment/conv_transformer_paragraphs.yaml
@@ -89,7 +89,7 @@ network:
     stochastic_dropout_rate: 0.2
     bn_momentum: 0.99
     bn_eps: 1.0e-3
-    depth: 7
+    depth: 5
   decoder:
     depth: 6
     _target_: text_recognizer.networks.transformer.layers.Decoder
@@ -116,15 +116,15 @@ network:
   pixel_pos_embedding:
     _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
     dim: *hidden_dim
-    shape: &shape [18, 20]
-  axial_encoder:
-    _target_: text_recognizer.networks.transformer.axial_attention.encoder.AxialEncoder
-    dim: *hidden_dim
-    heads: 4
-    shape: *shape
-    depth: 2
-    dim_head: 64
-    dim_index: 1
+    shape: &shape [36, 40]
+  axial_encoder: null
+    # _target_: text_recognizer.networks.transformer.axial_attention.encoder.AxialEncoder
+    # dim: *hidden_dim
+    # heads: 4
+    # shape: *shape
+    # depth: 2
+    # dim_head: 64
+    # dim_index: 1
 model:
   _target_: text_recognizer.models.transformer.TransformerLitModel
-- 
cgit v1.2.3-70-g09d2
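
Note on the resulting configuration: the excerpt below sketches how the touched part of training/conf/experiment/conv_transformer_paragraphs.yaml should read after this patch is applied. The exact indentation, the definition of the hidden_dim anchor, and any keys outside the two hunks are assumptions reconstructed from the hunks, not shown in the commit. If each encoder stage halves the spatial resolution, reducing the encoder depth from 7 to 5 leaves a feature map twice as large in each dimension, which would match the axial positional embedding shape growing from [18, 20] to [36, 40]; that relationship is inferred here, not stated by the author.

    # Excerpt of the post-patch config (reconstructed sketch, not part of the commit)
    network:
      encoder:
        stochastic_dropout_rate: 0.2
        bn_momentum: 0.99
        bn_eps: 1.0e-3
        depth: 5                 # was 7
      decoder:
        depth: 6
        _target_: text_recognizer.networks.transformer.layers.Decoder
      pixel_pos_embedding:
        _target_: text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
        dim: *hidden_dim         # alias to an anchor defined earlier in the full file
        shape: &shape [36, 40]   # was [18, 20]
      axial_encoder: null        # axial attention encoder disabled; old settings kept as comments

    model:
      _target_: text_recognizer.models.transformer.TransformerLitModel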