From 8177b5210fbbe11bd361dedbd5b4a4e1950bdb2e Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Fri, 10 Jun 2022 00:36:51 +0200
Subject: Update configs

---
 .../experiment/conv_transformer_paragraphs.yaml | 103 +--------------------
 1 file changed, 3 insertions(+), 100 deletions(-)

(limited to 'training/conf/experiment/conv_transformer_paragraphs.yaml')

diff --git a/training/conf/experiment/conv_transformer_paragraphs.yaml b/training/conf/experiment/conv_transformer_paragraphs.yaml
index afa1785..cc81645 100644
--- a/training/conf/experiment/conv_transformer_paragraphs.yaml
+++ b/training/conf/experiment/conv_transformer_paragraphs.yaml
@@ -1,31 +1,23 @@
----
 # @package _global_
 
 defaults:
-  - override /mapping: null
+  - override /mapping: emnist
   - override /criterion: cross_entropy
   - override /callbacks: htr
   - override /datamodule: iam_extended_paragraphs
-  - override /network: null
-  - override /model: null
+  - override /network: conv_transformer
+  - override /model: lit_transformer
   - override /lr_schedulers: null
   - override /optimizers: null
 
 epochs: &epochs 200
 ignore_index: &ignore_index 3
-num_classes: &num_classes 58
-max_output_len: &max_output_len 682
 summary: [[1, 1, 576, 640], [1, 682]]
 
 criterion:
   ignore_index: *ignore_index
   # label_smoothing: 0.1
 
-mapping: &mapping
-  mapping:
-    _target_: text_recognizer.data.mappings.emnist.EmnistMapping
-    extra_symbols: ["\n"]
-
 callbacks:
   stochastic_weight_averaging:
     _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
@@ -67,94 +59,5 @@ datamodule:
   pin_memory: true
   << : *mapping
 
-encoder: &encoder
-  _target_: text_recognizer.networks.efficientnet.efficientnet.EfficientNet
-  arch: b0
-  stochastic_dropout_rate: 0.2
-  bn_momentum: 0.99
-  bn_eps: 1.0e-3
-  depth: 7
-
-rotary_embedding: &rotary_embedding
-  rotary_embedding:
-    _target_: >
-      text_recognizer.networks.transformer.embeddings.rotary.RotaryEmbedding
-    dim: 64
-
-attn: &attn
-  dim: &hidden_dim 512
-  num_heads: 4
-  dim_head: 64
-  dropout_rate: &dropout_rate 0.4
-
-decoder: &decoder
-  _target_: text_recognizer.networks.transformer.decoder.Decoder
-  depth: 6
-  has_pos_emb: true
-  block:
-    _target_: text_recognizer.networks.transformer.decoder.DecoderBlock
-    self_attn:
-      _target_: text_recognizer.networks.transformer.attention.Attention
-      << : *attn
-      causal: true
-      << : *rotary_embedding
-    cross_attn:
-      _target_: text_recognizer.networks.transformer.attention.Attention
-      << : *attn
-      causal: false
-    norm:
-      _target_: text_recognizer.networks.transformer.norm.RMSNorm
-      dim: *hidden_dim
-    ff:
-      _target_: text_recognizer.networks.transformer.mlp.FeedForward
-      dim: *hidden_dim
-      dim_out: null
-      expansion_factor: 2
-      glu: true
-      dropout_rate: *dropout_rate
-
-pixel_pos_embedding: &pixel_pos_embedding
-  _target_: >
-    text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbedding
-  dim: *hidden_dim
-  shape: &shape [18, 20]
-
-network:
-  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
-  input_dims: [1, 1, 576, 640]
-  hidden_dim: *hidden_dim
-  num_classes: *num_classes
-  pad_index: *ignore_index
-  encoder:
-    << : *encoder
-  decoder:
-    << : *decoder
-  pixel_pos_embedding:
-    << : *pixel_pos_embedding
-
-model:
-  _target_: text_recognizer.models.transformer.TransformerLitModel
-  << : *mapping
-  max_output_len: *max_output_len
-  start_token:
-  end_token:
-  pad_token:
-
 trainer:
-  _target_: pytorch_lightning.Trainer
-  stochastic_weight_avg: true
-  auto_scale_batch_size: binsearch
-  auto_lr_find: false
-  gradient_clip_val: 0.5
-  fast_dev_run: false
-  gpus: 1
-  precision: 16
   max_epochs: *epochs
-  terminate_on_nan: true
-  weights_summary: null
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  limit_test_batches: 1.0
-  resume_from_checkpoint: null
-  accumulate_grad_batches: 2
-  overfit_batches: 0
--
cgit v1.2.3-70-g09d2
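
After this commit the experiment file mostly selects config groups (mapping, network, model) in its defaults list and overrides a few scalar values, instead of defining the encoder, decoder, and trainer inline. Below is a minimal sketch of how such a Hydra experiment config is typically composed and instantiated; the root config name ("config"), its location under training/conf, and the "+experiment=" override syntax are assumptions about the project layout, not facts taken from this patch.

# Sketch: composing the experiment config with Hydra's compose API.
# Assumptions (not from the patch): root config "config" under training/conf,
# experiment selected via an "experiment" override group.
from hydra import compose, initialize
from hydra.utils import instantiate

with initialize(config_path="training/conf"):
    cfg = compose(
        config_name="config",
        overrides=["+experiment=conv_transformer_paragraphs"],
    )

# The defaults list swaps in the conv_transformer network and lit_transformer
# model groups; objects are then built from their _target_ entries.
network = instantiate(cfg.network)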