From ac31eaa8e8eaf33f25c4cdf0382152adb208ded2 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm <gustaf.rydholm@gmail.com>
Date: Fri, 11 Feb 2022 22:48:59 +0100
Subject: chore: remove word piece experiment

---
 .../experiment/conv_transformer_paragraphs_wp.yaml | 154 ---------------------
 1 file changed, 154 deletions(-)
 delete mode 100644 training/conf/experiment/conv_transformer_paragraphs_wp.yaml

diff --git a/training/conf/experiment/conv_transformer_paragraphs_wp.yaml b/training/conf/experiment/conv_transformer_paragraphs_wp.yaml
deleted file mode 100644
index bf192ec..0000000
--- a/training/conf/experiment/conv_transformer_paragraphs_wp.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-# @package _global_
-
-defaults:
-  - override /mapping: null
-  - override /criterion: null
-  - override /datamodule: null
-  - override /network: null
-  - override /model: null
-  - override /lr_schedulers: null
-  - override /optimizers: null
-
-epochs: &epochs 1000
-ignore_index: &ignore_index 1000
-num_classes: &num_classes 1006
-max_output_len: &max_output_len 451
-summary: [[1, 1, 576, 640], [1, 451]]
-
-criterion:
-  _target_: torch.nn.CrossEntropyLoss
-  ignore_index: *ignore_index
-
-mapping: &mapping
-  mapping:
-    _target_: text_recognizer.data.mappings.word_piece_mapping.WordPieceMapping
-    num_features: 1000
-    tokens: iamdb_1kwp_tokens_1000.txt
-    lexicon: iamdb_1kwp_lex_1000.txt
-    use_words: false
-    prepend_wordsep: false
-    special_tokens: [ <s>, <e>, <p> ]
-    extra_symbols: [ "\n" ]
-
-callbacks:
-  stochastic_weight_averaging:
-    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
-    swa_epoch_start: 0.75
-    swa_lrs: 1.0e-5
-    annealing_epochs: 10
-    annealing_strategy: cos
-    device: null
-
-optimizers:
-  madgrad:
-    _target_: madgrad.MADGRAD
-    lr: 1.0e-4
-    momentum: 0.9
-    weight_decay: 0
-    eps: 1.0e-6
-
-    parameters: network
-
-lr_schedulers:
-  network:
-    _target_: torch.optim.lr_scheduler.OneCycleLR
-    max_lr: 3.0e-4
-    total_steps: null
-    epochs: *epochs
-    steps_per_epoch: 632
-    pct_start: 0.3
-    anneal_strategy: cos
-    cycle_momentum: true
-    base_momentum: 0.85
-    max_momentum: 0.95
-    div_factor: 25
-    final_div_factor: 1.0e4
-    three_phase: false
-    last_epoch: -1
-    verbose: false
-    # Non-class arguments
-    interval: step
-    monitor: val/loss
-
-datamodule:
-  _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
-  num_workers: 12
-  train_fraction: 0.9
-  pin_memory: true
-  transform: transform/paragraphs.yaml
-  test_transform: transform/paragraphs.yaml
-  target_transform: target_transform/word_piece.yaml
-  << : *mapping
-
-network:
-  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
-  input_dims: [1, 576, 640]
-  hidden_dim: &hidden_dim 128
-  encoder_dim: 1280
-  dropout_rate: 0.2
-  num_classes: *num_classes
-  pad_index: *ignore_index
-  encoder:
-    _target_: text_recognizer.networks.efficientnet.EfficientNet
-    arch: b0
-    out_channels: 1280
-    stochastic_dropout_rate: 0.2
-    bn_momentum: 0.99
-    bn_eps: 1.0e-3
-  decoder:
-    _target_: text_recognizer.networks.transformer.Decoder
-    dim: *hidden_dim
-    depth: 3 
-    num_heads: 4
-    attn_fn: text_recognizer.networks.transformer.attention.Attention
-    attn_kwargs:
-      dim_head: 32
-      dropout_rate: 0.05
-    norm_fn: text_recognizer.networks.transformer.norm.ScaleNorm
-    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
-    ff_kwargs:
-      dim_out: null
-      expansion_factor: 4
-      glu: true
-      dropout_rate: 0.05
-    cross_attend: true
-    pre_norm: true
-    rotary_emb:
-      _target_: text_recognizer.networks.transformer.positional_encodings.rotary_embedding.RotaryEmbedding
-      dim: 32
-  pixel_pos_embedding:
-    _target_: text_recognizer.networks.transformer.positional_encodings.PositionalEncoding2D
-    hidden_dim: *hidden_dim 
-    max_h: 18
-    max_w: 20
-  token_pos_embedding:
-    _target_: text_recognizer.networks.transformer.positional_encodings.PositionalEncoding
-    hidden_dim: *hidden_dim 
-    dropout_rate: 0.05
-    max_len: *max_output_len
-
-model:
-  _target_: text_recognizer.models.transformer.TransformerLitModel
-  max_output_len: *max_output_len
-  start_token: <s>
-  end_token: <e>
-  pad_token: <p>
-
-trainer:
-  _target_: pytorch_lightning.Trainer
-  stochastic_weight_avg: true
-  auto_scale_batch_size: binsearch
-  auto_lr_find: false
-  gradient_clip_val: 0.5
-  fast_dev_run: false
-  gpus: 1
-  precision: 16
-  max_epochs: *epochs
-  terminate_on_nan: true
-  weights_summary: null
-  limit_train_batches: 1.0 
-  limit_val_batches: 1.0
-  limit_test_batches: 1.0
-  resume_from_checkpoint: null
-  accumulate_grad_batches: 16
-  overfit_batches: 0