Diffstat (limited to 'training/conf/experiment/vqgan_htr_char_iam_lines.yaml')
-rw-r--r--  training/conf/experiment/vqgan_htr_char_iam_lines.yaml | 88
1 file changed, 0 insertions(+), 88 deletions(-)
diff --git a/training/conf/experiment/vqgan_htr_char_iam_lines.yaml b/training/conf/experiment/vqgan_htr_char_iam_lines.yaml
deleted file mode 100644
index 27fdfda..0000000
--- a/training/conf/experiment/vqgan_htr_char_iam_lines.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
-defaults:
-  - override /mapping: null
-  - override /criterion: null
-  - override /datamodule: null
-  - override /network: null
-  - override /model: null
-  - override /lr_schedulers: null
-  # - override /optimizers: null
-
-
-criterion:
-  _target_: text_recognizer.criterions.label_smoothing.LabelSmoothingLoss
-  smoothing: 0.1
-  ignore_index: 3
-
-mapping:
-  _target_: text_recognizer.data.emnist_mapping.EmnistMapping
-  # extra_symbols: [ "\n" ]
-
-lr_schedulers:
-  network:
-    _target_: torch.optim.lr_scheduler.CosineAnnealingLR
-    T_max: 512
-    eta_min: 4.5e-6
-    last_epoch: -1
-    interval: epoch
-    monitor: val/loss
-
-datamodule:
-  _target_: text_recognizer.data.iam_lines.IAMLines
-  batch_size: 4
-  num_workers: 12
-  train_fraction: 0.8
-  augment: false
-  pin_memory: false
-
-
-# optimizers:
-#   - _target_: madgrad.MADGRAD
-#     lr: 2.0e-4
-#     momentum: 0.9
-#     weight_decay: 0
-#     eps: 1.0e-7
-#     parameters: network
-
-network:
-  _target_: text_recognizer.networks.vq_transformer.VqTransformer
-  input_dims: [1, 56, 1024]
-  encoder_dim: 32
-  hidden_dim: 32
-  dropout_rate: 0.1
-  num_classes: 58
-  pad_index: 3
-  no_grad: true
-  decoder:
-    _target_: text_recognizer.networks.transformer.Decoder
-    dim: 32
-    depth: 4
-    num_heads: 8
-    attn_fn: text_recognizer.networks.transformer.attention.Attention
-    attn_kwargs:
-      dim_head: 32
-      dropout_rate: 0.2
-    norm_fn: torch.nn.LayerNorm
-    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
-    ff_kwargs:
-      dim_out: null
-      expansion_factor: 4
-      glu: true
-      dropout_rate: 0.2
-    cross_attend: true
-    pre_norm: true
-    rotary_emb: null
-  pretrained_encoder_path: "training/logs/runs/2021-09-26/23-27-57"
-
-model:
-  _target_: text_recognizer.models.vq_transformer.VqTransformerLitModel
-  start_token: <s>
-  end_token: <e>
-  pad_token: <p>
-  max_output_len: 89  # 451
-  alpha: 0.0
-
-trainer:
-  max_epochs: 512
-  # limit_train_batches: 0.1
-  # limit_val_batches: 0.1
-  # gradient_clip_val: 0.5
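Every block in the deleted config is a Hydra node: _target_ names the class to instantiate and the sibling keys become its constructor arguments. As a minimal sketch (not code from this repo, assuming hydra-core and torch are available), the lr_schedulers.network node would be built roughly as below. Note that interval and monitor are PyTorch Lightning bookkeeping keys, not CosineAnnealingLR arguments, so the training loop has to strip them before instantiation.

import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

# The scheduler node from the config, minus the Lightning-only keys
# (interval, monitor).
cfg = OmegaConf.create({
    "_target_": "torch.optim.lr_scheduler.CosineAnnealingLR",
    "T_max": 512,
    "eta_min": 4.5e-6,
    "last_epoch": -1,
})

net = torch.nn.Linear(8, 8)  # hypothetical stand-in for the real network
optimizer = torch.optim.SGD(net.parameters(), lr=1e-3)
scheduler = instantiate(cfg, optimizer=optimizer)  # extra kwargs are forwarded to the target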
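The criterion block points at a repo-specific LabelSmoothingLoss whose exact signature is not shown here. For intuition only, the same smoothing=0.1 / ignore_index=3 settings map onto PyTorch's built-in cross entropy (the label_smoothing argument exists since torch 1.10):

import torch

# Hypothetical stand-in for the configured criterion; index 3 is the pad
# class, matching pad_index and pad_token elsewhere in the config.
criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.1, ignore_index=3)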