Diffstat (limited to 'training/conf/experiment')
-rw-r--r--   training/conf/experiment/barlow_twins.yaml | 134
1 file changed, 134 insertions, 0 deletions
diff --git a/training/conf/experiment/barlow_twins.yaml b/training/conf/experiment/barlow_twins.yaml
new file mode 100644
index 0000000..4901e18
--- /dev/null
+++ b/training/conf/experiment/barlow_twins.yaml
@@ -0,0 +1,134 @@
+defaults:
+  - override /criterion: null
+  - override /datamodule: null
+  - override /network: null
+  - override /model: null
+  - override /lr_schedulers: null
+  - override /optimizers: null
+
+
+epochs: &epochs 1000
+summary: [[1, 1, 56, 1024]]
+
+callbacks:
+  stochastic_weight_averaging:
+    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
+    swa_epoch_start: 0.75
+    swa_lrs: 1.0e-5
+    annealing_epochs: 10
+    annealing_strategy: cos
+    device: null
+
+optimizers:
+  madgrad:
+    _target_: madgrad.MADGRAD
+    lr: 2.0e-4
+    momentum: 0.9
+    weight_decay: 0
+    eps: 1.0e-6
+
+    parameters: network
+
+lr_schedulers:
+  network:
+    _target_: torch.optim.lr_scheduler.OneCycleLR
+    max_lr: 2.0e-4
+    total_steps: null
+    epochs: *epochs
+    steps_per_epoch: 632
+    pct_start: 0.3
+    anneal_strategy: cos
+    cycle_momentum: true
+    base_momentum: 0.85
+    max_momentum: 0.95
+    div_factor: 25
+    final_div_factor: 1.0e4
+    three_phase: false
+    last_epoch: -1
+    verbose: false
+    # Non-class arguments
+    interval: step
+    monitor: val/loss
+
+datamodule:
+  _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
+  batch_size: 4
+  num_workers: 12
+  train_fraction: 0.8
+  augment: true
+  pin_memory: true
+  word_pieces: false
+  resize: null
+
+network:
+  _target_: text_recognizer.networks.conv_transformer.ConvTransformer
+  input_dims: [1, 576, 640]
+  hidden_dim: &hidden_dim 128
+  encoder_dim: 1280
+  dropout_rate: 0.2
+  num_classes: *num_classes
+  pad_index: *ignore_index
+  encoder:
+    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
+    arch: b0
+    out_channels: 1280
+    stochastic_dropout_rate: 0.2
+    bn_momentum: 0.99
+    bn_eps: 1.0e-3
+  decoder:
+    _target_: text_recognizer.networks.transformer.Decoder
+    dim: *hidden_dim
+    depth: 3
+    num_heads: 4
+    attn_fn: text_recognizer.networks.transformer.attention.Attention
+    attn_kwargs:
+      dim_head: 32
+      dropout_rate: 0.2
+    norm_fn: text_recognizer.networks.transformer.norm.ScaleNorm
+    ff_fn: text_recognizer.networks.transformer.mlp.FeedForward
+    ff_kwargs:
+      dim_out: null
+      expansion_factor: 4
+      glu: true
+      dropout_rate: 0.2
+    cross_attend: true
+    pre_norm: true
+    rotary_emb:
+      _target_: text_recognizer.networks.transformer.positional_encodings.rotary_embedding.RotaryEmbedding
+      dim: 32
+  pixel_pos_embedding:
+    _target_: text_recognizer.networks.transformer.positional_encodings.PositionalEncoding2D
+    hidden_dim: *hidden_dim
+    max_h: 18
+    max_w: 20
+  token_pos_embedding:
+    _target_: text_recognizer.networks.transformer.positional_encodings.PositionalEncoding
+    hidden_dim: *hidden_dim
+    dropout_rate: 0.2
+    max_len: *max_output_len
+
+model:
+  _target_: text_recognizer.models.transformer.TransformerLitModel
+  max_output_len: *max_output_len
+  start_token: <s>
+  end_token: <e>
+  pad_token: <p>
+
+trainer:
+  _target_: pytorch_lightning.Trainer
+  stochastic_weight_avg: true
+  auto_scale_batch_size: binsearch
+  auto_lr_find: false
+  gradient_clip_val: 0.0
+  fast_dev_run: false
+  gpus: 1
+  precision: 16
+  max_epochs: *epochs
+  terminate_on_nan: true
+  weights_summary: null
+  limit_train_batches: 1.0
+  limit_val_batches: 1.0
+  limit_test_batches: 1.0
+  resume_from_checkpoint: null
+  accumulate_grad_batches: 16
+  overfit_batches: 0
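
For reference, an experiment file like this is composed by Hydra on top of the project's primary config, and each _target_ entry is built with hydra.utils.instantiate. The sketch below only illustrates that pattern; the entry-point location, config path, config name, and the existence of an "experiment" config group are assumptions for illustration and are not confirmed by this diff.

    # Minimal sketch, not the repository's actual entry point.
    # Assumes the primary config sits at training/conf/config.yaml and that
    # this script lives in training/, so config_path="conf" resolves to it.
    import hydra
    from hydra.utils import instantiate
    from omegaconf import DictConfig


    @hydra.main(config_path="conf", config_name="config")
    def run(cfg: DictConfig) -> None:
        # Build the objects declared via _target_ in the YAML above.
        datamodule = instantiate(cfg.datamodule)
        network = instantiate(cfg.network)
        trainer = instantiate(cfg.trainer)
        ...


    if __name__ == "__main__":
        run()

An experiment config like this would then typically be selected with something like python training/run.py +experiment=barlow_twins, though the exact command depends on how the primary config declares its defaults.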