summaryrefslogtreecommitdiff
path: root/src/training/experiments/default_config_emnist.yml
blob: 12a0a9df75e72e834f9ee49f7eb2fe061f03bab1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# Dataset class (resolved by name) and its constructor arguments.
dataset: EmnistDataset
dataset_args:
  # presumably balances per-class sample counts — confirm in EmnistDataset
  sample_to_balance: true
  # Fraction of the dataset actually used (0.33 = one third).
  subsample_fraction: 0.33
  # No input/target transforms applied.
  transform: null
  target_transform: null
  # RNG seed for reproducible subsampling — TODO confirm scope in EmnistDataset
  seed: 4711

# Arguments forwarded to the data-loader construction.
data_loader_args:
  splits: [train, val]  # which dataset splits get a loader
  shuffle: true
  num_workers: 8  # parallel data-loading workers
  cuda: true  # NOTE(review): presumably enables pinned memory / GPU transfer — confirm in loader code

# Model wrapper class (resolved by name) and metrics tracked during training.
model: CharacterModel
metrics: [accuracy]

# Network constructor arguments — consumer network class not visible here;
# confirm parameter meanings against its signature.
network_args:
  in_channels: 1  # single-channel input (EMNIST images are grayscale)
  num_classes: 80
  depths: [2]  # presumably blocks per stage — TODO confirm
  block_sizes: [256]  # presumably channels per stage — TODO confirm

# Core training-loop parameters.
train_args:
  batch_size: 256
  epochs: 5  # NOTE(review): also hard-coded in lr_scheduler_args and ProgressBar — keep in sync

# Loss function (resolved by name) and its arguments — these match the
# usual CrossEntropyLoss defaults (no class weighting, mean reduction).
criterion: CrossEntropyLoss
criterion_args:
  weight: null
  ignore_index: -100
  reduction: mean

# Optimizer (resolved by name) and its arguments.
# Floats are written as 1.0e-3 (not 1.e-03): some YAML loaders only resolve
# scientific notation with a digit on both sides of the decimal point and
# would otherwise hand the consumer a *string*, breaking the optimizer.
optimizer: AdamW
optimizer_args:
  lr: 1.0e-3
  betas: [0.9, 0.999]
  eps: 1.0e-8
  # weight_decay: 5.0e-4
  amsgrad: false

# Learning-rate schedule (resolved by name) and its arguments.
lr_scheduler: OneCycleLR
lr_scheduler_args:
  # 1.0e-3 instead of 1.e-03: portable float notation — some YAML loaders
  # resolve 1.e-03 as a string, not a float.
  max_lr: 1.0e-3
  epochs: 5  # NOTE(review): duplicates train_args.epochs — keep in sync
  anneal_strategy: linear


# Training callbacks; block style instead of a 97-column flow list so the
# sequence stays within line limits and diffs one entry per line.
# Each entry may have matching options under callback_args.
callbacks:
  - Checkpoint
  - ProgressBar
  - EarlyStopping
  - WandbCallback
  - WandbImageLogger
  - OneCycleLR
callback_args:
  Checkpoint:
    monitor: val_accuracy  # checkpoint on the validation-accuracy metric
  ProgressBar:
    epochs: 5  # NOTE(review): duplicates train_args.epochs — keep in sync
    log_batch_frequency: 100
  EarlyStopping:
    monitor: val_loss
    min_delta: 0.0  # any improvement counts
    patience: 3  # epochs without improvement before stopping
    mode: min  # lower val_loss is better
  WandbCallback:
    log_batch_frequency: 10
  WandbImageLogger:
    num_examples: 4
  OneCycleLR: null  # no callback-level options; schedule set via lr_scheduler_args
verbosity: 1 # allowed levels: 0, 1, 2 — TODO confirm meaning of each in the trainer
resume_experiment: null  # presumably an experiment id/path to resume from — confirm; null = fresh run
validation_metric: val_accuracy  # metric used for model selection