Diffstat (limited to 'training/conf')
-rw-r--r--  training/conf/experiment/vqgan.yaml  17
-rw-r--r--  training/conf/experiment/vqvae.yaml  8
-rw-r--r--  training/conf/lr_schedulers/cosine_annealing.yaml (renamed from training/conf/lr_scheduler/cosine_annealing.yaml)  0
-rw-r--r--  training/conf/lr_schedulers/one_cycle.yaml (renamed from training/conf/lr_scheduler/one_cycle.yaml)  0
-rw-r--r--  training/conf/optimizers/madgrad.yaml (renamed from training/conf/optimizer/madgrad.yaml)  0
5 files changed, 15 insertions, 10 deletions
diff --git a/training/conf/experiment/vqgan.yaml b/training/conf/experiment/vqgan.yaml
index 570e7f9..554ec9e 100644
--- a/training/conf/experiment/vqgan.yaml
+++ b/training/conf/experiment/vqgan.yaml
@@ -8,6 +8,19 @@ defaults:
- override /optimizers: null
- override /lr_schedulers: null
+criterion:
+ _target_: text_recognizer.criterions.vqgan_loss.VQGANLoss
+ reconstruction_loss:
+ _target_: torch.nn.L1Loss
+ reduction: mean
+ discriminator:
+ _target_: text_recognizer.criterions.n_layer_discriminator.NLayerDiscriminator
+ in_channels: 1
+ num_channels: 32
+ num_layers: 3
+ vq_loss_weight: 0.8
+ discriminator_weight: 0.6
+
datamodule:
  batch_size: 8
@@ -33,7 +46,7 @@ lr_schedulers:
optimizers:
  generator:
    _target_: madgrad.MADGRAD
-    lr: 2.0e-5
+    lr: 4.5e-6
    momentum: 0.5
    weight_decay: 0
    eps: 1.0e-6
@@ -42,7 +55,7 @@ optimizers:
  discriminator:
    _target_: madgrad.MADGRAD
-    lr: 2.0e-5
+    lr: 4.5e-6
    momentum: 0.5
    weight_decay: 0
    eps: 1.0e-6
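The new criterion block follows Hydra's object-config convention: each _target_ names a class that is imported and called with its sibling keys as keyword arguments, and nested blocks are built first. A minimal sketch of how the block would be resolved, assuming the training entry point (not shown in this diff) uses hydra.utils.instantiate and the repo's text_recognizer package is importable:

# Sketch only: recursive instantiation constructs the L1Loss and the
# NLayerDiscriminator before constructing VQGANLoss itself.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create(
    """
criterion:
  _target_: text_recognizer.criterions.vqgan_loss.VQGANLoss
  reconstruction_loss:
    _target_: torch.nn.L1Loss
    reduction: mean
  discriminator:
    _target_: text_recognizer.criterions.n_layer_discriminator.NLayerDiscriminator
    in_channels: 1
    num_channels: 32
    num_layers: 3
  vq_loss_weight: 0.8
  discriminator_weight: 0.6
"""
)

criterion = instantiate(cfg.criterion)  # -> VQGANLoss instance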
diff --git a/training/conf/experiment/vqvae.yaml b/training/conf/experiment/vqvae.yaml
index 397a039..8dbb257 100644
--- a/training/conf/experiment/vqvae.yaml
+++ b/training/conf/experiment/vqvae.yaml
@@ -10,16 +10,8 @@ defaults:
trainer:
  max_epochs: 256
-  # gradient_clip_val: 0.25
datamodule:
  batch_size: 8
-# lr_scheduler:
-  # epochs: 64
-  # steps_per_epoch: 1245
-
-# optimizer:
-  # lr: 1.0e-3
-
summary: null
diff --git a/training/conf/lr_scheduler/cosine_annealing.yaml b/training/conf/lr_schedulers/cosine_annealing.yaml
index c53ee3a..c53ee3a 100644
--- a/training/conf/lr_scheduler/cosine_annealing.yaml
+++ b/training/conf/lr_schedulers/cosine_annealing.yaml
diff --git a/training/conf/lr_scheduler/one_cycle.yaml b/training/conf/lr_schedulers/one_cycle.yaml
index c60577a..c60577a 100644
--- a/training/conf/lr_scheduler/one_cycle.yaml
+++ b/training/conf/lr_schedulers/one_cycle.yaml
diff --git a/training/conf/optimizer/madgrad.yaml b/training/conf/optimizers/madgrad.yaml
index a6c059d..a6c059d 100644
--- a/training/conf/optimizer/madgrad.yaml
+++ b/training/conf/optimizers/madgrad.yaml
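The last three entries only rename the config groups lr_scheduler -> lr_schedulers and optimizer -> optimizers; the identical index hashes (e.g. c53ee3a..c53ee3a) show the file contents are untouched. A hypothetical sketch of composing the renamed optimizer group, assuming madgrad.yaml still targets madgrad.MADGRAD (its contents are unchanged by the rename) and that model parameters are supplied at instantiation time:

# Sketch only: the values mirror the generator optimizer above; the stand-in
# model and the call site are illustrative, not taken from this repository.
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

model = torch.nn.Linear(8, 8)  # stand-in module

cfg = OmegaConf.create(
    {
        "optimizers": {  # group is now addressed with the plural name
            "generator": {
                "_target_": "madgrad.MADGRAD",
                "lr": 4.5e-6,
                "momentum": 0.5,
                "weight_decay": 0,
                "eps": 1.0e-6,
            }
        }
    }
)

optimizer = instantiate(cfg.optimizers.generator, params=model.parameters())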