 text_recognizer/models/base.py                                        |  8 ++++----
 training/conf/config.yaml                                             |  4 ++--
 training/conf/experiment/conv_transformer_lines.yaml                  |  8 ++++----
 training/conf/experiment/conv_transformer_paragraphs.yaml             |  8 ++++----
 training/conf/{lr_schedulers => lr_scheduler}/cosine_annealing.yaml   |  0
 training/conf/{lr_schedulers => lr_scheduler}/one_cycle.yaml          |  0
 training/conf/{lr_schedulers => lr_scheduler}/reduce_on_plateau.yaml  |  0
 training/conf/{optimizers => optimizer}/radam.yaml                    |  0
 training/run.py                                                       |  4 ++--
 9 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/text_recognizer/models/base.py b/text_recognizer/models/base.py
index 1ebb256..8dbdafa 100644
--- a/text_recognizer/models/base.py
+++ b/text_recognizer/models/base.py
@@ -20,16 +20,16 @@ class LitBase(LightningModule):
         self,
         network: Type[nn.Module],
         loss_fn: Type[nn.Module],
-        optimizer_configs: DictConfig,
-        lr_scheduler_configs: Optional[DictConfig],
+        optimizer_config: DictConfig,
+        lr_scheduler_config: Optional[DictConfig],
         mapping: EmnistMapping,
     ) -> None:
         super().__init__()
         self.network = network
         self.loss_fn = loss_fn
-        self.optimizer_configs = optimizer_configs
-        self.lr_scheduler_configs = lr_scheduler_configs
+        self.optimizer_config = optimizer_config
+        self.lr_scheduler_config = lr_scheduler_config
         self.mapping = mapping

         # Placeholders
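
Note on how the renamed attributes are consumed: the parameters: network key in the experiment configs further down suggests the optimizer is built from a named sub-module's parameters. The following is a minimal sketch of that pattern, assuming the usual hydra.utils.instantiate approach; it is an illustration, not the repository's actual configure_optimizers:

    import hydra
    from omegaconf import OmegaConf

    def configure_optimizers(self):  # sketch of a LitBase method
        # Resolve the optimizer config and pop the non-torch "parameters" key,
        # which (per the experiment YAMLs) names the attribute whose parameters to optimize.
        opt_cfg = OmegaConf.to_container(self.optimizer_config, resolve=True)
        params = getattr(self, opt_cfg.pop("parameters", "network")).parameters()
        optimizer = hydra.utils.instantiate(opt_cfg, params=params)
        if self.lr_scheduler_config is None:
            return optimizer
        scheduler = hydra.utils.instantiate(self.lr_scheduler_config, optimizer=optimizer)
        return {"optimizer": optimizer, "lr_scheduler": {"scheduler": scheduler}}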
diff --git a/training/conf/config.yaml b/training/conf/config.yaml
index fdeb5d2..46a1e43 100644
--- a/training/conf/config.yaml
+++ b/training/conf/config.yaml
@@ -7,10 +7,10 @@ defaults:
   - datamodule: iam_extended_paragraphs
   - hydra: default
   - logger: wandb
-  - lr_schedulers: cosine_annealing
+  - lr_scheduler: cosine_annealing
   - model: lit_transformer
   - network: conv_transformer
-  - optimizers: radam
+  - optimizer: radam
   - trainer: default

 seed: 4711
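
With the group directories renamed, the composed config exposes them under singular keys, so command-line overrides become e.g. optimizer=radam lr_scheduler=one_cycle. A quick way to verify the composition with Hydra's compose API; config_path is relative to the calling file and version_base=None assumes Hydra >= 1.2:

    from hydra import compose, initialize

    with initialize(config_path="training/conf", version_base=None):
        cfg = compose(config_name="config")
        print(cfg.optimizer._target_)     # e.g. torch.optim.RAdam, from optimizer/radam.yaml
        print(cfg.lr_scheduler._target_)  # from lr_scheduler/cosine_annealing.yaml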
diff --git a/training/conf/experiment/conv_transformer_lines.yaml b/training/conf/experiment/conv_transformer_lines.yaml
index 48df78d..eb9bc9e 100644
--- a/training/conf/experiment/conv_transformer_lines.yaml
+++ b/training/conf/experiment/conv_transformer_lines.yaml
@@ -6,8 +6,8 @@ defaults:
   - override /datamodule: iam_lines
   - override /network: conv_transformer
   - override /model: lit_transformer
-  - override /lr_schedulers: null
-  - override /optimizers: null
+  - override /lr_scheduler: null
+  - override /optimizer: null

 epochs: &epochs 512
 ignore_index: &ignore_index 3
@@ -28,7 +28,7 @@ callbacks:
     annealing_strategy: cos
     device: null

-optimizers:
+optimizer:
   _target_: torch.optim.RAdam
   lr: 3.0e-4
   betas: [0.9, 0.999]
@@ -36,7 +36,7 @@ optimizers:
   eps: 1.0e-8
   parameters: network

-lr_schedulers:
+lr_scheduler:
   _target_: torch.optim.lr_scheduler.OneCycleLR
   max_lr: 3.0e-4
   total_steps: null
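
torch.optim.lr_scheduler.OneCycleLR raises an error if total_steps stays None and epochs/steps_per_epoch are not given, so the null above has to be filled in at runtime. One common way to do that in a LightningModule, shown purely as a hedged sketch (the repository may resolve it differently), is to use the trainer's planned step count:

    from omegaconf import OmegaConf

    def _resolve_total_steps(self):  # sketch of a helper on the LightningModule
        cfg = OmegaConf.to_container(self.lr_scheduler_config, resolve=True)
        if cfg.get("total_steps") is None:
            # Lightning >= 1.6 exposes the number of optimizer steps the run will take.
            cfg["total_steps"] = self.trainer.estimated_stepping_batches
        return cfg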
diff --git a/training/conf/experiment/conv_transformer_paragraphs.yaml b/training/conf/experiment/conv_transformer_paragraphs.yaml
index d0d0314..7c6e231 100644
--- a/training/conf/experiment/conv_transformer_paragraphs.yaml
+++ b/training/conf/experiment/conv_transformer_paragraphs.yaml
@@ -6,8 +6,8 @@ defaults:
   - override /datamodule: iam_extended_paragraphs
   - override /network: conv_transformer
   - override /model: lit_transformer
-  - override /lr_schedulers: null
-  - override /optimizers: null
+  - override /lr_scheduler: null
+  - override /optimizer: null

 epochs: &epochs 600
 num_classes: &num_classes 58
@@ -28,7 +28,7 @@ callbacks:
     annealing_strategy: cos
     device: null

-optimizers:
+optimizer:
   _target_: torch.optim.RAdam
   lr: 3.0e-4
   betas: [0.9, 0.999]
@@ -36,7 +36,7 @@ optimizers:
   eps: 1.0e-8
   parameters: network

-lr_schedulers:
+lr_scheduler:
   _target_: torch.optim.lr_scheduler.OneCycleLR
   max_lr: 3.0e-4
   total_steps: null
diff --git a/training/conf/lr_schedulers/cosine_annealing.yaml b/training/conf/lr_scheduler/cosine_annealing.yaml
index e8364f0..e8364f0 100644
--- a/training/conf/lr_schedulers/cosine_annealing.yaml
+++ b/training/conf/lr_scheduler/cosine_annealing.yaml
diff --git a/training/conf/lr_schedulers/one_cycle.yaml b/training/conf/lr_scheduler/one_cycle.yaml
index 20eab9f..20eab9f 100644
--- a/training/conf/lr_schedulers/one_cycle.yaml
+++ b/training/conf/lr_scheduler/one_cycle.yaml
diff --git a/training/conf/lr_schedulers/reduce_on_plateau.yaml b/training/conf/lr_scheduler/reduce_on_plateau.yaml
index 9aa9de4..9aa9de4 100644
--- a/training/conf/lr_schedulers/reduce_on_plateau.yaml
+++ b/training/conf/lr_scheduler/reduce_on_plateau.yaml
diff --git a/training/conf/optimizers/radam.yaml b/training/conf/optimizer/radam.yaml
index d11fcb5..d11fcb5 100644
--- a/training/conf/optimizers/radam.yaml
+++ b/training/conf/optimizer/radam.yaml
diff --git a/training/run.py b/training/run.py
index 0e17b4e..68cedc7 100644
--- a/training/run.py
+++ b/training/run.py
@@ -41,8 +41,8 @@ def run(config: DictConfig) -> Optional[float]:
         network=network,
         mapping=datamodule.mapping,
         loss_fn=loss_fn,
-        optimizer_configs=config.optimizers,
-        lr_scheduler_configs=config.lr_schedulers,
+        optimizer_config=config.optimizer,
+        lr_scheduler_config=config.lr_scheduler,
         _recursive_=False,
     )
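
The _recursive_=False flag is relevant to this change: it tells Hydra not to instantiate nested nodes that carry a _target_, so config.optimizer and config.lr_scheduler reach LitBase as plain DictConfig objects rather than prematurely constructed optimizer/scheduler instances, presumably so the module can instantiate them later once the network's parameters exist.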