path: root/training/conf/callbacks
Diffstat (limited to 'training/conf/callbacks')
-rw-r--r-- | training/conf/callbacks/checkpoint.yaml                  | 9
-rw-r--r-- | training/conf/callbacks/default.yaml                     | 4
-rw-r--r-- | training/conf/callbacks/early_stopping.yaml              | 6
-rw-r--r-- | training/conf/callbacks/learning_rate_monitor.yaml       | 4
-rw-r--r-- | training/conf/callbacks/swa.yaml                         | 7
-rw-r--r-- | training/conf/callbacks/wandb_checkpoints.yaml           | 4
-rw-r--r-- | training/conf/callbacks/wandb_config.yaml                | 2
-rw-r--r-- | training/conf/callbacks/wandb_htr.yaml                   | 6
-rw-r--r-- | training/conf/callbacks/wandb_htr_predictions.yaml       | 4
-rw-r--r-- | training/conf/callbacks/wandb_image_reconstructions.yaml | 5
-rw-r--r-- | training/conf/callbacks/wandb_vae.yaml                   | 6
-rw-r--r-- | training/conf/callbacks/wandb_watch.yaml                 | 4
12 files changed, 2 insertions, 59 deletions
diff --git a/training/conf/callbacks/checkpoint.yaml b/training/conf/callbacks/checkpoint.yaml
deleted file mode 100644
index b4101d8..0000000
--- a/training/conf/callbacks/checkpoint.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-model_checkpoint:
- _target_: pytorch_lightning.callbacks.ModelCheckpoint
- monitor: val/loss # name of the logged metric which determines when model is improving
- save_top_k: 1 # save k best models (determined by above metric)
- save_last: true # additionally always save model from last epoch
- mode: min # can be "max" or "min"
- verbose: false
- dirpath: checkpoints/
- filename: "{epoch:02d}"
diff --git a/training/conf/callbacks/default.yaml b/training/conf/callbacks/default.yaml
index 658fc03..c184039 100644
--- a/training/conf/callbacks/default.yaml
+++ b/training/conf/callbacks/default.yaml
@@ -1,3 +1,3 @@
defaults:
- - checkpoint
- - learning_rate_monitor
+ - lightning: checkpoint
+ - lightning: learning_rate_monitor
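
The two new entries point the defaults list at a "lightning" config group instead of the files in this directory, which is why every standalone YAML below is deleted (presumably the equivalents now live under a lightning/ subdirectory; that destination is not shown in this diff). As a rough sketch of how such a callbacks node is consumed in a conventional Hydra + PyTorch Lightning setup — the build_callbacks helper below is illustrative, not code from this repository:

    from typing import List

    import hydra
    from omegaconf import DictConfig
    from pytorch_lightning import Callback

    def build_callbacks(config: DictConfig) -> List[Callback]:
        # Instantiate every callback entry that carries a _target_, e.g.
        # model_checkpoint -> pytorch_lightning.callbacks.ModelCheckpoint.
        callbacks: List[Callback] = []
        for _, cb_conf in config.callbacks.items():
            if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf:
                callbacks.append(hydra.utils.instantiate(cb_conf))
        return callbacks
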
diff --git a/training/conf/callbacks/early_stopping.yaml b/training/conf/callbacks/early_stopping.yaml
deleted file mode 100644
index a188df3..0000000
--- a/training/conf/callbacks/early_stopping.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-early_stopping:
- _target_: pytorch_lightning.callbacks.EarlyStopping
- monitor: val/loss # name of the logged metric which determines when model is improving
- patience: 16 # how many epochs of not improving until training stops
- mode: min # can be "max" or "min"
- min_delta: 0 # minimum change in the monitored metric needed to qualify as an improvement
diff --git a/training/conf/callbacks/learning_rate_monitor.yaml b/training/conf/callbacks/learning_rate_monitor.yaml
deleted file mode 100644
index 4a14e1f..0000000
--- a/training/conf/callbacks/learning_rate_monitor.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-learning_rate_monitor:
- _target_: pytorch_lightning.callbacks.LearningRateMonitor
- logging_interval: step
- log_momentum: false
diff --git a/training/conf/callbacks/swa.yaml b/training/conf/callbacks/swa.yaml
deleted file mode 100644
index 73f8c66..0000000
--- a/training/conf/callbacks/swa.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-stochastic_weight_averaging:
- _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
- swa_epoch_start: 0.8
- swa_lrs: 0.05
- annealing_epochs: 10
- annealing_strategy: cos
- device: null
diff --git a/training/conf/callbacks/wandb_checkpoints.yaml b/training/conf/callbacks/wandb_checkpoints.yaml
deleted file mode 100644
index a4a16ff..0000000
--- a/training/conf/callbacks/wandb_checkpoints.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-upload_ckpts_as_artifact:
- _target_: callbacks.wandb_callbacks.UploadCheckpointsAsArtifact
- ckpt_dir: checkpoints/
- upload_best_only: true
diff --git a/training/conf/callbacks/wandb_config.yaml b/training/conf/callbacks/wandb_config.yaml
deleted file mode 100644
index 747a7c6..0000000
--- a/training/conf/callbacks/wandb_config.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-upload_code_as_artifact:
- _target_: callbacks.wandb_callbacks.UploadConfigAsArtifact
diff --git a/training/conf/callbacks/wandb_htr.yaml b/training/conf/callbacks/wandb_htr.yaml
deleted file mode 100644
index f8c1ef7..0000000
--- a/training/conf/callbacks/wandb_htr.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-defaults:
- - default
- - wandb_watch
- - wandb_config
- - wandb_checkpoints
- - wandb_htr_predictions
diff --git a/training/conf/callbacks/wandb_htr_predictions.yaml b/training/conf/callbacks/wandb_htr_predictions.yaml
deleted file mode 100644
index 468b6e0..0000000
--- a/training/conf/callbacks/wandb_htr_predictions.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-log_text_predictions:
- _target_: callbacks.wandb_callbacks.LogTextPredictions
- num_samples: 8
- log_train: false
diff --git a/training/conf/callbacks/wandb_image_reconstructions.yaml b/training/conf/callbacks/wandb_image_reconstructions.yaml
deleted file mode 100644
index fabfe31..0000000
--- a/training/conf/callbacks/wandb_image_reconstructions.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-log_image_reconstruction:
- _target_: callbacks.wandb_callbacks.LogReconstuctedImages
- num_samples: 8
- log_train: true
- use_sigmoid: true
diff --git a/training/conf/callbacks/wandb_vae.yaml b/training/conf/callbacks/wandb_vae.yaml
deleted file mode 100644
index ffc467f..0000000
--- a/training/conf/callbacks/wandb_vae.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-defaults:
- - default
- - wandb_watch
- - wandb_checkpoints
- - wandb_image_reconstructions
- - wandb_config
diff --git a/training/conf/callbacks/wandb_watch.yaml b/training/conf/callbacks/wandb_watch.yaml
deleted file mode 100644
index 511608c..0000000
--- a/training/conf/callbacks/wandb_watch.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-watch_model:
- _target_: callbacks.wandb_callbacks.WatchModel
- log: all
- log_freq: 100
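
For reference, all the deleted wandb_*.yaml configs target custom classes in callbacks/wandb_callbacks.py (WatchModel, UploadConfigAsArtifact, LogTextPredictions, and so on). A minimal sketch of what a WatchModel-style callback with log: all and log_freq: 100 typically does — an illustrative reconstruction, not the project's actual implementation:

    # Illustrative reconstruction; the real callbacks.wandb_callbacks.WatchModel
    # in this repository may differ.
    import wandb
    from pytorch_lightning import Callback, LightningModule, Trainer

    class WatchModel(Callback):
        """Ask W&B to log gradients/parameters of the LightningModule."""

        def __init__(self, log: str = "all", log_freq: int = 100) -> None:
            self.log = log
            self.log_freq = log_freq

        def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
            # Registers the model with the active W&B run so that gradients and
            # parameters are logged every `log_freq` steps.
            wandb.watch(pl_module, log=self.log, log_freq=self.log_freq)
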