author     Gustaf Rydholm <gustaf.rydholm@gmail.com>   2021-07-30 23:15:03 +0200
committer  Gustaf Rydholm <gustaf.rydholm@gmail.com>   2021-07-30 23:15:03 +0200
commit     7268035fb9e57342612a8cc50a1fe04e8841ca2f (patch)
tree       8d4cf3743975bd25f2c04d6a56ff3d4608a7e8d9 /training/conf/callbacks
parent     92fc1c7ed2f9f64552be8f71d9b8ab0d5a0a88d4 (diff)
attr bug fix, properly loading network
Diffstat (limited to 'training/conf/callbacks')
-rw-r--r--  training/conf/callbacks/checkpoint.yaml      | 12
-rw-r--r--  training/conf/callbacks/early_stopping.yaml  |  4
-rw-r--r--  training/conf/callbacks/wandb.yaml           |  6
3 files changed, 11 insertions(+), 11 deletions(-)
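
Every hunk below follows the same pattern: redundant quotes are dropped from YAML scalars, since values like val/loss, min, and true load to the same thing unquoted. The one value that must stay quoted is the checkpoint filename pattern, because unquoted curly braces start a YAML flow mapping instead of a string. A minimal sketch with PyYAML (used here only for illustration; Hydra's OmegaConf parses these files the same way):

import yaml

# Unquoted plain scalars load to the same values as their quoted forms.
assert yaml.safe_load("monitor: val/loss") == yaml.safe_load('monitor: "val/loss"')
assert yaml.safe_load("save_last: true") == {"save_last": True}

# The filename pattern is the exception: braces open a flow mapping,
# so the unquoted form no longer yields the template string that
# ModelCheckpoint expects.
quoted = yaml.safe_load('filename: "{epoch:02d}"')
unquoted = yaml.safe_load("filename: {epoch:02d}")
print(type(quoted["filename"]))    # str
print(type(unquoted["filename"]))  # dict
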
diff --git a/training/conf/callbacks/checkpoint.yaml b/training/conf/callbacks/checkpoint.yaml
index 9216715..db34cb1 100644
--- a/training/conf/callbacks/checkpoint.yaml
+++ b/training/conf/callbacks/checkpoint.yaml
@@ -1,9 +1,9 @@
 model_checkpoint:
   _target_: pytorch_lightning.callbacks.ModelCheckpoint
-  monitor: "val/loss" # name of the logged metric which determines when model is improving
+  monitor: val/loss # name of the logged metric which determines when model is improving
   save_top_k: 1 # save k best models (determined by above metric)
-  save_last: True # additionally always save model from last epoch
-  mode: "min" # can be "max" or "min"
-  verbose: False
-  dirpath: "checkpoints/"
-  filename: "{epoch:02d}"
+  save_last: true # additionally always save model from last epoch
+  mode: min # can be "max" or "min"
+  verbose: false
+  dirpath: checkpoints/
+  filename: "{epoch:02d}" # keep quoted; unquoted braces are parsed as a YAML mapping, not a string
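
These files are Hydra-style configs, so the training code presumably builds each callback from its _target_ with hydra.utils.instantiate. A rough sketch of loading the checkpoint group directly (the real code composes it through Hydra's defaults list; the direct OmegaConf.load here is only for illustration):

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Build the ModelCheckpoint callback described by the config above.
cfg = OmegaConf.load("training/conf/callbacks/checkpoint.yaml")
checkpoint_callback = instantiate(cfg.model_checkpoint)
print(type(checkpoint_callback))  # pytorch_lightning ModelCheckpoint
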
diff --git a/training/conf/callbacks/early_stopping.yaml b/training/conf/callbacks/early_stopping.yaml
index 4cd5aa1..a188df3 100644
--- a/training/conf/callbacks/early_stopping.yaml
+++ b/training/conf/callbacks/early_stopping.yaml
@@ -1,6 +1,6 @@
 early_stopping:
   _target_: pytorch_lightning.callbacks.EarlyStopping
-  monitor: "val/loss" # name of the logged metric which determines when model is improving
+  monitor: val/loss # name of the logged metric which determines when model is improving
   patience: 16 # how many epochs of not improving until training stops
-  mode: "min" # can be "max" or "min"
+  mode: min # can be "max" or "min"
   min_delta: 0 # minimum change in the monitored metric needed to qualify as an improvement
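
For reference, a hand-built equivalent of the early-stopping config above, wired into a Lightning Trainer (the Trainer call is an assumption for illustration; the repo passes callbacks in through Hydra):

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping

# Stop training after 16 epochs without any decrease (min_delta=0)
# in the logged "val/loss" metric.
early_stopping = EarlyStopping(monitor="val/loss", patience=16, mode="min", min_delta=0)
trainer = Trainer(callbacks=[early_stopping])  # remaining Trainer arguments omitted
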
diff --git a/training/conf/callbacks/wandb.yaml b/training/conf/callbacks/wandb.yaml
index 6eedb71..0017e11 100644
--- a/training/conf/callbacks/wandb.yaml
+++ b/training/conf/callbacks/wandb.yaml
@@ -3,7 +3,7 @@ defaults:
 watch_model:
   _target_: callbacks.wandb_callbacks.WatchModel
-  log: "all"
+  log: all
   log_freq: 100

 upload_code_as_artifact:
@@ -12,8 +12,8 @@ upload_code_as_artifact:
 upload_ckpts_as_artifact:
   _target_: callbacks.wandb_callbacks.UploadCheckpointsAsArtifact
-  ckpt_dir: "checkpoints/"
-  upload_best_only: True
+  ckpt_dir: checkpoints/
+  upload_best_only: true

 log_text_predictions:
   _target_: callbacks.wandb_callbacks.LogTextPredictions
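
WatchModel, UploadCheckpointsAsArtifact and LogTextPredictions are custom callbacks in the repo's callbacks.wandb_callbacks module; their implementation is not part of this diff. Purely as a hypothetical sketch of what a WatchModel-style callback does with the log and log_freq values above (the class body is an assumption; only wandb.watch is a real API):

import wandb
from pytorch_lightning import Callback

class WatchModelSketch(Callback):
    """Hypothetical stand-in for callbacks.wandb_callbacks.WatchModel."""

    def __init__(self, log: str = "all", log_freq: int = 100) -> None:
        self.log = log
        self.log_freq = log_freq

    def on_train_start(self, trainer, pl_module) -> None:
        # log="all" logs both gradients and parameters every log_freq steps.
        wandb.watch(pl_module, log=self.log, log_freq=self.log_freq)
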