author | aktersnurra <gustaf.rydholm@gmail.com> | 2020-08-11 23:08:56 +0200
committer | aktersnurra <gustaf.rydholm@gmail.com> | 2020-08-11 23:08:56 +0200
commit | 95cbdf5bc1cc9639febda23c28d8f464c998b214 (patch)
tree | 435faa5645bab4c05b7824f33d8e94a0bc421b66 /src/training
parent | 53677be4ec14854ea4881b0d78730e0414c8dedd (diff)
Working on the CNN LSTM CTC model.
Diffstat (limited to 'src/training')
-rw-r--r-- | src/training/experiments/sample_experiment.yml | 70
-rw-r--r-- | src/training/run_sweep.py | 8
2 files changed, 8 insertions, 70 deletions
diff --git a/src/training/experiments/sample_experiment.yml b/src/training/experiments/sample_experiment.yml
index 57198f1..355305c 100644
--- a/src/training/experiments/sample_experiment.yml
+++ b/src/training/experiments/sample_experiment.yml
@@ -70,73 +70,3 @@ experiments:
         null
     verbosity: 2 # 0, 1, 2
     resume_experiment: null
-  # - dataset: EmnistDataset
-  #   dataset_args:
-  #     sample_to_balance: true
-  #     subsample_fraction: null
-  #     transform: null
-  #     target_transform: null
-  #     seed: 4711
-  #   data_loader_args:
-  #     splits: [train, val]
-  #     batch_size: 256
-  #     shuffle: true
-  #     num_workers: 8
-  #     cuda: true
-  #   model: CharacterModel
-  #   metrics: [accuracy]
-  #   # network: MLP
-  #   # network_args:
-  #   #   input_size: 784
-  #   #   output_size: 62
-  #   #   num_layers: 3
-  #   #   activation_fn: GELU
-  #   network: LeNet
-  #   network_args:
-  #     output_size: 62
-  #     activation_fn: GELU
-  #   train_args:
-  #     batch_size: 256
-  #     epochs: 16
-  #   criterion: CrossEntropyLoss
-  #   criterion_args:
-  #     weight: null
-  #     ignore_index: -100
-  #     reduction: mean
-  #   # optimizer: RMSprop
-  #   # optimizer_args:
-  #   #   lr: 1.e-3
-  #   #   alpha: 0.9
-  #   #   eps: 1.e-7
-  #   #   momentum: 0
-  #   #   weight_decay: 0
-  #   #   centered: false
-  #   optimizer: AdamW
-  #   optimizer_args:
-  #     lr: 1.e-2
-  #     betas: [0.9, 0.999]
-  #     eps: 1.e-08
-  #     weight_decay: 0
-  #     amsgrad: false
-  #   # lr_scheduler: null
-  #   lr_scheduler: OneCycleLR
-  #   lr_scheduler_args:
-  #     max_lr: 1.e-3
-  #     epochs: 16
-  #   callbacks: [Checkpoint, EarlyStopping, WandbCallback, WandbImageLogger, OneCycleLR]
-  #   callback_args:
-  #     Checkpoint:
-  #       monitor: val_accuracy
-  #     EarlyStopping:
-  #       monitor: val_loss
-  #       min_delta: 0.0
-  #       patience: 3
-  #       mode: min
-  #     WandbCallback:
-  #       log_batch_frequency: 10
-  #     WandbImageLogger:
-  #       num_examples: 4
-  #     OneCycleLR:
-  #       null
-  #   verbosity: 2 # 0, 1, 2
-  #   resume_experiment: null
diff --git a/src/training/run_sweep.py b/src/training/run_sweep.py
new file
index 0000000..5c5322a
--- /dev/null
+++ b/src/training/run_sweep.py
@@ -0,0 +1,8 @@
+"""W&B Sweep Functionality."""
+from ast import literal_eval
+import json
+import os
+import signal
+import subprocess # nosec
+import sys
+from typing import Tuple
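
Note that this commit only adds the import block of run_sweep.py; no sweep logic is included yet. Below is a minimal sketch of how a sweep runner could use exactly those imports: parse hyperparameters passed by a W&B sweep agent, then relaunch the training entry point in a subprocess with the overrides forwarded as JSON. The entry point path training/run_experiment.py and the flag names are hypothetical, not taken from this repository.

"""Hypothetical sketch of a W&B sweep runner (not part of this commit)."""
from ast import literal_eval
import json
import os
import signal
import subprocess # nosec
import sys
from typing import Tuple


def parse_sweep_args(argv) -> Tuple[str, dict]:
    """Split argv into the experiment config path and sweep overrides.

    The W&B agent passes hyperparameters as --key=value flags; values are
    parsed with literal_eval so numbers, lists, and booleans survive.
    """
    if not argv:
        sys.exit("usage: run_sweep.py <experiment_config> [--key=value ...]")
    config_path = argv[0]
    overrides = {}
    for arg in argv[1:]:
        key, _, raw = arg.lstrip("-").partition("=")
        try:
            overrides[key] = literal_eval(raw)
        except (ValueError, SyntaxError):
            overrides[key] = raw  # keep plain strings (e.g. optimizer names) as-is
    return config_path, overrides


def main() -> None:
    config_path, overrides = parse_sweep_args(sys.argv[1:])
    # Hand the overrides to the training entry point as a single JSON blob so
    # the child process does not need to re-parse W&B's flag format.
    cmd = [
        sys.executable,
        "training/run_experiment.py",  # hypothetical entry point
        f"--experiment_config={config_path}",
        f"--sweep_overrides={json.dumps(overrides)}",
    ]
    proc = subprocess.Popen(cmd, env=os.environ.copy())  # nosec
    # Forward Ctrl-C to the child so the sweep agent can stop runs cleanly.
    signal.signal(signal.SIGINT, lambda *_: proc.send_signal(signal.SIGINT))
    sys.exit(proc.wait())


if __name__ == "__main__":
    main()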