author    | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2021-03-20 18:09:06 +0100
committer | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2021-03-20 18:09:06 +0100
commit    | 7e8e54e84c63171e748bbf09516fd517e6821ace (patch)
tree      | 996093f75a5d488dddf7ea1f159ed343a561ef89 /src/training/experiments
parent    | b0719d84138b6bbe5f04a4982dfca673aea1a368 (diff)
Initial commit for refactoring to lightning
Diffstat (limited to 'src/training/experiments')
-rw-r--r-- | src/training/experiments/default_config_emnist.yml | 70
-rw-r--r-- | src/training/experiments/embedding_experiment.yml  | 64
-rw-r--r-- | src/training/experiments/sample_experiment.yml     | 99
3 files changed, 0 insertions, 233 deletions
diff --git a/src/training/experiments/default_config_emnist.yml b/src/training/experiments/default_config_emnist.yml
deleted file mode 100644
index bf2ed0a..0000000
--- a/src/training/experiments/default_config_emnist.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-dataset: EmnistDataset
-dataset_args:
-  sample_to_balance: true
-  subsample_fraction: 0.33
-  transform: null
-  target_transform: null
-  seed: 4711
-
-data_loader_args:
-  splits: [train, val]
-  shuffle: true
-  num_workers: 8
-  cuda: true
-
-model: CharacterModel
-metrics: [accuracy]
-
-network_args:
-  in_channels: 1
-  num_classes: 80
-  depths: [2]
-  block_sizes: [256]
-
-train_args:
-  batch_size: 256
-  epochs: 5
-
-criterion: CrossEntropyLoss
-criterion_args:
-  weight: null
-  ignore_index: -100
-  reduction: mean
-
-optimizer: AdamW
-optimizer_args:
-  lr: 1.e-03
-  betas: [0.9, 0.999]
-  eps: 1.e-08
-  # weight_decay: 5.e-4
-  amsgrad: false
-
-lr_scheduler: OneCycleLR
-lr_scheduler_args:
-  max_lr: 1.e-03
-  epochs: 5
-  anneal_strategy: linear
-
-
-callbacks: [Checkpoint, ProgressBar, EarlyStopping, WandbCallback, WandbImageLogger, OneCycleLR]
-callback_args:
-  Checkpoint:
-    monitor: val_accuracy
-  ProgressBar:
-    epochs: 5
-    log_batch_frequency: 100
-  EarlyStopping:
-    monitor: val_loss
-    min_delta: 0.0
-    patience: 3
-    mode: min
-  WandbCallback:
-    log_batch_frequency: 10
-  WandbImageLogger:
-    num_examples: 4
-  OneCycleLR:
-    null
-verbosity: 1 # 0, 1, 2
-resume_experiment: null
-train: true
-validation_metric: val_accuracy
diff --git a/src/training/experiments/embedding_experiment.yml b/src/training/experiments/embedding_experiment.yml
deleted file mode 100644
index 1e5f941..0000000
--- a/src/training/experiments/embedding_experiment.yml
+++ /dev/null
@@ -1,64 +0,0 @@
-experiment_group: Embedding Experiments
-experiments:
-  - train_args:
-      transformer_model: false
-      batch_size: &batch_size 256
-      max_epochs: &max_epochs 32
-      input_shape: [[1, 28, 28]]
-    dataset:
-      type: EmnistDataset
-      args:
-        sample_to_balance: true
-        subsample_fraction: null
-        transform: null
-        target_transform: null
-        seed: 4711
-      train_args:
-        num_workers: 8
-        train_fraction: 0.85
-        batch_size: *batch_size
-    model: CharacterModel
-    metrics: []
-    network:
-      type: DenseNet
-      args:
-        growth_rate: 4
-        block_config: [4, 4]
-        in_channels: 1
-        base_channels: 24
-        num_classes: 128
-        bn_size: 4
-        dropout_rate: 0.1
-        classifier: true
-        activation: elu
-    criterion:
-      type: EmbeddingLoss
-      args:
-        margin: 0.2
-        type_of_triplets: semihard
-    optimizer:
-      type: AdamW
-      args:
-        lr: 1.e-02
-        betas: [0.9, 0.999]
-        eps: 1.e-08
-        weight_decay: 5.e-4
-        amsgrad: false
-    lr_scheduler:
-      type: CosineAnnealingLR
-      args:
-        T_max: *max_epochs
-    callbacks: [Checkpoint, ProgressBar, WandbCallback]
-    callback_args:
-      Checkpoint:
-        monitor: val_loss
-        mode: min
-      ProgressBar:
-        epochs: *max_epochs
-      WandbCallback:
-        log_batch_frequency: 10
-    verbosity: 1 # 0, 1, 2
-    resume_experiment: null
-    train: true
-    test: true
-    test_metric: mean_average_precision_at_r
diff --git a/src/training/experiments/sample_experiment.yml b/src/training/experiments/sample_experiment.yml
deleted file mode 100644
index 8f94475..0000000
--- a/src/training/experiments/sample_experiment.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-experiment_group: Sample Experiments
-experiments:
-  - train_args:
-      batch_size: 256
-      max_epochs: &max_epochs 32
-    dataset:
-      type: EmnistDataset
-      args:
-        sample_to_balance: true
-        subsample_fraction: null
-        transform: null
-        target_transform: null
-        seed: 4711
-      train_args:
-        num_workers: 6
-        train_fraction: 0.8
-
-    model: CharacterModel
-    metrics: [accuracy]
-    # network: MLP
-    # network_args:
-    #   input_size: 784
-    #   hidden_size: 512
-    #   output_size: 80
-    #   num_layers: 5
-    #   dropout_rate: 0.2
-    #   activation_fn: SELU
-    network:
-      type: ResidualNetwork
-      args:
-        in_channels: 1
-        num_classes: 80
-        depths: [2, 2]
-        block_sizes: [64, 64]
-        activation: leaky_relu
-    # network:
-    #   type: WideResidualNetwork
-    #   args:
-    #     in_channels: 1
-    #     num_classes: 80
-    #     depth: 10
-    #     num_layers: 3
-    #     width_factor: 4
-    #     dropout_rate: 0.2
-    #     activation: SELU
-    # network: LeNet
-    # network_args:
-    #   output_size: 62
-    #   activation_fn: GELU
-    criterion:
-      type: CrossEntropyLoss
-      args:
-        weight: null
-        ignore_index: -100
-        reduction: mean
-    optimizer:
-      type: AdamW
-      args:
-        lr: 1.e-02
-        betas: [0.9, 0.999]
-        eps: 1.e-08
-        # weight_decay: 5.e-4
-        amsgrad: false
-    # lr_scheduler:
-    #   type: OneCycleLR
-    #   args:
-    #     max_lr: 1.e-03
-    #     epochs: *max_epochs
-    #     anneal_strategy: linear
-    lr_scheduler:
-      type: CosineAnnealingLR
-      args:
-        T_max: *max_epochs
-        interval: epoch
-    swa_args:
-      start: 2
-      lr: 5.e-2
-    callbacks: [Checkpoint, ProgressBar, WandbCallback, WandbImageLogger, EarlyStopping]
-    callback_args:
-      Checkpoint:
-        monitor: val_accuracy
-      ProgressBar:
-        epochs: null
-        log_batch_frequency: 100
-      EarlyStopping:
-        monitor: val_loss
-        min_delta: 0.0
-        patience: 5
-        mode: min
-      WandbCallback:
-        log_batch_frequency: 10
-      WandbImageLogger:
-        num_examples: 4
-        use_transpose: true
-    verbosity: 0 # 0, 1, 2
-    resume_experiment: null
-    train: true
-    test: true
-    test_metric: test_accuracy
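The commit message points to a refactor onto PyTorch Lightning. As a rough, hypothetical sketch (not code from this repository), the optimizer / lr_scheduler block of the deleted sample_experiment.yml — AdamW plus CosineAnnealingLR stepped once per epoch — would typically be built inside a LightningModule's `configure_optimizers` hook; the class name `LitCharacterModel` and the `config` dict used below are assumptions for illustration.

```python
# Hypothetical sketch of the Lightning refactor target; not code from this repo.
# Maps the optimizer/lr_scheduler section of sample_experiment.yml onto
# PyTorch Lightning's configure_optimizers hook.
import pytorch_lightning as pl
import torch


class LitCharacterModel(pl.LightningModule):  # class name is an assumption
    def __init__(self, network: torch.nn.Module, config: dict) -> None:
        super().__init__()
        self.network = network
        self.config = config
        # criterion: CrossEntropyLoss, as in the deleted config
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.network(x)

    def training_step(self, batch, batch_idx):
        data, targets = batch
        loss = self.loss_fn(self(data), targets)
        self.log("train_loss", loss)
        return loss

    def configure_optimizers(self):
        # optimizer: AdamW with lr/betas/eps/amsgrad taken from the YAML args
        optimizer = torch.optim.AdamW(
            self.parameters(), **self.config["optimizer"]["args"]
        )
        # lr_scheduler: CosineAnnealingLR; `interval: epoch` is Lightning's
        # scheduler metadata, not a CosineAnnealingLR constructor argument.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=self.config["lr_scheduler"]["args"]["T_max"]
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "interval": "epoch"},
        }
```

Note that the YAML anchors in the deleted configs (`&max_epochs` / `*max_epochs`) resolve when the file is parsed, so `T_max` would simply arrive as the same integer as `max_epochs`.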