summary refs log tree commit diff
path: root/training/conf/datamodule/transform
diff options
context:
space:
mode:
author Gustaf Rydholm <gustaf.rydholm@gmail.com> 2021-10-10 18:05:44 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com> 2021-10-10 18:05:44 +0200
commit 9d3e158f48e3e965d67f154e05faaaf1cadbb836 (patch)
tree 57b8e1b38874e9a778671abe4eaaa841fc56049b /training/conf/datamodule/transform
parent 38f546f0b86fc0dc89863b00c5ee8c6685394ef2 (diff)
Update configs
Diffstat (limited to 'training/conf/datamodule/transform')
-rw-r--r-- training/conf/datamodule/transform/default.yaml          |  2
-rw-r--r-- training/conf/datamodule/transform/emnist_lines.yaml     | 14
-rw-r--r-- training/conf/datamodule/transform/iam_lines.yaml        | 17
-rw-r--r-- training/conf/datamodule/transform/iam_lines_barlow.yaml | 39
-rw-r--r-- training/conf/datamodule/transform/paragraphs.yaml       | 20
5 files changed, 92 insertions, 0 deletions
diff --git a/training/conf/datamodule/transform/default.yaml b/training/conf/datamodule/transform/default.yaml
new file mode 100644
index 0000000..d6f6d78
--- /dev/null
+++ b/training/conf/datamodule/transform/default.yaml
@@ -0,0 +1,2 @@
+to_tensor:
+ _target_: torchvision.transforms.ToTensor
diff --git a/training/conf/datamodule/transform/emnist_lines.yaml b/training/conf/datamodule/transform/emnist_lines.yaml
new file mode 100644
index 0000000..986ba20
--- /dev/null
+++ b/training/conf/datamodule/transform/emnist_lines.yaml
@@ -0,0 +1,14 @@
+to_tensor:
+ _target_: torchvision.transforms.ToTensor
+
+color_jitter:
+ _target_: torchvision.transforms.ColorJitter
+ brightness: [0.5, 1.0]
+
+random_affine:
+ _target_: torchvision.transforms.RandomAffine
+ degrees: 3
+ translate: [0.0, 0.05]
+ scale: [0.4, 1.1]
+ shear: [-40, 50]
+ fill: 0
diff --git a/training/conf/datamodule/transform/iam_lines.yaml b/training/conf/datamodule/transform/iam_lines.yaml
new file mode 100644
index 0000000..710f6e3
--- /dev/null
+++ b/training/conf/datamodule/transform/iam_lines.yaml
@@ -0,0 +1,17 @@
+embed_crop:
+ _target_: text_recognizer.data.transforms.embed_crop.EmbedCrop
+ augment: false
+
+color_jitter:
+ _target_: torchvision.transforms.ColorJitter
+ brightness: [0.8, 1.6]
+
+random_affine:
+ _target_: torchvision.transforms.RandomAffine
+ degrees: 1
+ shear: [-30, 20]
+ interpolation: BILINEAR
+ fill: 0
+
+to_tensor:
+ _target_: torchvision.transforms.ToTensor
diff --git a/training/conf/datamodule/transform/iam_lines_barlow.yaml b/training/conf/datamodule/transform/iam_lines_barlow.yaml
new file mode 100644
index 0000000..b44ca4e
--- /dev/null
+++ b/training/conf/datamodule/transform/iam_lines_barlow.yaml
@@ -0,0 +1,39 @@
+barlow:
+ _target_: text_recognizer.data.transforms.barlow.BarlowTransform
+ prim:
+ embed_crop:
+ _target_: text_recognizer.data.transforms.embed_crop.EmbedCrop
+ augment: true
+
+ color_jitter:
+ _target_: torchvision.transforms.ColorJitter
+ brightness: [0.8, 1.6]
+
+ random_affine:
+ _target_: torchvision.transforms.RandomAffine
+ degrees: 1
+ shear: [-30, 20]
+ interpolation: BILINEAR
+ fill: 0
+
+ to_tensor:
+ _target_: torchvision.transforms.ToTensor
+
+ bis:
+ embed_crop:
+ _target_: text_recognizer.data.transforms.embed_crop.EmbedCrop
+ augment: true
+
+ color_jitter:
+ _target_: torchvision.transforms.ColorJitter
+ brightness: [0.5, 1.4]
+
+ random_affine:
+ _target_: torchvision.transforms.RandomAffine
+ degrees: 1.5
+ shear: [-30, 20]
+ interpolation: BILINEAR
+ fill: 0
+
+ to_tensor:
+ _target_: torchvision.transforms.ToTensor
diff --git a/training/conf/datamodule/transform/paragraphs.yaml b/training/conf/datamodule/transform/paragraphs.yaml
new file mode 100644
index 0000000..d33a4c3
--- /dev/null
+++ b/training/conf/datamodule/transform/paragraphs.yaml
@@ -0,0 +1,20 @@
+random_crop:
+ _target_: torchvision.transforms.RandomCrop
+ size: [576, 640]
+ padding: null
+ pad_if_needed: true
+ fill: 0
+ padding_mode: constant
+
+color_jitter:
+ _target_: torchvision.transforms.ColorJitter
+ brightness: [0.8, 1.6]
+
+random_affine:
+ _target_: torchvision.transforms.RandomAffine
+ degrees: 1
+ shear: [-10, 10]
+ interpolation: BILINEAR
+
+to_tensor:
+ _target_: torchvision.transforms.ToTensor