author    Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-10-27 22:27:24 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-10-27 22:27:24 +0200
commit    9a8044f4a3826a119416665741b709cd686fca87 (patch)
tree      e339593bb4e3858fa9379d14752dc52bf5949825
parent    ae8bfa62f0e02bd70c27bc1e71697249a5a79e7e (diff)
Remove Barlow Twins
-rw-r--r--  text_recognizer/criterions/barlow_twins.py                  26
-rw-r--r--  text_recognizer/data/transforms/barlow.py                   19
-rw-r--r--  text_recognizer/models/barlow_twins.py                      45
-rw-r--r--  text_recognizer/networks/barlow_twins/__init__.py            1
-rw-r--r--  text_recognizer/networks/barlow_twins/network.py            18
-rw-r--r--  text_recognizer/networks/barlow_twins/projector.py          36
-rw-r--r--  training/conf/datamodule/transform/barlow_paragraphs.yaml   46
-rw-r--r--  training/conf/experiment/barlow_twins.yaml                 102
-rw-r--r--  training/conf/experiment/barlow_twins_paragraphs.yaml      103
9 files changed, 0 insertions, 396 deletions
diff --git a/text_recognizer/criterions/barlow_twins.py b/text_recognizer/criterions/barlow_twins.py
deleted file mode 100644
index fe30b22..0000000
--- a/text_recognizer/criterions/barlow_twins.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""Barlow twins loss function."""
-
-import torch
-from torch import nn, Tensor
-
-
-def off_diagonal(x: Tensor) -> Tensor:
-    n, m = x.shape
-    assert n == m
-    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
-
-
-class BarlowTwinsLoss(nn.Module):
-    def __init__(self, dim: int, lambda_: float) -> None:
-        super().__init__()
-        self.bn = nn.BatchNorm1d(dim, affine=False)
-        self.lambda_ = lambda_
-
-    def forward(self, z1: Tensor, z2: Tensor) -> Tensor:
-        """Calculates the Barlow Twins loss."""
-        c = self.bn(z1).T @ self.bn(z2)
-        c.div_(z1.shape[0])
-
-        on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()
-        off_diag = off_diagonal(c).pow_(2).sum()
-        return on_diag + self.lambda_ * off_diag
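The removed loss builds the cross-correlation matrix of the batch-normalized projector outputs, pushes its diagonal towards 1 (invariance term) and its off-diagonal entries towards 0 (redundancy-reduction term, weighted by lambda_). A minimal usage sketch, assuming the BarlowTwinsLoss class above is in scope; the 512-dimensional embeddings and the lambda_ value are taken from the experiment configs removed further below:

    import torch

    # Sketch only: BarlowTwinsLoss is the class deleted above.
    loss_fn = BarlowTwinsLoss(dim=512, lambda_=3.9e-3)
    z1 = torch.randn(16, 512)  # projector output for the first augmented view
    z2 = torch.randn(16, 512)  # projector output for the second augmented view
    loss = loss_fn(z1, z2)     # on-diagonal term + lambda_ * off-diagonal term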
diff --git a/text_recognizer/data/transforms/barlow.py b/text_recognizer/data/transforms/barlow.py
deleted file mode 100644
index 78683cb..0000000
--- a/text_recognizer/data/transforms/barlow.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""Augmentations for training Barlow Twins."""
-from omegaconf.dictconfig import DictConfig
-from torch import Tensor
-
-from text_recognizer.data.transforms.load_transform import load_transform
-
-
-class BarlowTransform:
- """Applies two different transforms to input data."""
-
- def __init__(self, prim: DictConfig, bis: DictConfig) -> None:
- self.prim = load_transform(prim)
- self.bis = load_transform(bis)
-
- def __call__(self, data: Tensor) -> Tensor:
- """Applies two different augmentation on the input."""
- x_prim = self.prim(data)
- x_bis = self.bis(data)
- return x_prim, x_bis
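The removed transform runs the same sample through two independently configured augmentation pipelines and returns both views. A sketch of the call pattern, assuming load_transform turns each nested DictConfig into a callable such as a torchvision Compose (the configs here are placeholders, not the ones from the repo):

    from omegaconf import OmegaConf

    # Placeholder configs; the real ones are in the removed YAML files below.
    prim_cfg = OmegaConf.create({"to_tensor": {"_target_": "torchvision.transforms.ToTensor"}})
    bis_cfg = OmegaConf.create({"to_tensor": {"_target_": "torchvision.transforms.ToTensor"}})

    transform = BarlowTransform(prim=prim_cfg, bis=bis_cfg)
    x_prim, x_bis = transform(image)  # two differently augmented views of the same sample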
diff --git a/text_recognizer/models/barlow_twins.py b/text_recognizer/models/barlow_twins.py
deleted file mode 100644
index 6e2719d..0000000
--- a/text_recognizer/models/barlow_twins.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""PyTorch Lightning Barlow Twins model."""
-from typing import Tuple, Type
-import attr
-from torch import nn
-from torch import Tensor
-
-from text_recognizer.models.base import BaseLitModel
-from text_recognizer.criterions.barlow_twins import BarlowTwinsLoss
-
-
-@attr.s(auto_attribs=True, eq=False)
-class BarlowTwinsLitModel(BaseLitModel):
- """Barlow Twins training proceduer."""
-
- network: Type[nn.Module] = attr.ib()
- loss_fn: BarlowTwinsLoss = attr.ib()
-
- def forward(self, data: Tensor) -> Tensor:
- """Encodes image to projector latent."""
- return self.network(data)
-
- def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
- """Training step."""
- data, _ = batch
- x1, x2 = data
- z1, z2 = self(x1), self(x2)
- loss = self.loss_fn(z1, z2)
- self.log("train/loss", loss)
- return loss
-
- def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
- """Validation step."""
- data, _ = batch
- x1, x2 = data
- z1, z2 = self(x1), self(x2)
- loss = self.loss_fn(z1, z2)
- self.log("val/loss", loss, prog_bar=True)
-
- def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
- """Test step."""
- data, _ = batch
- x1, x2 = data
- z1, z2 = self(x1), self(x2)
- loss = self.loss_fn(z1, z2)
- self.log("test/loss", loss, prog_bar=True)
diff --git a/text_recognizer/networks/barlow_twins/__init__.py b/text_recognizer/networks/barlow_twins/__init__.py
deleted file mode 100644
index 0b74818..0000000
--- a/text_recognizer/networks/barlow_twins/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Module for projector network in Barlow Twins."""
diff --git a/text_recognizer/networks/barlow_twins/network.py b/text_recognizer/networks/barlow_twins/network.py
deleted file mode 100644
index a3e3750..0000000
--- a/text_recognizer/networks/barlow_twins/network.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""Barlow Twins network."""
-from typing import Type
-
-from torch import nn, Tensor
-import torch.nn.functional as F
-
-
-class BarlowTwins(nn.Module):
-    def __init__(self, encoder: Type[nn.Module], projector: Type[nn.Module]) -> None:
-        super().__init__()
-        self.encoder = encoder
-        self.projector = projector
-
-    def forward(self, x: Tensor) -> Tensor:
-        z = self.encoder(x)
-        z_e = F.adaptive_avg_pool2d(z, (1, 1)).flatten(start_dim=1)
-        z_p = self.projector(z_e)
-        return z_p
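The forward pass encodes the image, global-average-pools the feature map to one vector per sample, and projects that vector. A shape sketch with stand-in modules (the real encoder is the project's EfficientNet-b0 with 1280 output channels; the Conv2d and Linear below are placeholders only):

    import torch
    from torch import nn

    encoder = nn.Conv2d(1, 1280, kernel_size=3, padding=1)  # stand-in for EfficientNet-b0
    projector = nn.Linear(1280, 512)                        # stand-in for the MLP projector
    net = BarlowTwins(encoder=encoder, projector=projector)

    z = net(torch.randn(2, 1, 56, 1024))  # input shape from the line-level experiment config
    print(z.shape)                        # torch.Size([2, 512])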
diff --git a/text_recognizer/networks/barlow_twins/projector.py b/text_recognizer/networks/barlow_twins/projector.py
deleted file mode 100644
index 05d5e2e..0000000
--- a/text_recognizer/networks/barlow_twins/projector.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Projector network in Barlow Twins."""
-
-from typing import List
-import torch
-from torch import nn
-from torch import Tensor
-
-
-class Projector(nn.Module):
- """MLP network."""
-
- def __init__(self, dims: List[int]) -> None:
- super().__init__()
- self.dims = dims
- self.network = self._build()
-
- def _build(self) -> nn.Sequential:
- """Builds projector network."""
- layers = [
- nn.Sequential(
- nn.Linear(
- in_features=self.dims[i], out_features=self.dims[i + 1], bias=False
- ),
- nn.BatchNorm1d(self.dims[i + 1]),
- nn.ReLU(inplace=True),
- )
- for i in range(len(self.dims) - 2)
- ]
- layers.append(
- nn.Linear(in_features=self.dims[-2], out_features=self.dims[-1], bias=False)
- )
- return nn.Sequential(*layers)
-
- def forward(self, x: Tensor) -> Tensor:
- """Project latent to higher dimesion."""
- return self.network(x)
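With the dims used in the experiment configs, the network is two Linear(..., bias=False) + BatchNorm1d + ReLU blocks (1280 -> 512 and 512 -> 512) followed by a final bias-free Linear(512, 512). A quick shape check, assuming the Projector class above is in scope:

    import torch

    projector = Projector(dims=[1280, 512, 512, 512])
    out = projector(torch.randn(8, 1280))  # batch of pooled encoder features
    print(out.shape)                       # torch.Size([8, 512])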
diff --git a/training/conf/datamodule/transform/barlow_paragraphs.yaml b/training/conf/datamodule/transform/barlow_paragraphs.yaml
deleted file mode 100644
index 5eefce5..0000000
--- a/training/conf/datamodule/transform/barlow_paragraphs.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-
-barlow:
-  _target_: text_recognizer.data.transforms.barlow.BarlowTransform
-  prim:
-    random_crop:
-      _target_: torchvision.transforms.RandomCrop
-      size: [576, 640]
-      padding: null
-      pad_if_needed: true
-      fill: 0
-      padding_mode: constant
-
-    color_jitter:
-      _target_: torchvision.transforms.ColorJitter
-      brightness: [0.8, 1.6]
-
-    random_affine:
-      _target_: torchvision.transforms.RandomAffine
-      degrees: 1
-      shear: [-10, 10]
-      interpolation: BILINEAR
-
-    to_tensor:
-      _target_: torchvision.transforms.ToTensor
-
-  bis:
-    random_crop:
-      _target_: torchvision.transforms.RandomCrop
-      size: [576, 640]
-      padding: null
-      pad_if_needed: true
-      fill: 0
-      padding_mode: constant
-
-    color_jitter:
-      _target_: torchvision.transforms.ColorJitter
-      brightness: [0.8, 2.0]
-
-    random_affine:
-      _target_: torchvision.transforms.RandomAffine
-      degrees: 1
-      shear: [-5, 5]
-      interpolation: BILINEAR
-
-    to_tensor:
-      _target_: torchvision.transforms.ToTensor
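Each named entry under prim and bis is a Hydra-style _target_ spec; the two branches differ only in the brightness jitter ([0.8, 1.6] vs. [0.8, 2.0]) and shear range ([-10, 10] vs. [-5, 5]). A sketch of instantiating one entry directly with hydra.utils.instantiate (the project's own load_transform presumably also converts the interpolation string into a torchvision InterpolationMode):

    from hydra.utils import instantiate

    # Illustrative direct instantiation of the random_crop entry above.
    random_crop = instantiate({
        "_target_": "torchvision.transforms.RandomCrop",
        "size": [576, 640],
        "pad_if_needed": True,
    })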
diff --git a/training/conf/experiment/barlow_twins.yaml b/training/conf/experiment/barlow_twins.yaml
deleted file mode 100644
index cc1295d..0000000
--- a/training/conf/experiment/barlow_twins.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# @package _global_
-
-defaults:
-  - override /criterion: null
-  - override /datamodule: null
-  - override /network: null
-  - override /model: null
-  - override /lr_schedulers: null
-  - override /optimizers: null
-
-epochs: &epochs 1000
-summary: [[1, 1, 56, 1024]]
-
-criterion:
-  _target_: text_recognizer.criterions.barlow_twins.BarlowTwinsLoss
-  dim: 512
-  lambda_: 3.9e-3
-
-callbacks:
-  stochastic_weight_averaging:
-    _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
-    swa_epoch_start: 0.75
-    swa_lrs: 1.0e-5
-    annealing_epochs: 10
-    annealing_strategy: cos
-    device: null
-
-optimizers:
-  madgrad:
-    _target_: madgrad.MADGRAD
-    lr: 1.0e-3
-    momentum: 0.9
-    weight_decay: 1.0e-6
-    eps: 1.0e-6
-    parameters: network
-
-lr_schedulers:
-  network:
-    _target_: torch.optim.lr_scheduler.OneCycleLR
-    max_lr: 3.0e-4
-    total_steps: null
-    epochs: *epochs
-    steps_per_epoch: 45
-    pct_start: 0.03
-    anneal_strategy: cos
-    cycle_momentum: true
-    base_momentum: 0.85
-    max_momentum: 0.95
-    div_factor: 25
-    final_div_factor: 1.0e4
-    three_phase: false
-    last_epoch: -1
-    verbose: false
-    # Non-class arguments
-    interval: step
-    monitor: val/loss
-
-datamodule:
-  _target_: text_recognizer.data.iam_lines.IAMLines
-  batch_size: 16
-  num_workers: 12
-  train_fraction: 0.9
-  pin_memory: false
-  transform: transform/iam_lines_barlow.yaml
-  test_transform: transform/iam_lines_barlow.yaml
-  mapping:
-    _target_: text_recognizer.data.mappings.emnist_mapping.EmnistMapping
-
-network:
-  _target_: text_recognizer.networks.barlow_twins.network.BarlowTwins
-  encoder:
-    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
-    arch: b0
-    out_channels: 1280
-    stochastic_dropout_rate: 0.2
-    bn_momentum: 0.99
-    bn_eps: 1.0e-3
-  projector:
-    _target_: text_recognizer.networks.barlow_twins.projector.Projector
-    dims: [1280, 512, 512, 512]
-
-model:
-  _target_: text_recognizer.models.barlow_twins.BarlowTwinsLitModel
-
-trainer:
-  _target_: pytorch_lightning.Trainer
-  stochastic_weight_avg: true
-  auto_scale_batch_size: binsearch
-  auto_lr_find: false
-  gradient_clip_val: 0.0
-  fast_dev_run: false
-  gpus: 1
-  precision: 16
-  max_epochs: *epochs
-  terminate_on_nan: true
-  weights_summary: null
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  limit_test_batches: 1.0
-  resume_from_checkpoint: null
-  accumulate_grad_batches: 32
-  overfit_batches: 0
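For reference, this experiment trains with batch_size 16 and accumulate_grad_batches 32, i.e. an effective batch size of 16 * 32 = 512; the paragraphs experiment below uses the larger 576x640 crops with batch_size 4 and accumulate_grad_batches 128, arriving at the same effective 512.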
diff --git a/training/conf/experiment/barlow_twins_paragraphs.yaml b/training/conf/experiment/barlow_twins_paragraphs.yaml
deleted file mode 100644
index 9552c0b..0000000
--- a/training/conf/experiment/barlow_twins_paragraphs.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-# @package _global_
-
-defaults:
-  - override /criterion: null
-  - override /datamodule: null
-  - override /network: null
-  - override /model: null
-  - override /lr_schedulers: null
-  - override /optimizers: null
-
-epochs: &epochs 1000
-summary: [[1, 1, 576, 640]]
-
-criterion:
-  _target_: text_recognizer.criterions.barlow_twins.BarlowTwinsLoss
-  dim: 512
-  lambda_: 3.9e-3
-
-# callbacks:
-#   stochastic_weight_averaging:
-#     _target_: pytorch_lightning.callbacks.StochasticWeightAveraging
-#     swa_epoch_start: 0.75
-#     swa_lrs: 1.0e-5
-#     annealing_epochs: 10
-#     annealing_strategy: cos
-#     device: null
-
-optimizers:
-  madgrad:
-    _target_: madgrad.MADGRAD
-    lr: 1.0e-3
-    momentum: 0.9
-    weight_decay: 1.0e-6
-    eps: 1.0e-6
-    parameters: network
-
-lr_schedulers:
-  network:
-    _target_: torch.optim.lr_scheduler.OneCycleLR
-    max_lr: 1.0e-3
-    total_steps: null
-    epochs: *epochs
-    steps_per_epoch: 40
-    pct_start: 0.03
-    anneal_strategy: cos
-    cycle_momentum: true
-    base_momentum: 0.85
-    max_momentum: 0.95
-    div_factor: 25
-    final_div_factor: 1.0e4
-    three_phase: false
-    last_epoch: -1
-    verbose: false
-    # Non-class arguments
-    interval: step
-    monitor: val/loss
-
-datamodule:
-  _target_: text_recognizer.data.iam_extended_paragraphs.IAMExtendedParagraphs
-  batch_size: 4
-  num_workers: 12
-  train_fraction: 0.9
-  pin_memory: true
-  transform: transform/barlow_paragraphs.yaml
-  test_transform: transform/barlow_paragraphs.yaml
-  mapping:
-    _target_: text_recognizer.data.mappings.emnist_mapping.EmnistMapping
-    extra_symbols: [ "\n" ]
-
-network:
-  _target_: text_recognizer.networks.barlow_twins.network.BarlowTwins
-  encoder:
-    _target_: text_recognizer.networks.encoders.efficientnet.EfficientNet
-    arch: b0
-    out_channels: 1280
-    stochastic_dropout_rate: 0.2
-    bn_momentum: 0.99
-    bn_eps: 1.0e-3
-  projector:
-    _target_: text_recognizer.networks.barlow_twins.projector.Projector
-    dims: [1280, 512, 512, 512]
-
-model:
-  _target_: text_recognizer.models.barlow_twins.BarlowTwinsLitModel
-
-trainer:
-  _target_: pytorch_lightning.Trainer
-  stochastic_weight_avg: true
-  auto_scale_batch_size: binsearch
-  auto_lr_find: false
-  gradient_clip_val: 0.0
-  fast_dev_run: false
-  gpus: 1
-  precision: 16
-  max_epochs: *epochs
-  terminate_on_nan: true
-  weights_summary: null
-  limit_train_batches: 1.0
-  limit_val_batches: 1.0
-  limit_test_batches: 1.0
-  resume_from_checkpoint: null
-  accumulate_grad_batches: 128
-  overfit_batches: 0