-rw-r--r--  text_recognizer/callbacks/wandb_callbacks.py                    9
-rw-r--r--  text_recognizer/models/base.py                                  83
-rw-r--r--  text_recognizer/models/transformer.py                           26
-rw-r--r--  text_recognizer/networks/encoders/efficientnet/efficientnet.py  10
4 files changed, 61 insertions, 67 deletions
diff --git a/text_recognizer/callbacks/wandb_callbacks.py b/text_recognizer/callbacks/wandb_callbacks.py
index 3936aaf..900c3b1 100644
--- a/text_recognizer/callbacks/wandb_callbacks.py
+++ b/text_recognizer/callbacks/wandb_callbacks.py
@@ -29,6 +29,9 @@ class WatchModel(Callback):
log: str = attr.ib(default="gradients")
log_freq: int = attr.ib(default=100)
+ def __attrs_pre_init__(self):
+ super().__init__()
+
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Watches model weights with wandb."""
logger = get_wandb_logger(trainer)
@@ -41,6 +44,9 @@ class UploadCodeAsArtifact(Callback):
project_dir: Path = attr.ib(converter=Path)
+ def __attrs_pre_init__(self):
+ super().__init__()
+
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Uploads project code as an artifact."""
logger = get_wandb_logger(trainer)
@@ -59,6 +65,9 @@ class UploadCheckpointAsArtifact(Callback):
ckpt_dir: Path = attr.ib(converter=Path)
upload_best_only: bool = attr.ib()
+ def __attrs_pre_init__(self):
+ super().__init__()
+
def on_train_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Uploads model checkpoint to W&B."""
logger = get_wandb_logger(trainer)
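Note on the `__attrs_pre_init__` hooks added above: `@attr.s` generates an `__init__` that never calls `super().__init__()`, so attrs-decorated subclasses that rely on a parent's initializer (the Lightning callbacks here, and `pl.LightningModule` below) must call it themselves in this hook. A minimal sketch of the pattern with illustrative class names, not from this repo:

import attr

class Base:
    def __init__(self) -> None:
        self.state = "ready"  # parent state that the attrs-generated __init__ alone would never set

@attr.s
class Child(Base):
    name: str = attr.ib(default="example")

    def __attrs_pre_init__(self) -> None:
        # attrs calls this hook before running its generated __init__ body,
        # giving the parent class a chance to set up its own state.
        super().__init__()

child = Child(name="demo")
assert child.state == "ready" and child.name == "demo"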
diff --git a/text_recognizer/models/base.py b/text_recognizer/models/base.py
index 88ffde6..4e803eb 100644
--- a/text_recognizer/models/base.py
+++ b/text_recognizer/models/base.py
@@ -1,8 +1,10 @@
"""Base PyTorch Lightning model."""
from typing import Any, Dict, List, Union, Tuple, Type
-import madgrad
-from omegaconf import DictConfig, OmegaConf
+import attr
+import hydra
+from loguru import logger as log
+from omegaconf import DictConfig
import pytorch_lightning as pl
import torch
from torch import nn
@@ -10,23 +12,29 @@ from torch import Tensor
import torchmetrics
+@attr.s
class LitBaseModel(pl.LightningModule):
"""Abstract PyTorch Lightning class."""
- def __init__(
- self,
- network: Type[nn.Module],
- optimizer: Union[DictConfig, Dict],
- lr_scheduler: Union[DictConfig, Dict],
- criterion: Union[DictConfig, Dict],
- monitor: str = "val_loss",
- ) -> None:
+ network: Type[nn.Module] = attr.ib()
+ criterion_config: DictConfig = attr.ib(converter=DictConfig)
+ optimizer_config: DictConfig = attr.ib(converter=DictConfig)
+ lr_scheduler_config: DictConfig = attr.ib(converter=DictConfig)
+
+ interval: str = attr.ib()
+ monitor: str = attr.ib(default="val/loss")
+
+ loss_fn = attr.ib(init=False)
+
+ train_acc = attr.ib(init=False)
+ val_acc = attr.ib(init=False)
+ test_acc = attr.ib(init=False)
+
+ def __attrs_pre_init__(self):
super().__init__()
- self.monitor = monitor
- self.network = network
- self._optimizer = OmegaConf.create(optimizer)
- self._lr_scheduler = OmegaConf.create(lr_scheduler)
- self.loss_fn = self.configure_criterion(criterion)
+
+ def __attrs_post_init__(self):
+ self.loss_fn = self.configure_criterion()
# Accuracy metric
self.train_acc = torchmetrics.Accuracy()
@@ -34,11 +42,10 @@ class LitBaseModel(pl.LightningModule):
self.test_acc = torchmetrics.Accuracy()
- @staticmethod
- def configure_criterion(criterion: Union[DictConfig, Dict]) -> Type[nn.Module]:
+ def configure_criterion(self) -> nn.Module:
- """Returns a loss functions."""
+ """Returns the loss function."""
- criterion = OmegaConf.create(criterion)
- args = {} or criterion.args
- return getattr(nn, criterion.type)(**args)
+ log.info(f"Instantiating criterion <{self.criterion_config._target_}>")
+ return hydra.utils.instantiate(self.criterion_config)
def optimizer_zero_grad(
self,
@@ -51,27 +58,23 @@ class LitBaseModel(pl.LightningModule):
def _configure_optimizer(self) -> torch.optim.Optimizer:
"""Configures the optimizer."""
- args = {} or self._optimizer.args
- if self._optimizer.type == "MADGRAD":
- optimizer_class = madgrad.MADGRAD
- else:
- optimizer_class = getattr(torch.optim, self._optimizer.type)
- return optimizer_class(params=self.parameters(), **args)
+ log.info(f"Instantiating optimizer <{self.optimizer_config._target_}>")
+ return hydra.utils.instantiate(self.optimizer_config, params=self.parameters())
def _configure_lr_scheduler(
self, optimizer: Type[torch.optim.Optimizer]
) -> Dict[str, Any]:
"""Configures the lr scheduler."""
- scheduler = {"monitor": self.monitor}
- args = {} or self._lr_scheduler.args
-
- if "interval" in args:
- scheduler["interval"] = args.pop("interval")
-
- scheduler["scheduler"] = getattr(
- torch.optim.lr_scheduler, self._lr_scheduler.type
- )(optimizer, **args)
-
+ log.info(
+ f"Instantiating learning rate scheduler <{self.lr_scheduler_config._target_}>"
+ )
+ scheduler = {
+ "monitor": self.monitor,
+ "interval": self.interval,
+ "scheduler": hydra.utils.instantiate(
+ self.lr_scheduler_config, optimizer=optimizer
+ ),
+ }
return scheduler
def configure_optimizers(self) -> Tuple[List[torch.optim.Optimizer], List[Dict[str, Any]]]:
@@ -90,9 +93,9 @@ class LitBaseModel(pl.LightningModule):
data, targets = batch
logits = self(data)
loss = self.loss_fn(logits, targets)
- self.log("train_loss", loss)
+ self.log("train/loss", loss)
self.train_acc(logits, targets)
- self.log("train_acc", self.train_acc, on_step=False, on_epoch=True)
+ self.log("train/acc", self.train_acc, on_step=False, on_epoch=True)
return loss
def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
@@ -100,13 +103,13 @@ class LitBaseModel(pl.LightningModule):
data, targets = batch
logits = self(data)
loss = self.loss_fn(logits, targets)
- self.log("val_loss", loss, prog_bar=True)
+ self.log("val/loss", loss, prog_bar=True)
self.val_acc(logits, targets)
- self.log("val_acc", self.val_acc, on_step=False, on_epoch=True, prog_bar=True)
+ self.log("val/acc", self.val_acc, on_step=False, on_epoch=True, prog_bar=True)
def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
"""Test step."""
data, targets = batch
logits = self(data)
self.test_acc(logits, targets)
- self.log("test_acc", self.test_acc, on_step=False, on_epoch=True)
+ self.log("test/acc", self.test_acc, on_step=False, on_epoch=True)
diff --git a/text_recognizer/models/transformer.py b/text_recognizer/models/transformer.py
index bc7e313..6be0ac5 100644
--- a/text_recognizer/models/transformer.py
+++ b/text_recognizer/models/transformer.py
@@ -2,9 +2,7 @@
from typing import Dict, List, Optional, Union, Tuple, Type
from omegaconf import DictConfig
-from torch import nn
-from torch import Tensor
-import wandb
+from torch import nn, Tensor
from text_recognizer.data.emnist import emnist_mapping
from text_recognizer.models.metrics import CharacterErrorRate
@@ -44,24 +42,12 @@ class LitTransformerModel(LitBaseModel):
# TODO: add case for sentence pieces
return mapping, ignore_tokens
- def _log_prediction(self, data: Tensor, pred: Tensor) -> None:
- """Logs prediction on image with wandb."""
- pred_str = "".join(
- self.mapping[i] for i in pred[0].tolist() if i != 3
- ) # pad index is 3
- try:
- self.logger.experiment.log(
- {"val_pred_examples": [wandb.Image(data[0], caption=pred_str)]}
- )
- except AttributeError:
- pass
-
def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:
"""Training step."""
data, targets = batch
logits = self.network(data, targets[:, :-1])
loss = self.loss_fn(logits, targets[:, 1:])
- self.log("train_loss", loss)
+ self.log("train/loss", loss)
return loss
def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
@@ -70,17 +56,15 @@ class LitTransformerModel(LitBaseModel):
logits = self.network(data, targets[:, :-1])
loss = self.loss_fn(logits, targets[:, 1:])
- self.log("val_loss", loss, prog_bar=True)
+ self.log("val/loss", loss, prog_bar=True)
pred = self.network.predict(data)
- self._log_prediction(data, pred)
self.val_cer(pred, targets)
- self.log("val_cer", self.val_cer, on_step=False, on_epoch=True, prog_bar=True)
+ self.log("val/cer", self.val_cer, on_step=False, on_epoch=True, prog_bar=True)
def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
"""Test step."""
data, targets = batch
pred = self.network.predict(data)
- self._log_prediction(data, pred)
self.test_cer(pred, targets)
- self.log("test_cer", self.test_cer, on_step=False, on_epoch=True, prog_bar=True)
+ self.log("test/cer", self.test_cer, on_step=False, on_epoch=True, prog_bar=True)
diff --git a/text_recognizer/networks/encoders/efficientnet/efficientnet.py b/text_recognizer/networks/encoders/efficientnet/efficientnet.py
index a59abf8..fb4f002 100644
--- a/text_recognizer/networks/encoders/efficientnet/efficientnet.py
+++ b/text_recognizer/networks/encoders/efficientnet/efficientnet.py
@@ -27,7 +27,7 @@ class EfficientNet(nn.Module):
def __init__(
self,
arch: str,
- out_channels: int = 256,
+ out_channels: int = 1280,
stochastic_dropout_rate: float = 0.2,
bn_momentum: float = 0.99,
bn_eps: float = 1.0e-3,
@@ -37,7 +37,7 @@ class EfficientNet(nn.Module):
self.arch = self.archs[arch]
self.out_channels = out_channels
self.stochastic_dropout_rate = stochastic_dropout_rate
- self.bn_momentum = 1 - bn_momentum
+ self.bn_momentum = bn_momentum
self.bn_eps = bn_eps
self._conv_stem: nn.Sequential = None
self._blocks: nn.Sequential = None
@@ -70,9 +70,7 @@ class EfficientNet(nn.Module):
for _ in range(args.num_repeats):
self._blocks.append(
MBConvBlock(
- **args,
- bn_momentum=self.bn_momentum,
- bn_eps=self.bn_eps,
+ **args, bn_momentum=self.bn_momentum, bn_eps=self.bn_eps,
)
)
args.in_channels = args.out_channels
@@ -94,7 +92,7 @@ class EfficientNet(nn.Module):
if self.stochastic_dropout_rate:
stochastic_dropout_rate *= i / len(self._blocks)
x = block(x, stochastic_dropout_rate=stochastic_dropout_rate)
- self._conv_head(x)
+ x = self._conv_head(x)
return x
def forward(self, x: Tensor) -> Tensor:
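Dropping the `1 - bn_momentum` conversion above touches a convention difference worth flagging: TF/Keras BatchNorm momentum is the weight kept on the running statistic (typically 0.99), while PyTorch momentum is the weight given to the new batch statistic (typically 0.1 or less). Whether `MBConvBlock` applies the conversion internally is not visible in this diff; the snippet below only demonstrates the PyTorch convention:

import torch
from torch import nn

# PyTorch: running = (1 - momentum) * running + momentum * batch_stat,
# so the TF/Keras default of 0.99 corresponds to a PyTorch momentum of 0.01.
bn = nn.BatchNorm2d(8, momentum=0.01, eps=1.0e-3)

bn.train()
x = torch.randn(4, 8, 16, 16)
bn(x)  # running_mean moves only 1% of the way toward this batch's mean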