author    Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-11-21 21:35:50 +0100
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-11-21 21:35:50 +0100
commit    fe43389ad26559cc09f2fd00441c02556ff674c3 (patch)
tree      0ce6a70c0dc2ea54bac7e19b539af5aec4ebab19
parent    a9363f3944f1ad31590c48d5d51c45df3bbf43b1 (diff)

Format files
-rw-r--r--  text_recognizer/models/base.py                     | 1
-rw-r--r--  text_recognizer/models/transformer.py              | 4
-rw-r--r--  text_recognizer/networks/transformer/attention.py  | 2
-rw-r--r--  text_recognizer/networks/transformer/norm.py       | 4
4 files changed, 7 insertions, 4 deletions
diff --git a/text_recognizer/models/base.py b/text_recognizer/models/base.py
index cc54de4..821cb69 100644
--- a/text_recognizer/models/base.py
+++ b/text_recognizer/models/base.py
@@ -46,6 +46,7 @@ class BaseLitModel(LightningModule):
         optimizer: Type[torch.optim.Optimizer],
         optimizer_idx: int,
     ) -> None:
+        """Optimal way to set grads to zero."""
         optimizer.zero_grad(set_to_none=True)

     def _configure_optimizer(self) -> List[Type[torch.optim.Optimizer]]:
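Note: the hook documented above zeroes gradients with set_to_none=True, which drops the gradient tensors instead of overwriting them with zeros, so the optimizer step skips a memset and the next backward pass allocates fresh gradients. A minimal sketch of the full hook, assuming the standard LightningModule signature (the epoch and batch_idx parameters are not visible in the hunk):

from typing import Type

import torch
from pytorch_lightning import LightningModule


class BaseLitModel(LightningModule):
    """Sketch only; the real class carries more configuration."""

    def optimizer_zero_grad(
        self,
        epoch: int,
        batch_idx: int,
        optimizer: Type[torch.optim.Optimizer],
        optimizer_idx: int,
    ) -> None:
        """Optimal way to set grads to zero."""
        # Setting gradients to None avoids filling the tensors with zeros
        # and lets the next backward pass allocate fresh gradient buffers.
        optimizer.zero_grad(set_to_none=True)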
diff --git a/text_recognizer/models/transformer.py b/text_recognizer/models/transformer.py
index d8cb665..369361b 100644
--- a/text_recognizer/models/transformer.py
+++ b/text_recognizer/models/transformer.py
@@ -60,8 +60,8 @@ class TransformerLitModel(BaseLitModel):
         # pred = self(data)
         # self.val_cer(pred, targets)
         # self.log("val/cer", self.val_cer, on_step=False, on_epoch=True, prog_bar=True)
-        # self.test_acc(pred, targets)
-        # self.log("val/acc", self.test_acc, on_step=False, on_epoch=True)
+        # self.val_acc(pred, targets)
+        # self.log("val/acc", self.val_acc, on_step=False, on_epoch=True)

     def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> None:
         """Test step."""
diff --git a/text_recognizer/networks/transformer/attention.py b/text_recognizer/networks/transformer/attention.py
index b86636e..87792a9 100644
--- a/text_recognizer/networks/transformer/attention.py
+++ b/text_recognizer/networks/transformer/attention.py
@@ -20,7 +20,6 @@ class Attention(nn.Module):
     """Standard attention."""

     def __attrs_pre_init__(self) -> None:
-        """Pre init constructor."""
         super().__init__()

     dim: int = attr.ib()
@@ -34,7 +33,6 @@ class Attention(nn.Module):
     fc: nn.Linear = attr.ib(init=False)

     def __attrs_post_init__(self) -> None:
-        """Post init configuration."""
         self.scale = self.dim ** -0.5
         inner_dim = self.num_heads * self.dim_head
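Note: the removed docstrings belong to the attrs lifecycle hooks that this module uses instead of a hand-written __init__. A sketch of the pattern is below; the eq=False decorator argument, the extra fields, and the fc projection shape are assumptions, not taken from this diff.

import attr
from torch import nn


@attr.s(eq=False)  # eq=False keeps nn.Module instances hashable
class Attention(nn.Module):
    """Standard attention."""

    def __attrs_pre_init__(self) -> None:
        # nn.Module.__init__ has to run before attrs assigns any fields,
        # otherwise attribute registration on the module fails.
        super().__init__()

    dim: int = attr.ib()
    num_heads: int = attr.ib()
    dim_head: int = attr.ib()
    scale: float = attr.ib(init=False)
    fc: nn.Linear = attr.ib(init=False)

    def __attrs_post_init__(self) -> None:
        # Derived values are computed after attrs has populated the fields.
        self.scale = self.dim ** -0.5
        inner_dim = self.num_heads * self.dim_head
        self.fc = nn.Linear(inner_dim, self.dim)

Usage stays an ordinary constructor call, e.g. Attention(dim=256, num_heads=4, dim_head=64), with attrs generating the __init__ that runs between the two hooks.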
diff --git a/text_recognizer/networks/transformer/norm.py b/text_recognizer/networks/transformer/norm.py
index c59744a..98f4d7f 100644
--- a/text_recognizer/networks/transformer/norm.py
+++ b/text_recognizer/networks/transformer/norm.py
@@ -12,6 +12,8 @@ from torch import Tensor
 class ScaleNorm(nn.Module):
+    """Scaled normalization."""
+
     def __init__(self, normalized_shape: int, eps: float = 1.0e-5) -> None:
         super().__init__()
         self.scale = normalized_shape ** -0.5
@@ -25,6 +27,8 @@ class ScaleNorm(nn.Module):
 class PreNorm(nn.Module):
+    """Applies layer normalization then function."""
+
     def __init__(self, normalized_shape: int, fn: Type[nn.Module]) -> None:
         super().__init__()
         self.norm = nn.LayerNorm(normalized_shape)
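Note: both modules only receive class docstrings in this commit. For reference, a sketch of how they are typically completed; the forward passes, the eps handling, and the learned gain g follow the common ScaleNorm/PreNorm formulation and are assumptions, not part of this diff.

from typing import Type

import torch
from torch import Tensor, nn


class ScaleNorm(nn.Module):
    """Scaled normalization."""

    def __init__(self, normalized_shape: int, eps: float = 1.0e-5) -> None:
        super().__init__()
        self.scale = normalized_shape ** -0.5
        self.eps = eps
        self.g = nn.Parameter(torch.ones(1))

    def forward(self, x: Tensor) -> Tensor:
        # Replace LayerNorm's per-feature statistics with a single learned
        # gain over activations normalized by their L2 norm.
        norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
        return x / norm.clamp(min=self.eps) * self.g


class PreNorm(nn.Module):
    """Applies layer normalization then function."""

    def __init__(self, normalized_shape: int, fn: Type[nn.Module]) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(normalized_shape)
        self.fn = fn

    def forward(self, x: Tensor, **kwargs) -> Tensor:
        # Normalize the input before applying the wrapped block, e.g. an
        # attention or feedforward module.
        return self.fn(self.norm(x), **kwargs)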