author    Gustaf Rydholm <gustaf.rydholm@gmail.com>  2023-08-25 23:19:14 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>  2023-08-25 23:19:14 +0200
commit    49ca6ade1a19f7f9c702171537fe4be0dfcda66d (patch)
tree      20062ed1910758481f3d5fff11159706c7b990c6
parent    0421daf6bd97596703f426ba61c401599b538eeb (diff)
Rename and add flash attention
-rw-r--r--  text_recognizer/data/base_data_module.py  4
-rw-r--r--  text_recognizer/network/__init__.py  1
-rw-r--r--  text_recognizer/network/convnext/__init__.py  7
-rw-r--r--  text_recognizer/network/convnext/attention.py (renamed from text_recognizer/networks/convnext/attention.py)  4
-rw-r--r--  text_recognizer/network/convnext/convnext.py (renamed from text_recognizer/networks/convnext/convnext.py)  6
-rw-r--r--  text_recognizer/network/convnext/downsample.py (renamed from text_recognizer/networks/convnext/downsample.py)  0
-rw-r--r--  text_recognizer/network/convnext/norm.py (renamed from text_recognizer/networks/convnext/norm.py)  0
-rw-r--r--  text_recognizer/network/convnext/residual.py (renamed from text_recognizer/networks/convnext/residual.py)  0
-rw-r--r--  text_recognizer/network/transformer/__init__.py  1
-rw-r--r--  text_recognizer/network/transformer/attend.py  94
-rw-r--r--  text_recognizer/network/transformer/attention.py  56
-rw-r--r--  text_recognizer/network/transformer/decoder.py  57
-rw-r--r--  text_recognizer/network/transformer/embedding/__init__.py (renamed from text_recognizer/networks/transformer/embeddings/__init__.py)  0
-rw-r--r--  text_recognizer/network/transformer/embedding/absolute.py  28
-rw-r--r--  text_recognizer/network/transformer/embedding/l2_norm.py  9
-rw-r--r--  text_recognizer/network/transformer/embedding/sincos.py  13
-rw-r--r--  text_recognizer/network/transformer/embedding/token.py  18
-rw-r--r--  text_recognizer/network/transformer/encoder.py  46
-rw-r--r--  text_recognizer/network/transformer/ff.py  22
-rw-r--r--  text_recognizer/network/transformer/norm.py  22
-rw-r--r--  text_recognizer/network/vit.py  76
-rw-r--r--  text_recognizer/networks/__init__.py  2
-rw-r--r--  text_recognizer/networks/conv_transformer.py  49
-rw-r--r--  text_recognizer/networks/convnext/__init__.py  7
-rw-r--r--  text_recognizer/networks/image_encoder.py  45
-rw-r--r--  text_recognizer/networks/text_decoder.py  55
-rw-r--r--  text_recognizer/networks/transformer/__init__.py  6
-rw-r--r--  text_recognizer/networks/transformer/attention.py  109
-rw-r--r--  text_recognizer/networks/transformer/decoder.py  41
-rw-r--r--  text_recognizer/networks/transformer/decoder_block.py  44
-rw-r--r--  text_recognizer/networks/transformer/embeddings/axial.py  104
-rw-r--r--  text_recognizer/networks/transformer/embeddings/rotary.py  67
-rw-r--r--  text_recognizer/networks/transformer/ff.py  45
-rw-r--r--  text_recognizer/networks/transformer/norm.py  51
-rw-r--r--  text_recognizer/optimizer/__init__.py (renamed from text_recognizer/optimizers/__init__.py)  0
-rw-r--r--  text_recognizer/optimizer/laprop.py (renamed from text_recognizer/optimizers/laprop.py)  0
36 files changed, 457 insertions, 632 deletions
diff --git a/text_recognizer/data/base_data_module.py b/text_recognizer/data/base_data_module.py
index bd6fd99..3cb0aa2 100644
--- a/text_recognizer/data/base_data_module.py
+++ b/text_recognizer/data/base_data_module.py
@@ -2,7 +2,7 @@
from pathlib import Path
from typing import Callable, Dict, Optional, Tuple, TypeVar
-from pytorch_lightning import LightningDataModule
+import pytorch_lightning as L
from torch.utils.data import DataLoader
from text_recognizer.data.base_dataset import BaseDataset
@@ -19,7 +19,7 @@ def load_and_print_info(data_module_class: type) -> None:
print(dataset)
-class BaseDataModule(LightningDataModule):
+class BaseDataModule(L.LightningDataModule):
"""Base PyTorch Lightning DataModule."""
def __init__(
diff --git a/text_recognizer/network/__init__.py b/text_recognizer/network/__init__.py
new file mode 100644
index 0000000..d9ef58b
--- /dev/null
+++ b/text_recognizer/network/__init__.py
@@ -0,0 +1 @@
+"""Network modules"""
diff --git a/text_recognizer/network/convnext/__init__.py b/text_recognizer/network/convnext/__init__.py
new file mode 100644
index 0000000..dcff3fc
--- /dev/null
+++ b/text_recognizer/network/convnext/__init__.py
@@ -0,0 +1,7 @@
+"""Convnext module."""
+from text_recognizer.network.convnext.attention import (
+ Attention,
+ FeedForward,
+ TransformerBlock,
+)
+from text_recognizer.network.convnext.convnext import ConvNext
diff --git a/text_recognizer/networks/convnext/attention.py b/text_recognizer/network/convnext/attention.py
index 1334feb..6bc9692 100644
--- a/text_recognizer/networks/convnext/attention.py
+++ b/text_recognizer/network/convnext/attention.py
@@ -4,8 +4,8 @@ import torch.nn.functional as F
from einops import rearrange
from torch import Tensor, einsum, nn
-from text_recognizer.networks.convnext.norm import LayerNorm
-from text_recognizer.networks.convnext.residual import Residual
+from text_recognizer.network.convnext.norm import LayerNorm
+from text_recognizer.network.convnext.residual import Residual
def l2norm(t: Tensor) -> Tensor:
diff --git a/text_recognizer/networks/convnext/convnext.py b/text_recognizer/network/convnext/convnext.py
index 9419a15..6acf059 100644
--- a/text_recognizer/networks/convnext/convnext.py
+++ b/text_recognizer/network/convnext/convnext.py
@@ -3,9 +3,9 @@ from typing import Optional, Sequence
from torch import Tensor, nn
-from text_recognizer.networks.convnext.attention import TransformerBlock
-from text_recognizer.networks.convnext.downsample import Downsample
-from text_recognizer.networks.convnext.norm import LayerNorm
+from text_recognizer.network.convnext.attention import TransformerBlock
+from text_recognizer.network.convnext.downsample import Downsample
+from text_recognizer.network.convnext.norm import LayerNorm
class ConvNextBlock(nn.Module):
diff --git a/text_recognizer/networks/convnext/downsample.py b/text_recognizer/network/convnext/downsample.py
index a8a0466..a8a0466 100644
--- a/text_recognizer/networks/convnext/downsample.py
+++ b/text_recognizer/network/convnext/downsample.py
diff --git a/text_recognizer/networks/convnext/norm.py b/text_recognizer/network/convnext/norm.py
index 3355de9..3355de9 100644
--- a/text_recognizer/networks/convnext/norm.py
+++ b/text_recognizer/network/convnext/norm.py
diff --git a/text_recognizer/networks/convnext/residual.py b/text_recognizer/network/convnext/residual.py
index dfc2847..dfc2847 100644
--- a/text_recognizer/networks/convnext/residual.py
+++ b/text_recognizer/network/convnext/residual.py
diff --git a/text_recognizer/network/transformer/__init__.py b/text_recognizer/network/transformer/__init__.py
new file mode 100644
index 0000000..a3f3011
--- /dev/null
+++ b/text_recognizer/network/transformer/__init__.py
@@ -0,0 +1 @@
+"""Transformer modules."""
diff --git a/text_recognizer/network/transformer/attend.py b/text_recognizer/network/transformer/attend.py
new file mode 100644
index 0000000..4e643fb
--- /dev/null
+++ b/text_recognizer/network/transformer/attend.py
@@ -0,0 +1,94 @@
+from typing import Optional
+from collections import namedtuple
+
+import torch
+from torch import Tensor, einsum, nn
+from einops import rearrange
+import torch.nn.functional as F
+
+Config = namedtuple(
+ "FlashAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"]
+)
+
+
+class Attend(nn.Module):
+    def __init__(self, use_flash: bool, dropout_rate: float = 0.0) -> None:
+        super().__init__()
+        self.use_flash, self.dropout_rate = use_flash, dropout_rate
+ self.cpu_cfg = Config(True, True, True)
+ self.cuda_cfg = None
+ if not torch.cuda.is_available():
+ return
+
+ device_properties = torch.cuda.get_device_properties(torch.device("cuda"))
+ if device_properties.major == 8 and device_properties.minor == 0:
+ self.cuda_cfg = Config(True, False, False)
+ else:
+ self.cuda_cfg = Config(False, True, True)
+
+ def flash_attn(self, q: Tensor, k: Tensor, v: Tensor, causal: bool) -> Tensor:
+ cfg = self.cuda_cfg if q.is_cuda else self.cpu_cfg
+ with torch.backends.cuda.sdp_kernel(**cfg._asdict()):
+ out = F.scaled_dot_product_attention(q, k, v, is_causal=causal)
+ return out
+
+ def atten(
+ self,
+ q: Tensor,
+ k: Tensor,
+ v: Tensor,
+ causal: bool,
+ mask: Optional[Tensor] = None,
+ ) -> Tensor:
+        b, scale = q.shape[0], q.shape[-1] ** -0.5  # scale = 1/sqrt(dim_head)
+        energy = einsum("b h i d, b h j d -> b h i j", q, k) * scale
+
+ mask_value = -torch.finfo(energy.dtype).max
+
+ if mask is not None:
+ energy = apply_input_mask(b, k, energy, mask, mask_value)
+
+ if causal:
+ energy = apply_causal_mask(energy, mask_value)
+
+ attn = F.softmax(energy, dim=-1)
+        attn = F.dropout(attn, p=self.dropout_rate, training=self.training)
+ return einsum("b h i j, b h j d -> b h i d", attn, v)
+
+ def forward(
+ self,
+ q: Tensor,
+ k: Tensor,
+ v: Tensor,
+ causal: bool,
+ mask: Optional[Tensor] = None,
+ ) -> Tensor:
+ if self.use_flash:
+ return self.flash_attn(q, k, v, causal)
+ else:
+ return self.atten(q, k, v, causal, mask)
+
+
+def apply_input_mask(
+ b: int,
+ k: Tensor,
+ energy: Tensor,
+ mask: Optional[Tensor],
+ mask_value: float,
+) -> Tensor:
+ """Applies an input mask."""
+ k_mask = torch.ones((b, k.shape[-2]), device=energy.device).bool()
+ q_mask = rearrange(mask, "b i -> b () i ()")
+ k_mask = rearrange(k_mask, "b j -> b () () j")
+ input_mask = q_mask * k_mask
+ return energy.masked_fill_(~input_mask, mask_value)
+
+
+def apply_causal_mask(
+ energy: Tensor,
+ mask_value: float,
+) -> Tensor:
+ """Applies a causal mask to the energy tensor."""
+ i, j, device = *energy.shape[-2:], energy.device
+ causal_mask = torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
+ return energy.masked_fill(causal_mask, mask_value)
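A minimal smoke test of the new Attend module (shapes invented; assumes PyTorch >= 2.0 for scaled_dot_product_attention). Note that the flash path only honors the causal flag; the padding mask is applied only on the einsum fallback:

import torch
from text_recognizer.network.transformer.attend import Attend

attend = Attend(use_flash=True)
q = torch.randn(2, 8, 16, 64)  # (batch, heads, seq_len, dim_head)
k = torch.randn(2, 8, 16, 64)
v = torch.randn(2, 8, 16, 64)
out = attend(q, k, v, causal=True)
print(out.shape)  # torch.Size([2, 8, 16, 64])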
diff --git a/text_recognizer/network/transformer/attention.py b/text_recognizer/network/transformer/attention.py
new file mode 100644
index 0000000..8e18f8a
--- /dev/null
+++ b/text_recognizer/network/transformer/attention.py
@@ -0,0 +1,56 @@
+"""Implements the attention module for the transformer."""
+from typing import Optional
+from text_recognizer.network.transformer.norm import RMSNorm
+from text_recognizer.network.transformer.attend import Attend
+
+import torch
+from einops import rearrange
+from torch import Tensor, nn
+
+
+class Attention(nn.Module):
+ """Standard attention."""
+
+ def __init__(
+ self,
+ dim: int,
+ heads: int,
+ causal: bool = False,
+ dim_head: int = 64,
+ dropout_rate: float = 0.0,
+ use_flash: bool = True,
+ ) -> None:
+ super().__init__()
+ self.heads = heads
+ inner_dim = dim_head * heads
+ self.norm = nn.LayerNorm(dim)
+ self.to_q = nn.Linear(dim, inner_dim, bias=False)
+ self.to_k = nn.Linear(dim, inner_dim, bias=False)
+ self.to_v = nn.Linear(dim, inner_dim, bias=False)
+ # self.q_norm = RMSNorm(heads, dim_head)
+ # self.k_norm = RMSNorm(heads, dim_head)
+        self.attend = Attend(use_flash, dropout_rate)
+ self.to_out = nn.Linear(inner_dim, dim, bias=False)
+ self.scale = dim**-0.5
+ self.causal = causal
+ self.dropout_rate = dropout_rate
+ self.dropout = nn.Dropout(p=self.dropout_rate)
+
+ def forward(
+ self,
+ x: Tensor,
+ context: Optional[Tensor] = None,
+ mask: Optional[Tensor] = None,
+ ) -> Tensor:
+ """Computes the attention."""
+ x = self.norm(x)
+ q = self.to_q(x)
+ k = self.to_k(x if context is None else context)
+ v = self.to_v(x if context is None else context)
+ q, k, v = map(
+ lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), (q, k, v)
+ )
+ out = self.attend(q, k, v, self.causal, mask)
+ out = rearrange(out, "b h n d -> b n (h d)")
+ out = self.to_out(out)
+ return out
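The block doubles as self- and cross-attention depending on whether context is given — a rough usage sketch with invented sizes:

import torch
from text_recognizer.network.transformer.attention import Attention

attn = Attention(dim=256, heads=8, causal=False, dim_head=64, use_flash=False)
x = torch.randn(1, 10, 256)       # queries always come from x
ctx = torch.randn(1, 49, 256)     # e.g. image features
self_out = attn(x)                # keys/values from x itself
cross_out = attn(x, context=ctx)  # keys/values from the context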
diff --git a/text_recognizer/network/transformer/decoder.py b/text_recognizer/network/transformer/decoder.py
new file mode 100644
index 0000000..06925ba
--- /dev/null
+++ b/text_recognizer/network/transformer/decoder.py
@@ -0,0 +1,57 @@
+"""Transformer decoder module."""
+from typing import Optional
+from torch import Tensor, nn
+
+from text_recognizer.network.transformer.attention import Attention
+from text_recognizer.network.transformer.ff import FeedForward
+
+
+class Decoder(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ inner_dim: int,
+ heads: int,
+ dim_head: int,
+ depth: int,
+ dropout_rate: float = 0.0,
+ ) -> None:
+ super().__init__()
+ self.norm = nn.LayerNorm(dim)
+ self.layers = nn.ModuleList(
+ [
+ nn.ModuleList(
+ [
+ Attention(
+ dim,
+ heads,
+ True,
+ dim_head,
+ dropout_rate,
+ ),
+ FeedForward(dim, inner_dim, dropout_rate),
+ Attention(
+ dim,
+ heads,
+ False,
+ dim_head,
+ dropout_rate,
+ ),
+ ]
+ )
+ for _ in range(depth)
+ ]
+ )
+
+ def forward(
+ self,
+ x: Tensor,
+ context: Tensor,
+ mask: Optional[Tensor] = None,
+ ) -> Tensor:
+ """Applies decoder block on input signals."""
+ for self_attn, ff, cross_attn in self.layers:
+ x = x + self_attn(x, mask=mask)
+ x = x + ff(x)
+ x = x + cross_attn(x, context=context)
+ return self.norm(x)
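Each layer applies causal self-attention, the feed-forward, and then cross-attention over the encoder output, all with residual connections (note the feed-forward sits before cross-attention rather than after it). A quick shape check with invented sizes:

import torch
from text_recognizer.network.transformer.decoder import Decoder

decoder = Decoder(dim=256, inner_dim=1024, heads=8, dim_head=64, depth=2)
tokens = torch.randn(1, 12, 256)  # embedded text tokens
memory = torch.randn(1, 49, 256)  # encoder output
out = decoder(tokens, memory)     # -> torch.Size([1, 12, 256])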
diff --git a/text_recognizer/networks/transformer/embeddings/__init__.py b/text_recognizer/network/transformer/embedding/__init__.py
index bb3f904..bb3f904 100644
--- a/text_recognizer/networks/transformer/embeddings/__init__.py
+++ b/text_recognizer/network/transformer/embedding/__init__.py
diff --git a/text_recognizer/network/transformer/embedding/absolute.py b/text_recognizer/network/transformer/embedding/absolute.py
new file mode 100644
index 0000000..08b2c2a
--- /dev/null
+++ b/text_recognizer/network/transformer/embedding/absolute.py
@@ -0,0 +1,28 @@
+from typing import Optional
+
+import torch
+from torch import nn, Tensor
+from text_recognizer.network.transformer.embedding.l2_norm import l2_norm
+
+
+class AbsolutePositionalEmbedding(nn.Module):
+ def __init__(self, dim: int, max_length: int, use_l2: bool = False) -> None:
+ super().__init__()
+ self.scale = dim**-0.5 if not use_l2 else 1.0
+ self.max_length = max_length
+ self.use_l2 = use_l2
+ self.to_embedding = nn.Embedding(max_length, dim)
+ if self.use_l2:
+ nn.init.normal_(self.to_embedding.weight, std=1e-5)
+
+ def forward(self, x: Tensor, pos: Optional[Tensor] = None) -> Tensor:
+ n, device = x.shape[1], x.device
+ assert (
+ n <= self.max_length
+ ), f"Sequence length {n} is greater than the maximum positional embedding {self.max_length}"
+
+ if pos is None:
+ pos = torch.arange(n, device=device)
+
+ embedding = self.to_embedding(pos) * self.scale
+ return l2_norm(embedding) if self.use_l2 else embedding
diff --git a/text_recognizer/network/transformer/embedding/l2_norm.py b/text_recognizer/network/transformer/embedding/l2_norm.py
new file mode 100644
index 0000000..0e48bca
--- /dev/null
+++ b/text_recognizer/network/transformer/embedding/l2_norm.py
@@ -0,0 +1,9 @@
+from einops import rearrange
+import torch.nn.functional as F
+from torch import Tensor
+
+
+def l2_norm(t: Tensor, groups: int = 1) -> Tensor:
+ t = rearrange(t, "... (g d) -> ... g d", g=groups)
+ t = F.normalize(t, p=2, dim=-1)
+ return rearrange(t, "... g d -> ... (g d)")
diff --git a/text_recognizer/network/transformer/embedding/sincos.py b/text_recognizer/network/transformer/embedding/sincos.py
new file mode 100644
index 0000000..ed6b0ab
--- /dev/null
+++ b/text_recognizer/network/transformer/embedding/sincos.py
@@ -0,0 +1,13 @@
+import torch
+
+
+def sincos_2d(h: int, w: int, dim: int, temperature: int = 10000, dtype: torch.dtype = torch.float32) -> torch.Tensor:
+ y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
+ assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb"
+ omega = torch.arange(dim // 4) / (dim // 4 - 1)
+ omega = 1.0 / (temperature**omega)
+
+ y = y.flatten()[:, None] * omega[None, :]
+ x = x.flatten()[:, None] * omega[None, :]
+ pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)
+ return pe.type(dtype)
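sincos_2d builds one fixed sin/cos row per position of the patch grid; for a hypothetical 7x32 grid:

from text_recognizer.network.transformer.embedding.sincos import sincos_2d

pe = sincos_2d(h=7, w=32, dim=256)
print(pe.shape)  # torch.Size([224, 256]), one row per (y, x) patch position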
diff --git a/text_recognizer/network/transformer/embedding/token.py b/text_recognizer/network/transformer/embedding/token.py
new file mode 100644
index 0000000..1df2fd6
--- /dev/null
+++ b/text_recognizer/network/transformer/embedding/token.py
@@ -0,0 +1,18 @@
+from torch import nn, Tensor
+
+from text_recognizer.network.transformer.embedding.l2_norm import l2_norm
+
+
+class TokenEmbedding(nn.Module):
+ def __init__(self, num_tokens: int, dim: int, use_l2: bool = True) -> None:
+ super().__init__()
+ self.use_l2 = use_l2
+ self.to_embedding = nn.Embedding(num_tokens, dim)
+ if self.use_l2:
+ nn.init.normal_(self.to_embedding.weight, std=1e-5)
+ else:
+ nn.init.kaiming_normal_(self.to_embedding.weight)
+
+ def forward(self, x: Tensor) -> Tensor:
+ embedding = self.to_embedding(x)
+ return l2_norm(embedding) if self.use_l2 else embedding
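With use_l2=True the weights start near zero and every looked-up embedding is L2-normalized, so outputs land on the unit sphere; vocabulary size and dim below are invented:

import torch
from text_recognizer.network.transformer.embedding.token import TokenEmbedding

emb = TokenEmbedding(num_tokens=58, dim=256, use_l2=True)
out = emb(torch.randint(0, 58, (1, 5)))
print(out.norm(dim=-1))  # ~1.0 for every token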
diff --git a/text_recognizer/network/transformer/encoder.py b/text_recognizer/network/transformer/encoder.py
new file mode 100644
index 0000000..ea4b0b3
--- /dev/null
+++ b/text_recognizer/network/transformer/encoder.py
@@ -0,0 +1,46 @@
+"""Transformer encoder module."""
+from torch import Tensor, nn
+
+from text_recognizer.network.transformer.attention import Attention
+from text_recognizer.network.transformer.ff import FeedForward
+
+
+class Encoder(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ inner_dim: int,
+ heads: int,
+ dim_head: int,
+ depth: int,
+ dropout_rate: float = 0.0,
+ ) -> None:
+ super().__init__()
+ self.norm = nn.LayerNorm(dim)
+ self.layers = nn.ModuleList(
+ [
+ nn.ModuleList(
+ [
+ Attention(
+ dim,
+ heads,
+ False,
+ dim_head,
+ dropout_rate,
+ ),
+ FeedForward(dim, inner_dim, dropout_rate),
+ ]
+ )
+ for _ in range(depth)
+ ]
+ )
+
+ def forward(
+ self,
+ x: Tensor,
+ ) -> Tensor:
+ """Applies decoder block on input signals."""
+ for self_attn, ff in self.layers:
+ x = x + self_attn(x)
+ x = x + ff(x)
+ return self.norm(x)
diff --git a/text_recognizer/network/transformer/ff.py b/text_recognizer/network/transformer/ff.py
new file mode 100644
index 0000000..9181323
--- /dev/null
+++ b/text_recognizer/network/transformer/ff.py
@@ -0,0 +1,22 @@
+"""Feedforward layer in transformer."""
+from torch import Tensor, nn
+
+
+class FeedForward(nn.Module):
+ def __init__(
+ self,
+ dim: int,
+ inner_dim: int,
+ dropout_rate: float = 0.0,
+ ) -> None:
+ super().__init__()
+ self.ff = nn.Sequential(
+ nn.LayerNorm(dim),
+ nn.Linear(dim, inner_dim),
+ nn.GELU(),
+ nn.Dropout(dropout_rate),
+ nn.Linear(inner_dim, dim),
+ )
+
+ def forward(self, x: Tensor) -> Tensor:
+ return self.ff(x)
diff --git a/text_recognizer/network/transformer/norm.py b/text_recognizer/network/transformer/norm.py
new file mode 100644
index 0000000..2737754
--- /dev/null
+++ b/text_recognizer/network/transformer/norm.py
@@ -0,0 +1,22 @@
+"""Normalization layers for transformers.
+
+Copied from lucidrains:
+ https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
+
+"""
+import torch
+from torch import Tensor, nn
+import torch.nn.functional as F
+
+
+class RMSNorm(nn.Module):
+ """Root mean square layer normalization."""
+
+ def __init__(self, heads: int, dim: int) -> None:
+ super().__init__()
+        self.scale = dim**0.5  # sqrt(dim); F.normalize already divides by the L2 norm
+ self.gamma = nn.Parameter(torch.ones(heads, 1, dim))
+
+ def forward(self, x: Tensor) -> Tensor:
+ """Applies normalization."""
+ return F.normalize(x, dim=-1) * self.scale * self.gamma
diff --git a/text_recognizer/network/vit.py b/text_recognizer/network/vit.py
new file mode 100644
index 0000000..80176a8
--- /dev/null
+++ b/text_recognizer/network/vit.py
@@ -0,0 +1,76 @@
+"""Transformer module."""
+from typing import Type
+
+from einops.layers.torch import Rearrange
+from torch import Tensor, nn
+
+from text_recognizer.network.transformer.embedding.token import TokenEmbedding
+from text_recognizer.network.transformer.embedding.sincos import sincos_2d
+from text_recognizer.network.transformer.decoder import Decoder
+from text_recognizer.network.transformer.encoder import Encoder
+
+
+class VisionTransformer(nn.Module):
+ def __init__(
+ self,
+ image_height: int,
+ image_width: int,
+ patch_height: int,
+ patch_width: int,
+ dim: int,
+ num_classes: int,
+ encoder: Encoder,
+ decoder: Decoder,
+ token_embedding: TokenEmbedding,
+        pos_embedding: nn.Module,
+ tie_embeddings: bool,
+ pad_index: int,
+ ) -> None:
+ super().__init__()
+ patch_dim = patch_height * patch_width
+ self.to_patch_embedding = nn.Sequential(
+ Rearrange(
+ "b c (h ph) (w pw) -> b (h w) (ph pw c)",
+ ph=patch_height,
+ pw=patch_width,
+ ),
+ nn.LayerNorm(patch_dim),
+ nn.Linear(patch_dim, dim),
+ nn.LayerNorm(dim),
+ )
+ self.patch_embedding = sincos_2d(
+ h=image_height // patch_height, w=image_width // patch_width, dim=dim
+ )
+ self.pos_embedding = pos_embedding
+ self.token_embedding = token_embedding
+ self.to_logits = (
+ nn.Linear(dim, num_classes)
+ if not tie_embeddings
+ else lambda t: t @ self.token_embedding.to_embedding.weight.t()
+ )
+ self.encoder = encoder
+ self.decoder = decoder
+ self.pad_index = pad_index
+
+ def encode(self, img: Tensor) -> Tensor:
+ x = self.to_patch_embedding(img)
+ x += self.patch_embedding.to(img.device, dtype=img.dtype)
+ return self.encoder(x)
+
+ def decode(self, text: Tensor, context: Tensor) -> Tensor:
+ text = text.long()
+ mask = text != self.pad_index
+ tokens = self.token_embedding(text)
+ tokens = tokens + self.pos_embedding(tokens)
+        output = self.decoder(tokens, context, mask)
+ return self.to_logits(output)
+
+ def forward(
+ self,
+ img: Tensor,
+ text: Tensor,
+ ) -> Tensor:
+ """Applies decoder block on input signals."""
+ context = self.encode(img)
+ logits = self.decode(text, context)
+ return logits.permute(0, 2, 1)
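A sketch of how the new pieces assemble end to end; every hyperparameter below is invented for illustration (the actual training config is not part of this commit, and the flash path assumes PyTorch >= 2.0):

import torch
from text_recognizer.network.vit import VisionTransformer
from text_recognizer.network.transformer.encoder import Encoder
from text_recognizer.network.transformer.decoder import Decoder
from text_recognizer.network.transformer.embedding.token import TokenEmbedding
from text_recognizer.network.transformer.embedding.absolute import AbsolutePositionalEmbedding

net = VisionTransformer(
    image_height=56,
    image_width=256,
    patch_height=8,
    patch_width=8,
    dim=256,
    num_classes=58,
    encoder=Encoder(dim=256, inner_dim=1024, heads=8, dim_head=64, depth=4),
    decoder=Decoder(dim=256, inner_dim=1024, heads=8, dim_head=64, depth=4),
    token_embedding=TokenEmbedding(num_tokens=58, dim=256, use_l2=True),
    pos_embedding=AbsolutePositionalEmbedding(dim=256, max_length=89),
    tie_embeddings=True,
    pad_index=3,
)
img = torch.randn(1, 1, 56, 256)      # grayscale line image
text = torch.randint(0, 58, (1, 89))  # token indices
logits = net(img, text)               # -> (1, num_classes, 89)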
diff --git a/text_recognizer/networks/__init__.py b/text_recognizer/networks/__init__.py
deleted file mode 100644
index f921882..0000000
--- a/text_recognizer/networks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-"""Network modules"""
-from text_recognizer.networks.conv_transformer import ConvTransformer
diff --git a/text_recognizer/networks/conv_transformer.py b/text_recognizer/networks/conv_transformer.py
deleted file mode 100644
index d36162a..0000000
--- a/text_recognizer/networks/conv_transformer.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""Base network module."""
-from typing import Type
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.decoder import Decoder
-
-
-class ConvTransformer(nn.Module):
- """Base transformer network."""
-
- def __init__(
- self,
- encoder: Type[nn.Module],
- decoder: Decoder,
- ) -> None:
- super().__init__()
- self.encoder = encoder
- self.decoder = decoder
-
- def encode(self, img: Tensor) -> Tensor:
- """Encodes images to latent representation."""
- return self.encoder(img)
-
- def decode(self, tokens: Tensor, img_features: Tensor) -> Tensor:
- """Decodes latent images embedding into characters."""
- return self.decoder(tokens, img_features)
-
- def forward(self, img: Tensor, tokens: Tensor) -> Tensor:
- """Encodes images into token logtis.
-
- Args:
- img (Tensor): Input image(s).
- tokens (Tensor): token embeddings.
-
- Shapes:
- - img: :math: `(B, 1, H, W)`
- - tokens: :math: `(B, Sy)`
- - logits: :math: `(B, Sy, C)`
-
- where B is the batch size, H is the image height, W is the image
- width, Sy the output length, and C is the number of classes.
-
- Returns:
- Tensor: Sequence of logits.
- """
- img_features = self.encode(img)
- logits = self.decode(tokens, img_features)
- return logits
diff --git a/text_recognizer/networks/convnext/__init__.py b/text_recognizer/networks/convnext/__init__.py
deleted file mode 100644
index faebe6f..0000000
--- a/text_recognizer/networks/convnext/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Convnext module."""
-from text_recognizer.networks.convnext.attention import (
- Attention,
- FeedForward,
- TransformerBlock,
-)
-from text_recognizer.networks.convnext.convnext import ConvNext
diff --git a/text_recognizer/networks/image_encoder.py b/text_recognizer/networks/image_encoder.py
deleted file mode 100644
index ab60560..0000000
--- a/text_recognizer/networks/image_encoder.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Encodes images to latent embeddings."""
-from typing import Tuple, Type
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.embeddings.axial import (
- AxialPositionalEmbeddingImage,
-)
-
-
-class ImageEncoder(nn.Module):
- """Encodes images to latent embeddings."""
-
- def __init__(
- self,
- encoder: Type[nn.Module],
- pixel_embedding: AxialPositionalEmbeddingImage,
- ) -> None:
- super().__init__()
- self.encoder = encoder
- self.pixel_embedding = pixel_embedding
-
- def forward(self, img: Tensor) -> Tensor:
- """Encodes an image into a latent feature vector.
-
- Args:
- img (Tensor): Image tensor.
-
- Shape:
- - x: :math: `(B, C, H, W)`
- - z: :math: `(B, Sx, D)`
-
- where Sx is the length of the flattened feature maps projected from
- the encoder. D latent dimension for each pixel in the projected
- feature maps.
-
- Returns:
- Tensor: A Latent embedding of the image.
- """
- z = self.encoder(img)
- z = z + self.pixel_embedding(z)
- z = z.flatten(start_dim=2)
- # Permute tensor from [B, E, Ho * Wo] to [B, Sx, E]
- z = z.permute(0, 2, 1)
- return z
diff --git a/text_recognizer/networks/text_decoder.py b/text_recognizer/networks/text_decoder.py
deleted file mode 100644
index 500bcf9..0000000
--- a/text_recognizer/networks/text_decoder.py
+++ /dev/null
@@ -1,55 +0,0 @@
-"""Text decoder."""
-import torch
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.decoder import Decoder
-
-
-class TextDecoder(nn.Module):
- """Decodes images to token logits."""
-
- def __init__(
- self,
- dim: int,
- num_classes: int,
- pad_index: Tensor,
- decoder: Decoder,
- ) -> None:
- super().__init__()
- self.dim = dim
- self.num_classes = num_classes
- self.pad_index = pad_index
- self.decoder = decoder
- self.token_embedding = nn.Embedding(
- num_embeddings=self.num_classes, embedding_dim=self.dim
- )
- self.to_logits = nn.Linear(in_features=self.dim, out_features=self.num_classes)
-
- def forward(self, tokens: Tensor, img_features: Tensor) -> Tensor:
- """Decodes latent images embedding into logit tokens.
-
- Args:
- tokens (Tensor): Token indecies.
- img_features (Tensor): Latent images embedding.
-
- Shapes:
- - tokens: :math: `(B, Sy)`
- - img_features: :math: `(B, Sx, D)`
- - logits: :math: `(B, Sy, C)`
-
- where Sy is the length of the output, C is the number of classes
- and D is the hidden dimension.
-
- Returns:
- Tensor: Sequence of logits.
- """
- tokens = tokens.long()
- mask = tokens != self.pad_index
- tokens = self.token_embedding(tokens)
- tokens = self.decoder(x=tokens, context=img_features, mask=mask)
- logits = (
- tokens @ torch.transpose(self.token_embedding.weight.to(tokens.dtype), 0, 1)
- ).float()
- logits = self.to_logits(tokens) # [B, Sy, C]
- logits = logits.permute(0, 2, 1) # [B, C, Sy]
- return logits
diff --git a/text_recognizer/networks/transformer/__init__.py b/text_recognizer/networks/transformer/__init__.py
deleted file mode 100644
index 0d17deb..0000000
--- a/text_recognizer/networks/transformer/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""Transformer modules."""
-from text_recognizer.networks.transformer.attention import Attention
-from text_recognizer.networks.transformer.decoder import Decoder, DecoderBlock
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-from text_recognizer.networks.transformer.ff import FeedForward
-from text_recognizer.networks.transformer.norm import RMSNorm
diff --git a/text_recognizer/networks/transformer/attention.py b/text_recognizer/networks/transformer/attention.py
deleted file mode 100644
index 85f513e..0000000
--- a/text_recognizer/networks/transformer/attention.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""Implementes the attention module for the transformer."""
-from typing import Optional
-
-import torch
-import torch.nn.functional as F
-from einops import rearrange
-from torch import Tensor, einsum, nn
-
-from text_recognizer.networks.transformer.embeddings.rotary import (
- RotaryEmbedding,
-)
-
-
-class Attention(nn.Module):
- """Standard attention."""
-
- def __init__(
- self,
- dim: int,
- num_heads: int,
- causal: bool = False,
- dim_head: int = 64,
- dropout_rate: float = 0.0,
- ) -> None:
- super().__init__()
- self.dim = dim
- self.scale = self.dim**-0.5
- self.num_heads = num_heads
- self.dim_head = dim_head
-
- self.causal = causal
- self.dropout_rate = dropout_rate
-
- # Single key/value head
- k_dim = dim_head
- v_dim = dim_head
-
- out_dim = self.num_heads * self.dim_head
-
- self.to_q = nn.Linear(self.dim, out_dim, bias=False)
- self.to_k = nn.Linear(self.dim, k_dim, bias=False)
- self.to_v = nn.Linear(self.dim, v_dim, bias=False)
-
- self.dropout = nn.Dropout(p=self.dropout_rate)
-
- # Feedforward
- self.fc = nn.Linear(out_dim, self.dim)
-
- def forward(
- self,
- x: Tensor,
- context: Optional[Tensor] = None,
- mask: Optional[Tensor] = None,
- rotary_embedding: Optional[RotaryEmbedding] = None,
- ) -> Tensor:
- """Computes the attention."""
- b, device = x.shape[0], x.device
-
- q = self.to_q(x)
- q = rearrange(q, "b n (h d) -> b h n d", h=self.num_heads)
- k = self.to_k(context) if context is not None else self.to_k(x)
- v = self.to_v(context) if context is not None else self.to_v(x)
-
- if rotary_embedding is not None:
- q, k, v = map(lambda t: rotary_embedding.rotate(t), (q, k, v))
-
- energy = einsum("b h i d, b j d -> b h i j", q, k) * self.scale
- mask_value = -torch.finfo(energy.dtype).max
- energy = apply_input_mask(b, k, energy, mask, mask_value, device)
- if self.causal:
- energy = apply_causal_mask(energy, mask, mask_value, device)
-
- attn = F.softmax(energy, dim=-1)
- attn = self.dropout(attn)
- out = einsum("b h i j, b j d -> b h i d", attn, v)
- out = rearrange(out, "b h n d -> b n (h d)")
- out = self.fc(out)
- return out
-
-
-def apply_input_mask(
- b: int,
- k: Tensor,
- energy: Tensor,
- mask: Optional[Tensor],
- mask_value: Tensor,
- device: str,
-) -> Tensor:
- """Applies an input mask."""
- if mask is not None:
- k_mask = torch.ones((b, k.shape[-2]), device=device).bool()
- q_mask = rearrange(mask, "b i -> b () i ()")
- k_mask = rearrange(k_mask, "b j -> b () () j")
- input_mask = q_mask * k_mask
-
- energy = energy.masked_fill_(~input_mask, mask_value)
- return energy
-
-
-def apply_causal_mask(
- energy: Tensor, mask: Tensor, mask_value: Tensor, device: str
-) -> Tensor:
- """Applies a causal mask to the energy tensor."""
- i, j = energy.shape[-2:]
- r = torch.arange(i, device=device)
- mask = rearrange(r, "i -> () () i ()") < rearrange(r, "j -> () () () j")
- mask = F.pad(mask, (j - i, 0), value=False)
- energy.masked_fill_(mask, mask_value)
- return energy
diff --git a/text_recognizer/networks/transformer/decoder.py b/text_recognizer/networks/transformer/decoder.py
deleted file mode 100644
index 826bc13..0000000
--- a/text_recognizer/networks/transformer/decoder.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Transformer decoder module."""
-from copy import deepcopy
-from typing import Optional
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.decoder_block import DecoderBlock
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-
-
-class Decoder(nn.Module):
- """Decoder Network."""
-
- def __init__(
- self,
- depth: int,
- dim: int,
- block: DecoderBlock,
- rotary_embedding: RotaryEmbedding,
- ) -> None:
- super().__init__()
- self.depth = depth
- self.rotary_embedding = rotary_embedding
- self.blocks = nn.ModuleList([deepcopy(block) for _ in range(self.depth)])
- self.ln = nn.LayerNorm(dim)
-
- def forward(
- self,
- x: Tensor,
- context: Optional[Tensor] = None,
- mask: Optional[Tensor] = None,
- ) -> Tensor:
- """Applies attention blocks."""
- for block in self.blocks:
- x = block(
- x=x,
- context=context,
- mask=mask,
- rotary_embedding=self.rotary_embedding,
- )
- return self.ln(x)
diff --git a/text_recognizer/networks/transformer/decoder_block.py b/text_recognizer/networks/transformer/decoder_block.py
deleted file mode 100644
index b8eb5c4..0000000
--- a/text_recognizer/networks/transformer/decoder_block.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Transformer decoder module."""
-from copy import deepcopy
-from typing import Optional, Type
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.attention import Attention
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-from text_recognizer.networks.transformer.ff import FeedForward
-
-
-class DecoderBlock(nn.Module):
- """Residual decoder block."""
-
- def __init__(
- self,
- self_attn: Attention,
- norm: Type[nn.Module],
- ff: FeedForward,
- cross_attn: Optional[Attention] = None,
- ) -> None:
- super().__init__()
- self.ln_attn = norm
- self.attn = self_attn
- self.ln_cross_attn = deepcopy(norm)
- self.cross_attn = cross_attn
- self.ln_ff = deepcopy(norm)
- self.ff = ff
-
- def forward(
- self,
- x: Tensor,
- rotary_embedding: RotaryEmbedding,
- context: Optional[Tensor] = None,
- mask: Optional[Tensor] = None,
- ) -> Tensor:
- """Applies decoder block on input signals."""
- x = x + self.attn(self.ln_attn(x), mask=mask, rotary_embedding=rotary_embedding)
- x = x + self.cross_attn(
- x=self.ln_cross_attn(x),
- context=context,
- )
- x = x + self.ff(self.ln_ff(x))
- return x
diff --git a/text_recognizer/networks/transformer/embeddings/axial.py b/text_recognizer/networks/transformer/embeddings/axial.py
deleted file mode 100644
index 9b872a9..0000000
--- a/text_recognizer/networks/transformer/embeddings/axial.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""Axial attention for multi-dimensional data.
-
-Stolen from:
- https://github.com/lucidrains/axial-attention/blob/
- eff2c10c2e76c735a70a6b995b571213adffbbb7/axial_attention/axial_attention.py#L100
-"""
-from functools import reduce
-from operator import mul
-from typing import Optional, Sequence
-
-import torch
-from torch import Tensor, nn
-
-
-class AxialPositionalEmbedding(nn.Module):
- def __init__(
- self,
- dim: int,
- axial_shape: Sequence[int],
- axial_dims: Optional[Sequence[int]] = None,
- ) -> None:
- super().__init__()
-
- self.dim = dim
- self.shape = axial_shape
- self.max_seq_len = reduce(mul, axial_shape, 1)
-
- self.summed = axial_dims is None
- axial_dims = ((dim,) * len(axial_shape)) if self.summed else axial_dims
-
- assert len(self.shape) == len(
- axial_dims
- ), "number of axial dimensions must equal the number of dimensions in the shape"
- assert (
- self.summed or not self.summed and sum(axial_dims) == dim
- ), f"axial dimensions must sum up to the target dimension {dim}"
-
- self.weights = ParameterList(self, "weights", len(axial_shape))
-
- for ind, (shape, axial_dim) in enumerate(zip(self.shape, axial_dims)):
- ax_shape = [1] * len(self.shape)
- ax_shape[ind] = shape
- ax_shape = (1, *ax_shape, axial_dim)
- ax_emb = nn.Parameter(torch.zeros(ax_shape).normal_(0, 1))
- self.weights.append(ax_emb)
-
- def forward(self, x: Tensor) -> Tensor:
- """Returns axial positional embedding."""
- b, t, _ = x.shape
- assert (
- t <= self.max_seq_len
- ), f"Sequence length ({t}) must be less than the maximum sequence length allowed ({self.max_seq_len})"
- embs = []
-
- for ax_emb in self.weights.to_list():
- axial_dim = ax_emb.shape[-1]
- expand_shape = (b, *self.shape, axial_dim)
- emb = ax_emb.expand(expand_shape).reshape(b, self.max_seq_len, axial_dim)
- embs.append(emb)
-
- pos_emb = sum(embs) if self.summed else torch.cat(embs, dim=-1)
- return pos_emb[:, :t].to(x)
-
-
-# a mock parameter list object until below issue is resolved
-# https://github.com/pytorch/pytorch/issues/36035
-class ParameterList(object):
- def __init__(self, kls, prefix, length):
- self.ind = 0
- self.kls = kls
- self.prefix = prefix
- self.length = length
-
- def _keyname(self, prefix, ind):
- return f"{prefix}_{ind}"
-
- def append(self, x):
- setattr(self.kls, self._keyname(self.prefix, self.ind), x)
- self.ind += 1
-
- def to_list(self):
- return [
- getattr(self.kls, self._keyname(self.prefix, i)) for i in range(self.length)
- ]
-
-
-class AxialPositionalEmbeddingImage(nn.Module):
- def __init__(
- self,
- dim: int,
- axial_shape: Sequence[int],
- axial_dims: Optional[Sequence[int]] = None,
- ) -> None:
- super().__init__()
- axial_dims = (dim // 2, dim // 2) if axial_dims is None else axial_dims
- assert len(axial_shape) == 2, "Axial shape must have 2 dimensions for images"
- self.dim = dim
- self.pos_emb = AxialPositionalEmbedding(dim, axial_shape, axial_dims)
-
- def forward(self, img):
- b, c, h, w = img.shape
- img = img.permute(0, 2, 3, 1).reshape(b, h * w, c)
- pos_emb = self.pos_emb(img)
- return pos_emb.reshape(b, h, w, self.dim).permute(0, 3, 1, 2)
diff --git a/text_recognizer/networks/transformer/embeddings/rotary.py b/text_recognizer/networks/transformer/embeddings/rotary.py
deleted file mode 100644
index ca0a260..0000000
--- a/text_recognizer/networks/transformer/embeddings/rotary.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Roatary embedding.
-
-Stolen from lucidrains:
- https://github.com/lucidrains/rotary-embedding-torch
-
-Explanation of roatary:
- https://blog.eleuther.ai/rotary-embeddings/
-"""
-from inspect import isfunction
-
-from einops import rearrange, repeat
-import torch
-from torch import Tensor, nn
-
-
-class RotaryEmbedding(nn.Module):
- """Rotary positional embedding."""
-
- def __init__(self, dim: int) -> None:
- super().__init__()
- inv_freqs = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
- self.register_buffer("inv_freqs", inv_freqs)
- self.cache = {}
-
- def rotate(self, t: Tensor, dim: int = -2) -> Tensor:
- """Rotate vector."""
- device, n = t.device, t.shape[dim]
- freqs = self.forward(lambda: torch.arange(n, device=device), cache_key=n)
- return apply_rotary_emb(t, freqs)
-
- def forward(self, t: Tensor, cache_key: int) -> Tensor:
- """Encodes tensor x with rotary embeddings."""
- if cache_key in self.cache:
- return self.cache[cache_key]
-
- if isfunction(t):
- t = t()
-
- freqs = self.inv_freqs
- freqs = torch.einsum("..., f -> ... f", t.type(freqs.dtype), freqs)
- freqs = repeat(freqs, "... n -> ... (n r)", r=2)
- self.cache[cache_key] = freqs
- return freqs
-
-
-def rotate_half(x: Tensor) -> Tensor:
- x = rearrange(x, "... (d r) -> ... d r", r=2)
- x1, x2 = x.unbind(dim=-1)
- x = torch.stack((-x2, x1), dim=-1)
- return rearrange(x, "... d r -> ... (d r)")
-
-
-def apply_rotary_emb(t: Tensor, freqs: Tensor, start_index: int = 0) -> Tensor:
- freqs = freqs.to(t)
- rot_dim = freqs.shape[-1]
- end_index = start_index + rot_dim
- assert rot_dim <= t.shape[-1], (
- f"feature dimension {t.shape[-1]} is not of sufficient size to rotate"
- f"in all the positions {rot_dim}"
- )
- t_left, t, t_right = (
- t[..., :start_index],
- t[..., start_index:end_index],
- t[..., end_index:],
- )
- t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
- return torch.cat((t_left, t, t_right), dim=-1)
diff --git a/text_recognizer/networks/transformer/ff.py b/text_recognizer/networks/transformer/ff.py
deleted file mode 100644
index 3ccf5b5..0000000
--- a/text_recognizer/networks/transformer/ff.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Feedforward layer in transformer.
-
-Stolen from lucidrains:
- https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
-"""
-from typing import Optional
-
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-
-class GEGLU(nn.Module):
- def __init__(self, dim_in: int, dim_out: int) -> None:
- super().__init__()
- self.fc = nn.Linear(dim_in, dim_out * 2)
-
- def forward(self, x: Tensor) -> Tensor:
- x, gate = self.fc(x).chunk(2, dim=-1)
- return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
- def __init__(
- self,
- dim: int,
- dim_out: Optional[int] = None,
- expansion_factor: int = 4,
- glu: bool = True,
- dropout_rate: float = 0.0,
- ) -> None:
- super().__init__()
- inner_dim = dim * expansion_factor
- dim_out = dim_out if dim_out is not None else dim
- in_projection = (
- nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
- if not glu
- else GEGLU(dim, inner_dim)
- )
-
- self.mlp = nn.Sequential(
- in_projection, nn.Dropout(dropout_rate), nn.Linear(inner_dim, dim_out)
- )
-
- def forward(self, x: Tensor) -> Tensor:
- return self.mlp(x)
diff --git a/text_recognizer/networks/transformer/norm.py b/text_recognizer/networks/transformer/norm.py
deleted file mode 100644
index 1431327..0000000
--- a/text_recognizer/networks/transformer/norm.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Normalization layers for transfromers.
-
-Copied from lucidrains:
- https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
-
-"""
-from typing import Optional, Type
-
-import torch
-from torch import Tensor, nn
-
-
-class RMSNorm(nn.Module):
- """Root mean square layer normalization."""
-
- def __init__(self, dim: int, eps: float = 1e-8) -> None:
- super().__init__()
- self.scale = dim**-0.5
- self.eps = eps
- self.g = nn.Parameter(torch.ones(dim))
-
- def forward(self, x: Tensor) -> Tensor:
- """Applies normalization."""
- norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
- return x / norm.clamp(min=self.eps) * self.g
-
-
-class PreNorm(nn.Module):
- """Applies layer normalization then function."""
-
- def __init__(
- self,
- normalized_shape: int,
- fn: Type[nn.Module],
- context_dim: Optional[int] = None,
- ) -> None:
- super().__init__()
- self.norm = nn.LayerNorm(normalized_shape)
- self.fn = fn
- self.norm_context = (
- nn.LayerNorm(context_dim) if context_dim is not None else None
- )
-
- def forward(self, x: Tensor, **kwargs) -> Tensor:
- """Applies pre norm."""
- x = self.norm(x)
- if self.norm_context is not None:
- context = kwargs["context"]
- normed_context = self.norm_context(context)
- kwargs.update(context=normed_context)
- return self.fn(x, **kwargs)
diff --git a/text_recognizer/optimizers/__init__.py b/text_recognizer/optimizer/__init__.py
index e69de29..e69de29 100644
--- a/text_recognizer/optimizers/__init__.py
+++ b/text_recognizer/optimizer/__init__.py
diff --git a/text_recognizer/optimizers/laprop.py b/text_recognizer/optimizer/laprop.py
index 9b4f4d4..9b4f4d4 100644
--- a/text_recognizer/optimizers/laprop.py
+++ b/text_recognizer/optimizer/laprop.py