| author | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2023-08-25 23:19:14 +0200 |
|---|---|---|
| committer | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2023-08-25 23:19:14 +0200 |
| commit | 49ca6ade1a19f7f9c702171537fe4be0dfcda66d (patch) | |
| tree | 20062ed1910758481f3d5fff11159706c7b990c6 /text_recognizer/network/transformer | |
| parent | 0421daf6bd97596703f426ba61c401599b538eeb (diff) | |
Rename and add flash atten
Diffstat (limited to 'text_recognizer/network/transformer')
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | text_recognizer/network/transformer/__init__.py | 1 |
| -rw-r--r-- | text_recognizer/network/transformer/attend.py | 94 |
| -rw-r--r-- | text_recognizer/network/transformer/attention.py | 56 |
| -rw-r--r-- | text_recognizer/network/transformer/decoder.py | 57 |
| -rw-r--r-- | text_recognizer/network/transformer/embedding/__init__.py | 1 |
| -rw-r--r-- | text_recognizer/network/transformer/embedding/absolute.py | 28 |
| -rw-r--r-- | text_recognizer/network/transformer/embedding/l2_norm.py | 9 |
| -rw-r--r-- | text_recognizer/network/transformer/embedding/sincos.py | 13 |
| -rw-r--r-- | text_recognizer/network/transformer/embedding/token.py | 18 |
| -rw-r--r-- | text_recognizer/network/transformer/encoder.py | 46 |
| -rw-r--r-- | text_recognizer/network/transformer/ff.py | 22 |
| -rw-r--r-- | text_recognizer/network/transformer/norm.py | 22 |
12 files changed, 367 insertions, 0 deletions
diff --git a/text_recognizer/network/transformer/__init__.py b/text_recognizer/network/transformer/__init__.py (new file mode 100644, index 0000000..a3f3011)

```python
"""Transformer modules."""
```

diff --git a/text_recognizer/network/transformer/attend.py b/text_recognizer/network/transformer/attend.py (new file mode 100644, index 0000000..4e643fb)

```python
from typing import Optional
from collections import namedtuple

import torch
from torch import Tensor, einsum, nn
from einops import rearrange
import torch.nn.functional as F

Config = namedtuple(
    "FlashAttentionConfig", ["enable_flash", "enable_math", "enable_mem_efficient"]
)


class Attend(nn.Module):
    def __init__(self, use_flash: bool) -> None:
        super().__init__()
        self.use_flash = use_flash
        self.cpu_cfg = Config(True, True, True)
        self.cuda_cfg = None
        if not torch.cuda.is_available():
            return

        device_properties = torch.cuda.get_device_properties(torch.device("cuda"))
        if device_properties.major == 8 and device_properties.minor == 0:
            self.cuda_cfg = Config(True, False, False)
        else:
            self.cuda_cfg = Config(False, True, True)

    def flash_attn(self, q: Tensor, k: Tensor, v: Tensor, causal: bool) -> Tensor:
        cfg = self.cuda_cfg if q.is_cuda else self.cpu_cfg
        with torch.backends.cuda.sdp_kernel(**cfg._asdict()):
            out = F.scaled_dot_product_attention(q, k, v, is_causal=causal)
        return out

    def atten(
        self,
        q: Tensor,
        k: Tensor,
        v: Tensor,
        causal: bool,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        b = q.shape[0]
        energy = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale

        mask_value = -torch.finfo(energy.dtype).max

        if mask is not None:
            energy = apply_input_mask(b, k, energy, mask, mask_value)

        if causal:
            energy = apply_causal_mask(energy, mask_value)

        attn = F.softmax(energy, dim=-1)
        attn = self.dropout(attn)
        return einsum("b h i j, b h j d -> b h i d", attn, v)

    def forward(
        self,
        q: Tensor,
        k: Tensor,
        v: Tensor,
        causal: bool,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        if self.use_flash:
            return self.flash_attn(q, k, v, causal)
        else:
            return self.atten(q, k, v, causal, mask)


def apply_input_mask(
    b: int,
    k: Tensor,
    energy: Tensor,
    mask: Optional[Tensor],
    mask_value: float,
) -> Tensor:
    """Applies an input mask."""
    k_mask = torch.ones((b, k.shape[-2]), device=energy.device).bool()
    q_mask = rearrange(mask, "b i -> b () i ()")
    k_mask = rearrange(k_mask, "b j -> b () () j")
    input_mask = q_mask * k_mask
    return energy.masked_fill_(~input_mask, mask_value)


def apply_causal_mask(
    energy: Tensor,
    mask_value: float,
) -> Tensor:
    """Applies a causal mask to the energy tensor."""
    i, j, device = *energy.shape[-2:], energy.device
    causal_mask = torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
    return energy.masked_fill(causal_mask, mask_value)
```

diff --git a/text_recognizer/network/transformer/attention.py b/text_recognizer/network/transformer/attention.py (new file mode 100644, index 0000000..8e18f8a)

```python
"""Implements the attention module for the transformer."""
from typing import Optional
from text_recognizer.network.transformer.norm import RMSNorm
from text_recognizer.network.transformer.attend import Attend

import torch
from einops import rearrange
from torch import Tensor, nn


class Attention(nn.Module):
    """Standard attention."""

    def __init__(
        self,
        dim: int,
        heads: int,
        causal: bool = False,
        dim_head: int = 64,
        dropout_rate: float = 0.0,
        use_flash: bool = True,
    ) -> None:
        super().__init__()
        self.heads = heads
        inner_dim = dim_head * heads
        self.norm = nn.LayerNorm(dim)
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_k = nn.Linear(dim, inner_dim, bias=False)
        self.to_v = nn.Linear(dim, inner_dim, bias=False)
        # self.q_norm = RMSNorm(heads, dim_head)
        # self.k_norm = RMSNorm(heads, dim_head)
        self.attend = Attend(use_flash)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)
        self.scale = dim**-0.5
        self.causal = causal
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(p=self.dropout_rate)

    def forward(
        self,
        x: Tensor,
        context: Optional[Tensor] = None,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        """Computes the attention."""
        x = self.norm(x)
        q = self.to_q(x)
        k = self.to_k(x if context is None else context)
        v = self.to_v(x if context is None else context)
        q, k, v = map(
            lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), (q, k, v)
        )
        out = self.attend(q, k, v, self.causal, mask)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.to_out(out)
        return out
```

diff --git a/text_recognizer/network/transformer/decoder.py b/text_recognizer/network/transformer/decoder.py (new file mode 100644, index 0000000..06925ba)

```python
"""Transformer decoder module."""
from typing import Optional
from torch import Tensor, nn

from text_recognizer.network.transformer.attention import Attention
from text_recognizer.network.transformer.ff import FeedForward


class Decoder(nn.Module):
    def __init__(
        self,
        dim: int,
        inner_dim: int,
        heads: int,
        dim_head: int,
        depth: int,
        dropout_rate: float = 0.0,
    ) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.layers = nn.ModuleList(
            [
                nn.ModuleList(
                    [
                        Attention(
                            dim,
                            heads,
                            True,
                            dim_head,
                            dropout_rate,
                        ),
                        FeedForward(dim, inner_dim, dropout_rate),
                        Attention(
                            dim,
                            heads,
                            False,
                            dim_head,
                            dropout_rate,
                        ),
                    ]
                )
                for _ in range(depth)
            ]
        )

    def forward(
        self,
        x: Tensor,
        context: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        """Applies decoder block on input signals."""
        for self_attn, ff, cross_attn in self.layers:
            x = x + self_attn(x, mask=mask)
            x = x + ff(x)
            x = x + cross_attn(x, context=context)
        return self.norm(x)
```

diff --git a/text_recognizer/network/transformer/embedding/__init__.py b/text_recognizer/network/transformer/embedding/__init__.py (new file mode 100644, index 0000000..bb3f904)

```python
"""Positional encodings for transformers."""
```

diff --git a/text_recognizer/network/transformer/embedding/absolute.py b/text_recognizer/network/transformer/embedding/absolute.py (new file mode 100644, index 0000000..08b2c2a)

```python
from typing import Optional

import torch
from torch import nn, Tensor
from text_recognizer.network.transformer.embedding.l2_norm import l2_norm


class AbsolutePositionalEmbedding(nn.Module):
    def __init__(self, dim: int, max_length: int, use_l2: bool = False) -> None:
        super().__init__()
        self.scale = dim**-0.5 if not use_l2 else 1.0
        self.max_length = max_length
        self.use_l2 = use_l2
        self.to_embedding = nn.Embedding(max_length, dim)
        if self.use_l2:
            nn.init.normal_(self.to_embedding.weight, std=1e-5)

    def forward(self, x: Tensor, pos: Optional[Tensor] = None) -> Tensor:
        n, device = x.shape[1], x.device
        assert (
            n <= self.max_length
        ), f"Sequence length {n} is greater than the maximum positional embedding {self.max_length}"

        if pos is None:
            pos = torch.arange(n, device=device)

        embedding = self.to_embedding(pos) * self.scale
        return l2_norm(embedding) if self.use_l2 else embedding
```

diff --git a/text_recognizer/network/transformer/embedding/l2_norm.py b/text_recognizer/network/transformer/embedding/l2_norm.py (new file mode 100644, index 0000000..0e48bca)

```python
from einops import rearrange
import torch.nn.functional as F
from torch import Tensor


def l2_norm(t: Tensor, groups=1) -> Tensor:
    t = rearrange(t, "... (g d) -> ... g d", g=groups)
    t = F.normalize(t, p=2, dim=-1)
    return rearrange(t, "... g d -> ... (g d)")
```

diff --git a/text_recognizer/network/transformer/embedding/sincos.py b/text_recognizer/network/transformer/embedding/sincos.py (new file mode 100644, index 0000000..ed6b0ab)

```python
import torch


def sincos_2d(h, w, dim, temperature: int = 10000, dtype=torch.float32):
    y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    assert (dim % 4) == 0, "feature dimension must be multiple of 4 for sincos emb"
    omega = torch.arange(dim // 4) / (dim // 4 - 1)
    omega = 1.0 / (temperature**omega)

    y = y.flatten()[:, None] * omega[None, :]
    x = x.flatten()[:, None] * omega[None, :]
    pe = torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)
    return pe.type(dtype)
```

diff --git a/text_recognizer/network/transformer/embedding/token.py b/text_recognizer/network/transformer/embedding/token.py (new file mode 100644, index 0000000..1df2fd6)

```python
from torch import nn, Tensor

from text_recognizer.network.transformer.embedding.l2_norm import l2_norm


class TokenEmbedding(nn.Module):
    def __init__(self, num_tokens: int, dim: int, use_l2: bool = True) -> None:
        super().__init__()
        self.use_l2 = use_l2
        self.to_embedding = nn.Embedding(num_tokens, dim)
        if self.use_l2:
            nn.init.normal_(self.to_embedding.weight, std=1e-5)
        else:
            nn.init.kaiming_normal_(self.to_embedding.weight)

    def forward(self, x: Tensor) -> Tensor:
        embedding = self.to_embedding(x)
        return l2_norm(embedding) if self.use_l2 else embedding
```

diff --git a/text_recognizer/network/transformer/encoder.py b/text_recognizer/network/transformer/encoder.py (new file mode 100644, index 0000000..ea4b0b3)

```python
"""Transformer encoder module."""
from torch import Tensor, nn

from text_recognizer.network.transformer.attention import Attention
from text_recognizer.network.transformer.ff import FeedForward


class Encoder(nn.Module):
    def __init__(
        self,
        dim: int,
        inner_dim: int,
        heads: int,
        dim_head: int,
        depth: int,
        dropout_rate: float = 0.0,
    ) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.layers = nn.ModuleList(
            [
                nn.ModuleList(
                    [
                        Attention(
                            dim,
                            heads,
                            False,
                            dim_head,
                            dropout_rate,
                        ),
                        FeedForward(dim, inner_dim, dropout_rate),
                    ]
                )
                for _ in range(depth)
            ]
        )

    def forward(
        self,
        x: Tensor,
    ) -> Tensor:
        """Applies decoder block on input signals."""
        for self_attn, ff in self.layers:
            x = x + self_attn(x)
            x = x + ff(x)
        return self.norm(x)
```

diff --git a/text_recognizer/network/transformer/ff.py b/text_recognizer/network/transformer/ff.py (new file mode 100644, index 0000000..9181323)

```python
"""Feedforward layer in transformer."""
from torch import Tensor, nn


class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        inner_dim: int,
        dropout_rate: float = 0.0,
    ) -> None:
        super().__init__()
        self.ff = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, inner_dim),
            nn.GELU(),
            nn.Dropout(dropout_rate),
            nn.Linear(inner_dim, dim),
        )

    def forward(self, x: Tensor) -> Tensor:
        return self.ff(x)
```

diff --git a/text_recognizer/network/transformer/norm.py b/text_recognizer/network/transformer/norm.py (new file mode 100644, index 0000000..2737754)

```python
"""Normalization layers for transformers.

Copied from lucidrains:
    https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py

"""
import torch
from torch import Tensor, nn
import torch.nn.functional as F


class RMSNorm(nn.Module):
    """Root mean square layer normalization."""

    def __init__(self, heads: int, dim: int) -> None:
        super().__init__()
        self.scale = dim**-0.5
        self.gamma = nn.Parameter(torch.ones(heads, 1, dim))

    def forward(self, x: Tensor) -> Tensor:
        """Applies normalization."""
        return F.normalize(x, dim=-1) * self.scale * self.gamma
```
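For context, below is a minimal sketch of how the new modules might be wired together; the hyperparameters, tensor shapes, and random context tensor are illustrative assumptions and not part of the commit. The sketch keeps the default `use_flash=True`, since the einsum fallback in `Attend.atten` reads `self.scale` and `self.dropout`, which `Attend.__init__` does not define.

```python
# Hypothetical usage sketch -- dimensions and data are made up for illustration.
import torch

from text_recognizer.network.transformer.decoder import Decoder
from text_recognizer.network.transformer.embedding.absolute import AbsolutePositionalEmbedding
from text_recognizer.network.transformer.embedding.token import TokenEmbedding

dim, heads, dim_head, inner_dim, depth = 256, 8, 32, 1024, 4  # assumed hyperparameters
num_tokens, max_length = 58, 89                               # assumed vocabulary and sequence limits

token_emb = TokenEmbedding(num_tokens=num_tokens, dim=dim, use_l2=True)
pos_emb = AbsolutePositionalEmbedding(dim=dim, max_length=max_length)
decoder = Decoder(dim=dim, inner_dim=inner_dim, heads=heads, dim_head=dim_head, depth=depth)

tokens = torch.randint(0, num_tokens, (2, 16))   # (batch, target sequence)
context = torch.randn(2, 144, dim)               # e.g. flattened image features from an encoder
mask = torch.ones(2, 16, dtype=torch.bool)       # padding mask (ignored by the flash path)

x = token_emb(tokens) + pos_emb(tokens)          # token embedding plus absolute positions
out = decoder(x, context=context, mask=mask)     # (2, 16, dim), ready for a logits projection
```

With `use_flash=True`, `Attend` dispatches to `torch.nn.functional.scaled_dot_product_attention` under a `torch.backends.cuda.sdp_kernel` context, enabling the flash backend only when the device reports compute capability 8.0 and otherwise falling back to the math and memory-efficient backends.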