From 49ca6ade1a19f7f9c702171537fe4be0dfcda66d Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Fri, 25 Aug 2023 23:19:14 +0200
Subject: Rename and add flash atten

---
 text_recognizer/networks/transformer/__init__.py  |   6 --
 text_recognizer/networks/transformer/attention.py | 109 ---------------------
 text_recognizer/networks/transformer/decoder.py   |  41 --------
 .../networks/transformer/decoder_block.py         |  44 ---------
 .../networks/transformer/embeddings/__init__.py   |   1 -
 .../networks/transformer/embeddings/axial.py      | 104 --------------------
 .../networks/transformer/embeddings/rotary.py     |  67 -------------
 text_recognizer/networks/transformer/ff.py        |  45 ---------
 text_recognizer/networks/transformer/norm.py      |  51 ----------
 9 files changed, 468 deletions(-)
 delete mode 100644 text_recognizer/networks/transformer/__init__.py
 delete mode 100644 text_recognizer/networks/transformer/attention.py
 delete mode 100644 text_recognizer/networks/transformer/decoder.py
 delete mode 100644 text_recognizer/networks/transformer/decoder_block.py
 delete mode 100644 text_recognizer/networks/transformer/embeddings/__init__.py
 delete mode 100644 text_recognizer/networks/transformer/embeddings/axial.py
 delete mode 100644 text_recognizer/networks/transformer/embeddings/rotary.py
 delete mode 100644 text_recognizer/networks/transformer/ff.py
 delete mode 100644 text_recognizer/networks/transformer/norm.py

(limited to 'text_recognizer/networks/transformer')

diff --git a/text_recognizer/networks/transformer/__init__.py b/text_recognizer/networks/transformer/__init__.py
deleted file mode 100644
index 0d17deb..0000000
--- a/text_recognizer/networks/transformer/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""Transformer modules."""
-from text_recognizer.networks.transformer.attention import Attention
-from text_recognizer.networks.transformer.decoder import Decoder, DecoderBlock
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-from text_recognizer.networks.transformer.ff import FeedForward
-from text_recognizer.networks.transformer.norm import RMSNorm
diff --git a/text_recognizer/networks/transformer/attention.py b/text_recognizer/networks/transformer/attention.py
deleted file mode 100644
index 85f513e..0000000
--- a/text_recognizer/networks/transformer/attention.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""Implements the attention module for the transformer."""
-from typing import Optional
-
-import torch
-import torch.nn.functional as F
-from einops import rearrange
-from torch import Tensor, einsum, nn
-
-from text_recognizer.networks.transformer.embeddings.rotary import (
-    RotaryEmbedding,
-)
-
-
-class Attention(nn.Module):
-    """Standard attention."""
-
-    def __init__(
-        self,
-        dim: int,
-        num_heads: int,
-        causal: bool = False,
-        dim_head: int = 64,
-        dropout_rate: float = 0.0,
-    ) -> None:
-        super().__init__()
-        self.dim = dim
-        self.scale = self.dim**-0.5
-        self.num_heads = num_heads
-        self.dim_head = dim_head
-
-        self.causal = causal
-        self.dropout_rate = dropout_rate
-
-        # Single key/value head
-        k_dim = dim_head
-        v_dim = dim_head
-
-        out_dim = self.num_heads * self.dim_head
-
-        self.to_q = nn.Linear(self.dim, out_dim, bias=False)
-        self.to_k = nn.Linear(self.dim, k_dim, bias=False)
-        self.to_v = nn.Linear(self.dim, v_dim, bias=False)
-
-        self.dropout = nn.Dropout(p=self.dropout_rate)
-
-        # Feedforward
-        self.fc = nn.Linear(out_dim, self.dim)
-
-    def forward(
-        self,
-        x: Tensor,
-        context: Optional[Tensor] = None,
-        mask: Optional[Tensor] = None,
-        rotary_embedding: Optional[RotaryEmbedding] = None,
-    ) -> Tensor:
-        """Computes the attention."""
-        b, device = x.shape[0], x.device
-
-        q = self.to_q(x)
-        q = rearrange(q, "b n (h d) -> b h n d", h=self.num_heads)
-        k = self.to_k(context) if context is not None else self.to_k(x)
-        v = self.to_v(context) if context is not None else self.to_v(x)
-
-        if rotary_embedding is not None:
-            q, k, v = map(lambda t: rotary_embedding.rotate(t), (q, k, v))
-
-        energy = einsum("b h i d, b j d -> b h i j", q, k) * self.scale
-        mask_value = -torch.finfo(energy.dtype).max
-        energy = apply_input_mask(b, k, energy, mask, mask_value, device)
-        if self.causal:
-            energy = apply_causal_mask(energy, mask, mask_value, device)
-
-        attn = F.softmax(energy, dim=-1)
-        attn = self.dropout(attn)
-        out = einsum("b h i j, b j d -> b h i d", attn, v)
-        out = rearrange(out, "b h n d -> b n (h d)")
-        out = self.fc(out)
-        return out
-
-
-def apply_input_mask(
-    b: int,
-    k: Tensor,
-    energy: Tensor,
-    mask: Optional[Tensor],
-    mask_value: Tensor,
-    device: str,
-) -> Tensor:
-    """Applies an input mask."""
-    if mask is not None:
-        k_mask = torch.ones((b, k.shape[-2]), device=device).bool()
-        q_mask = rearrange(mask, "b i -> b () i ()")
-        k_mask = rearrange(k_mask, "b j -> b () () j")
-        input_mask = q_mask * k_mask
-
-        energy = energy.masked_fill_(~input_mask, mask_value)
-    return energy
-
-
-def apply_causal_mask(
-    energy: Tensor, mask: Tensor, mask_value: Tensor, device: str
-) -> Tensor:
-    """Applies a causal mask to the energy tensor."""
-    i, j = energy.shape[-2:]
-    r = torch.arange(i, device=device)
-    mask = rearrange(r, "i -> () () i ()") < rearrange(r, "j -> () () () j")
-    mask = F.pad(mask, (j - i, 0), value=False)
-    energy.masked_fill_(mask, mask_value)
-    return energy
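The commit subject promises flash attention, but the replacement file is outside this filtered view (the diff is limited to 'text_recognizer/networks/transformer'). As a minimal sketch only, assuming "flash atten" means PyTorch 2's fused kernel, the hand-rolled einsum/softmax/mask code above collapses to a single call; the function name and signature below are illustrative assumptions, not the repository's actual code:

# Sketch, not part of this patch: q, k, v are (b, h, n, d) tensors.
import torch.nn.functional as F
from torch import Tensor


def flash_attention(
    q: Tensor, k: Tensor, v: Tensor, causal: bool = False, dropout_p: float = 0.0
) -> Tensor:
    """Fused scaled dot-product attention; dispatches to flash attention when available."""
    return F.scaled_dot_product_attention(
        q, k, v, is_causal=causal, dropout_p=dropout_p
    )

Deferring masking, scaling, and dropout to the fused kernel removes the explicit energy matrix and the two mask helpers entirely.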
diff --git a/text_recognizer/networks/transformer/decoder.py b/text_recognizer/networks/transformer/decoder.py
deleted file mode 100644
index 826bc13..0000000
--- a/text_recognizer/networks/transformer/decoder.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Transformer decoder module."""
-from copy import deepcopy
-from typing import Optional
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.decoder_block import DecoderBlock
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-
-
-class Decoder(nn.Module):
-    """Decoder Network."""
-
-    def __init__(
-        self,
-        depth: int,
-        dim: int,
-        block: DecoderBlock,
-        rotary_embedding: RotaryEmbedding,
-    ) -> None:
-        super().__init__()
-        self.depth = depth
-        self.rotary_embedding = rotary_embedding
-        self.blocks = nn.ModuleList([deepcopy(block) for _ in range(self.depth)])
-        self.ln = nn.LayerNorm(dim)
-
-    def forward(
-        self,
-        x: Tensor,
-        context: Optional[Tensor] = None,
-        mask: Optional[Tensor] = None,
-    ) -> Tensor:
-        """Applies attention blocks."""
-        for block in self.blocks:
-            x = block(
-                x=x,
-                context=context,
-                mask=mask,
-                rotary_embedding=self.rotary_embedding,
-            )
-        return self.ln(x)
diff --git a/text_recognizer/networks/transformer/decoder_block.py b/text_recognizer/networks/transformer/decoder_block.py
deleted file mode 100644
index b8eb5c4..0000000
--- a/text_recognizer/networks/transformer/decoder_block.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""Transformer decoder module."""
-from copy import deepcopy
-from typing import Optional, Type
-
-from torch import Tensor, nn
-
-from text_recognizer.networks.transformer.attention import Attention
-from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
-from text_recognizer.networks.transformer.ff import FeedForward
-
-
-class DecoderBlock(nn.Module):
-    """Residual decoder block."""
-
-    def __init__(
-        self,
-        self_attn: Attention,
-        norm: Type[nn.Module],
-        ff: FeedForward,
-        cross_attn: Optional[Attention] = None,
-    ) -> None:
-        super().__init__()
-        self.ln_attn = norm
-        self.attn = self_attn
-        self.ln_cross_attn = deepcopy(norm)
-        self.cross_attn = cross_attn
-        self.ln_ff = deepcopy(norm)
-        self.ff = ff
-
-    def forward(
-        self,
-        x: Tensor,
-        rotary_embedding: RotaryEmbedding,
-        context: Optional[Tensor] = None,
-        mask: Optional[Tensor] = None,
-    ) -> Tensor:
-        """Applies decoder block on input signals."""
-        x = x + self.attn(self.ln_attn(x), mask=mask, rotary_embedding=rotary_embedding)
-        x = x + self.cross_attn(
-            x=self.ln_cross_attn(x),
-            context=context,
-        )
-        x = x + self.ff(self.ln_ff(x))
-        return x
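For context, the deleted decoder stack was wired roughly as below. A single block instance is deep-copied `depth` times by Decoder; the dimensions and tensors are invented example values, and the imports are only valid against the pre-patch tree:

import torch
from torch import nn

from text_recognizer.networks.transformer.attention import Attention
from text_recognizer.networks.transformer.decoder import Decoder
from text_recognizer.networks.transformer.decoder_block import DecoderBlock
from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding
from text_recognizer.networks.transformer.ff import FeedForward

block = DecoderBlock(
    self_attn=Attention(dim=256, num_heads=8, causal=True, dim_head=64),
    norm=nn.LayerNorm(256),
    ff=FeedForward(dim=256),
    cross_attn=Attention(dim=256, num_heads=8, dim_head=64),
)
decoder = Decoder(depth=4, dim=256, block=block, rotary_embedding=RotaryEmbedding(64))

tokens = torch.randn(1, 32, 256)          # (b, n, dim) decoder input
image_features = torch.randn(1, 64, 256)  # (b, m, dim) encoder output
out = decoder(x=tokens, context=image_features)  # (1, 32, 256)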
diff --git a/text_recognizer/networks/transformer/embeddings/__init__.py b/text_recognizer/networks/transformer/embeddings/__init__.py
deleted file mode 100644
index bb3f904..0000000
--- a/text_recognizer/networks/transformer/embeddings/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Positional encodings for transformers."""
diff --git a/text_recognizer/networks/transformer/embeddings/axial.py b/text_recognizer/networks/transformer/embeddings/axial.py
deleted file mode 100644
index 9b872a9..0000000
--- a/text_recognizer/networks/transformer/embeddings/axial.py
+++ /dev/null
@@ -1,104 +0,0 @@
-"""Axial attention for multi-dimensional data.
-
-Stolen from:
-    https://github.com/lucidrains/axial-attention/blob/
-    eff2c10c2e76c735a70a6b995b571213adffbbb7/axial_attention/axial_attention.py#L100
-"""
-from functools import reduce
-from operator import mul
-from typing import Optional, Sequence
-
-import torch
-from torch import Tensor, nn
-
-
-class AxialPositionalEmbedding(nn.Module):
-    def __init__(
-        self,
-        dim: int,
-        axial_shape: Sequence[int],
-        axial_dims: Optional[Sequence[int]] = None,
-    ) -> None:
-        super().__init__()
-
-        self.dim = dim
-        self.shape = axial_shape
-        self.max_seq_len = reduce(mul, axial_shape, 1)
-
-        self.summed = axial_dims is None
-        axial_dims = ((dim,) * len(axial_shape)) if self.summed else axial_dims
-
-        assert len(self.shape) == len(
-            axial_dims
-        ), "number of axial dimensions must equal the number of dimensions in the shape"
-        assert (
-            self.summed or not self.summed and sum(axial_dims) == dim
-        ), f"axial dimensions must sum up to the target dimension {dim}"
-
-        self.weights = ParameterList(self, "weights", len(axial_shape))
-
-        for ind, (shape, axial_dim) in enumerate(zip(self.shape, axial_dims)):
-            ax_shape = [1] * len(self.shape)
-            ax_shape[ind] = shape
-            ax_shape = (1, *ax_shape, axial_dim)
-            ax_emb = nn.Parameter(torch.zeros(ax_shape).normal_(0, 1))
-            self.weights.append(ax_emb)
-
-    def forward(self, x: Tensor) -> Tensor:
-        """Returns axial positional embedding."""
-        b, t, _ = x.shape
-        assert (
-            t <= self.max_seq_len
-        ), f"Sequence length ({t}) must be less than the maximum sequence length allowed ({self.max_seq_len})"
-        embs = []
-
-        for ax_emb in self.weights.to_list():
-            axial_dim = ax_emb.shape[-1]
-            expand_shape = (b, *self.shape, axial_dim)
-            emb = ax_emb.expand(expand_shape).reshape(b, self.max_seq_len, axial_dim)
-            embs.append(emb)
-
-        pos_emb = sum(embs) if self.summed else torch.cat(embs, dim=-1)
-        return pos_emb[:, :t].to(x)
-
-
-# a mock parameter list object until below issue is resolved
-# https://github.com/pytorch/pytorch/issues/36035
-class ParameterList(object):
-    def __init__(self, kls, prefix, length):
-        self.ind = 0
-        self.kls = kls
-        self.prefix = prefix
-        self.length = length
-
-    def _keyname(self, prefix, ind):
-        return f"{prefix}_{ind}"
-
-    def append(self, x):
-        setattr(self.kls, self._keyname(self.prefix, self.ind), x)
-        self.ind += 1
-
-    def to_list(self):
-        return [
-            getattr(self.kls, self._keyname(self.prefix, i)) for i in range(self.length)
-        ]
-
-
-class AxialPositionalEmbeddingImage(nn.Module):
-    def __init__(
-        self,
-        dim: int,
-        axial_shape: Sequence[int],
-        axial_dims: Optional[Sequence[int]] = None,
-    ) -> None:
-        super().__init__()
-        axial_dims = (dim // 2, dim // 2) if axial_dims is None else axial_dims
-        assert len(axial_shape) == 2, "Axial shape must have 2 dimensions for images"
-        self.dim = dim
-        self.pos_emb = AxialPositionalEmbedding(dim, axial_shape, axial_dims)
-
-    def forward(self, img):
-        b, c, h, w = img.shape
-        img = img.permute(0, 2, 3, 1).reshape(b, h * w, c)
-        pos_emb = self.pos_emb(img)
-        return pos_emb.reshape(b, h, w, self.dim).permute(0, 3, 1, 2)
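A short usage sketch for the removed image embedding: it flattens the (h, w) grid, sums or concatenates one learned embedding per axis, and returns a tensor shaped like the input feature map. Pre-patch import path; the sizes are invented:

import torch

from text_recognizer.networks.transformer.embeddings.axial import (
    AxialPositionalEmbeddingImage,
)

emb = AxialPositionalEmbeddingImage(dim=128, axial_shape=(8, 32))
feature_map = torch.randn(1, 128, 8, 32)  # (b, c, h, w) CNN output
pos = emb(feature_map)                    # same shape: (1, 128, 8, 32)
out = feature_map + pos                   # add positions channel-wise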
diff --git a/text_recognizer/networks/transformer/embeddings/rotary.py b/text_recognizer/networks/transformer/embeddings/rotary.py
deleted file mode 100644
index ca0a260..0000000
--- a/text_recognizer/networks/transformer/embeddings/rotary.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Rotary embedding.
-
-Stolen from lucidrains:
-    https://github.com/lucidrains/rotary-embedding-torch
-
-Explanation of rotary embeddings:
-    https://blog.eleuther.ai/rotary-embeddings/
-"""
-from inspect import isfunction
-
-from einops import rearrange, repeat
-import torch
-from torch import Tensor, nn
-
-
-class RotaryEmbedding(nn.Module):
-    """Rotary positional embedding."""
-
-    def __init__(self, dim: int) -> None:
-        super().__init__()
-        inv_freqs = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
-        self.register_buffer("inv_freqs", inv_freqs)
-        self.cache = {}
-
-    def rotate(self, t: Tensor, dim: int = -2) -> Tensor:
-        """Rotate vector."""
-        device, n = t.device, t.shape[dim]
-        freqs = self.forward(lambda: torch.arange(n, device=device), cache_key=n)
-        return apply_rotary_emb(t, freqs)
-
-    def forward(self, t: Tensor, cache_key: int) -> Tensor:
-        """Encodes tensor x with rotary embeddings."""
-        if cache_key in self.cache:
-            return self.cache[cache_key]
-
-        if isfunction(t):
-            t = t()
-
-        freqs = self.inv_freqs
-        freqs = torch.einsum("..., f -> ... f", t.type(freqs.dtype), freqs)
-        freqs = repeat(freqs, "... n -> ... (n r)", r=2)
-        self.cache[cache_key] = freqs
-        return freqs
-
-
-def rotate_half(x: Tensor) -> Tensor:
-    x = rearrange(x, "... (d r) -> ... d r", r=2)
-    x1, x2 = x.unbind(dim=-1)
-    x = torch.stack((-x2, x1), dim=-1)
-    return rearrange(x, "... d r -> ... (d r)")
-
-
-def apply_rotary_emb(t: Tensor, freqs: Tensor, start_index: int = 0) -> Tensor:
-    freqs = freqs.to(t)
-    rot_dim = freqs.shape[-1]
-    end_index = start_index + rot_dim
-    assert rot_dim <= t.shape[-1], (
-        f"feature dimension {t.shape[-1]} is not of sufficient size to rotate"
-        f" in all the positions {rot_dim}"
-    )
-    t_left, t, t_right = (
-        t[..., :start_index],
-        t[..., start_index:end_index],
-        t[..., end_index:],
-    )
-    t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
-    return torch.cat((t_left, t, t_right), dim=-1)
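A quick property check for the removed rotary embedding: it rotates each two-dimensional feature pair by a position-dependent angle, so vector norms are unchanged while dot products become relative-position aware. Pre-patch import path; shapes are example values:

import torch

from text_recognizer.networks.transformer.embeddings.rotary import RotaryEmbedding

rotary = RotaryEmbedding(dim=64)
q = torch.randn(1, 8, 10, 64)  # (b, h, n, dim_head)
q_rot = rotary.rotate(q)       # rotates along the sequence axis (dim=-2)
assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)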
diff --git a/text_recognizer/networks/transformer/ff.py b/text_recognizer/networks/transformer/ff.py
deleted file mode 100644
index 3ccf5b5..0000000
--- a/text_recognizer/networks/transformer/ff.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Feedforward layer in transformer.
-
-Stolen from lucidrains:
-    https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
-"""
-from typing import Optional
-
-import torch.nn.functional as F
-from torch import Tensor, nn
-
-
-class GEGLU(nn.Module):
-    def __init__(self, dim_in: int, dim_out: int) -> None:
-        super().__init__()
-        self.fc = nn.Linear(dim_in, dim_out * 2)
-
-    def forward(self, x: Tensor) -> Tensor:
-        x, gate = self.fc(x).chunk(2, dim=-1)
-        return x * F.gelu(gate)
-
-
-class FeedForward(nn.Module):
-    def __init__(
-        self,
-        dim: int,
-        dim_out: Optional[int] = None,
-        expansion_factor: int = 4,
-        glu: bool = True,
-        dropout_rate: float = 0.0,
-    ) -> None:
-        super().__init__()
-        inner_dim = dim * expansion_factor
-        dim_out = dim_out if dim_out is not None else dim
-        in_projection = (
-            nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU())
-            if not glu
-            else GEGLU(dim, inner_dim)
-        )
-
-        self.mlp = nn.Sequential(
-            in_projection, nn.Dropout(dropout_rate), nn.Linear(inner_dim, dim_out)
-        )
-
-    def forward(self, x: Tensor) -> Tensor:
-        return self.mlp(x)
diff --git a/text_recognizer/networks/transformer/norm.py b/text_recognizer/networks/transformer/norm.py
deleted file mode 100644
index 1431327..0000000
--- a/text_recognizer/networks/transformer/norm.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""Normalization layers for transformers.
-
-Copied from lucidrains:
-    https://github.com/lucidrains/x-transformers/blob/main/x_transformers/x_transformers.py
-"""
-from typing import Optional, Type
-
-import torch
-from torch import Tensor, nn
-
-
-class RMSNorm(nn.Module):
-    """Root mean square layer normalization."""
-
-    def __init__(self, dim: int, eps: float = 1e-8) -> None:
-        super().__init__()
-        self.scale = dim**-0.5
-        self.eps = eps
-        self.g = nn.Parameter(torch.ones(dim))
-
-    def forward(self, x: Tensor) -> Tensor:
-        """Applies normalization."""
-        norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
-        return x / norm.clamp(min=self.eps) * self.g
-
-
-class PreNorm(nn.Module):
-    """Applies layer normalization then function."""
-
-    def __init__(
-        self,
-        normalized_shape: int,
-        fn: Type[nn.Module],
-        context_dim: Optional[int] = None,
-    ) -> None:
-        super().__init__()
-        self.norm = nn.LayerNorm(normalized_shape)
-        self.fn = fn
-        self.norm_context = (
-            nn.LayerNorm(context_dim) if context_dim is not None else None
-        )
-
-    def forward(self, x: Tensor, **kwargs) -> Tensor:
-        """Applies pre norm."""
-        x = self.norm(x)
-        if self.norm_context is not None:
-            context = kwargs["context"]
-            normed_context = self.norm_context(context)
-            kwargs.update(context=normed_context)
-        return self.fn(x, **kwargs)
--
cgit v1.2.3-70-g09d2
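For reference, a shape walk-through of the removed GEGLU feedforward. Pre-patch import path; the sizes are arbitrary example values:

import torch

from text_recognizer.networks.transformer.ff import FeedForward

ff = FeedForward(dim=256, expansion_factor=4, glu=True, dropout_rate=0.1)
x = torch.randn(2, 100, 256)  # (batch, seq_len, dim)
y = ff(x)  # Linear(256 -> 2048), split + GELU gate -> 1024, Linear(1024 -> 256)
assert y.shape == x.shape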