Diffstat (limited to 'text_recognizer/networks/transformer/embeddings')
-rw-r--r--   text_recognizer/networks/transformer/embeddings/absolute.py   34
-rw-r--r--   text_recognizer/networks/transformer/embeddings/fourier.py    36
2 files changed, 0 insertions, 70 deletions
diff --git a/text_recognizer/networks/transformer/embeddings/absolute.py b/text_recognizer/networks/transformer/embeddings/absolute.py
deleted file mode 100644
index 9274b55..0000000
--- a/text_recognizer/networks/transformer/embeddings/absolute.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""Absolute positional embedding."""
-
-import torch
-import torch.nn.functional as F
-from einops import rearrange
-from torch import nn
-
-
-def l2norm(t, groups=1):
-    t = rearrange(t, "... (g d) -> ... g d", g=groups)
-    t = F.normalize(t, p=2, dim=-1)
-    return rearrange(t, "... g d -> ... (g d)")
-
-
-class AbsolutePositionalEmbedding(nn.Module):
-    def __init__(self, dim, max_seq_len, l2norm_embed=False):
-        super().__init__()
-        self.scale = dim**-0.5 if not l2norm_embed else 1.0
-        self.max_seq_len = max_seq_len
-        self.l2norm_embed = l2norm_embed
-        self.emb = nn.Embedding(max_seq_len, dim)
-
-    def forward(self, x, pos=None):
-        seq_len = x.shape[1]
-        assert (
-            seq_len <= self.max_seq_len
-        ), f"you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}"
-
-        if pos is None:
-            pos = torch.arange(seq_len, device=x.device)
-
-        pos_emb = self.emb(pos)
-        pos_emb = pos_emb * self.scale
-        return l2norm(pos_emb) if self.l2norm_embed else pos_emb
diff --git a/text_recognizer/networks/transformer/embeddings/fourier.py b/text_recognizer/networks/transformer/embeddings/fourier.py
deleted file mode 100644
index 28da7a1..0000000
--- a/text_recognizer/networks/transformer/embeddings/fourier.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Fourier positional embedding."""
-import numpy as np
-import torch
-from torch import Tensor, nn
-
-
-class PositionalEncoding(nn.Module):
-    """Encodes a sense of distance or time for transformer networks."""
-
-    def __init__(self, dim: int, dropout_rate: float, max_len: int = 1000) -> None:
-        super().__init__()
-        self.dropout = nn.Dropout(p=dropout_rate)
-        pe = self.make_pe(dim, max_len)
-        self.register_buffer("pe", pe)
-
-    @staticmethod
-    def make_pe(hidden_dim: int, max_len: int) -> Tensor:
-        """Returns positional encoding."""
-        pe = torch.zeros(max_len, hidden_dim)
-        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
-        div_term = torch.exp(
-            torch.arange(0, hidden_dim, 2).float() * (-np.log(10000.0) / hidden_dim)
-        )
-
-        pe[:, 0::2] = torch.sin(position * div_term)
-        pe[:, 1::2] = torch.cos(position * div_term)
-        pe = pe.unsqueeze(1)
-        return pe
-
-    def forward(self, x: Tensor) -> Tensor:
-        """Encodes the tensor with a postional embedding."""
-        # [T, B, D]
-        if x.shape[2] != self.pe.shape[2]:
-            raise ValueError("x shape does not match pe in the 3rd dim.")
-        x = x + self.pe[: x.shape[0]]
-        return self.dropout(x)
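For reference, a minimal sketch (not part of the diff) of how the two deleted embedding modules could be exercised as they existed before this commit. The imports point at the removed files, and `dim`, `max_seq_len`, `batch_size`, and the tensor shapes are illustrative assumptions, not values taken from the repository.

import torch

# These modules are the ones removed by this commit; the imports only work on
# the pre-deletion tree.
from text_recognizer.networks.transformer.embeddings.absolute import (
    AbsolutePositionalEmbedding,
)
from text_recognizer.networks.transformer.embeddings.fourier import PositionalEncoding

dim, max_seq_len, batch_size = 64, 128, 2  # assumed values for illustration

# AbsolutePositionalEmbedding takes batch-first input [B, T, D] and returns a
# learned per-position embedding, scaled by dim ** -0.5 when l2norm_embed=False.
tokens = torch.randn(batch_size, 32, dim)
abs_emb = AbsolutePositionalEmbedding(dim=dim, max_seq_len=max_seq_len)
pos = abs_emb(tokens)       # shape [32, 64]; the caller adds it to the tokens
tokens = tokens + pos       # broadcasts over the batch dimension

# PositionalEncoding expects sequence-first input [T, B, D] and adds fixed
# sinusoidal encodings before applying dropout.
fourier = PositionalEncoding(dim=dim, dropout_rate=0.1, max_len=max_seq_len)
seq = torch.randn(32, batch_size, dim)
encoded = fourier(seq)      # shape [32, 2, 64]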