From b2ad1ec306d56bbc319b7b41fbbcff04307425d5 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Mon, 11 Sep 2023 22:11:39 +0200
Subject: Remove absolute embedding

---
 .../network/transformer/embedding/absolute.py | 29 ----------------------
 1 file changed, 29 deletions(-)
 delete mode 100644 text_recognizer/network/transformer/embedding/absolute.py

diff --git a/text_recognizer/network/transformer/embedding/absolute.py b/text_recognizer/network/transformer/embedding/absolute.py
deleted file mode 100644
index db34157..0000000
--- a/text_recognizer/network/transformer/embedding/absolute.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typing import Optional
-
-import torch
-from torch import nn, Tensor
-
-from .l2_norm import l2_norm
-
-
-class AbsolutePositionalEmbedding(nn.Module):
-    def __init__(self, dim: int, max_length: int, use_l2: bool = False) -> None:
-        super().__init__()
-        self.scale = dim**-0.5 if not use_l2 else 1.0
-        self.max_length = max_length
-        self.use_l2 = use_l2
-        self.to_embedding = nn.Embedding(max_length, dim)
-        if self.use_l2:
-            nn.init.normal_(self.to_embedding.weight, std=1e-5)
-
-    def forward(self, x: Tensor, pos: Optional[Tensor] = None) -> Tensor:
-        n, device = x.shape[1], x.device
-        assert (
-            n <= self.max_length
-        ), f"Sequence length {n} is greater than the maximum positional embedding {self.max_length}"
-
-        if pos is None:
-            pos = torch.arange(n, device=device)
-
-        embedding = self.to_embedding(pos) * self.scale
-        return l2_norm(embedding) if self.use_l2 else embedding
--
cgit v1.2.3-70-g09d2
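
For reference (not part of the patch): a minimal, self-contained sketch of the removed AbsolutePositionalEmbedding, runnable as-is. The relative import of l2_norm is replaced here with torch.nn.functional.normalize on the assumption that the helper performs an L2 normalization over the feature dimension; the __main__ usage, tensor shapes, and hyperparameters are illustrative only.

# Illustrative sketch of the deleted module, assuming l2_norm == L2-normalize over dim=-1.
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn, Tensor


class AbsolutePositionalEmbedding(nn.Module):
    def __init__(self, dim: int, max_length: int, use_l2: bool = False) -> None:
        super().__init__()
        # With L2 normalization the learned weights start tiny and are not rescaled;
        # otherwise embeddings are scaled by 1/sqrt(dim), as in the deleted file.
        self.scale = dim**-0.5 if not use_l2 else 1.0
        self.max_length = max_length
        self.use_l2 = use_l2
        self.to_embedding = nn.Embedding(max_length, dim)
        if self.use_l2:
            nn.init.normal_(self.to_embedding.weight, std=1e-5)

    def forward(self, x: Tensor, pos: Optional[Tensor] = None) -> Tensor:
        n, device = x.shape[1], x.device
        assert n <= self.max_length, (
            f"Sequence length {n} is greater than the maximum positional embedding {self.max_length}"
        )
        if pos is None:
            pos = torch.arange(n, device=device)
        embedding = self.to_embedding(pos) * self.scale
        # Assumption: l2_norm normalizes along the feature dimension.
        return F.normalize(embedding, dim=-1) if self.use_l2 else embedding


if __name__ == "__main__":
    emb = AbsolutePositionalEmbedding(dim=64, max_length=128, use_l2=True)
    x = torch.randn(2, 32, 64)  # (batch, sequence, dim), values unused beyond shape/device
    print(emb(x).shape)         # torch.Size([32, 64]); broadcasts over the batch when added to x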