path: root/text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py
author    Gustaf Rydholm <gustaf.rydholm@gmail.com>    2021-05-09 22:46:09 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>    2021-05-09 22:46:09 +0200
commit    c9c60678673e19ad3367339eb8e7a093e5a98474 (patch)
tree      b787a7fbb535c2ee44f935720d75034cc24ffd30 /text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py
parent    a2a3133ed5da283888efbdb9924d0e3733c274c8 (diff)
Reformatting of positional encodings and ViT working
Diffstat (limited to 'text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py')
-rw-r--r--  text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py | 21
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py b/text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py
new file mode 100644
index 0000000..9466f6e
--- /dev/null
+++ b/text_recognizer/networks/transformer/positional_encodings/absolute_embedding.py
@@ -0,0 +1,21 @@
+"""Absolute positional embedding."""
+import torch
+from torch import nn, Tensor
+
+
+class AbsolutePositionalEmbedding(nn.Module):
+    """Learnable absolute positional embedding for sequences up to max_seq_len."""
+
+    def __init__(self, dim: int, max_seq_len: int) -> None:
+        super().__init__()
+        self.emb = nn.Embedding(max_seq_len, dim)
+        self._weight_init()
+
+    def _weight_init(self) -> None:
+        # Initialize the embedding weights with a small standard deviation.
+        nn.init.normal_(self.emb.weight, std=0.02)
+
+    def forward(self, x: Tensor) -> Tensor:
+        # One embedding per position; the leading None axis broadcasts over the batch.
+        n = torch.arange(x.shape[1], device=x.device)
+        return self.emb(n)[None, :, :]
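Usage sketch (not part of the commit): a minimal, hedged example of how a module like this is typically applied, adding the positional term to token embeddings before the transformer blocks. The sizes and the tok_emb layer below are illustrative assumptions, and AbsolutePositionalEmbedding is assumed to be imported from the file above.

import torch
from torch import nn

# Illustrative sizes; any dim/max_seq_len at least as large as the input sequence works.
dim, max_seq_len, vocab_size = 256, 128, 1000
tok_emb = nn.Embedding(vocab_size, dim)                  # hypothetical token embedding
pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)

tokens = torch.randint(0, vocab_size, (4, 100))          # (batch, seq_len)
x = tok_emb(tokens)                                      # (4, 100, 256)
x = x + pos_emb(x)                                       # (1, 100, 256) broadcasts over the batch
print(x.shape)                                           # torch.Size([4, 100, 256])

Because forward returns a (1, seq_len, dim) tensor, the same positional embeddings are shared across every example in the batch via broadcasting.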