path: root/text_recognizer/networks/transformer/axial_attention/self_attention.py
author    Gustaf Rydholm <gustaf.rydholm@gmail.com>    2022-09-13 18:46:09 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>    2022-09-13 18:46:09 +0200
commit    143d37636c4533a74c558ca5afb8a579af38de97 (patch)
tree      b6740ab7bb954ceea4a2e5e19b7f8553b0039e3c /text_recognizer/networks/transformer/axial_attention/self_attention.py
parent    fd9b1570c568d9ce8f1ac7258f05f9977a5cc9c8 (diff)
Remove axial encoder
Diffstat (limited to 'text_recognizer/networks/transformer/axial_attention/self_attention.py')
-rw-r--r-- text_recognizer/networks/transformer/axial_attention/self_attention.py | 40
1 file changed, 0 insertions, 40 deletions
diff --git a/text_recognizer/networks/transformer/axial_attention/self_attention.py b/text_recognizer/networks/transformer/axial_attention/self_attention.py
deleted file mode 100644
index b5e4142..0000000
--- a/text_recognizer/networks/transformer/axial_attention/self_attention.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""Axial self attention module."""
-
-import torch
-from torch import nn
-from torch import Tensor
-
-
-class SelfAttention(nn.Module):
- """Axial self attention module."""
-
- def __init__(
- self,
- dim: int,
- dim_head: int,
- heads: int,
- ) -> None:
- super().__init__()
- self.dim_hidden = heads * dim_head
- self.heads = heads
- self.dim_head = dim_head
- self.to_q = nn.Linear(dim, self.dim_hidden, bias=False)
- self.to_kv = nn.Linear(dim, 2 * self.dim_hidden, bias=False)
- self.to_out = nn.Linear(self.dim_hidden, dim)
-
- def forward(self, x: Tensor) -> Tensor:
- """Applies self attention."""
- q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=-1))
- b, _, d, h, e = *q.shape, self.heads, self.dim_head
-
- merge_heads = (
- lambda x: x.reshape(b, -1, h, e).transpose(1, 2).reshape(b * h, -1, e)
- )
- q, k, v = map(merge_heads, (q, k, v))
-
- energy = torch.einsum("bie,bje->bij", q, k) * (e ** -0.5)
- energy = energy.softmax(dim=-1)
- attn = torch.einsum("bij,bje->bie", energy, v)
-
- out = attn.reshape(b, h, -1, e).transpose(1, 2).reshape(b, -1, d)
- return self.to_out(out)
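
For reference, a minimal sketch of how the deleted SelfAttention module could be exercised on its own, assuming a (batch, sequence, dim) input as implied by its forward pass; the concrete sizes and variable names below are illustrative assumptions, not taken from the repository:

    # Illustrative usage sketch (not part of the repository).
    # dim=128 with heads=4 and dim_head=32 gives dim_hidden = heads * dim_head = 128.
    import torch

    attn = SelfAttention(dim=128, dim_head=32, heads=4)
    x = torch.randn(2, 64, 128)        # (batch, sequence, dim) dummy input
    out = attn(x)
    assert out.shape == (2, 64, 128)   # to_out projects dim_hidden back to dim

The module splits the hidden dimension into heads, attends over the sequence axis with a scaled dot product (the e ** -0.5 factor), and projects the merged heads back to the input dimension, so the output shape matches the input shape.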