From 7be90f5f101d7ace7ff07180950dac4c11086ec1 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Tue, 13 Sep 2022 18:12:13 +0200
Subject: Add axial encoder

---
 .../transformer/axial_attention/self_attention.py | 40 ++++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 text_recognizer/networks/transformer/axial_attention/self_attention.py

diff --git a/text_recognizer/networks/transformer/axial_attention/self_attention.py b/text_recognizer/networks/transformer/axial_attention/self_attention.py
new file mode 100644
index 0000000..b5e4142
--- /dev/null
+++ b/text_recognizer/networks/transformer/axial_attention/self_attention.py
@@ -0,0 +1,40 @@
+"""Axial self attention module."""
+
+import torch
+from torch import nn
+from torch import Tensor
+
+
+class SelfAttention(nn.Module):
+    """Axial self attention module."""
+
+    def __init__(
+        self,
+        dim: int,
+        dim_head: int,
+        heads: int,
+    ) -> None:
+        super().__init__()
+        self.dim_hidden = heads * dim_head
+        self.heads = heads
+        self.dim_head = dim_head
+        self.to_q = nn.Linear(dim, self.dim_hidden, bias=False)
+        self.to_kv = nn.Linear(dim, 2 * self.dim_hidden, bias=False)
+        self.to_out = nn.Linear(self.dim_hidden, dim)
+
+    def forward(self, x: Tensor) -> Tensor:
+        """Applies self attention."""
+        q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim=-1))
+        b, _, d, h, e = *q.shape, self.heads, self.dim_head
+
+        merge_heads = (
+            lambda x: x.reshape(b, -1, h, e).transpose(1, 2).reshape(b * h, -1, e)
+        )
+        q, k, v = map(merge_heads, (q, k, v))
+
+        energy = torch.einsum("bie,bje->bij", q, k) * (e ** -0.5)
+        energy = energy.softmax(dim=-1)
+        attn = torch.einsum("bij,bje->bie", energy, v)
+
+        out = attn.reshape(b, h, -1, e).transpose(1, 2).reshape(b, -1, d)
+        return self.to_out(out)
--
cgit v1.2.3-70-g09d2
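
A quick usage sketch (not part of the commit above): the module attends over one
axis at a time and expects a (batch, sequence, dim) tensor, so a 2-D feature map
has to be folded axis-by-axis before calling it. The import path matches the file
added by this patch, but the shapes and the axial folding shown here are assumptions.

    # Hypothetical usage sketch; the axial folding below is illustrative
    # and not part of this commit.
    import torch

    from text_recognizer.networks.transformer.axial_attention.self_attention import (
        SelfAttention,
    )

    attn = SelfAttention(dim=128, dim_head=32, heads=4)

    # Attend along the width axis of a (batch, height, width, dim) feature
    # map by folding the height axis into the batch dimension.
    x = torch.randn(2, 16, 16, 128)  # (b, h, w, dim)
    b, h, w, d = x.shape
    rows = x.reshape(b * h, w, d)    # each row becomes an independent sequence
    out = attn(rows).reshape(b, h, w, d)
    print(out.shape)  # torch.Size([2, 16, 16, 128])

Attending along the height axis works the same way after a transpose; running the
module once per axis and combining the results gives the usual axial attention pattern.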