author    | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2022-09-13 18:46:09 +0200
committer | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2022-09-13 18:46:09 +0200
commit    | 143d37636c4533a74c558ca5afb8a579af38de97 (patch)
tree      | b6740ab7bb954ceea4a2e5e19b7f8553b0039e3c /text_recognizer/networks/transformer/axial_attention/encoder.py
parent    | fd9b1570c568d9ce8f1ac7258f05f9977a5cc9c8 (diff)
Remove axial encoder
Diffstat (limited to 'text_recognizer/networks/transformer/axial_attention/encoder.py')
-rw-r--r-- | text_recognizer/networks/transformer/axial_attention/encoder.py | 90
1 file changed, 0 insertions, 90 deletions
diff --git a/text_recognizer/networks/transformer/axial_attention/encoder.py b/text_recognizer/networks/transformer/axial_attention/encoder.py
deleted file mode 100644
index 1cadac1..0000000
--- a/text_recognizer/networks/transformer/axial_attention/encoder.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""Axial transformer encoder."""
-
-from typing import List, Optional, Type
-from text_recognizer.networks.transformer.embeddings.axial import (
-    AxialPositionalEmbeddingImage,
-)
-
-from torch import nn, Tensor
-
-from text_recognizer.networks.transformer.axial_attention.self_attention import (
-    SelfAttention,
-)
-from text_recognizer.networks.transformer.axial_attention.utils import (
-    calculate_permutations,
-    PermuteToForm,
-    Sequential,
-)
-from text_recognizer.networks.transformer.norm import PreNorm
-
-
-class AxialEncoder(nn.Module):
-    """Axial transfomer encoder."""
-
-    def __init__(
-        self,
-        shape: List[int],
-        dim: int,
-        depth: int,
-        heads: int,
-        dim_head: int,
-        dim_index: int,
-        axial_embedding: AxialPositionalEmbeddingImage,
-    ) -> None:
-        super().__init__()
-
-        self.shape = shape
-        self.dim = dim
-        self.depth = depth
-        self.heads = heads
-        self.dim_head = dim_head
-        self.dim_index = dim_index
-        self.axial_embedding = axial_embedding
-
-        self.fn = self._build()
-
-    def _build(self) -> Sequential:
-        permutations = calculate_permutations(2, self.dim_index)
-        get_ff = lambda: nn.Sequential(
-            nn.LayerNorm([self.dim, *self.shape]),
-            nn.Conv2d(
-                in_channels=self.dim,
-                out_channels=4 * self.dim,
-                kernel_size=3,
-                padding=1,
-            ),
-            nn.Mish(inplace=True),
-            nn.Conv2d(
-                in_channels=4 * self.dim,
-                out_channels=self.dim,
-                kernel_size=3,
-                padding=1,
-            ),
-        )
-
-        layers = nn.ModuleList([])
-        for _ in range(self.depth):
-            attns = nn.ModuleList(
-                [
-                    PermuteToForm(
-                        permutation=permutation,
-                        fn=PreNorm(
-                            self.dim,
-                            SelfAttention(
-                                dim=self.dim, heads=self.heads, dim_head=self.dim_head
-                            ),
-                        ),
-                    )
-                    for permutation in permutations
-                ]
-            )
-            convs = nn.ModuleList([get_ff(), get_ff()])
-            layers.append(attns)
-            layers.append(convs)
-
-        return Sequential(layers)
-
-    def forward(self, x: Tensor) -> Tensor:
-        """Applies fn to input."""
-        x += self.axial_embedding(x)
-        return self.fn(x)
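For context on the module being removed: axial attention sidesteps full 2D self-attention over all H×W positions by attending along the height and width axes separately, which is what the deleted AxialEncoder wired together out of SelfAttention, PermuteToForm, and convolutional feed-forward blocks. The sketch below is a minimal, self-contained illustration of that axial-attention idea in plain PyTorch; the class name AxialSelfAttention2d, the use of nn.MultiheadAttention, and the (B, C, H, W) tensor layout are illustrative assumptions and are not the repository's SelfAttention or PermuteToForm implementation.

```python
# Illustrative sketch of axial self-attention (not the repository's code).
# Attention is applied along one spatial axis at a time: first rows, then columns.
import torch
from torch import nn, Tensor


class AxialSelfAttention2d(nn.Module):
    """Applies multi-head self-attention along H and W separately (hypothetical example)."""

    def __init__(self, dim: int, heads: int) -> None:
        super().__init__()
        self.row_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.col_attn = nn.MultiheadAttention(dim, heads, batch_first=True)

    @staticmethod
    def _attend(x: Tensor, attn: nn.MultiheadAttention) -> Tensor:
        # x: (batch, seq, dim) -- self-attention over the seq axis.
        out, _ = attn(x, x, x, need_weights=False)
        return out

    def forward(self, x: Tensor) -> Tensor:
        # x: (B, C, H, W)
        b, c, h, w = x.shape
        # Attend along W for every row: fold (B, H) into the batch dimension.
        rows = x.permute(0, 2, 3, 1).reshape(b * h, w, c)
        rows = self._attend(rows, self.row_attn).reshape(b, h, w, c)
        # Attend along H for every column: fold (B, W) into the batch dimension.
        cols = rows.permute(0, 2, 1, 3).reshape(b * w, h, c)
        cols = self._attend(cols, self.col_attn).reshape(b, w, h, c)
        return cols.permute(0, 3, 2, 1)  # back to (B, C, H, W)


if __name__ == "__main__":
    layer = AxialSelfAttention2d(dim=64, heads=4)
    x = torch.randn(2, 64, 8, 32)
    print(layer(x).shape)  # torch.Size([2, 64, 8, 32])
```

Folding the non-attended spatial axis into the batch dimension is what keeps the cost at roughly O(H·W·(H+W)) per layer instead of the O((H·W)²) of full 2D self-attention.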