path: root/text_recognizer/network/transformer/transformer.py
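"""Transformer for text recognition: a ViT image encoder feeding an autoregressive text decoder."""
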
from torch import Tensor, nn

from .decoder import Decoder
from .embedding.token import TokenEmbedding
from .vit import Vit


class Transformer(nn.Module):
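    """Encoder-decoder transformer that maps images to sequences of token logits."""
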
    def __init__(
        self,
        dim: int,
        num_classes: int,
        encoder: Vit,
        decoder: Decoder,
        token_embedding: TokenEmbedding,
        tie_embeddings: bool,
        pad_index: int,
    ) -> None:
        super().__init__()
        self.token_embedding = token_embedding
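        # Output head: either a learned linear projection to the vocabulary or,
        # when tie_embeddings is set, weight tying via the transpose of the
        # token-embedding matrix, applied as a closure over self.token_embedding.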
        self.to_logits = (
            nn.Linear(dim, num_classes)
            if not tie_embeddings
            else lambda t: t @ self.token_embedding.to_embedding.weight.t()
        )
        self.encoder = encoder
        self.decoder = decoder
        self.pad_index = pad_index

    def encode(self, images: Tensor) -> Tensor:
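        """Encodes images into a sequence of feature vectors with the ViT encoder."""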
        return self.encoder(images)

    def decode(self, text: Tensor, img_features: Tensor) -> Tensor:
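        """Decodes text tokens conditioned on the encoded image features.

        Builds a padding mask (True for non-pad positions), embeds the tokens,
        runs the decoder with the image features as cross-attention context,
        and projects the output to class logits.
        """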
        text = text.long()
        mask = text != self.pad_index
        tokens = self.token_embedding(text)
        output = self.decoder(tokens, context=img_features, mask=mask)
        return self.to_logits(output)

    def forward(
        self,
        img: Tensor,
        text: Tensor,
    ) -> Tensor:
        """Applies decoder block on input signals."""
        img_features = self.encode(img)
        logits = self.decode(text, img_features)
        return logits  # [B, N, C]
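

# A minimal usage sketch (not part of the original file). The constructor arguments
# to Vit, Decoder, and TokenEmbedding are elided because their signatures live in
# sibling modules not shown here; the numeric values below are illustrative only.
#
#     encoder = Vit(...)                      # patch-feature encoder over input images
#     decoder = Decoder(...)                  # cross-attending transformer decoder
#     token_embedding = TokenEmbedding(...)   # maps token ids to dim-sized vectors
#     net = Transformer(
#         dim=256,
#         num_classes=58,
#         encoder=encoder,
#         decoder=decoder,
#         token_embedding=token_embedding,
#         tie_embeddings=True,
#         pad_index=3,
#     )
#     logits = net(img, text)                 # [B, N, C]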