path: root/text_recognizer/network/transformer/decoder.py
"""Transformer decoder module."""
from typing import Optional

from torch import Tensor, nn

from .attention import Attention


class Decoder(nn.Module):
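    """Transformer decoder: a stack of self-attention and cross-attention layers.

    Each of the ``depth`` layers applies causal self-attention over the target
    sequence followed by cross-attention over an encoder ``context``, with
    residual connections around each sublayer and a final layer norm.
    """
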
    def __init__(
        self,
        dim: int,
        ff_mult: int,
        heads: int,
        dim_head: int,
        depth: int,
        dropout_rate: float = 0.0,
        one_kv_head: bool = False,
    ) -> None:
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.layers = nn.ModuleList(
            [
                nn.ModuleList(
                    [
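                        # Causal self-attention over the target sequence,
                        # with rotary position embeddings.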
                        Attention(
                            dim=dim,
                            heads=heads,
                            causal=True,
                            dim_head=dim_head,
                            ff_mult=ff_mult,
                            dropout_rate=dropout_rate,
                            use_flash=True,
                            norm_context=False,
                            use_rotary_emb=True,
                            one_kv_head=one_kv_head,
                        ),
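                        # Cross-attention over the encoder context
                        # (non-causal, no rotary embeddings).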
                        Attention(
                            dim=dim,
                            heads=heads,
                            causal=False,
                            dim_head=dim_head,
                            ff_mult=ff_mult,
                            dropout_rate=dropout_rate,
                            use_flash=True,
                            norm_context=False,
                            use_rotary_emb=False,
                            one_kv_head=one_kv_head,
                        ),
                    ]
                )
                for _ in range(depth)
            ]
        )

    def self_attn(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor:
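        """Applies only the self-attention sublayers, then the final norm."""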
        for self_attn, _ in self.layers:
            x = x + self_attn(x, mask=mask)
        return self.norm(x)

    def forward(
        self,
        x: Tensor,
        context: Tensor,
        mask: Optional[Tensor] = None,
    ) -> Tensor:
        """Applies decoder block on input signals."""
        for self_attn, cross_attn in self.layers:
            x = x + self_attn(x, mask=mask)
            x = x + cross_attn(x, context=context)
        return self.norm(x)
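

if __name__ == "__main__":
    # Minimal smoke test (a sketch; the shapes and hyperparameters here are
    # assumptions for illustration, not part of this module's API). Run as a
    # module so the relative import of Attention resolves:
    #   python -m text_recognizer.network.transformer.decoder
    import torch

    decoder = Decoder(dim=256, ff_mult=4, heads=8, dim_head=32, depth=4)
    x = torch.randn(1, 32, 256)  # embedded target tokens (batch, seq, dim)
    context = torch.randn(1, 196, 256)  # encoder features, e.g. image patches
    out = decoder(x, context=context)
    print(out.shape)  # expected: torch.Size([1, 32, 256])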