path: root/text_recognizer/networks/perceiver/perceiver.py
"""Perceiver IO.

A copy from lucidrains.
"""
from typing import Optional

from einops import repeat
import torch
from torch import nn, Tensor

from text_recognizer.networks.perceiver.attention import Attention
from text_recognizer.networks.transformer.ff import FeedForward
from text_recognizer.networks.transformer.norm import PreNorm


class PerceiverIO(nn.Module):
    """Encode-process-decode network with a fixed-size latent bottleneck."""

    def __init__(
        self,
        dim: int,
        cross_heads: int,
        cross_head_dim: int,
        num_latents: int,
        latent_dim: int,
        latent_heads: int,
        depth: int,
        queries_dim: int,
        logits_dim: int,
    ) -> None:
        super().__init__()
        # Learnt latent array, broadcast over the batch dimension in forward.
        self.latents = nn.Parameter(torch.randn(num_latents, latent_dim))

        # Encoder: a single cross-attention block that writes the input data
        # into the latents, followed by a feed-forward layer.
        self.cross_attn_block = nn.ModuleList(
            [
                PreNorm(
                    latent_dim,
                    Attention(
                        latent_dim, dim, heads=cross_heads, dim_head=cross_head_dim
                    ),
                    context_dim=dim,
                ),
                PreNorm(latent_dim, FeedForward(latent_dim)),
            ]
        )

        # Processor: depth blocks of latent self-attention and feed-forward.
        # Each self-attention head uses the full latent_dim as its head dim.
        self.layers = nn.ModuleList(
            [
                nn.ModuleList(
                    [
                        PreNorm(
                            latent_dim,
                            Attention(
                                latent_dim, heads=latent_heads, dim_head=latent_dim
                            ),
                        ),
                        PreNorm(latent_dim, FeedForward(latent_dim)),
                    ]
                )
                for _ in range(depth)
            ]
        )

        # Decoder: the output queries cross attend to the processed latents.
        self.decoder_cross_attn = PreNorm(
            queries_dim,
            Attention(
                queries_dim, latent_dim, heads=cross_heads, dim_head=cross_head_dim
            ),
            context_dim=latent_dim,
        )
        self.decoder_ff = PreNorm(queries_dim, FeedForward(queries_dim))
        self.to_logits = nn.Linear(queries_dim, logits_dim)

    def forward(
        self, data: Tensor, queries: Tensor, mask: Optional[Tensor] = None
    ) -> Tensor:
        """Computes logits for each output query given the input data."""
        b = data.shape[0]
        # Broadcast the learnt latents over the batch.
        x = repeat(self.latents, "n d -> b n d", b=b)

        cross_attn, cross_ff = self.cross_attn_block

        # Encode: the latents cross attend to the input data.
        x = cross_attn(x, context=data, mask=mask) + x
        x = cross_ff(x) + x

        # Process: refine the latents with self-attention blocks.
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x

        # Broadcast unbatched queries over the batch.
        if queries.ndim == 2:
            queries = repeat(queries, "n d -> b n d", b=b)

        # Decode: the queries cross attend to the processed latents.
        latents = self.decoder_cross_attn(queries, context=x)
        latents = latents + self.decoder_ff(latents)

        return self.to_logits(latents)
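

# A minimal usage sketch showing the expected tensor shapes. The
# hyperparameter values below are illustrative assumptions, not values
# taken from this repository's configs.
if __name__ == "__main__":
    model = PerceiverIO(
        dim=64,
        cross_heads=1,
        cross_head_dim=64,
        num_latents=128,
        latent_dim=128,
        latent_heads=8,
        depth=4,
        queries_dim=64,
        logits_dim=10,
    )
    data = torch.randn(2, 256, 64)  # (batch, input tokens, dim)
    queries = torch.randn(32, 64)  # (output tokens, queries_dim); broadcast over the batch
    logits = model(data, queries)
    print(logits.shape)  # torch.Size([2, 32, 10])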