From 3a4fa0f1cf3c381a3d81f39e6e9439ec192b2509 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Mon, 11 Sep 2023 22:14:43 +0200
Subject: Refactor vit to encoder only

---
 text_recognizer/network/vit.py | 49 ++++++------------------------------------
 1 file changed, 6 insertions(+), 43 deletions(-)

(limited to 'text_recognizer/network')

diff --git a/text_recognizer/network/vit.py b/text_recognizer/network/vit.py
index 1fbf3fc..a596792 100644
--- a/text_recognizer/network/vit.py
+++ b/text_recognizer/network/vit.py
@@ -1,16 +1,11 @@
-"""Transformer module."""
-from typing import Type
-
 from einops.layers.torch import Rearrange
 from torch import Tensor, nn
 
-from .transformer.embedding.token import TokenEmbedding
 from .transformer.embedding.sincos import sincos_2d
-from .transformer.decoder import Decoder
 from .transformer.encoder import Encoder
 
 
-class VisionTransformer(nn.Module):
+class Vit(nn.Module):
     def __init__(
         self,
         image_height: int,
@@ -18,16 +13,11 @@ class VisionTransformer(nn.Module):
         patch_height: int,
         patch_width: int,
         dim: int,
-        num_classes: int,
         encoder: Encoder,
-        decoder: Decoder,
-        token_embedding: TokenEmbedding,
-        pos_embedding: Type[nn.Module],
-        tie_embeddings: bool,
-        pad_index: int,
+        channels: int = 1,
     ) -> None:
         super().__init__()
-        patch_dim = patch_height * patch_width
+        patch_dim = patch_height * patch_width * channels
         self.to_patch_embedding = nn.Sequential(
             Rearrange(
                 "b c (h ph) (w pw) -> b (h w) (ph pw c)",
@@ -41,36 +31,9 @@ class VisionTransformer(nn.Module):
         self.patch_embedding = sincos_2d(
             h=image_height // patch_height, w=image_width // patch_width, dim=dim
         )
-        self.pos_embedding = pos_embedding
-        self.token_embedding = token_embedding
-        self.to_logits = (
-            nn.Linear(dim, num_classes)
-            if not tie_embeddings
-            else lambda t: t @ self.token_embedding.to_embedding.weight.t()
-        )
         self.encoder = encoder
-        self.decoder = decoder
-        self.pad_index = pad_index
 
-    def encode(self, img: Tensor) -> Tensor:
-        x = self.to_patch_embedding(img)
-        x += self.patch_embedding.to(img.device, dtype=img.dtype)
+    def forward(self, images: Tensor) -> Tensor:
+        x = self.to_patch_embedding(images)
+        x = x + self.patch_embedding.to(images.device, dtype=images.dtype)
         return self.encoder(x)
-
-    def decode(self, text: Tensor, img_features: Tensor) -> Tensor:
-        text = text.long()
-        mask = text != self.pad_index
-        tokens = self.token_embedding(text)
-        tokens = tokens + self.pos_embedding(tokens)
-        output = self.decoder(tokens, context=img_features, mask=mask)
-        return self.to_logits(output)
-
-    def forward(
-        self,
-        img: Tensor,
-        text: Tensor,
-    ) -> Tensor:
-        """Applies decoder block on input signals."""
-        img_features = self.encode(img)
-        logits = self.decode(text, img_features)
-        return logits  # [B, N, C]
--
cgit v1.2.3-70-g09d2
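
For reference, below is a minimal standalone sketch of what the class does after this refactor: patchify the image, add fixed 2D sine-cosine positional embeddings, and pass the sequence through an encoder, returning patch features of shape [B, num_patches, dim]. The hunks hide the middle of to_patch_embedding (old lines 34-40) and do not show the repo's Encoder or sincos_2d, so the LayerNorm/Linear projection, the sincos implementation, and the use of nn.TransformerEncoder as a stand-in encoder are assumptions here, not the repo's exact code.

import torch
from einops.layers.torch import Rearrange
from torch import Tensor, nn


def sincos_2d(h: int, w: int, dim: int, temperature: float = 10000.0) -> Tensor:
    # Common 2D sine-cosine positional embedding; returns a [h * w, dim] tensor.
    # Assumed implementation, the repo's sincos_2d is not shown in this patch.
    assert dim % 4 == 0, "dim must be divisible by 4 for 2D sincos embeddings"
    y, x = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    omega = torch.arange(dim // 4) / (dim // 4 - 1)
    omega = 1.0 / (temperature**omega)
    y = y.flatten()[:, None] * omega[None, :]
    x = x.flatten()[:, None] * omega[None, :]
    return torch.cat((x.sin(), x.cos(), y.sin(), y.cos()), dim=1)


class EncoderOnlyVit(nn.Module):
    # Patchify -> project to dim -> add fixed 2D positions -> run the encoder.
    def __init__(
        self,
        image_height: int,
        image_width: int,
        patch_height: int,
        patch_width: int,
        dim: int,
        encoder: nn.Module,
        channels: int = 1,
    ) -> None:
        super().__init__()
        patch_dim = patch_height * patch_width * channels
        self.to_patch_embedding = nn.Sequential(
            Rearrange(
                "b c (h ph) (w pw) -> b (h w) (ph pw c)",
                ph=patch_height,
                pw=patch_width,
            ),
            # The projection below is a typical ViT patch embedding; the exact
            # layers are hidden between the hunks of this patch.
            nn.LayerNorm(patch_dim),
            nn.Linear(patch_dim, dim),
            nn.LayerNorm(dim),
        )
        self.patch_embedding = sincos_2d(
            h=image_height // patch_height, w=image_width // patch_width, dim=dim
        )
        self.encoder = encoder

    def forward(self, images: Tensor) -> Tensor:
        x = self.to_patch_embedding(images)
        x = x + self.patch_embedding.to(images.device, dtype=images.dtype)
        return self.encoder(x)  # [B, num_patches, dim]


# Stand-in for the repo's Encoder: any module mapping [B, N, dim] -> [B, N, dim].
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=256, nhead=4, batch_first=True), num_layers=2
)
vit = EncoderOnlyVit(
    image_height=56, image_width=672, patch_height=28, patch_width=28, dim=256, encoder=encoder
)
features = vit(torch.randn(2, 1, 56, 672))
print(features.shape)  # torch.Size([2, 48, 256])

The design change visible in the diff itself: the text decoder, token embedding, positional embedding for tokens, and logits head are all removed, so the module now only produces image features for a separate decoder to consume, and the new channels argument folds multi-channel inputs into patch_dim.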