author    Gustaf Rydholm <gustaf.rydholm@gmail.com>  2024-04-15 21:50:08 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>  2024-04-15 21:50:08 +0200
commit    2c5093dda783cf7618a8554ae649d32b92b84b4c (patch)
tree      e098b46a3dabf780137750a1a15f3f6a6fdaad42
parent    b3fbfd72a8f647161685b28d20b4b61519d8a643 (diff)
Update networks
-rw-r--r--  text_recognizer/network/convformer.py | 16
-rw-r--r--  text_recognizer/network/cvit.py       | 44
2 files changed, 50 insertions(+), 10 deletions(-)
diff --git a/text_recognizer/network/convformer.py b/text_recognizer/network/convformer.py
index 0ee5487..e2b0204 100644
--- a/text_recognizer/network/convformer.py
+++ b/text_recognizer/network/convformer.py
@@ -1,12 +1,10 @@
-from typing import Optional
from einops.layers.torch import Rearrange
from torch import Tensor, nn
-from text_recognizer.network.convnext.convnext import ConvNext
-from .transformer.embedding.token import TokenEmbedding
-from .transformer.embedding.sincos import sincos_2d
from .transformer.decoder import Decoder
+from .transformer.embedding.sincos import sincos_2d
+from .transformer.embedding.token import TokenEmbedding
from .transformer.encoder import Encoder
@@ -24,12 +22,10 @@ class Convformer(nn.Module):
token_embedding: TokenEmbedding,
tie_embeddings: bool,
pad_index: int,
- stem: Optional[ConvNext] = None,
channels: int = 1,
) -> None:
super().__init__()
patch_dim = patch_height * patch_width * channels
- self.stem = stem if stem is not None else nn.Identity()
self.to_patch_embedding = nn.Sequential(
Rearrange(
"b c (h ph) (w pw) -> b (h w) (ph pw c)",
@@ -53,11 +49,11 @@ class Convformer(nn.Module):
self.decoder = decoder
self.pad_index = pad_index
- def encode(self, img: Tensor) -> Tensor:
- x = self.stem(img)
+ def encode(self, images: Tensor) -> Tensor:
+ x = self.encoder(images)
x = self.to_patch_embedding(x)
- x += self.patch_embedding.to(img.device, dtype=img.dtype)
- return self.encoder(x)
+ x = x + self.patch_embedding.to(images.device, dtype=images.dtype)
+ return x
def decode(self, text: Tensor, img_features: Tensor) -> Tensor:
text = text.long()
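For orientation, here is a minimal standalone sketch of the reworked encode path above. It assumes the encoder handed to Convformer now returns a (b, c, h, w) feature map, since the Rearrange inside to_patch_embedding still expects one; the helper name and shape comments are illustrative and not part of the commit.

from torch import Tensor, nn


def encode_sketch(
    encoder: nn.Module,
    to_patch_embedding: nn.Module,
    patch_embedding: Tensor,
    images: Tensor,
) -> Tensor:
    # Feature extraction now happens up front; the optional ConvNext stem is gone.
    x = encoder(images)  # assumed (b, c, h, w) feature map
    # Flatten patches and project to the model dimension: (b, num_patches, dim).
    x = to_patch_embedding(x)
    # Out-of-place add of the fixed sincos positional embedding, cast to the
    # input's device and dtype and broadcast over the batch dimension.
    x = x + patch_embedding.to(images.device, dtype=images.dtype)
    return x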
diff --git a/text_recognizer/network/cvit.py b/text_recognizer/network/cvit.py
new file mode 100644
index 0000000..f9abb8c
--- /dev/null
+++ b/text_recognizer/network/cvit.py
@@ -0,0 +1,44 @@
+from einops.layers.torch import Rearrange
+from torch import Tensor, nn
+
+from text_recognizer.network.convnext.convnext import ConvNext
+
+from .transformer.embedding.sincos import sincos_2d
+from .transformer.encoder import Encoder
+
+
+class CVit(nn.Module):
+ def __init__(
+ self,
+ image_height: int,
+ image_width: int,
+ patch_height: int,
+ patch_width: int,
+ dim: int,
+ encoder: Encoder,
+ stem: ConvNext,
+ channels: int = 1,
+ ) -> None:
+ super().__init__()
+ patch_dim = patch_height * patch_width * channels
+ self.stem = stem
+ self.to_patch_embedding = nn.Sequential(
+ Rearrange(
+ "b c (h ph) (w pw) -> b (h w) (ph pw c)",
+ ph=patch_height,
+ pw=patch_width,
+ ),
+ nn.LayerNorm(patch_dim),
+ nn.Linear(patch_dim, dim),
+ nn.LayerNorm(dim),
+ )
+ self.patch_embedding = sincos_2d(
+ h=image_height // patch_height, w=image_width // patch_width, dim=dim
+ )
+ self.encoder = encoder
+
+ def forward(self, img: Tensor) -> Tensor:
+ x = self.stem(img)
+ x = self.to_patch_embedding(x)
+ x += self.patch_embedding.to(img.device, dtype=img.dtype)
+ return self.encoder(x)
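A hypothetical usage sketch of the new CVit module follows. The CVit constructor arguments are taken from the file above, but the hyperparameter values are placeholders, the stem and encoder are assumed to be built elsewhere in the repository, and the sketch relies on the stem preserving spatial size and channel count, since patch_dim and the sincos grid are computed from the raw image dimensions.

from torch import nn

from text_recognizer.network.cvit import CVit


def build_cvit(stem: nn.Module, encoder: nn.Module) -> CVit:
    # Placeholder hyperparameters for illustration only.
    return CVit(
        image_height=56,    # must be divisible by patch_height
        image_width=1024,   # must be divisible by patch_width
        patch_height=7,
        patch_width=8,
        dim=256,
        encoder=encoder,
        stem=stem,
        channels=1,
    )


# images: (batch, channels, height, width); the stem is assumed to keep this shape.
# features = build_cvit(stem, encoder)(torch.randn(2, 1, 56, 1024))
# features.shape == (2, (56 // 7) * (1024 // 8), 256)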