author    Gustaf Rydholm <gustaf.rydholm@gmail.com>  2023-09-03 22:54:09 +0200
committer Gustaf Rydholm <gustaf.rydholm@gmail.com>  2023-09-03 22:54:09 +0200
commit    1732ed564a738a42c1bf6e8127ae810f5658cb06 (patch)
tree      8d7f67793fec820850b4f8fd92f762f7f5e4b9f6
parent    53cfc21cffa4e877ad0959170b47b690d2fdb40f (diff)
Revert "Delete convnext"
This reverts commit 7239bce214607c70a7a91358586f265b2f74de7b.
-rw-r--r--  notebooks/04-convnext.ipynb                     248
-rw-r--r--  text_recognizer/network/convnext/__init__.py      7
-rw-r--r--  text_recognizer/network/convnext/attention.py    82
-rw-r--r--  text_recognizer/network/convnext/convnext.py     83
-rw-r--r--  text_recognizer/network/convnext/downsample.py   23
-rw-r--r--  text_recognizer/network/convnext/norm.py         19
-rw-r--r--  text_recognizer/network/convnext/residual.py     16
7 files changed, 478 insertions, 0 deletions
diff --git a/notebooks/04-convnext.ipynb b/notebooks/04-convnext.ipynb
new file mode 100644
index 0000000..5ab71c8
--- /dev/null
+++ b/notebooks/04-convnext.ipynb
@@ -0,0 +1,248 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "7c02ae76-b540-4b16-9492-e9210b3b9249",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The autoreload extension is already loaded. To reload it, use:\n",
+ " %reload_ext autoreload\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "os.environ['CUDA_VISIBLE_DEVICE'] = ''\n",
+ "import random\n",
+ "\n",
+ "%matplotlib inline\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import numpy as np\n",
+ "from omegaconf import OmegaConf\n",
+ "\n",
+ "%load_ext autoreload\n",
+ "%autoreload 2\n",
+ "\n",
+ "from importlib.util import find_spec\n",
+ "if find_spec(\"text_recognizer\") is None:\n",
+ " import sys\n",
+ " sys.path.append('..')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "id": "ccdb6dde-47e5-429a-88f2-0764fb7e259a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from hydra import compose, initialize\n",
+ "from omegaconf import OmegaConf\n",
+ "from hydra.utils import instantiate"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "3cf50475-39f2-4642-a7d1-5bcbc0a036f7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "path = \"../training/conf/network/convnext.yaml\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "id": "e52ecb01-c975-4e55-925d-1182c7aea473",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(path, \"rb\") as f:\n",
+ " cfg = OmegaConf.load(f)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "id": "f939aa37-7b1d-45cc-885c-323c4540bda1",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'_target_': 'text_recognizer.network.convnext.ConvNext', 'dim': 16, 'dim_mults': [2, 4, 8], 'depths': [3, 3, 6], 'downsampling_factors': [[2, 2], [2, 2], [2, 2]], 'attn': {'_target_': 'text_recognizer.network.convnext.TransformerBlock', 'attn': {'_target_': 'text_recognizer.network.convnext.Attention', 'dim': 128, 'heads': 4, 'dim_head': 64, 'scale': 8}, 'ff': {'_target_': 'text_recognizer.network.convnext.FeedForward', 'dim': 128, 'mult': 4}}}"
+ ]
+ },
+ "execution_count": 38,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "cfg"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "a2b420c1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "cfg.dim_mults = [2, 4, 8, 8]\n",
+ "cfg.depths = [3, 3, 6, 6]\n",
+ "cfg.downsampling_factors = [[2, 2], [2, 2], [2, 2], [2, 1]]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "id": "c9589350",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "net = instantiate(cfg)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "id": "618b997c-e6a6-4487-b70c-9d260cb556d3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from torchinfo import summary"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "id": "25759b7b-8deb-4163-b75d-a1357c9fe88f",
+ "metadata": {
+ "scrolled": false
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "====================================================================================================\n",
+ "Layer (type:depth-idx) Output Shape Param #\n",
+ "====================================================================================================\n",
+ "ConvNext [2, 128, 72, 80] 165,408\n",
+ "├─TransformerBlock: 1-5 [2, 128, 72, 80] (recursive)\n",
+ "│ └─Attention: 2-6 [2, 128, 72, 80] (recursive)\n",
+ "│ │ └─LayerNorm: 3-13 [2, 128, 72, 80] (recursive)\n",
+ "├─Conv2d: 1-3 [2, 16, 576, 640] (recursive)\n",
+ "├─TransformerBlock: 1 -- --\n",
+ "│ └─Attention: 2 -- --\n",
+ "│ │ └─Conv2d: 3-15 [2, 128, 72, 80] (recursive)\n",
+ "│ └─FeedForward: 2-7 [2, 128, 72, 80] (recursive)\n",
+ "│ │ └─Residual: 3-16 [2, 128, 72, 80] (recursive)\n",
+ "│ │ │ └─Sequential: 4-26 [2, 128, 72, 80] (recursive)\n",
+ "├─Conv2d: 1-3 [2, 16, 576, 640] (recursive)\n",
+ "├─ModuleList: 1-4 -- --\n",
+ "│ └─ModuleList: 2-3 -- --\n",
+ "│ │ └─ConvNextBlock: 3-4 [2, 16, 576, 640] --\n",
+ "│ │ │ └─Conv2d: 4-2 [2, 16, 576, 640] 800\n",
+ "│ │ │ └─Sequential: 4-3 [2, 16, 576, 640] 9,280\n",
+ "│ │ │ └─Identity: 4-4 [2, 16, 576, 640] --\n",
+ "│ │ └─ModuleList: 3-5 -- --\n",
+ "│ │ │ └─ConvNextBlock: 4-5 [2, 16, 576, 640] 10,080\n",
+ "│ │ │ └─ConvNextBlock: 4-6 [2, 16, 576, 640] 10,080\n",
+ "│ │ │ └─ConvNextBlock: 4-7 [2, 16, 576, 640] 10,080\n",
+ "│ │ └─Downsample: 3-6 [2, 32, 288, 320] --\n",
+ "│ │ │ └─Sequential: 4-8 [2, 32, 288, 320] 2,080\n",
+ "│ └─ModuleList: 2-4 -- --\n",
+ "│ │ └─ConvNextBlock: 3-7 [2, 32, 288, 320] --\n",
+ "│ │ │ └─Conv2d: 4-9 [2, 32, 288, 320] 1,600\n",
+ "│ │ │ └─Sequential: 4-10 [2, 32, 288, 320] 36,992\n",
+ "│ │ │ └─Identity: 4-11 [2, 32, 288, 320] --\n",
+ "│ │ └─ModuleList: 3-8 -- --\n",
+ "│ │ │ └─ConvNextBlock: 4-12 [2, 32, 288, 320] 38,592\n",
+ "│ │ │ └─ConvNextBlock: 4-13 [2, 32, 288, 320] 38,592\n",
+ "│ │ │ └─ConvNextBlock: 4-14 [2, 32, 288, 320] 38,592\n",
+ "│ │ └─Downsample: 3-9 [2, 64, 144, 160] --\n",
+ "│ │ │ └─Sequential: 4-15 [2, 64, 144, 160] 8,256\n",
+ "│ └─ModuleList: 2-5 -- --\n",
+ "│ │ └─ConvNextBlock: 3-10 [2, 64, 144, 160] --\n",
+ "│ │ │ └─Conv2d: 4-16 [2, 64, 144, 160] 3,200\n",
+ "│ │ │ └─Sequential: 4-17 [2, 64, 144, 160] 147,712\n",
+ "│ │ │ └─Identity: 4-18 [2, 64, 144, 160] --\n",
+ "│ │ └─ModuleList: 3-11 -- --\n",
+ "│ │ │ └─ConvNextBlock: 4-19 [2, 64, 144, 160] 150,912\n",
+ "│ │ │ └─ConvNextBlock: 4-20 [2, 64, 144, 160] 150,912\n",
+ "│ │ │ └─ConvNextBlock: 4-21 [2, 64, 144, 160] 150,912\n",
+ "│ │ │ └─ConvNextBlock: 4-22 [2, 64, 144, 160] 150,912\n",
+ "│ │ │ └─ConvNextBlock: 4-23 [2, 64, 144, 160] 150,912\n",
+ "│ │ │ └─ConvNextBlock: 4-24 [2, 64, 144, 160] 150,912\n",
+ "│ │ └─Downsample: 3-12 [2, 128, 72, 80] --\n",
+ "│ │ │ └─Sequential: 4-25 [2, 128, 72, 80] 32,896\n",
+ "├─TransformerBlock: 1-5 [2, 128, 72, 80] (recursive)\n",
+ "│ └─Attention: 2-6 [2, 128, 72, 80] (recursive)\n",
+ "│ │ └─LayerNorm: 3-13 [2, 128, 72, 80] (recursive)\n",
+ "│ │ └─Conv2d: 3-14 [2, 768, 72, 80] 98,304\n",
+ "│ │ └─Conv2d: 3-15 [2, 128, 72, 80] (recursive)\n",
+ "│ └─FeedForward: 2-7 [2, 128, 72, 80] (recursive)\n",
+ "│ │ └─Residual: 3-16 [2, 128, 72, 80] (recursive)\n",
+ "│ │ │ └─Sequential: 4-26 [2, 128, 72, 80] (recursive)\n",
+ "├─LayerNorm: 1-6 [2, 128, 72, 80] 128\n",
+ "====================================================================================================\n",
+ "Total params: 1,558,144\n",
+ "Trainable params: 1,558,144\n",
+ "Non-trainable params: 0\n",
+ "Total mult-adds (G): 114.00\n",
+ "====================================================================================================\n",
+ "Input size (MB): 2.95\n",
+ "Forward/backward pass size (MB): 3822.06\n",
+ "Params size (MB): 5.57\n",
+ "Estimated Total Size (MB): 3830.58\n",
+ "===================================================================================================="
+ ]
+ },
+ "execution_count": 41,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "summary(net, (2, 1, 576, 640), device=\"cpu\", depth=4)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "05c1d499",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.4"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/text_recognizer/network/convnext/__init__.py b/text_recognizer/network/convnext/__init__.py
new file mode 100644
index 0000000..dcff3fc
--- /dev/null
+++ b/text_recognizer/network/convnext/__init__.py
@@ -0,0 +1,7 @@
+"""Convnext module."""
+from text_recognizer.network.convnext.attention import (
+ Attention,
+ FeedForward,
+ TransformerBlock,
+)
+from text_recognizer.network.convnext.convnext import ConvNext
diff --git a/text_recognizer/network/convnext/attention.py b/text_recognizer/network/convnext/attention.py
new file mode 100644
index 0000000..6bc9692
--- /dev/null
+++ b/text_recognizer/network/convnext/attention.py
@@ -0,0 +1,82 @@
+"""Convolution self attention block."""
+
+import torch.nn.functional as F
+from einops import rearrange
+from torch import Tensor, einsum, nn
+
+from text_recognizer.network.convnext.norm import LayerNorm
+from text_recognizer.network.convnext.residual import Residual
+
+
+def l2norm(t: Tensor) -> Tensor:
+ return F.normalize(t, dim=-1)
+
+
+class FeedForward(nn.Module):
+ def __init__(self, dim: int, mult: int = 4) -> None:
+ super().__init__()
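+        # Pre-norm MLP of 1x1 convs, wrapped in a residual connection.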
+ inner_dim = int(dim * mult)
+ self.fn = Residual(
+ nn.Sequential(
+ LayerNorm(dim),
+ nn.Conv2d(dim, inner_dim, 1, bias=False),
+ nn.GELU(),
+ LayerNorm(inner_dim),
+ nn.Conv2d(inner_dim, dim, 1, bias=False),
+ )
+ )
+
+ def forward(self, x: Tensor) -> Tensor:
+ return self.fn(x)
+
+
+class Attention(nn.Module):
+ def __init__(
+ self, dim: int, heads: int = 4, dim_head: int = 64, scale: int = 8
+ ) -> None:
+ super().__init__()
+ self.scale = scale
+ self.heads = heads
+ inner_dim = heads * dim_head
+ self.norm = LayerNorm(dim)
+
+ self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
+ self.to_out = nn.Conv2d(inner_dim, dim, 1, bias=False)
+
+ def forward(self, x: Tensor) -> Tensor:
+ h, w = x.shape[-2:]
+
+ residual = x.clone()
+
+ x = self.norm(x)
+
+ q, k, v = self.to_qkv(x).chunk(3, dim=1)
+ q, k, v = map(
+ lambda t: rearrange(t, "b (h c) ... -> b h (...) c", h=self.heads),
+ (q, k, v),
+ )
+
+ q, k = map(l2norm, (q, k))
+
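+        # Cosine-similarity attention: q and k are l2-normalized, so the
+        # logits are cosine similarities under a fixed scale.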
+ sim = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale
+ attn = sim.softmax(dim=-1)
+
+ out = einsum("b h i j, b h j d -> b h i d", attn, v)
+
+ out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
+ return self.to_out(out) + residual
+
+
+class TransformerBlock(nn.Module):
+ def __init__(self, attn: Attention, ff: FeedForward) -> None:
+ super().__init__()
+ self.attn = attn
+ self.ff = ff
+
+ def forward(self, x: Tensor) -> Tensor:
+ x = self.attn(x)
+ x = self.ff(x)
+ return x
diff --git a/text_recognizer/network/convnext/convnext.py b/text_recognizer/network/convnext/convnext.py
new file mode 100644
index 0000000..6acf059
--- /dev/null
+++ b/text_recognizer/network/convnext/convnext.py
@@ -0,0 +1,83 @@
+"""ConvNext module."""
+from typing import Optional, Sequence
+
+from torch import Tensor, nn
+
+from text_recognizer.network.convnext.attention import TransformerBlock
+from text_recognizer.network.convnext.downsample import Downsample
+from text_recognizer.network.convnext.norm import LayerNorm
+
+
+class ConvNextBlock(nn.Module):
+ """ConvNext block."""
+
+ def __init__(self, dim: int, dim_out: int, mult: int) -> None:
+ super().__init__()
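+        # Depthwise 7x7 conv, then a LayerNorm + expand/contract conv net;
+        # the residual path is a 1x1 conv when in/out channels differ.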
+ self.ds_conv = nn.Conv2d(
+ dim, dim, kernel_size=(7, 7), padding="same", groups=dim
+ )
+ self.net = nn.Sequential(
+ LayerNorm(dim),
+ nn.Conv2d(dim, dim_out * mult, kernel_size=(3, 3), padding="same"),
+ nn.GELU(),
+ nn.Conv2d(dim_out * mult, dim_out, kernel_size=(3, 3), padding="same"),
+ )
+ self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
+
+ def forward(self, x: Tensor) -> Tensor:
+ h = self.ds_conv(x)
+ h = self.net(h)
+ return h + self.res_conv(x)
+
+
+class ConvNext(nn.Module):
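+    """ConvNeXt-style feature extractor with an optional attention block."""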
+ def __init__(
+ self,
+ dim: int = 16,
+ dim_mults: Sequence[int] = (2, 4, 8),
+ depths: Sequence[int] = (3, 3, 6),
+ downsampling_factors: Sequence[Sequence[int]] = ((2, 2), (2, 2), (2, 2)),
+ attn: Optional[TransformerBlock] = None,
+ ) -> None:
+ super().__init__()
+ dims = (dim, *map(lambda m: m * dim, dim_mults))
+ self.attn = attn if attn is not None else nn.Identity()
+ self.out_channels = dims[-1]
+ self.stem = nn.Conv2d(1, dims[0], kernel_size=7, padding="same")
+ self.layers = nn.ModuleList([])
+
+ for i in range(len(dims) - 1):
+ dim_in, dim_out = dims[i], dims[i + 1]
+ self.layers.append(
+ nn.ModuleList(
+ [
+ ConvNextBlock(dim_in, dim_in, 2),
+ nn.ModuleList(
+ [ConvNextBlock(dim_in, dim_in, 2) for _ in range(depths[i])]
+ ),
+ Downsample(dim_in, dim_out, downsampling_factors[i]),
+ ]
+ )
+ )
+        self.norm = LayerNorm(dims[-1])
+        self.apply(self._init_weights)
+
+ def _init_weights(self, m):
+ if isinstance(m, (nn.Conv2d, nn.Linear)):
+ nn.init.trunc_normal_(m.weight, std=0.02)
+            if m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+
+ def forward(self, x: Tensor) -> Tensor:
+ x = self.stem(x)
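+        # Each stage: an init block, depths[i] ConvNext blocks, then downsample.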
+ for init_block, blocks, down in self.layers:
+ x = init_block(x)
+ for fn in blocks:
+ x = fn(x)
+ x = down(x)
+ x = self.attn(x)
+ return self.norm(x)
diff --git a/text_recognizer/network/convnext/downsample.py b/text_recognizer/network/convnext/downsample.py
new file mode 100644
index 0000000..a8a0466
--- /dev/null
+++ b/text_recognizer/network/convnext/downsample.py
@@ -0,0 +1,23 @@
+"""Convnext downsample module."""
+from typing import Tuple
+
+from einops.layers.torch import Rearrange
+from torch import Tensor, nn
+
+
+class Downsample(nn.Module):
+ """Downsamples feature maps by patches."""
+
+ def __init__(self, dim: int, dim_out: int, factors: Tuple[int, int]) -> None:
+ super().__init__()
+ s1, s2 = factors
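+        # Space-to-depth: fold each s1 x s2 patch into the channel dim,
+        # then mix channels with a 1x1 conv.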
+ self.fn = nn.Sequential(
+ Rearrange("b c (h s1) (w s2) -> b (c s1 s2) h w", s1=s1, s2=s2),
+ nn.Conv2d(dim * s1 * s2, dim_out, 1),
+ )
+
+ def forward(self, x: Tensor) -> Tensor:
+ """Applies patch function."""
+ return self.fn(x)
diff --git a/text_recognizer/network/convnext/norm.py b/text_recognizer/network/convnext/norm.py
new file mode 100644
index 0000000..3355de9
--- /dev/null
+++ b/text_recognizer/network/convnext/norm.py
@@ -0,0 +1,19 @@
+"""Layer norm for conv layers."""
+import torch
+from torch import Tensor, nn
+
+
+class LayerNorm(nn.Module):
+ """Layer norm for convolutions."""
+
+ def __init__(self, dim: int) -> None:
+ super().__init__()
+ self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1))
+
+ def forward(self, x: Tensor) -> Tensor:
+ """Applies layer norm."""
+ eps = 1e-5 if x.dtype == torch.float32 else 1e-3
+ var = torch.var(x, dim=1, unbiased=False, keepdim=True)
+ mean = torch.mean(x, dim=1, keepdim=True)
+ return (x - mean) / (var + eps).sqrt() * self.gamma
diff --git a/text_recognizer/network/convnext/residual.py b/text_recognizer/network/convnext/residual.py
new file mode 100644
index 0000000..dfc2847
--- /dev/null
+++ b/text_recognizer/network/convnext/residual.py
@@ -0,0 +1,16 @@
+"""Generic residual layer."""
+from typing import Callable
+
+from torch import Tensor, nn
+
+
+class Residual(nn.Module):
+ """Residual layer."""
+
+ def __init__(self, fn: Callable) -> None:
+ super().__init__()
+ self.fn = fn
+
+ def forward(self, x: Tensor) -> Tensor:
+ """Applies residual fn."""
+ return self.fn(x) + x