From 72ce2361f97676fc50ebc6b68b9083a402fa30c5 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Mon, 11 Sep 2023 22:11:04 +0200
Subject: Update convnext

---
 text_recognizer/network/convnext/__init__.py    |  7 ---
 text_recognizer/network/convnext/attention.py   | 79 ------------------------
 text_recognizer/network/convnext/convnext.py    | 41 ++++++++-----
 text_recognizer/network/convnext/downsample.py  |  9 +--
 text_recognizer/network/convnext/residual.py    | 16 -----
 text_recognizer/network/convnext/transformer.py | 68 +++++++++++++++++++++
 6 files changed, 98 insertions(+), 122 deletions(-)
 delete mode 100644 text_recognizer/network/convnext/attention.py
 delete mode 100644 text_recognizer/network/convnext/residual.py
 create mode 100644 text_recognizer/network/convnext/transformer.py

(limited to 'text_recognizer')

diff --git a/text_recognizer/network/convnext/__init__.py b/text_recognizer/network/convnext/__init__.py
index dcff3fc..e69de29 100644
--- a/text_recognizer/network/convnext/__init__.py
+++ b/text_recognizer/network/convnext/__init__.py
@@ -1,7 +0,0 @@
-"""Convnext module."""
-from text_recognizer.network.convnext.attention import (
-    Attention,
-    FeedForward,
-    TransformerBlock,
-)
-from text_recognizer.network.convnext.convnext import ConvNext
diff --git a/text_recognizer/network/convnext/attention.py b/text_recognizer/network/convnext/attention.py
deleted file mode 100644
index 6bc9692..0000000
--- a/text_recognizer/network/convnext/attention.py
+++ /dev/null
@@ -1,79 +0,0 @@
-"""Convolution self attention block."""
-
-import torch.nn.functional as F
-from einops import rearrange
-from torch import Tensor, einsum, nn
-
-from text_recognizer.network.convnext.norm import LayerNorm
-from text_recognizer.network.convnext.residual import Residual
-
-
-def l2norm(t: Tensor) -> Tensor:
-    return F.normalize(t, dim=-1)
-
-
-class FeedForward(nn.Module):
-    def __init__(self, dim: int, mult: int = 4) -> None:
-        super().__init__()
-        inner_dim = int(dim * mult)
-        self.fn = Residual(
-            nn.Sequential(
-                LayerNorm(dim),
-                nn.Conv2d(dim, inner_dim, 1, bias=False),
-                nn.GELU(),
-                LayerNorm(inner_dim),
-                nn.Conv2d(inner_dim, dim, 1, bias=False),
-            )
-        )
-
-    def forward(self, x: Tensor) -> Tensor:
-        return self.fn(x)
-
-
-class Attention(nn.Module):
-    def __init__(
-        self, dim: int, heads: int = 4, dim_head: int = 64, scale: int = 8
-    ) -> None:
-        super().__init__()
-        self.scale = scale
-        self.heads = heads
-        inner_dim = heads * dim_head
-        self.norm = LayerNorm(dim)
-
-        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
-        self.to_out = nn.Conv2d(inner_dim, dim, 1, bias=False)
-
-    def forward(self, x: Tensor) -> Tensor:
-        h, w = x.shape[-2:]
-
-        residual = x.clone()
-
-        x = self.norm(x)
-
-        q, k, v = self.to_qkv(x).chunk(3, dim=1)
-        q, k, v = map(
-            lambda t: rearrange(t, "b (h c) ... -> b h (...) c", h=self.heads),
-            (q, k, v),
-        )
-
-        q, k = map(l2norm, (q, k))
-
-        sim = einsum("b h i d, b h j d -> b h i j", q, k) * self.scale
-        attn = sim.softmax(dim=-1)
-
-        out = einsum("b h i j, b h j d -> b h i d", attn, v)
-
-        out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
-        return self.to_out(out) + residual
-
-
-class TransformerBlock(nn.Module):
-    def __init__(self, attn: Attention, ff: FeedForward) -> None:
-        super().__init__()
-        self.attn = attn
-        self.ff = ff
-
-    def forward(self, x: Tensor) -> Tensor:
-        x = self.attn(x)
-        x = self.ff(x)
-        return x
diff --git a/text_recognizer/network/convnext/convnext.py b/text_recognizer/network/convnext/convnext.py
index 6acf059..8eea9df 100644
--- a/text_recognizer/network/convnext/convnext.py
+++ b/text_recognizer/network/convnext/convnext.py
@@ -1,11 +1,27 @@
 """ConvNext module."""
 from typing import Optional, Sequence
 
+import torch
 from torch import Tensor, nn
 
-from text_recognizer.network.convnext.attention import TransformerBlock
-from text_recognizer.network.convnext.downsample import Downsample
-from text_recognizer.network.convnext.norm import LayerNorm
+from .transformer import Transformer
+from .downsample import Downsample
+from .norm import LayerNorm
+
+
+class GRN(nn.Module):
+    def __init__(self, dim, eps=1e-5):
+        super().__init__()
+        self.eps = eps
+        self.gamma = nn.Parameter(torch.zeros(dim, 1, 1))
+        self.bias = nn.Parameter(torch.zeros(dim, 1, 1))
+
+    def forward(self, x):
+        spatial_l2_norm = x.norm(p=2, dim=(2, 3), keepdim=True)
+        feat_norm = spatial_l2_norm / spatial_l2_norm.mean(dim=-1, keepdim=True).clamp(
+            min=self.eps
+        )
+        return x * feat_norm * self.gamma + self.bias + x
 
 
 class ConvNextBlock(nn.Module):
@@ -13,14 +29,14 @@ class ConvNextBlock(nn.Module):
 
     def __init__(self, dim: int, dim_out: int, mult: int) -> None:
         super().__init__()
-        self.ds_conv = nn.Conv2d(
-            dim, dim, kernel_size=(7, 7), padding="same", groups=dim
-        )
+        inner_dim = mult * dim_out
+        self.ds_conv = nn.Conv2d(dim, dim, kernel_size=7, padding="same", groups=dim)
         self.net = nn.Sequential(
             LayerNorm(dim),
-            nn.Conv2d(dim, dim_out * mult, kernel_size=(3, 3), padding="same"),
+            nn.Conv2d(dim, inner_dim, kernel_size=3, stride=1, padding="same"),
             nn.GELU(),
-            nn.Conv2d(dim_out * mult, dim_out, kernel_size=(3, 3), padding="same"),
+            GRN(inner_dim),
+            nn.Conv2d(inner_dim, dim_out, kernel_size=3, stride=1, padding="same"),
         )
         self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
 
@@ -36,8 +52,7 @@ class ConvNext(nn.Module):
         dim: int = 16,
         dim_mults: Sequence[int] = (2, 4, 8),
         depths: Sequence[int] = (3, 3, 6),
-        downsampling_factors: Sequence[Sequence[int]] = ((2, 2), (2, 2), (2, 2)),
-        attn: Optional[TransformerBlock] = None,
+        attn: Optional[Transformer] = None,
     ) -> None:
         super().__init__()
         dims = (dim, *map(lambda m: m * dim, dim_mults))
@@ -51,11 +66,10 @@ class ConvNext(nn.Module):
             self.layers.append(
                 nn.ModuleList(
                     [
-                        ConvNextBlock(dim_in, dim_in, 2),
                         nn.ModuleList(
                             [ConvNextBlock(dim_in, dim_in, 2) for _ in range(depths[i])]
                         ),
-                        Downsample(dim_in, dim_out, downsampling_factors[i]),
+                        Downsample(dim_in, dim_out),
                     ]
                 )
             )
@@ -68,8 +82,7 @@ class ConvNext(nn.Module):
 
     def forward(self, x: Tensor) -> Tensor:
         x = self.stem(x)
-        for init_block, blocks, down in self.layers:
-            x = init_block(x)
+        for blocks, down in self.layers:
             for fn in blocks:
                 x = fn(x)
             x = down(x)
diff --git a/text_recognizer/network/convnext/downsample.py b/text_recognizer/network/convnext/downsample.py
index a8a0466..dcc14aa 100644
--- a/text_recognizer/network/convnext/downsample.py
+++ b/text_recognizer/network/convnext/downsample.py
@@ -1,6 +1,4 @@
 """Convnext downsample module."""
-from typing import Tuple
-
 from einops.layers.torch import Rearrange
 from torch import Tensor, nn
 
@@ -8,12 +6,11 @@ from torch import Tensor, nn
 class Downsample(nn.Module):
     """Downsamples feature maps by patches."""
 
-    def __init__(self, dim: int, dim_out: int, factors: Tuple[int, int]) -> None:
+    def __init__(self, dim: int, dim_out: int) -> None:
         super().__init__()
-        s1, s2 = factors
         self.fn = nn.Sequential(
-            Rearrange("b c (h s1) (w s2) -> b (c s1 s2) h w", s1=s1, s2=s2),
-            nn.Conv2d(dim * s1 * s2, dim_out, 1),
+            Rearrange("b c (h s1) (w s2) -> b (c s1 s2) h w", s1=2, s2=2),
+            nn.Conv2d(dim * 4, dim_out, 1),
        )
 
     def forward(self, x: Tensor) -> Tensor:
diff --git a/text_recognizer/network/convnext/residual.py b/text_recognizer/network/convnext/residual.py
deleted file mode 100644
index dfc2847..0000000
--- a/text_recognizer/network/convnext/residual.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""Generic residual layer."""
-from typing import Callable
-
-from torch import Tensor, nn
-
-
-class Residual(nn.Module):
-    """Residual layer."""
-
-    def __init__(self, fn: Callable) -> None:
-        super().__init__()
-        self.fn = fn
-
-    def forward(self, x: Tensor) -> Tensor:
-        """Applies residual fn."""
-        return self.fn(x) + x
diff --git a/text_recognizer/network/convnext/transformer.py b/text_recognizer/network/convnext/transformer.py
new file mode 100644
index 0000000..6c53c48
--- /dev/null
+++ b/text_recognizer/network/convnext/transformer.py
@@ -0,0 +1,68 @@
+"""Convolution self attention block."""
+
+from einops import rearrange
+from torch import Tensor, einsum, nn
+
+from text_recognizer.network.convnext.norm import LayerNorm
+
+
+class FeedForward(nn.Module):
+    def __init__(self, dim: int, mult: int = 4) -> None:
+        super().__init__()
+        inner_dim = int(dim * mult)
+        self.fn = nn.Sequential(
+            LayerNorm(dim),
+            nn.Conv2d(dim, inner_dim, 1, bias=False),
+            nn.GELU(),
+            LayerNorm(inner_dim),
+            nn.Conv2d(inner_dim, dim, 1, bias=False),
+        )
+
+    def forward(self, x: Tensor) -> Tensor:
+        return self.fn(x)
+
+
+class Attention(nn.Module):
+    def __init__(
+        self, dim: int, heads: int = 4, dim_head: int = 64, scale: int = 8
+    ) -> None:
+        super().__init__()
+        self.scale = scale
+        self.heads = heads
+        inner_dim = heads * dim_head
+        self.norm = LayerNorm(dim)
+
+        self.to_qkv = nn.Conv2d(dim, inner_dim * 3, 1, bias=False)
+        self.to_out = nn.Conv2d(inner_dim, dim, 1, bias=False)
+
+    def forward(self, x: Tensor) -> Tensor:
+        h, w = x.shape[-2:]
+
+        x = self.norm(x)
+
+        q, k, v = self.to_qkv(x).chunk(3, dim=1)
+        q, k, v = map(
+            lambda t: rearrange(t, "b (h c) ... -> b h (...) c", h=self.heads),
+            (q, k, v),
+        )
+
+        q = q * self.scale
+        sim = einsum("b h i d, b h j d -> b h i j", q, k)
+        attn = sim.softmax(dim=-1)
+
+        out = einsum("b h i j, b h j d -> b h i d", attn, v)
+
+        out = rearrange(out, "b h (x y) d -> b (h d) x y", x=h, y=w)
+        return self.to_out(out)
+
+
+class Transformer(nn.Module):
+    def __init__(self, attn: Attention, ff: FeedForward) -> None:
+        super().__init__()
+        self.attn = attn
+        self.ff = ff
+
+    def forward(self, x: Tensor) -> Tensor:
+        x = x + self.attn(x)
+        x = x + self.ff(x)
+        return x
--
cgit v1.2.3-70-g09d2
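
Usage note (not part of the commit): a minimal sketch of how the updated modules are expected to compose after this change. The ConvNext stem and the point where the optional attention module is applied are unchanged context not shown in this patch, so the single input channel and the final feature shape below are assumptions; the constructor signatures themselves match the code above.

    import torch

    from text_recognizer.network.convnext.convnext import ConvNext
    from text_recognizer.network.convnext.transformer import (
        Attention,
        FeedForward,
        Transformer,
    )

    # The deepest stage has dim * dim_mults[-1] = 16 * 8 = 128 channels, so the
    # attention block (assumed to act on the final feature map) is sized for 128.
    attn = Transformer(
        attn=Attention(dim=128, heads=4, dim_head=64, scale=8),
        ff=FeedForward(dim=128, mult=4),
    )
    encoder = ConvNext(dim=16, dim_mults=(2, 4, 8), depths=(3, 3, 6), attn=attn)

    # Assumption: a single-channel line image with sides divisible by 8, so the
    # three Downsample stages (each halving H and W) produce integer sizes.
    x = torch.randn(1, 1, 56, 224)
    features = encoder(x)  # 128 channels at H/8 x W/8, plus whatever the stem does
    print(features.shape)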