From 425af1bce8362efd97682a5042e76a60bfc28060 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Sun, 5 Jun 2022 21:18:56 +0200
Subject: Remove depth wise conv class

---
 text_recognizer/networks/conformer/conv.py            | 13 +++++++++----
 text_recognizer/networks/conformer/depth_wise_conv.py | 17 -----------------
 2 files changed, 9 insertions(+), 21 deletions(-)
 delete mode 100644 text_recognizer/networks/conformer/depth_wise_conv.py

diff --git a/text_recognizer/networks/conformer/conv.py b/text_recognizer/networks/conformer/conv.py
index f031dc7..ac13f5d 100644
--- a/text_recognizer/networks/conformer/conv.py
+++ b/text_recognizer/networks/conformer/conv.py
@@ -4,7 +4,6 @@
 from einops.layers.torch import Rearrange
 from torch import nn, Tensor
 
-from text_recognizer.networks.conformer.depth_wise_conv import DepthwiseConv1D
 from text_recognizer.networks.conformer.glu import GLU
 
 
@@ -21,12 +20,18 @@ class ConformerConv(nn.Module):
         self.layers = nn.Sequential(
             nn.LayerNorm(dim),
             Rearrange("b n c -> b c n"),
-            nn.Conv1D(dim, 2 * inner_dim, 1),
+            nn.Conv1d(dim, 2 * inner_dim, 1),
             GLU(dim=1),
-            DepthwiseConv1D(inner_dim, inner_dim, kernel_size),
+            nn.Conv1d(
+                in_channels=inner_dim,
+                out_channels=inner_dim,
+                kernel_size=kernel_size,
+                groups=inner_dim,
+                padding="same",
+            ),
             nn.BatchNorm1d(inner_dim),
             nn.Mish(inplace=True),
-            nn.Conv1D(inner_dim, dim, 1),
+            nn.Conv1d(inner_dim, dim, 1),
             Rearrange("b c n -> b n c"),
             nn.Dropout(dropout),
         )
diff --git a/text_recognizer/networks/conformer/depth_wise_conv.py b/text_recognizer/networks/conformer/depth_wise_conv.py
deleted file mode 100644
index 1dbd0b8..0000000
--- a/text_recognizer/networks/conformer/depth_wise_conv.py
+++ /dev/null
@@ -1,17 +0,0 @@
-"""Depthwise 1D convolution."""
-from torch import nn, Tensor
-
-
-class DepthwiseConv1D(nn.Module):
-    def __init__(self, in_channels: int, out_channels: int, kernel_size: int) -> None:
-        super().__init__()
-        self.conv = nn.Conv1d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            groups=in_channels,
-            padding="same",
-        )
-
-    def forward(self, x: Tensor) -> Tensor:
-        return self.conv(x)
-- 
cgit v1.2.3-70-g09d2
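
Editor's note: the sketch below is not part of the commit; it only illustrates why the inlined nn.Conv1d is a drop-in replacement for the deleted DepthwiseConv1D wrapper. Setting groups equal to the number of input channels gives each channel its own kernel (a depthwise convolution), and padding="same" (available in PyTorch 1.9+) preserves the sequence length. The inner_dim and kernel_size values are assumed for illustration, not taken from the repository.

# Minimal sketch, assuming PyTorch >= 1.9 (needed for padding="same").
# inner_dim and kernel_size are illustrative values, not from the repo.
import torch
from torch import nn

inner_dim, kernel_size = 64, 31

# groups=in_channels means one kernel per channel, i.e. a depthwise
# convolution -- the same layer the removed DepthwiseConv1D class wrapped.
depthwise = nn.Conv1d(
    in_channels=inner_dim,
    out_channels=inner_dim,
    kernel_size=kernel_size,
    groups=inner_dim,
    padding="same",
)

x = torch.randn(2, inner_dim, 100)  # (batch, channels, sequence)
assert depthwise(x).shape == x.shape  # "same" padding keeps the length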