Diffstat (limited to 'notebooks/04-conv-transformer.ipynb')
-rw-r--r--   notebooks/04-conv-transformer.ipynb | 234
1 file changed, 0 insertions(+), 234 deletions(-)
diff --git a/notebooks/04-conv-transformer.ipynb b/notebooks/04-conv-transformer.ipynb
deleted file mode 100644
index 0d8b370..0000000
--- a/notebooks/04-conv-transformer.ipynb
+++ /dev/null
@@ -1,234 +0,0 @@

Contents of the deleted notebook:

In [1]:
import os
os.environ['CUDA_VISIBLE_DEVICES'] = ''  # hide all GPUs so the notebook runs on CPU
import random

%matplotlib inline
import matplotlib.pyplot as plt

import numpy as np
from omegaconf import OmegaConf
import torch
%load_ext autoreload
%autoreload 2

from importlib.util import find_spec
if find_spec("text_recognizer") is None:
    import sys
    sys.path.append('..')

In [2]:
from hydra import compose, initialize
from omegaconf import OmegaConf
from hydra.utils import instantiate

In [5]:
path = "../training/conf/network/conv_transformer.yaml"

In [6]:
with open(path, "rb") as f:
    cfg = OmegaConf.load(f)

In [7]:
cfg

Out[7]:
{'_target_': 'text_recognizer.networks.ConvTransformer',
 'encoder': {'_target_': 'text_recognizer.networks.image_encoder.ImageEncoder',
  'encoder': {'_target_': 'text_recognizer.networks.convnext.ConvNext',
   'dim': 16, 'dim_mults': [2, 4, 8], 'depths': [3, 3, 6],
   'downsampling_factors': [[2, 2], [2, 2], [2, 2]]},
  'pixel_embedding': {'_target_': 'text_recognizer.networks.transformer.embeddings.axial.AxialPositionalEmbeddingImage',
   'dim': 128, 'axial_shape': [7, 128], 'axial_dims': [64, 64]}},
 'decoder': {'_target_': 'text_recognizer.networks.text_decoder.TextDecoder',
  'hidden_dim': 128, 'num_classes': 58, 'pad_index': 3,
  'decoder': {'_target_': 'text_recognizer.networks.transformer.Decoder',
   'dim': 128, 'depth': 10,
   'block': {'_target_': 'text_recognizer.networks.transformer.decoder_block.DecoderBlock',
    'self_attn': {'_target_': 'text_recognizer.networks.transformer.Attention',
     'dim': 128, 'num_heads': 12, 'dim_head': 64, 'dropout_rate': 0.2, 'causal': True},
    'cross_attn': {'_target_': 'text_recognizer.networks.transformer.Attention',
     'dim': 128, 'num_heads': 12, 'dim_head': 64, 'dropout_rate': 0.2, 'causal': False},
    'norm': {'_target_': 'text_recognizer.networks.transformer.RMSNorm', 'dim': 128},
    'ff': {'_target_': 'text_recognizer.networks.transformer.FeedForward',
     'dim': 128, 'dim_out': None, 'expansion_factor': 2, 'glu': True, 'dropout_rate': 0.2}},
   'rotary_embedding': {'_target_': 'text_recognizer.networks.transformer.RotaryEmbedding', 'dim': 64}},
  'token_pos_embedding': {'_target_': 'text_recognizer.networks.transformer.embeddings.fourier.PositionalEncoding',
   'dim': 128, 'dropout_rate': 0.1, 'max_len': 89}}}
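For readers unfamiliar with Hydra, `instantiate` (used in the next cell) walks a config like the one above and constructs every mapping that carries a `_target_` key, recursively building the nested encoder/decoder objects. A minimal, self-contained sketch of the same mechanism, using torch.nn.Linear as a stand-in target instead of the text_recognizer classes:

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Stand-in config: same `_target_` convention as the ConvTransformer config above,
# but pointing at a class that needs no extra dependencies.
cfg_demo = OmegaConf.create(
    {"_target_": "torch.nn.Linear", "in_features": 128, "out_features": 58}
)
layer = instantiate(cfg_demo)  # equivalent to torch.nn.Linear(in_features=128, out_features=58)
print(layer)                   # Linear(in_features=128, out_features=58, bias=True)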
"aaeab329-aeb0-4a1b-aa35-5a2aab81b1d0", - "metadata": { - "scrolled": false - }, - "outputs": [], - "source": [ - "net = instantiate(cfg)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "id": "618b997c-e6a6-4487-b70c-9d260cb556d3", - "metadata": {}, - "outputs": [], - "source": [ - "from torchinfo import summary" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "id": "7daf1f49", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "==============================================================================================================\n", - "Layer (type:depth-idx) Output Shape Param #\n", - "==============================================================================================================\n", - "ConvTransformer [1, 58, 89] --\n", - "├─ImageEncoder: 1-1 [1, 896, 128] --\n", - "│ └─ConvNext: 2-1 [1, 128, 7, 128] --\n", - "│ │ └─Conv2d: 3-1 [1, 16, 56, 1024] 800\n", - "│ │ └─ModuleList: 3-2 -- --\n", - "│ │ │ └─ModuleList: 4-1 -- 42,400\n", - "│ │ │ └─ModuleList: 4-2 -- 162,624\n", - "│ │ │ └─ModuleList: 4-3 -- 1,089,280\n", - "│ │ └─Identity: 3-3 [1, 128, 7, 128] --\n", - "│ │ └─LayerNorm: 3-4 [1, 128, 7, 128] 128\n", - "│ └─AxialPositionalEmbeddingImage: 2-2 [1, 128, 7, 128] --\n", - "│ │ └─AxialPositionalEmbedding: 3-5 [1, 896, 128] 8,640\n", - "├─TextDecoder: 1-2 [1, 58, 89] --\n", - "│ └─Embedding: 2-3 [1, 89, 128] 7,424\n", - "│ └─PositionalEncoding: 2-4 [1, 89, 128] --\n", - "│ │ └─Dropout: 3-6 [1, 89, 128] --\n", - "│ └─Decoder: 2-5 [1, 89, 128] --\n", - "│ │ └─ModuleList: 3-7 -- --\n", - "│ │ │ └─DecoderBlock: 4-4 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-5 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-6 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-7 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-8 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-9 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-10 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-11 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-12 [1, 89, 128] 525,568\n", - "│ │ │ └─DecoderBlock: 4-13 [1, 89, 128] 525,568\n", - "│ │ └─LayerNorm: 3-8 [1, 89, 128] 256\n", - "│ └─Linear: 2-6 [1, 89, 58] 7,482\n", - "==============================================================================================================\n", - "Total params: 6,574,714\n", - "Trainable params: 6,574,714\n", - "Non-trainable params: 0\n", - "Total mult-adds (G): 8.45\n", - "==============================================================================================================\n", - "Input size (MB): 0.23\n", - "Forward/backward pass size (MB): 330.38\n", - "Params size (MB): 26.30\n", - "Estimated Total Size (MB): 356.91\n", - "==============================================================================================================" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "summary(net, ((1, 1, 56, 1024), (1, 89)), device=\"cpu\", depth=4)" - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "id": "25759b7b-8deb-4163-b75d-a1357c9fe88f", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "ename": "RuntimeError", - "evalue": "Failed to run torchinfo. See above stack traces for more details. 
In [22]:
summary(net, ((1, 1, 576, 640), (1, 682)), device="cpu", depth=4)

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File ~/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torchinfo/torchinfo.py:290, in forward_pass(model, x, batch_dim, cache_forward_pass, device, mode, **kwargs)
    289 if isinstance(x, (list, tuple)):
--> 290     _ = model.to(device)(*x, **kwargs)
    291 elif isinstance(x, dict):

File ~/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1148, in Module._call_impl(self, *input, **kwargs)
   1146 input = bw_hook.setup_input_hook(input)
-> 1148 result = forward_call(*input, **kwargs)
   1149 if _global_forward_hooks or self._forward_hooks:

File ~/projects/text-recognizer/text_recognizer/networks/conv_transformer.py:132, in ConvTransformer.forward(self, x, context)
    116 """Encodes images into word piece logits.
    117
    118 Args:
   (...)
    130     Tensor: Sequence of logits.
    131 """
--> 132 z = self.encode(x)
    133 logits = self.decode(z, context)

File ~/projects/text-recognizer/text_recognizer/networks/conv_transformer.py:82, in ConvTransformer.encode(self, x)
     81 z = self.conv(z)
---> 82 z += self.pixel_embedding(z)
     83 z = z.flatten(start_dim=2)

File ~/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torch/nn/modules/module.py:1148, in Module._call_impl(self, *input, **kwargs)
   1146 input = bw_hook.setup_input_hook(input)
-> 1148 result = forward_call(*input, **kwargs)
   1149 if _global_forward_hooks or self._forward_hooks:

File ~/projects/text-recognizer/text_recognizer/networks/transformer/embeddings/axial.py:40, in AxialPositionalEmbedding.forward(self, x)
     39 def forward(self, x):
---> 40     b, t, _ = x.shape
     41     assert (
     42         t <= self.max_seq_len
     43     ), f"Sequence length ({t}) must be less than the maximum sequence length allowed ({self.max_seq_len})"

ValueError: too many values to unpack (expected 3)

The above exception was the direct cause of the following exception:

RuntimeError                              Traceback (most recent call last)
Input In [22], in <cell line: 1>()
----> 1 summary(net, ((1, 1, 576, 640), (1, 682)), device="cpu", depth=4)

File ~/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torchinfo/torchinfo.py:218, in summary(model, input_size, input_data, batch_dim, cache_forward_pass, col_names, col_width, depth, device, dtypes, mode, row_settings, verbose, **kwargs)
    211 validate_user_params(
    212     input_data, input_size, columns, col_width, device, dtypes, verbose
    213 )
    215 x, correct_input_size = process_input(
    216     input_data, input_size, batch_dim, device, dtypes
    217 )
--> 218 summary_list = forward_pass(
    219     model, x, batch_dim, cache_forward_pass, device, model_mode, **kwargs
    220 )
    221 formatting = FormattingOptions(depth, verbose, columns, col_width, rows)
    222 results = ModelStatistics(
    223     summary_list, correct_input_size, get_total_memory_used(x), formatting
    224 )

File ~/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torchinfo/torchinfo.py:299, in forward_pass(model, x, batch_dim, cache_forward_pass, device, mode, **kwargs)
    297 except Exception as e:
    298     executed_layers = [layer for layer in summary_list if layer.executed]
--> 299     raise RuntimeError(
    300         "Failed to run torchinfo. See above stack traces for more details. "
    301         f"Executed layers up to: {executed_layers}"
    302     ) from e
    303 finally:
    304     if hooks:

RuntimeError: Failed to run torchinfo. See above stack traces for more details. Executed layers up to: [EfficientNet: 1, Sequential: 2, ZeroPad2d: 3, Conv2d: 3, BatchNorm2d: 3, Mish: 3, MBConvBlock: 3, Depthwise: 4, Sequential: 5, Conv2d: 6, BatchNorm2d: 6, Mish: 6, SqueezeAndExcite: 4, Sequential: 5, Conv2d: 6, Mish: 6, Conv2d: 6, Pointwise: 4, Sequential: 5, Conv2d: 6, BatchNorm2d: 6, MBConvBlock: 3, InvertedBottleneck: 4, Sequential: 5, Conv2d: 6, BatchNorm2d: 6, Mish: 6, Depthwise: 4, Sequential: 5, Conv2d: 6, BatchNorm2d: 6, Mish: 6, SqueezeAndExcite: 4, Sequential: 5, Conv2d: 6, Mish: 6, Conv2d: 6, Pointwise: 4, Sequential: 5, Conv2d: 6, BatchNorm2d: 6, ... (the same MBConvBlock/InvertedBottleneck block repeats for the remaining stages) ..., Sequential: 2, Conv2d: 3, BatchNorm2d: 3, Dropout: 3, Conv2d: 1]
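The failure is a shape mismatch rather than a torchinfo problem: the 576 x 640 image and context length 682 do not fit this config, and the positional embedding ends up receiving a 4-D feature map it cannot unpack as (batch, sequence, dim). The executed-layer list also shows an EfficientNet backbone rather than the ConvNext encoder configured above, suggesting `net` in this cell was left over from an earlier configuration. A small pre-flight check along these lines could catch the size mismatch before calling summary; the helper below is hypothetical and only assumes the field names from the YAML shown earlier:

def check_summary_shapes(cfg, image_hw, context_len):
    """Hypothetical sanity check for the (image, context) sizes passed to summary()."""
    h, w = image_hw
    # Walk the configured downsampling factors to get the encoder's output grid.
    for fh, fw in cfg.encoder.encoder.downsampling_factors:
        h, w = h // fh, w // fw
    axial_h, axial_w = cfg.encoder.pixel_embedding.axial_shape
    assert (h, w) == (axial_h, axial_w), f"feature map {(h, w)} != axial_shape {(axial_h, axial_w)}"
    max_len = cfg.decoder.token_pos_embedding.max_len
    assert context_len <= max_len, f"context length {context_len} > max_len {max_len}"

check_summary_shapes(cfg, (56, 1024), 89)     # passes for the working call above
# check_summary_shapes(cfg, (576, 640), 682)  # fails: 72 x 80 != (7, 128) and 682 > 89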
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.4" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} |