Diffstat (limited to 'notebooks/01-look-at-emnist.ipynb')
-rw-r--r-- | notebooks/01-look-at-emnist.ipynb | 33
1 file changed, 20 insertions, 13 deletions
diff --git a/notebooks/01-look-at-emnist.ipynb b/notebooks/01-look-at-emnist.ipynb
index a8b361b..8c5d54e 100644
--- a/notebooks/01-look-at-emnist.ipynb
+++ b/notebooks/01-look-at-emnist.ipynb
@@ -4,7 +4,16 @@
    "cell_type": "code",
    "execution_count": 1,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/aktersnurra/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torchvision/io/image.py:13: UserWarning: Failed to load image Python extension: /home/aktersnurra/.cache/pypoetry/virtualenvs/text-recognizer-ejNaVa9M-py3.9/lib/python3.9/site-packages/torchvision/image.so: undefined symbol: _ZNK3c1010TensorImpl36is_contiguous_nondefault_policy_implENS_12MemoryFormatE\n",
+      " warn(f\"Failed to load image Python extension: {e}\")\n"
+     ]
+    }
+   ],
    "source": [
     "%load_ext autoreload\n",
     "%autoreload 2\n",
@@ -27,17 +36,15 @@
    "metadata": {},
    "outputs": [
     {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "EMNIST Dataset\n",
-      "Num classes: 83\n",
-      "Mapping: ['<b>', '<s>', '</s>', '<p>', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', ' ', '!', '\"', '#', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '?']\n",
-      "Dims: (1, 28, 28)\n",
-      "Train/val/test sizes: 260276, 65070, 54028\n",
-      "Batch x stats: (torch.Size([128, 1, 28, 28]), torch.float32, tensor(0.), tensor(0.1673), tensor(0.3277), tensor(1.))\n",
-      "Batch y stats: (torch.Size([128]), torch.int64, tensor(4), tensor(65))\n",
-      "\n"
+     "ename": "TypeError",
+     "evalue": "__init__() missing 1 required positional argument: 'tokenizer'",
+     "output_type": "error",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
+      "Cell \u001b[0;32mIn [2], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m data \u001b[38;5;241m=\u001b[39m EMNIST()\n\u001b[1;32m 2\u001b[0m data\u001b[38;5;241m.\u001b[39mprepare_data()\n\u001b[1;32m 3\u001b[0m data\u001b[38;5;241m.\u001b[39msetup()\n",
+      "File \u001b[0;32m~/projects/text-recognizer/text_recognizer/data/emnist.py:33\u001b[0m, in \u001b[0;36mEMNIST.__init__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 33\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 34\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdims \u001b[38;5;241m=\u001b[39m (\u001b[38;5;241m1\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtokenizer\u001b[38;5;241m.\u001b[39minput_size)\n",
+      "\u001b[0;31mTypeError\u001b[0m: __init__() missing 1 required positional argument: 'tokenizer'"
      ]
     }
    ],
@@ -120,7 +127,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.9.7"
+   "version": "3.9.4"
   }
  },
  "nbformat": 4,
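The second hunk records that the `data = EMNIST()` cell now fails: the traceback at text_recognizer/data/emnist.py:33 shows `EMNIST.__init__` calling `super().__init__()` while the parent data module requires a `tokenizer` argument. The sketch below only illustrates the shape of that constraint; it is a self-contained stand-in, not the repository's actual classes, and `StubTokenizer`/`StubBaseDataModule` are assumed names used for illustration.

# Hypothetical sketch of what the TypeError implies: the base data module's
# __init__ takes a tokenizer, so EMNIST has to accept one and forward it
# before it can read self.tokenizer.input_size. All names are stand-ins.
from typing import Tuple


class StubTokenizer:
    """Stand-in for whatever tokenizer the data modules expect."""

    input_size: Tuple[int, int] = (28, 28)


class StubBaseDataModule:
    """Stand-in for the base data module that now requires a tokenizer."""

    def __init__(self, tokenizer: StubTokenizer) -> None:
        self.tokenizer = tokenizer


class EMNIST(StubBaseDataModule):
    def __init__(self, tokenizer: StubTokenizer) -> None:
        super().__init__(tokenizer)  # forwarding the tokenizer avoids the TypeError
        self.dims = (1, *self.tokenizer.input_size)


data = EMNIST(tokenizer=StubTokenizer())
print(data.dims)  # (1, 28, 28), matching the dims the removed stdout reported

Run as written, this prints (1, 28, 28); in the notebook the analogous fix would be to construct whatever tokenizer the project actually defines and pass it to `EMNIST` before calling `prepare_data()` and `setup()`.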