author     Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-09-30 23:59:05 +0200
committer  Gustaf Rydholm <gustaf.rydholm@gmail.com>  2021-09-30 23:59:05 +0200
commit     7275523f225703e1e4e3b28582703150afc9af29 (patch)
tree       88ce2c05eca25f897fc26c62c65fbd30d6d9020f /notebooks
parent     dc3110e567f8ac3ad27048d3f346abac623658c0 (diff)
Update to effnet notebook
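The diff below collapses the torchinfo model summary to top-level modules via depth=1 and switches the Hydra config to the word-piece setup (LabelSmoothingLoss, WordPieceMapping, ReduceLROnPlateau). A minimal sketch of the updated summary call, assuming `net` and `cfg` are instantiated as earlier in the notebook and that `summary` comes from torchinfo:

    from torchinfo import summary  # assumed source of `summary` in the notebook

    # cfg.summary holds the input shapes: [[1, 1, 56, 1024], [1, 89]]
    # (one grayscale line image and one target token sequence).
    input_sizes = [list(shape) for shape in cfg.summary]

    # depth=1 keeps only the top-level children of ConvTransformer
    # (EfficientNet encoder, projection Sequential, Embedding, Decoder,
    # output Linear) instead of the full per-block breakdown.
    summary(net, input_sizes, device="cpu", depth=1)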
Diffstat (limited to 'notebooks')
-rw-r--r--  notebooks/04-efficientnet-transformer.ipynb  124
1 file changed, 20 insertions, 104 deletions
diff --git a/notebooks/04-efficientnet-transformer.ipynb b/notebooks/04-efficientnet-transformer.ipynb
index 3ee1cb2..1f0bd33 100644
--- a/notebooks/04-efficientnet-transformer.ipynb
+++ b/notebooks/04-efficientnet-transformer.ipynb
@@ -50,7 +50,7 @@
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 4,
"id": "e52ecb01-c975-4e55-925d-1182c7aea473",
"metadata": {},
"outputs": [],
@@ -61,17 +61,17 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 5,
"id": "f939aa37-7b1d-45cc-885c-323c4540bda1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
- "{'defaults': [{'override /mapping': None}, {'override /criterion': None}, {'override /datamodule': None}, {'override /network': None}, {'override /model': None}, {'override /lr_schedulers': None}, {'override /optimizers': None}], 'criterion': {'_target_': 'torch.nn.CrossEntropyLoss', 'ignore_index': 3}, 'mapping': {'_target_': 'text_recognizer.data.emnist_mapping.EmnistMapping'}, 'callbacks': {'stochastic_weight_averaging': {'_target_': 'pytorch_lightning.callbacks.StochasticWeightAveraging', 'swa_epoch_start': 0.8, 'swa_lrs': 0.05, 'annealing_epochs': 10, 'annealing_strategy': 'cos', 'device': None}}, 'optimizers': {'madgrad': {'_target_': 'madgrad.MADGRAD', 'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0, 'eps': 1e-06, 'parameters': 'network'}}, 'lr_schedulers': {'network': {'_target_': 'torch.optim.lr_scheduler.CosineAnnealingLR', 'T_max': 1024, 'eta_min': 4.5e-06, 'last_epoch': -1, 'interval': 'epoch', 'monitor': 'val/loss'}}, 'datamodule': {'_target_': 'text_recognizer.data.iam_lines.IAMLines', 'batch_size': 24, 'num_workers': 12, 'train_fraction': 0.8, 'augment': True, 'pin_memory': False}, 'network': {'_target_': 'text_recognizer.networks.conv_transformer.ConvTransformer', 'input_dims': [1, 56, 1024], 'hidden_dim': 128, 'encoder_dim': 1280, 'dropout_rate': 0.2, 'num_classes': 58, 'pad_index': 3, 'encoder': {'_target_': 'text_recognizer.networks.encoders.efficientnet.EfficientNet', 'arch': 'b0', 'out_channels': 1280, 'stochastic_dropout_rate': 0.2, 'bn_momentum': 0.99, 'bn_eps': 0.001}, 'decoder': {'_target_': 'text_recognizer.networks.transformer.Decoder', 'dim': 128, 'depth': 3, 'num_heads': 4, 'attn_fn': 'text_recognizer.networks.transformer.attention.Attention', 'attn_kwargs': {'dim_head': 32, 'dropout_rate': 0.2}, 'norm_fn': 'torch.nn.LayerNorm', 'ff_fn': 'text_recognizer.networks.transformer.mlp.FeedForward', 'ff_kwargs': {'dim_out': None, 'expansion_factor': 4, 'glu': True, 'dropout_rate': 0.2}, 'cross_attend': True, 'pre_norm': True, 'rotary_emb': None}}, 'model': {'_target_': 'text_recognizer.models.transformer.TransformerLitModel', 'max_output_len': 89, 'start_token': '<s>', 'end_token': '<e>', 'pad_token': '<p>'}, 'trainer': {'_target_': 'pytorch_lightning.Trainer', 'stochastic_weight_avg': False, 'auto_scale_batch_size': 'binsearch', 'auto_lr_find': False, 'gradient_clip_val': 0, 'fast_dev_run': False, 'gpus': 1, 'precision': 16, 'max_epochs': 1024, 'terminate_on_nan': True, 'weights_summary': 'top', 'limit_train_batches': 1.0, 'limit_val_batches': 1.0, 'limit_test_batches': 1.0, 'resume_from_checkpoint': None, 'accumulate_grad_batches': 4, 'overfit_batches': 0.0}, 'summary': [[1, 1, 56, 1024], [1, 89]]}"
+ "{'defaults': [{'override /mapping': None}, {'override /criterion': None}, {'override /datamodule': None}, {'override /network': None}, {'override /model': None}, {'override /lr_schedulers': None}, {'override /optimizers': None}], 'criterion': {'_target_': 'text_recognizer.criterions.label_smoothing.LabelSmoothingLoss', 'smoothing': 0.1, 'ignore_index': 1000}, 'mapping': {'_target_': 'text_recognizer.data.word_piece_mapping.WordPieceMapping', 'num_features': 1000, 'tokens': 'iamdb_1kwp_tokens_1000.txt', 'lexicon': 'iamdb_1kwp_lex_1000.txt', 'data_dir': None, 'use_words': False, 'prepend_wordsep': False, 'special_tokens': ['<s>', '<e>', '<p>']}, 'callbacks': {'stochastic_weight_averaging': {'_target_': 'pytorch_lightning.callbacks.StochasticWeightAveraging', 'swa_epoch_start': 0.8, 'swa_lrs': 0.05, 'annealing_epochs': 10, 'annealing_strategy': 'cos', 'device': None}}, 'optimizers': {'madgrad': {'_target_': 'madgrad.MADGRAD', 'lr': 0.0001, 'momentum': 0.9, 'weight_decay': 0, 'eps': 1e-06, 'parameters': 'network'}}, 'lr_schedulers': {'network': {'_target_': 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'mode': 'min', 'factor': 0.1, 'patience': 10, 'threshold': 0.0001, 'threshold_mode': 'rel', 'cooldown': 0, 'min_lr': 1e-07, 'eps': 1e-08, 'interval': 'epoch', 'monitor': 'val/loss'}}, 'datamodule': {'_target_': 'text_recognizer.data.iam_lines.IAMLines', 'batch_size': 16, 'num_workers': 12, 'train_fraction': 0.8, 'augment': True, 'pin_memory': False}, 'network': {'_target_': 'text_recognizer.networks.conv_transformer.ConvTransformer', 'input_dims': [1, 56, 1024], 'hidden_dim': 128, 'encoder_dim': 1280, 'dropout_rate': 0.2, 'num_classes': 1006, 'pad_index': 1000, 'encoder': {'_target_': 'text_recognizer.networks.encoders.efficientnet.EfficientNet', 'arch': 'b0', 'out_channels': 1280, 'stochastic_dropout_rate': 0.2, 'bn_momentum': 0.99, 'bn_eps': 0.001}, 'decoder': {'_target_': 'text_recognizer.networks.transformer.Decoder', 'dim': 128, 'depth': 3, 'num_heads': 4, 'attn_fn': 'text_recognizer.networks.transformer.attention.Attention', 'attn_kwargs': {'dim_head': 32, 'dropout_rate': 0.2}, 'norm_fn': 'torch.nn.LayerNorm', 'ff_fn': 'text_recognizer.networks.transformer.mlp.FeedForward', 'ff_kwargs': {'dim_out': None, 'expansion_factor': 4, 'glu': True, 'dropout_rate': 0.2}, 'cross_attend': True, 'pre_norm': True, 'rotary_emb': None}}, 'model': {'_target_': 'text_recognizer.models.transformer.TransformerLitModel', 'max_output_len': 89, 'start_token': '<s>', 'end_token': '<e>', 'pad_token': '<p>'}, 'trainer': {'_target_': 'pytorch_lightning.Trainer', 'stochastic_weight_avg': True, 'auto_scale_batch_size': 'binsearch', 'auto_lr_find': False, 'gradient_clip_val': 0, 'fast_dev_run': False, 'gpus': 1, 'precision': 16, 'max_epochs': 1024, 'terminate_on_nan': True, 'weights_summary': None, 'limit_train_batches': 1.0, 'limit_val_batches': 1.0, 'limit_test_batches': 1.0, 'resume_from_checkpoint': None, 'accumulate_grad_batches': 4, 'overfit_batches': 0.0}, 'summary': [[1, 1, 56, 1024], [1, 89]]}"
]
},
- "execution_count": 8,
+ "execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -82,49 +82,7 @@
},
{
"cell_type": "code",
- "execution_count": 23,
- "id": "2fdd47e6-aac0-47e9-b224-f46c1f015f17",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "[[1, 1, 56, 1024], [1, 89]]"
- ]
- },
- "execution_count": 23,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "list(map(lambda x: list(x), cfg.summary))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "id": "92405411-f944-4519-9a6d-13b0761c2de6",
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "list"
- ]
- },
- "execution_count": 17,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "type(list(cfg.summary))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
+ "execution_count": 7,
"id": "aaeab329-aeb0-4a1b-aa35-5a2aab81b1d0",
"metadata": {},
"outputs": [],
@@ -134,7 +92,7 @@
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 8,
"id": "618b997c-e6a6-4487-b70c-9d260cb556d3",
"metadata": {},
"outputs": [],
@@ -144,7 +102,7 @@
},
{
"cell_type": "code",
- "execution_count": 24,
+ "execution_count": 14,
"id": "25759b7b-8deb-4163-b75d-a1357c9fe88f",
"metadata": {},
"outputs": [
@@ -155,74 +113,32 @@
"Layer (type:depth-idx) Output Shape Param #\n",
"====================================================================================================\n",
"ConvTransformer -- --\n",
- "├─EfficientNet: 1 -- --\n",
- "│ └─ModuleList: 2-1 -- --\n",
- "├─Decoder: 1 -- --\n",
- "│ └─ModuleList: 2-2 -- --\n",
- "│ │ └─ModuleList: 3-1 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-2 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-3 -- 198,016\n",
- "│ │ └─ModuleList: 3-4 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-5 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-6 -- 198,016\n",
- "│ │ └─ModuleList: 3-7 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-8 -- 2,097,536\n",
- "│ │ └─ModuleList: 3-9 -- 198,016\n",
- "├─EfficientNet: 1-1 [1, 1280, 1, 32] --\n",
- "│ └─Sequential: 2-3 [1, 32, 28, 512] --\n",
- "│ │ └─ZeroPad2d: 3-10 [1, 1, 57, 1025] --\n",
- "│ │ └─Conv2d: 3-11 [1, 32, 28, 512] 288\n",
- "│ │ └─BatchNorm2d: 3-12 [1, 32, 28, 512] 64\n",
- "│ │ └─Mish: 3-13 [1, 32, 28, 512] --\n",
- "│ └─ModuleList: 2-1 -- --\n",
- "│ │ └─MBConvBlock: 3-14 [1, 16, 28, 512] 1,448\n",
- "│ │ └─MBConvBlock: 3-15 [1, 24, 14, 256] 9,864\n",
- "│ │ └─MBConvBlock: 3-16 [1, 24, 14, 256] 19,380\n",
- "│ │ └─MBConvBlock: 3-17 [1, 40, 7, 128] 24,020\n",
- "│ │ └─MBConvBlock: 3-18 [1, 40, 7, 128] 55,340\n",
- "│ │ └─MBConvBlock: 3-19 [1, 80, 3, 64] 61,180\n",
- "│ │ └─MBConvBlock: 3-20 [1, 80, 3, 64] 199,000\n",
- "│ │ └─MBConvBlock: 3-21 [1, 80, 3, 64] 199,000\n",
- "│ │ └─MBConvBlock: 3-22 [1, 112, 3, 64] 222,104\n",
- "│ │ └─MBConvBlock: 3-23 [1, 112, 3, 64] 396,872\n",
- "│ │ └─MBConvBlock: 3-24 [1, 112, 3, 64] 396,872\n",
- "│ │ └─MBConvBlock: 3-25 [1, 192, 1, 32] 450,792\n",
- "│ │ └─MBConvBlock: 3-26 [1, 192, 1, 32] 1,141,152\n",
- "│ │ └─MBConvBlock: 3-27 [1, 192, 1, 32] 1,141,152\n",
- "│ │ └─MBConvBlock: 3-28 [1, 192, 1, 32] 1,141,152\n",
- "│ │ └─MBConvBlock: 3-29 [1, 320, 1, 32] 1,270,432\n",
- "│ └─Sequential: 2-4 [1, 1280, 1, 32] --\n",
- "│ │ └─Conv2d: 3-30 [1, 1280, 1, 32] 409,600\n",
- "│ │ └─BatchNorm2d: 3-31 [1, 1280, 1, 32] 2,560\n",
- "├─Sequential: 1-2 [1, 128, 32] --\n",
- "│ └─Conv2d: 2-5 [1, 128, 1, 32] 163,968\n",
- "│ └─PositionalEncoding2D: 2-6 [1, 128, 1, 32] --\n",
- "│ └─Flatten: 2-7 [1, 128, 32] --\n",
- "├─Embedding: 1-3 [1, 89, 128] 7,424\n",
+ "├─EfficientNet: 1-1 [1, 1280, 1, 32] 7,142,272\n",
+ "├─Sequential: 1-2 [1, 128, 32] 163,968\n",
+ "├─Embedding: 1-3 [1, 89, 128] 128,768\n",
"├─PositionalEncoding: 1-4 [1, 89, 128] --\n",
- "│ └─Dropout: 2-8 [1, 89, 128] --\n",
- "├─Decoder: 1-5 [1, 89, 128] --\n",
- "├─Linear: 1-6 [1, 89, 58] 7,482\n",
+ "├─Decoder: 1-5 [1, 89, 128] 13,179,264\n",
+ "├─Linear: 1-6 [1, 89, 1006] 129,774\n",
"====================================================================================================\n",
- "Total params: 20,500,410\n",
- "Trainable params: 20,500,410\n",
+ "Total params: 20,744,046\n",
+ "Trainable params: 20,744,046\n",
"Non-trainable params: 0\n",
- "Total mult-adds (M): 714.87\n",
+ "Total mult-adds (M): 715.11\n",
"====================================================================================================\n",
"Input size (MB): 0.23\n",
- "Forward/backward pass size (MB): 184.29\n",
- "Params size (MB): 82.00\n",
- "Estimated Total Size (MB): 266.52\n",
+ "Forward/backward pass size (MB): 184.97\n",
+ "Params size (MB): 82.98\n",
+ "Estimated Total Size (MB): 268.17\n",
"===================================================================================================="
]
},
- "execution_count": 24,
+ "execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
- "summary(net, list(map(lambda x: list(x), cfg.summary)), device=\"cpu\")"
+ "summary(net, list(map(lambda x: list(x), cfg.summary)), device=\"cpu\", depth=1)"
]
},
{