diff options
Diffstat (limited to 'src/text_recognizer/networks/transformer')
3 files changed, 2 insertions, 1 deletion
diff --git a/src/text_recognizer/networks/transformer/positional_encoding.py b/src/text_recognizer/networks/transformer/positional_encoding.py index a47141b..1ba5537 100644 --- a/src/text_recognizer/networks/transformer/positional_encoding.py +++ b/src/text_recognizer/networks/transformer/positional_encoding.py @@ -13,6 +13,7 @@ class PositionalEncoding(nn.Module): ) -> None: super().__init__() self.dropout = nn.Dropout(p=dropout_rate) + self.max_len = max_len pe = torch.zeros(max_len, hidden_dim) position = torch.arange(0, max_len).unsqueeze(1) diff --git a/src/text_recognizer/networks/transformer/sparse_transformer.py b/src/text_recognizer/networks/transformer/sparse_transformer.py deleted file mode 100644 index 8c391c8..0000000 --- a/src/text_recognizer/networks/transformer/sparse_transformer.py +++ /dev/null @@ -1 +0,0 @@ -"""Encoder and Decoder modules using spares activations.""" diff --git a/src/text_recognizer/networks/transformer/transformer.py b/src/text_recognizer/networks/transformer/transformer.py index 1c9c7dd..c6e943e 100644 --- a/src/text_recognizer/networks/transformer/transformer.py +++ b/src/text_recognizer/networks/transformer/transformer.py @@ -230,6 +230,7 @@ class Transformer(nn.Module): ) -> Tensor: """Forward pass through the transformer.""" if src.shape[0] != trg.shape[0]: + print(trg.shape) raise RuntimeError("The batch size of the src and trg must be the same.") if src.shape[2] != trg.shape[2]: raise RuntimeError( |