From 8291a87c64f9a5f18caec82201bea15579b49730 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Sun, 10 Oct 2021 18:04:50 +0200
Subject: Move data utils to submodules

---
 text_recognizer/data/transforms/word_piece.py | 48 +++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 text_recognizer/data/transforms/word_piece.py

diff --git a/text_recognizer/data/transforms/word_piece.py b/text_recognizer/data/transforms/word_piece.py
new file mode 100644
index 0000000..6bf5472
--- /dev/null
+++ b/text_recognizer/data/transforms/word_piece.py
@@ -0,0 +1,48 @@
+"""Target transform for word pieces."""
+from pathlib import Path
+from typing import Optional, Union, Set
+
+import torch
+from torch import Tensor
+
+from text_recognizer.data.mappings.word_piece_mapping import WordPieceMapping
+
+
+class WordPiece:
+    """Converts EMNIST indices to Word Piece indices."""
+
+    def __init__(
+        self,
+        num_features: int = 1000,
+        tokens: str = "iamdb_1kwp_tokens_1000.txt",
+        lexicon: str = "iamdb_1kwp_lex_1000.txt",
+        data_dir: Optional[Union[str, Path]] = None,
+        use_words: bool = False,
+        prepend_wordsep: bool = False,
+        special_tokens: Set[str] = {"<s>", "<e>", "<p>"},
+        extra_symbols: Optional[Set[str]] = {"\n",},
+        max_len: int = 451,
+    ) -> None:
+        self.mapping = WordPieceMapping(
+            data_dir=data_dir,
+            num_features=num_features,
+            tokens=tokens,
+            lexicon=lexicon,
+            use_words=use_words,
+            prepend_wordsep=prepend_wordsep,
+            special_tokens=special_tokens,
+            extra_symbols=extra_symbols,
+        )
+        self.max_len = max_len
+
+    def __call__(self, x: Tensor) -> Tensor:
+        """Converts Emnist target tensor to Word piece target tensor."""
+        y = self.mapping.emnist_to_wordpiece_indices(x)
+        if len(y) < self.max_len:
+            pad_len = self.max_len - len(y)
+            y = torch.cat(
+                (y, torch.LongTensor([self.mapping.get_index("<p>")] * pad_len))
+            )
+        else:
+            y = y[: self.max_len]
+        return y
--
cgit v1.2.3-70-g09d2