From 07a5b84b001756b06c6dc0c54f396ea42b11067b Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Wed, 27 Oct 2021 22:10:14 +0200
Subject: Rename mappings

---
 text_recognizer/data/iam_lines.py                |  2 +-
 text_recognizer/data/iam_paragraphs.py           |  2 +-
 text_recognizer/data/iam_synthetic_paragraphs.py |  2 +-
 text_recognizer/data/mappings/base.py            | 37 +++++++++++
 text_recognizer/data/mappings/base_mapping.py    | 37 -----------
 text_recognizer/data/mappings/emnist.py          | 60 ++++++++++++++++++
 text_recognizer/data/mappings/emnist_mapping.py  | 60 ------------------
 text_recognizer/data/mappings/word_piece.py      | 72 +++++++++++++++++++++
 .../data/mappings/word_piece_mapping.py          | 74 ----------------------
 9 files changed, 172 insertions(+), 174 deletions(-)
 create mode 100644 text_recognizer/data/mappings/base.py
 delete mode 100644 text_recognizer/data/mappings/base_mapping.py
 create mode 100644 text_recognizer/data/mappings/emnist.py
 delete mode 100644 text_recognizer/data/mappings/emnist_mapping.py
 create mode 100644 text_recognizer/data/mappings/word_piece.py
 delete mode 100644 text_recognizer/data/mappings/word_piece_mapping.py

(limited to 'text_recognizer')

diff --git a/text_recognizer/data/iam_lines.py b/text_recognizer/data/iam_lines.py
index efd1cde..672a6f0 100644
--- a/text_recognizer/data/iam_lines.py
+++ b/text_recognizer/data/iam_lines.py
@@ -20,8 +20,8 @@ from text_recognizer.data.base_dataset import (
     split_dataset,
 )
 from text_recognizer.data.iam import IAM
-from text_recognizer.data.mappings.emnist_mapping import EmnistMapping
 from text_recognizer.data.utils import image_utils
+from text_recognizer.data.mappings.emnist import EmnistMapping
 from text_recognizer.data.transforms.load_transform import load_transform_from_file


diff --git a/text_recognizer/data/iam_paragraphs.py b/text_recognizer/data/iam_paragraphs.py
index 26674e0..033b93e 100644
--- a/text_recognizer/data/iam_paragraphs.py
+++ b/text_recognizer/data/iam_paragraphs.py
@@ -17,7 +17,7 @@ from text_recognizer.data.base_dataset import (
     split_dataset,
 )
 from text_recognizer.data.iam import IAM
-from text_recognizer.data.mappings.emnist_mapping import EmnistMapping
+from text_recognizer.data.mappings.emnist import EmnistMapping
 from text_recognizer.data.transforms.load_transform import load_transform_from_file


diff --git a/text_recognizer/data/iam_synthetic_paragraphs.py b/text_recognizer/data/iam_synthetic_paragraphs.py
index d6fea43..d906399 100644
--- a/text_recognizer/data/iam_synthetic_paragraphs.py
+++ b/text_recognizer/data/iam_synthetic_paragraphs.py
@@ -25,7 +25,7 @@ from text_recognizer.data.iam_paragraphs import (
     NEW_LINE_TOKEN,
     resize_image,
 )
-from text_recognizer.data.mappings.emnist_mapping import EmnistMapping
+from text_recognizer.data.mappings.emnist import EmnistMapping
 from text_recognizer.data.transforms.load_transform import load_transform_from_file


diff --git a/text_recognizer/data/mappings/base.py b/text_recognizer/data/mappings/base.py
new file mode 100644
index 0000000..572ac95
--- /dev/null
+++ b/text_recognizer/data/mappings/base.py
@@ -0,0 +1,37 @@
+"""Mapping to and from word pieces."""
+from abc import ABC, abstractmethod
+from typing import Dict, List
+
+from torch import Tensor
+
+
+class AbstractMapping(ABC):
+    def __init__(
+        self, input_size: List[int], mapping: List[str], inverse_mapping: Dict[str, int]
+    ) -> None:
+        self.input_size = input_size
+        self.mapping = mapping
+        self.inverse_mapping = inverse_mapping
+
+    def __len__(self) -> int:
+        return len(self.mapping)
+
+    @property
+    def num_classes(self) -> int:
+        return self.__len__()
+
+    @abstractmethod
+    def get_token(self, *args, **kwargs) -> str:
+        ...
+
+    @abstractmethod
+    def get_index(self, *args, **kwargs) -> Tensor:
+        ...
+
+    @abstractmethod
+    def get_text(self, *args, **kwargs) -> str:
+        ...
+
+    @abstractmethod
+    def get_indices(self, *args, **kwargs) -> Tensor:
+        ...

diff --git a/text_recognizer/data/mappings/base_mapping.py b/text_recognizer/data/mappings/base_mapping.py
deleted file mode 100644
index 572ac95..0000000
--- a/text_recognizer/data/mappings/base_mapping.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""Mapping to and from word pieces."""
-from abc import ABC, abstractmethod
-from typing import Dict, List
-
-from torch import Tensor
-
-
-class AbstractMapping(ABC):
-    def __init__(
-        self, input_size: List[int], mapping: List[str], inverse_mapping: Dict[str, int]
-    ) -> None:
-        self.input_size = input_size
-        self.mapping = mapping
-        self.inverse_mapping = inverse_mapping
-
-    def __len__(self) -> int:
-        return len(self.mapping)
-
-    @property
-    def num_classes(self) -> int:
-        return self.__len__()
-
-    @abstractmethod
-    def get_token(self, *args, **kwargs) -> str:
-        ...
-
-    @abstractmethod
-    def get_index(self, *args, **kwargs) -> Tensor:
-        ...
-
-    @abstractmethod
-    def get_text(self, *args, **kwargs) -> str:
-        ...
-
-    @abstractmethod
-    def get_indices(self, *args, **kwargs) -> Tensor:
-        ...

diff --git a/text_recognizer/data/mappings/emnist.py b/text_recognizer/data/mappings/emnist.py
new file mode 100644
index 0000000..655169e
--- /dev/null
+++ b/text_recognizer/data/mappings/emnist.py
@@ -0,0 +1,60 @@
+"""Emnist mapping."""
+from typing import List, Optional, Set, Union
+
+import torch
+from torch import Tensor
+
+from text_recognizer.data.emnist import emnist_mapping
+from text_recognizer.data.mappings.base import AbstractMapping
+
+
+class EmnistMapping(AbstractMapping):
+    """Mapping for EMNIST labels."""
+
+    def __init__(
+        self, extra_symbols: Optional[Set[str]] = None, lower: bool = True
+    ) -> None:
+        self.extra_symbols = set(extra_symbols) if extra_symbols is not None else None
+        self.mapping, self.inverse_mapping, self.input_size = emnist_mapping(
+            self.extra_symbols
+        )
+        if lower:
+            self._to_lower()
+        super().__init__(self.input_size, self.mapping, self.inverse_mapping)
+
+    def _to_lower(self) -> None:
+        """Converts mapping to lowercase letters only."""
+
+        def _filter(x: int) -> int:
+            if 40 <= x:
+                return x - 26
+            return x
+
+        self.inverse_mapping = {v: _filter(k) for k, v in enumerate(self.mapping)}
+        self.mapping = [c for c in self.mapping if not c.isupper()]
+
+    def get_token(self, index: Union[int, Tensor]) -> str:
+        """Returns token for index value."""
+        if (index := int(index)) <= len(self.mapping):
+            return self.mapping[index]
+        raise KeyError(f"Index ({index}) not in mapping.")
+
+    def get_index(self, token: str) -> Tensor:
+        """Returns index value of token."""
+        if token in self.inverse_mapping:
+            return torch.LongTensor([self.inverse_mapping[token]])
+        raise KeyError(f"Token ({token}) not found in inverse mapping.")
+
+    def get_text(self, indices: Union[List[int], Tensor]) -> str:
+        """Returns the text from a list of indices."""
+        if isinstance(indices, Tensor):
+            indices = indices.tolist()
+        return "".join([self.mapping[index] for index in indices])
+
+    def get_indices(self, text: str) -> Tensor:
+        """Returns tensor of indices for a string."""
+        return Tensor([self.inverse_mapping[token] for token in text])
+
+    def __getitem__(self, x: Union[int, Tensor]) -> str:
+        """Returns text for a list of indices."""
+        return self.get_token(x)

diff --git a/text_recognizer/data/mappings/emnist_mapping.py b/text_recognizer/data/mappings/emnist_mapping.py
deleted file mode 100644
index 3eed3d8..0000000
--- a/text_recognizer/data/mappings/emnist_mapping.py
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Emnist mapping."""
-from typing import List, Optional, Set, Union
-
-import torch
-from torch import Tensor
-
-from text_recognizer.data.mappings.base_mapping import AbstractMapping
-from text_recognizer.data.emnist import emnist_mapping
-
-
-class EmnistMapping(AbstractMapping):
-    """Mapping for EMNIST labels."""
-
-    def __init__(
-        self, extra_symbols: Optional[Set[str]] = None, lower: bool = True
-    ) -> None:
-        self.extra_symbols = set(extra_symbols) if extra_symbols is not None else None
-        self.mapping, self.inverse_mapping, self.input_size = emnist_mapping(
-            self.extra_symbols
-        )
-        if lower:
-            self._to_lower()
-        super().__init__(self.input_size, self.mapping, self.inverse_mapping)
-
-    def _to_lower(self) -> None:
-        """Converts mapping to lowercase letters only."""
-
-        def _filter(x: int) -> int:
-            if 40 <= x:
-                return x - 26
-            return x
-
-        self.inverse_mapping = {v: _filter(k) for k, v in enumerate(self.mapping)}
-        self.mapping = [c for c in self.mapping if not c.isupper()]
-
-    def get_token(self, index: Union[int, Tensor]) -> str:
-        """Returns token for index value."""
-        if (index := int(index)) <= len(self.mapping):
-            return self.mapping[index]
-        raise KeyError(f"Index ({index}) not in mapping.")
-
-    def get_index(self, token: str) -> Tensor:
-        """Returns index value of token."""
-        if token in self.inverse_mapping:
-            return torch.LongTensor([self.inverse_mapping[token]])
-        raise KeyError(f"Token ({token}) not found in inverse mapping.")
-
-    def get_text(self, indices: Union[List[int], Tensor]) -> str:
-        """Returns the text from a list of indices."""
-        if isinstance(indices, Tensor):
-            indices = indices.tolist()
-        return "".join([self.mapping[index] for index in indices])
-
-    def get_indices(self, text: str) -> Tensor:
-        """Returns tensor of indices for a string."""
-        return Tensor([self.inverse_mapping[token] for token in text])
-
-    def __getitem__(self, x: Union[int, Tensor]) -> str:
-        """Returns text for a list of indices."""
-        return self.get_token(x)

diff --git a/text_recognizer/data/mappings/word_piece.py b/text_recognizer/data/mappings/word_piece.py
new file mode 100644
index 0000000..861c3bd
--- /dev/null
+++ b/text_recognizer/data/mappings/word_piece.py
@@ -0,0 +1,72 @@
+"""Word piece mapping."""
+from typing import List, Set, Union
+
+import torch
+from torch import Tensor
+
+from text_recognizer.data.mappings.emnist import EmnistMapping
+from text_recognizer.data.utils.iam_preprocessor import Preprocessor
+
+
+class WordPieceMapping(EmnistMapping):
+    """Word piece mapping."""
+
+    def __init__(
+        self,
+        num_features: int = 1000,
+        tokens: str = "iamdb_1kwp_tokens_1000.txt",
+        lexicon: str = "iamdb_1kwp_lex_1000.txt",
+        use_words: bool = False,
+        prepend_wordsep: bool = False,
+        special_tokens: Set[str] = {"<s>", "<e>", "<p>"},
+        extra_symbols: Set[str] = {"\n"},
+    ) -> None:
+        super().__init__(extra_symbols=extra_symbols)
+        special_tokens = set(special_tokens)
+        if self.extra_symbols is not None:
+            special_tokens = special_tokens | set(extra_symbols)
+
+        self.wordpiece_processor = Preprocessor(
+            num_features=num_features,
+            tokens=tokens,
+            lexicon=lexicon,
+            use_words=use_words,
+            prepend_wordsep=prepend_wordsep,
+            special_tokens=special_tokens,
+        )
+
+    def __len__(self) -> int:
+        """Return number of word pieces."""
+        return len(self.wordpiece_processor.tokens)
+
+    def get_token(self, index: Union[int, Tensor]) -> str:
+        """Returns token for index."""
+        if (index := int(index)) <= self.wordpiece_processor.num_tokens:
+            return self.wordpiece_processor.tokens[index]
+        raise KeyError(f"Index ({index}) not in mapping.")
+
+    def get_index(self, token: str) -> Tensor:
+        """Returns index of token."""
+        if token in self.wordpiece_processor.tokens:
+            return torch.LongTensor([self.wordpiece_processor.tokens_to_index[token]])
+        raise KeyError(f"Token ({token}) not found in inverse mapping.")
+
+    def get_text(self, indices: Union[List[int], Tensor]) -> str:
+        """Returns text from indices."""
+        if isinstance(indices, Tensor):
+            indices = indices.tolist()
+        return self.wordpiece_processor.to_text(indices)
+
+    def get_indices(self, text: str) -> Tensor:
+        """Returns indices of text."""
+        return self.wordpiece_processor.to_index(text)
+
+    def emnist_to_wordpiece_indices(self, x: Tensor) -> Tensor:
+        """Returns word pieces indices from emnist indices."""
+        text = "".join([self.mapping[i] for i in x])
+        text = text.lower().replace(" ", "▁")
+        return torch.LongTensor(self.wordpiece_processor.to_index(text))
+
+    def __getitem__(self, x: Union[int, Tensor]) -> str:
+        """Returns token for word piece index."""
+        return self.get_token(x)

diff --git a/text_recognizer/data/mappings/word_piece_mapping.py b/text_recognizer/data/mappings/word_piece_mapping.py
deleted file mode 100644
index f9e4e7a..0000000
--- a/text_recognizer/data/mappings/word_piece_mapping.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Word piece mapping."""
-from pathlib import Path
-from typing import List, Optional, Set, Union
-
-from loguru import logger as log
-import torch
-from torch import Tensor
-
-from text_recognizer.data.mappings.emnist_mapping import EmnistMapping
-from text_recognizer.data.utils.iam_preprocessor import Preprocessor
-
-
-class WordPieceMapping(EmnistMapping):
-    """Word piece mapping."""
-
-    def __init__(
-        self,
-        num_features: int = 1000,
-        tokens: str = "iamdb_1kwp_tokens_1000.txt",
-        lexicon: str = "iamdb_1kwp_lex_1000.txt",
-        use_words: bool = False,
-        prepend_wordsep: bool = False,
-        special_tokens: Set[str] = {"<s>", "<e>", "<p>"},
-        extra_symbols: Set[str] = {"\n"},
-    ) -> None:
-        super().__init__(extra_symbols=extra_symbols)
-        special_tokens = set(special_tokens)
-        if self.extra_symbols is not None:
-            special_tokens = special_tokens | set(extra_symbols)
-
-        self.wordpiece_processor = Preprocessor(
-            num_features=num_features,
-            tokens=tokens,
-            lexicon=lexicon,
-            use_words=use_words,
-            prepend_wordsep=prepend_wordsep,
-            special_tokens=special_tokens,
-        )
-
-    def __len__(self) -> int:
-        """Return number of word pieces."""
-        return len(self.wordpiece_processor.tokens)
-
-    def get_token(self, index: Union[int, Tensor]) -> str:
-        """Returns token for index."""
-        if (index := int(index)) <= self.wordpiece_processor.num_tokens:
-            return self.wordpiece_processor.tokens[index]
-        raise KeyError(f"Index ({index}) not in mapping.")
-
-    def get_index(self, token: str) -> Tensor:
-        """Returns index of token."""
-        if token in self.wordpiece_processor.tokens:
-            return torch.LongTensor([self.wordpiece_processor.tokens_to_index[token]])
-        raise KeyError(f"Token ({token}) not found in inverse mapping.")
-
-    def get_text(self, indices: Union[List[int], Tensor]) -> str:
-        """Returns text from indices."""
-        if isinstance(indices, Tensor):
-            indices = indices.tolist()
-        return self.wordpiece_processor.to_text(indices)
-
-    def get_indices(self, text: str) -> Tensor:
-        """Returns indices of text."""
-        return self.wordpiece_processor.to_index(text)
-
-    def emnist_to_wordpiece_indices(self, x: Tensor) -> Tensor:
-        """Returns word pieces indices from emnist indices."""
-        text = "".join([self.mapping[i] for i in x])
-        text = text.lower().replace(" ", "▁")
-        return torch.LongTensor(self.wordpiece_processor.to_index(text))
-
-    def __getitem__(self, x: Union[int, Tensor]) -> str:
-        """Returns token for word piece index."""
-        return self.get_token(x)
--
cgit v1.2.3-70-g09d2