diff options
author | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2021-04-22 08:15:58 +0200 |
---|---|---|
committer | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2021-04-22 08:15:58 +0200 |
commit | 1ca8b0b9e0613c1e02f6a5d8b49e20c4d6916412 (patch) | |
tree | 5e610ac459c9b254f8826e92372346f01f8e2412 /text_recognizer/data/mappings.py | |
parent | ffa4be4bf4e3758e01d52a9c1f354a05a90b93de (diff) |
Fixed training script, able to train vqvae
Diffstat (limited to 'text_recognizer/data/mappings.py')
-rw-r--r-- | text_recognizer/data/mappings.py | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/text_recognizer/data/mappings.py b/text_recognizer/data/mappings.py index cfa0ec7..f4016ba 100644 --- a/text_recognizer/data/mappings.py +++ b/text_recognizer/data/mappings.py @@ -8,7 +8,7 @@ import torch from torch import Tensor from text_recognizer.data.emnist import emnist_mapping -from text_recognizer.datasets.iam_preprocessor import Preprocessor +from text_recognizer.data.iam_preprocessor import Preprocessor class AbstractMapping(ABC): @@ -57,14 +57,14 @@ class EmnistMapping(AbstractMapping): class WordPieceMapping(EmnistMapping): def __init__( self, - num_features: int, - tokens: str, - lexicon: str, + num_features: int = 1000, + tokens: str = "iamdb_1kwp_tokens_1000.txt" , + lexicon: str = "iamdb_1kwp_lex_1000.txt", data_dir: Optional[Union[str, Path]] = None, use_words: bool = False, prepend_wordsep: bool = False, special_tokens: Sequence[str] = ("<s>", "<e>", "<p>"), - extra_symbols: Optional[Sequence[str]] = None, + extra_symbols: Optional[Sequence[str]] = ("\n", ), ) -> None: super().__init__(extra_symbols) self.wordpiece_processor = self._configure_wordpiece_processor( @@ -78,8 +78,8 @@ class WordPieceMapping(EmnistMapping): extra_symbols, ) + @staticmethod def _configure_wordpiece_processor( - self, num_features: int, tokens: str, lexicon: str, @@ -90,7 +90,7 @@ class WordPieceMapping(EmnistMapping): extra_symbols: Optional[Sequence[str]], ) -> Preprocessor: data_dir = ( - (Path(__file__).resolve().parents[2] / "data" / "raw" / "iam" / "iamdb") + (Path(__file__).resolve().parents[2] / "data" / "downloaded" / "iam" / "iamdb") if data_dir is None else Path(data_dir) ) @@ -138,6 +138,6 @@ class WordPieceMapping(EmnistMapping): return self.wordpiece_processor.to_index(text) def emnist_to_wordpiece_indices(self, x: Tensor) -> Tensor: - text = self.mapping.get_text(x) + text = "".join([self.mapping[i] for i in x]) text = text.lower().replace(" ", "▁") return torch.LongTensor(self.wordpiece_processor.to_index(text)) |