path: root/text_recognizer/data/word_piece_mapping.py
"""Word piece mapping."""
from pathlib import Path
from typing import List, Optional, Union, Set

import torch
from loguru import logger as log
from torch import Tensor

from text_recognizer.data.emnist_mapping import EmnistMapping
from text_recognizer.data.iam_preprocessor import Preprocessor


class WordPieceMapping(EmnistMapping):
    """Mapping between EMNIST characters and IAM word pieces."""

    def __init__(
        self,
        data_dir: Optional[Path] = None,
        num_features: int = 1000,
        tokens: str = "iamdb_1kwp_tokens_1000.txt",
        lexicon: str = "iamdb_1kwp_lex_1000.txt",
        use_words: bool = False,
        prepend_wordsep: bool = False,
        special_tokens: Optional[Set[str]] = None,
        extra_symbols: Optional[Set[str]] = None,
    ) -> None:
        # Resolve defaults here instead of using mutable default arguments.
        if special_tokens is None:
            special_tokens = {"<s>", "<e>", "<p>"}
        if extra_symbols is None:
            extra_symbols = {"\n"}
        super().__init__(extra_symbols=extra_symbols)
        self.data_dir = (
            (
                Path(__file__).resolve().parents[2]
                / "data"
                / "downloaded"
                / "iam"
                / "iamdb"
            )
            if data_dir is None
            else Path(data_dir)
        )
        log.debug(f"Using data dir: {self.data_dir}")
        if not self.data_dir.exists():
            raise RuntimeError(f"Could not locate iamdb directory at {self.data_dir}")

        processed_path = (
            Path(__file__).resolve().parents[2] / "data" / "processed" / "iam_lines"
        )

        tokens_path = processed_path / tokens
        lexicon_path = processed_path / lexicon

        # The word piece processor needs the special tokens together with any
        # extra symbols that were added to the base EMNIST mapping.
        special_tokens = set(special_tokens) | set(extra_symbols)

        self.wordpiece_processor = Preprocessor(
            data_dir=self.data_dir,
            num_features=num_features,
            tokens_path=tokens_path,
            lexicon_path=lexicon_path,
            use_words=use_words,
            prepend_wordsep=prepend_wordsep,
            special_tokens=special_tokens,
        )

    def __len__(self) -> int:
        """Return the number of word piece tokens."""
        return len(self.wordpiece_processor.tokens)

    def get_token(self, index: Union[int, Tensor]) -> str:
        """Return the word piece token at a given index."""
        # Valid indices are 0 .. num_tokens - 1; the upper bound must be
        # exclusive, and negative indices are rejected as well.
        if 0 <= (index := int(index)) < self.wordpiece_processor.num_tokens:
            return self.wordpiece_processor.tokens[index]
        raise KeyError(f"Index ({index}) not in mapping.")

    def get_index(self, token: str) -> Tensor:
        """Return the index of a word piece token."""
        # Look up membership in the inverse mapping (a dict) rather than
        # scanning the token list.
        if token in self.wordpiece_processor.tokens_to_index:
            return torch.LongTensor([self.wordpiece_processor.tokens_to_index[token]])
        raise KeyError(f"Token ({token}) not found in inverse mapping.")

    def get_text(self, indices: Union[List[int], Tensor]) -> str:
        """Decode a sequence of word piece indices into text."""
        if isinstance(indices, Tensor):
            indices = indices.tolist()
        return self.wordpiece_processor.to_text(indices)

    def get_indices(self, text: str) -> Tensor:
        """Encode text into a sequence of word piece indices."""
        return self.wordpiece_processor.to_index(text)

    def emnist_to_wordpiece_indices(self, x: Tensor) -> Tensor:
        """Convert EMNIST character indices to word piece indices."""
        text = "".join([self.mapping[int(i)] for i in x])
        # The word piece processor uses the sentencepiece-style "▁" as the
        # word separator, so spaces are replaced before encoding.
        text = text.lower().replace(" ", "▁")
        return torch.LongTensor(self.wordpiece_processor.to_index(text))

    def __getitem__(self, x: Union[int, Tensor]) -> str:
        """Return the word piece token at a given index."""
        return self.get_token(x)
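

if __name__ == "__main__":
    # Minimal usage sketch, not part of the library API. It assumes the IAM
    # database has been downloaded to data/downloaded/iam/iamdb and that the
    # default token/lexicon files exist under data/processed/iam_lines
    # (the module defaults above); otherwise construction fails.
    mapping = WordPieceMapping()
    print(f"Number of word pieces: {len(mapping)}")
    indices = mapping.get_indices("hello world")
    print(f"Indices: {indices}")
    print(f"Round trip: {mapping.get_text(indices)}")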