path: root/src/text_recognizer/datasets/emnist_dataset.py
"""Fetches a PyTorch DataLoader with the EMNIST dataset."""

import json
from pathlib import Path
from typing import Callable, Dict, List, Optional

from loguru import logger
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.datasets import EMNIST
from torchvision.transforms import Compose, ToTensor


DATA_DIRNAME = Path(__file__).resolve().parents[3] / "data"
ESSENTIALS_FILENAME = Path(__file__).resolve().parent / "emnist_essentials.json"


class Transpose:
    """Transposes the EMNIST image to the correct orientation."""

    def __call__(self, image: Image.Image) -> np.ndarray:
        """Swaps the image axes."""
        return np.array(image).swapaxes(0, 1)


def save_emnist_essentials(emnist_dataset: EMNIST) -> None:
    """Extracts and saves the EMNIST essentials: the label mapping and the input shape."""
    labels = sorted(emnist_dataset.classes)
    mapping = [(i, str(label)) for i, label in enumerate(labels)]
    essentials = {
        "mapping": mapping,
        # Without a transform the samples are PIL images, so convert to an array to read the shape.
        "input_shape": tuple(np.array(emnist_dataset[0][0]).shape),
    }
    logger.info("Saving EMNIST essentials...")
    with open(ESSENTIALS_FILENAME, "w") as f:
        json.dump(essentials, f)


def download_emnist() -> None:
    """Download the EMNIST dataset via the PyTorch class."""
    logger.info(f"Data directory is: {DATA_DIRNAME}")
    dataset = EMNIST(root=DATA_DIRNAME, split="byclass", download=True)
    save_emnist_essentials(dataset)


def load_emnist_mapping() -> Dict[int, str]:
    """Load the EMNIST label mapping from the essentials file."""
    with open(ESSENTIALS_FILENAME) as f:
        essentials = json.load(f)
    return dict(essentials["mapping"])
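

# For reference, the essentials file read above looks roughly like this for the
# "byclass" split (62 classes: digits, upper- and lower-case letters); exact values
# depend on the torchvision version:
#
#     {"mapping": [[0, "0"], [1, "1"], ..., [61, "z"]], "input_shape": [28, 28]}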


class EmnistDataLoader:
    """Class for Emnist DataLoaders."""

    def __init__(
        self,
        splits: List[str],
        sample_to_balance: bool = False,
        subsample_fraction: Optional[float] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        batch_size: int = 128,
        shuffle: bool = False,
        num_workers: int = 0,
        cuda: bool = True,
        seed: int = 4711,
    ) -> None:
        """Fetches DataLoaders.

        Args:
            splits (List[str]): One or both of the dataset splits "train" and "val".
            sample_to_balance (bool): If True, resamples the unbalanced "byclass" split so that each
                class has at most the mean number of instances. Defaults to False.
            subsample_fraction (Optional[float]): The fraction of the dataset that will be loaded. If
                None, the entire dataset will be loaded.
            transform (Optional[Callable]): A function/transform that takes in a PIL image and returns
                a transformed version, e.g. transforms.RandomCrop. Defaults to None.
            target_transform (Optional[Callable]): A function/transform that takes in the target and
                transforms it. Defaults to None.
            batch_size (int): How many samples per batch to load. Defaults to 128.
            shuffle (bool): Set to True to have the data reshuffled at every epoch. Defaults to False.
            num_workers (int): How many subprocesses to use for data loading. 0 means that the data will
                be loaded in the main process. Defaults to 0.
            cuda (bool): If True, the data loader will copy Tensors into CUDA pinned memory before
                returning them. Defaults to True.
            seed (int): Seed for random sampling. Defaults to 4711.

        """
        self.splits = splits
        self.sample_to_balance = sample_to_balance
        if subsample_fraction is not None:
            assert (
                0.0 < subsample_fraction < 1.0
            ), " The subsample fraction must be in (0, 1)."
        self.subsample_fraction = subsample_fraction
        self.transform = transform
        self.target_transform = target_transform
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.num_workers = num_workers
        self.cuda = cuda
        self.seed = seed
        self._data_loaders = self._fetch_emnist_data_loaders()

    @property
    def __name__(self) -> str:
        """Returns the name of the dataset."""
        return "Emnist"

    def __call__(self, split: str) -> DataLoader:
        """Returns the `split` DataLoader.

        Args:
            split (str): The dataset split, i.e. train or val.

        Returns:
            DataLoader: A PyTorch DataLoader.

        Raises:
            ValueError: If the split does not exist.

        """
        try:
            return self._data_loaders[split]
        except KeyError:
            raise ValueError(f"Split {split} does not exist.")

    def _sample_to_balance(self, dataset: EMNIST) -> EMNIST:
        """Because the dataset is not balanced, we take at most the mean number of instances per class."""
        np.random.seed(self.seed)
        x = dataset.data
        y = dataset.targets
        num_to_sample = int(np.bincount(y.flatten()).mean())
        all_sampled_indices = []
        for label in np.unique(y.flatten()):
            inds = np.where(y == label)[0]
            # Sampling with replacement and deduplicating keeps at most num_to_sample
            # indices per class; smaller classes keep most of their instances.
            sampled_indices = np.unique(np.random.choice(inds, num_to_sample))
            all_sampled_indices.append(sampled_indices)
        indices = np.concatenate(all_sampled_indices)
        dataset.data = x[indices]
        dataset.targets = y[indices]

        return dataset

    def _subsample(self, dataset: EMNIST) -> EMNIST:
        """Subsamples the dataset to the specified fraction."""
        num_samples = int(dataset.data.shape[0] * self.subsample_fraction)
        dataset.data = dataset.data[:num_samples]
        dataset.targets = dataset.targets[:num_samples]

        return dataset

    def _fetch_emnist_dataset(self, train: bool) -> EMNIST:
        """Fetch the EMNIST dataset."""
        if self.transform is None:
            # Default transform: fix the orientation of the raw EMNIST images and convert to tensors.
            transform = Compose([Transpose(), ToTensor()])
        else:
            transform = self.transform

        dataset = EMNIST(
            root=DATA_DIRNAME,
            split="byclass",
            train=train,
            download=False,
            transform=transform,
            target_transform=self.target_transform,
        )

        if self.sample_to_balance:
            dataset = self._sample_to_balance(dataset)

        if self.subsample_fraction is not None:
            dataset = self._subsample(dataset)

        return dataset

    def _fetch_emnist_data_loaders(self) -> Dict[str, DataLoader]:
        """Fetches the EMNIST dataset and return a Dict of PyTorch DataLoaders."""
        data_loaders = {}

        for split in ["train", "val"]:
            if split in self.splits:

                if split == "train":
                    train = True
                else:
                    train = False

                dataset = self._fetch_emnist_dataset(train)

                data_loader = DataLoader(
                    dataset=dataset,
                    batch_size=self.batch_size,
                    shuffle=self.shuffle,
                    num_workers=self.num_workers,
                    pin_memory=self.cuda,
                )

                data_loaders[split] = data_loader

        return data_loaders
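

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: download EMNIST once,
    # then build train/val loaders. The hyperparameters below are illustrative.
    download_emnist()
    loaders = EmnistDataLoader(
        splits=["train", "val"],
        sample_to_balance=True,
        batch_size=64,
        shuffle=True,
        cuda=False,
    )
    images, targets = next(iter(loaders("train")))
    logger.info(f"Batch of images: {tuple(images.shape)}, targets: {tuple(targets.shape)}")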