diff options
author | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2022-09-27 23:18:20 +0200 |
---|---|---|
committer | Gustaf Rydholm <gustaf.rydholm@gmail.com> | 2022-09-27 23:18:20 +0200 |
commit | 723cf87846bf7297326fc82973a1e148af317638 (patch) | |
tree | 0bd49e42a3ca8c2c9e16472355b7f262cef27e05 /text_recognizer/data/transforms/embed_crop.py | |
parent | 2dcbd13eb311d0a4b31e086a53d2afc55d50e0b5 (diff) |
Remove transforms
Diffstat (limited to 'text_recognizer/data/transforms/embed_crop.py')
-rw-r--r-- | text_recognizer/data/transforms/embed_crop.py | 37 |
1 file changed, 0 insertions, 37 deletions
"""Transforms for PyTorch datasets."""
import random

from PIL import Image


class EmbedCrop:
    """Embed a line-image crop into a fixed-size grayscale canvas.

    The crop is resized to the canvas height (preserving aspect ratio,
    optionally with random horizontal stretching when augmenting) and
    pasted, bottom-aligned, onto a black IMAGE_WIDTH x IMAGE_HEIGHT
    "L"-mode image.
    """

    IMAGE_HEIGHT = 56
    IMAGE_WIDTH = 1024

    def __init__(self, augment: bool) -> None:
        # When True, apply random horizontal stretching (0.9x-1.1x)
        # to the resized crop as data augmentation.
        self.augment = augment

    def __call__(self, crop: Image.Image) -> Image.Image:
        """Return a new canvas image with the resized crop embedded.

        Args:
            crop: PIL image of mode "L" (so value range is [0, 255]).

        Returns:
            A new IMAGE_WIDTH x IMAGE_HEIGHT "L"-mode image containing
            the resized crop, bottom-aligned and offset at most 28 px
            from the left edge.
        """
        image = Image.new("L", (self.IMAGE_WIDTH, self.IMAGE_HEIGHT))

        # Scale the crop to the canvas height, keeping aspect ratio.
        crop_width, crop_height = crop.size
        new_crop_height = self.IMAGE_HEIGHT
        new_crop_width = int(new_crop_height * (crop_width / crop_height))

        if self.augment:
            # Random horizontal stretching for augmentation.
            new_crop_width = int(new_crop_width * random.uniform(0.9, 1.1))
        # Clamp on BOTH paths: a very wide crop would otherwise exceed
        # the canvas width and make the paste x-offset negative.
        new_crop_width = min(new_crop_width, self.IMAGE_WIDTH)
        crop_resized = crop.resize(
            (new_crop_width, new_crop_height), resample=Image.BILINEAR
        )

        # Embed in the canvas: bottom-aligned, nudged at most 28 px
        # in from the left edge (less if the crop fills the width).
        x = min(28, self.IMAGE_WIDTH - new_crop_width)
        y = self.IMAGE_HEIGHT - new_crop_height
        image.paste(crop_resized, (x, y))

        return image