Diffstat (limited to 'text_recognizer/models')
-rw-r--r--  text_recognizer/models/base.py         3
-rw-r--r--  text_recognizer/models/metrics.py      4
-rw-r--r--  text_recognizer/models/transformer.py  1
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/text_recognizer/models/base.py b/text_recognizer/models/base.py
index dfb4ca4..caf63c1 100644
--- a/text_recognizer/models/base.py
+++ b/text_recognizer/models/base.py
@@ -23,12 +23,9 @@ class BaseLitModel(LightningModule):
criterion_config: DictConfig = attr.ib(converter=DictConfig)
optimizer_config: DictConfig = attr.ib(converter=DictConfig)
lr_scheduler_config: DictConfig = attr.ib(converter=DictConfig)
-
interval: str = attr.ib()
monitor: str = attr.ib(default="val/loss")
-
loss_fn: Type[nn.Module] = attr.ib(init=False)
-
train_acc: torchmetrics.Accuracy = attr.ib(
init=False, default=torchmetrics.Accuracy()
)
diff --git a/text_recognizer/models/metrics.py b/text_recognizer/models/metrics.py
index 9793157..0eb42dc 100644
--- a/text_recognizer/models/metrics.py
+++ b/text_recognizer/models/metrics.py
@@ -26,7 +26,9 @@ class CharacterErrorRate(Metric):
bsz = preds.shape[0]
for index in range(bsz):
pred = [p for p in preds[index].tolist() if p not in self.ignore_indices]
- target = [t for t in targets[index].tolist() if t not in self.ignore_indices]
+ target = [
+ t for t in targets[index].tolist() if t not in self.ignore_indices
+ ]
distance = editdistance.distance(pred, target)
error = distance / max(len(pred), len(target))
self.error += error
diff --git a/text_recognizer/models/transformer.py b/text_recognizer/models/transformer.py
index 7a9d566..0e01bb5 100644
--- a/text_recognizer/models/transformer.py
+++ b/text_recognizer/models/transformer.py
@@ -13,6 +13,7 @@ from text_recognizer.models.base import BaseLitModel
@attr.s(auto_attribs=True)
class TransformerLitModel(BaseLitModel):
"""A PyTorch Lightning model for transformer networks."""
+
mapping: Type[AbstractMapping] = attr.ib()
start_token: str = attr.ib()
end_token: str = attr.ib()
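
For reference, the metrics.py hunk above only re-wraps the target comprehension; the per-sample computation is unchanged. A minimal, self-contained sketch of that computation follows (the function name char_error_rate and the example ignore_indices values are illustrative assumptions, not part of the repository; editdistance.distance is the call used in the diff):

import editdistance

def char_error_rate(pred_ids, target_ids, ignore_indices=frozenset({0, 1, 2})):
    # Drop ignored token ids (e.g. padding/start/end); the real Metric
    # receives these via its constructor.
    pred = [p for p in pred_ids if p not in ignore_indices]
    target = [t for t in target_ids if t not in ignore_indices]
    distance = editdistance.distance(pred, target)
    # Normalize by the longer of the two filtered sequences.
    return distance / max(len(pred), len(target))

# Example: one substitution over three kept tokens -> CER of 1/3.
print(char_error_rate([5, 6, 7, 0], [5, 6, 8, 0]))  # 0.333...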