path: root/src/text_recognizer/networks/mlp.py
"""Defines the MLP network."""
from typing import Callable, Optional

import torch
from torch import nn


class MLP(nn.Module):
    """Multi layered perceptron network."""

    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int,
        num_layers: int,
        dropout_rate: float,
        activation_fn: Optional[Callable] = None,
    ) -> None:
        """Initialization of the MLP network.

        Args:
            input_size (int): The input shape of the network.
            output_size (int): Number of classes in the dataset.
            hidden_size (int): The number of `neurons` in each hidden layer.
            num_layers (int): The number of hidden layers.
            dropout_rate (float): The dropout rate at each layer.
            activation_fn (Optional[Callable]): The activation function in the hidden layers, (default:
                nn.ReLU()).
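
        Example:
            A minimal usage sketch; the 28x28 shape is an illustrative,
            MNIST-like input, and only the output shape is deterministic:

            >>> net = MLP(input_size=784, output_size=10, hidden_size=128,
            ...           num_layers=3, dropout_rate=0.5)
            >>> net(torch.randn(2, 28, 28)).shape
            torch.Size([2, 10])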

        """
        super().__init__()

        if activation_fn is None:
            activation_fn = nn.ReLU(inplace=True)

        # Input projection followed by the first hidden activation. The same
        # activation module instance is reused below; that is safe because it
        # holds no parameters.
        layers = [
            nn.Linear(in_features=input_size, out_features=hidden_size),
            activation_fn,
        ]

        # The first hidden layer was added above, so num_layers - 1 remain.
        for _ in range(num_layers - 1):
            layers += [
                nn.Linear(in_features=hidden_size, out_features=hidden_size),
                activation_fn,
            ]

            if dropout_rate:
                layers.append(nn.Dropout(p=dropout_rate))

        # Output projection to the class logits.
        layers.append(nn.Linear(in_features=hidden_size, out_features=output_size))

        self.layers = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Flattens the input and feeds it through the network."""
        # Flatten everything except the batch dimension,
        # e.g. (B, 28, 28) -> (B, 784).
        x = torch.flatten(x, start_dim=1)
        return self.layers(x)
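
# Note: the network contains Dropout, so callers should toggle train/eval mode
# explicitly at inference time. A minimal sketch (for illustration only; the
# hyperparameter values are arbitrary):
#
#     net = MLP(input_size=784, output_size=10, hidden_size=128,
#               num_layers=3, dropout_rate=0.25)
#     net.eval()  # disables Dropout
#     with torch.no_grad():
#         logits = net(torch.randn(1, 28, 28))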


def test() -> None:
    """Smoke test: builds the MLP for MNIST-shaped input and checks the output shape."""
    x = torch.randn([1, 28, 28])
    input_size = torch.flatten(x).shape[0]
    net = MLP(
        input_size=input_size,
        output_size=10,
        hidden_size=128,
        num_layers=5,
        dropout_rate=0.25,
        activation_fn=nn.GELU(),
    )

    # Local import keeps torchsummary an optional, test-only dependency.
    from torchsummary import summary

    summary(net, (1, 28, 28), device="cpu")

    out = net(x)
    assert out.shape == (1, 10)


if __name__ == "__main__":
    test()