From 87ea75c803400ca74c4f1b863c496165ed802fc2 Mon Sep 17 00:00:00 2001
From: Gustaf Rydholm
Date: Mon, 7 Jun 2021 22:57:24 +0200
Subject: Working feedforward of full transformer arch in notebook

---
 text_recognizer/networks/transformer/attention.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/text_recognizer/networks/transformer/attention.py b/text_recognizer/networks/transformer/attention.py
index a3b53f0..7bafc58 100644
--- a/text_recognizer/networks/transformer/attention.py
+++ b/text_recognizer/networks/transformer/attention.py
@@ -91,12 +91,12 @@ class Attention(nn.Module):
     def forward(
         self,
         x: Tensor,
-        context: Optional[Tensor],
-        mask: Optional[Tensor],
-        context_mask: Optional[Tensor],
+        context: Optional[Tensor] = None,
+        mask: Optional[Tensor] = None,
+        context_mask: Optional[Tensor] = None,
         rotary_pos_emb: Optional[Tensor] = None,
     ) -> Tuple[Tensor, Tensor]:
-        b, n, _, device = x.shape, x.device
+        b, n, _, device = *x.shape, x.device
         q, k, v = self.qkv_fn(x)
         q, k = (
             self._apply_rotary_emb(q, k, rotary_pos_emb)
-- 
cgit v1.2.3-70-g09d2
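
Note: the second hunk fixes a tuple-unpacking bug, and the first hunk gives the optional arguments a default of None so the forward pass can be called as forward(x) without explicit masks or context. A minimal sketch of the unpacking fix, assuming an example input shape not taken from the repo:

    import torch

    x = torch.randn(2, 16, 64)  # (batch, seq_len, dim); illustrative shape only

    # Without the star, the right-hand side is a 2-tuple (shape, device),
    # which cannot be unpacked into four names:
    try:
        b, n, _, device = x.shape, x.device
    except ValueError as err:
        print(err)  # not enough values to unpack (expected 4, got 2)

    # The starred expression splats the shape tuple inline, so the
    # right-hand side becomes (2, 16, 64, device) and unpacking succeeds:
    b, n, _, device = *x.shape, x.device
    print(b, n, device)  # 2 16 cpu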