# Scaled dot-product attention scores: (n, h, t, s) from queries (n, h, t, d)
# and keys (n, h, s, d), scaled by 1/sqrt(d) for variance stabilization.
a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))
if self.causal:
    # Causal mask: forbid attention to future positions, i.e. every (t, s)
    # pair where the key index s exceeds the query index t. Broadcasting the
    # two aranges builds a (1, 1, t, s) boolean mask.
    forbidden_attention = torch.arange(a.size(2), device = q.device)[None, None, :, None] \
                          < torch.arange(a.size(3), device = q.device)[None, None, None, :]
    # -inf before softmax yields exactly zero weight on masked positions.
    a = a.masked_fill(forbidden_attention, float('-inf'))
# Normalize over the key dimension.
a = a.softmax(dim = 3)
a = F.dropout(a, self.attention_dropout, self.training)