##############################
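# Multi-head QKV attention. The updated forward() below takes the sequence that
# provides the queries and, optionally, a separate sequence that provides the
# keys and values, so the same module serves self- and cross-attention.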
class QKVAttention(nn.Module):
-    def __init__(self, dim_in, dim_qk, dim_v, nb_heads = 1, causal = False, attention_dropout = 0.0):
+    def __init__(self, dim_in, dim_qk, dim_v,
+                 nb_heads = 1, causal = False, attention_dropout = 0.0):
        super().__init__()

        def randw(*d):
            return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))

        self.causal = causal
        self.attention_dropout = attention_dropout
        self.w_q = randw(nb_heads, dim_qk, dim_in)
        self.w_k = randw(nb_heads, dim_qk, dim_in)
        self.w_v = randw(nb_heads, dim_v, dim_in)

-    def forward(self, x):
-        q = torch.einsum('ntc,hdc->nhtd', x, self.w_q)
-        k = torch.einsum('ntc,hdc->nhtd', x, self.w_k)
-        v = torch.einsum('ntc,hdc->nhtd', x, self.w_v)
-        r = math.sqrt(q.size(3))
-        a = torch.einsum('nhtd,nhsd->nhts', q, k).div(r)
+    def forward(self, x_q, x_kv = None):
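+        # Queries are computed from x_q; keys and values from x_kv,
+        # which defaults to x_q (plain self-attention)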
+        if x_kv is None: x_kv = x_q
+        q = torch.einsum('ntc,hdc->nhtd', x_q, self.w_q)
+        k = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_k)
+        v = torch.einsum('ntc,hdc->nhtd', x_kv, self.w_v)
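+        # Attention logits, scaled by the square root of the key dimension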
+        a = torch.einsum('nhtd,nhsd->nhts', q, k) / math.sqrt(q.size(3))

        if self.causal:
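            # Mask the strict upper triangle of the T x S score matrix,
            # so position t can only attend to positions s <= t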
            mask = torch.tril(q.new_ones(a.size(2), a.size(3)))[None, None, :, :] == 0
            a = a.masked_fill(mask, float('-inf'))