def randw(*d):
    return nn.Parameter(torch.empty(*d).normal_(0, 1 / math.sqrt(d[-1])))
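# randw draws weights with std 1 / math.sqrt(d[-1]), i.e. 1 / sqrt(fan-in),
# since the last dimension given is the one contracted against the input.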
- self.wq = randw(nb_heads, dim_qk, dim_in)
- self.wk = randw(nb_heads, dim_qk, dim_in)
- self.wv = randw(nb_heads, dim_v, dim_in)
+ self.w_q = randw(nb_heads, dim_qk, dim_in)
+ self.w_k = randw(nb_heads, dim_qk, dim_in)
+ self.w_v = randw(nb_heads, dim_v, dim_in)
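# w_q and w_k map dim_in to dim_qk per head; w_v maps dim_in to dim_v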
self.causal = causal
self.attention_dropout = attention_dropout
def forward(self, x):
-     q = torch.einsum('ntc,hdc->nhtd', x, self.wq)
-     k = torch.einsum('ntc,hdc->nhtd', x, self.wk)
-     v = torch.einsum('ntc,hdc->nhtd', x, self.wv)
+     q = torch.einsum('ntc,hdc->nhtd', x, self.w_q)
+     k = torch.einsum('ntc,hdc->nhtd', x, self.w_k)
+     v = torch.einsum('ntc,hdc->nhtd', x, self.w_v)
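    # 'ntc,hdc->nhtd' contracts the channel dim c of x (n, t, c) with the
    # per-head weights (h, d, c), giving q, k, v of shape (n, h, t, d)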
    r = math.sqrt(q.size(3))  # sqrt(dim_qk), the scaled dot-product normalizer
    a = torch.einsum('nhtd,nhsd->nhts', q, k).div(r)  # attention logits (n, h, t, s)
    if self.causal: