self.w_q = randw(nb_heads, dim_qk, dim_in)
self.w_k = randw(nb_heads, dim_qk, dim_in)
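# Per-head projection weights for queries and keys, each of shape
# (nb_heads, dim_qk, dim_in). randw is not shown in this excerpt; a plausible
# sketch (an assumption) is:
#     def randw(*d):
#         return nn.Parameter(torch.randn(*d) / math.sqrt(d[-1]))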
if bs_q.first == 0:
    self.cache_k = x_q.new_zeros(
        x_q.size(0), self.w_k.size(0), x_q.size(1), self.w_k.size(1)
    )
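# The key cache spans the whole sequence, shaped (batch, heads, time, dim_qk),
# and is allocated only when processing starts at position 0.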
q = torch.einsum(
    "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_q
)
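# einsum indices: n = batch, t = time, c = input channels, h = head, d = dim_qk;
# only the positions in [bs_q.first, bs_q.first + bs_q.nb) get a query.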
self.cache_k[:, :, bs_q.first : bs_q.first + bs_q.nb] = torch.einsum(
    "ntc,hdc->nhtd", x_q[:, bs_q.first : bs_q.first + bs_q.nb], self.w_k
)
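# Only the keys for the current slice are computed and written into the cache;
# keys from earlier calls stay in place, which is what makes step-by-step
# generation cheap.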
- bs.x = F.pad(bs.x, (1, -1))
+ # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
+ bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
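# F.pad(bs.x, (1, -1)) prepends a padding token and drops the last one, i.e.
# shifts the sequence right by one position; the new version builds a fresh
# BracketedSequence instead of mutating the caller's object in place.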
- self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+ self,
+ input,
+ ar_mask,
+ seq_logproba,
+ temperature=1.0,
+ deterministic_synthesis=False,
+ forbidden_tokens=None,
+ forced_biases=None,
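# Judging by the parameter names, the expanded signature adds per-sequence
# log-probability accumulation (seq_logproba), a sampling temperature, and
# optional token masking / logit biasing to the generation routine.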
if to_generate.min() > 0:
    self(
        BracketedSequence(input, 0, to_generate.min())
    )  # Needed to initialize the model's cache
for s in range(to_generate.min(), to_generate.max() + 1):
    output = self(BracketedSequence(input, s, 1)).x
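# Each step feeds a width-1 BracketedSequence, so the model computes logits for
# position s only and relies on the cache for everything before it.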
if forbidden_tokens is not None:
    logits = logits.masked_fill(forbidden_tokens, float("-inf"))
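# Setting forbidden logits to -inf gives those tokens zero probability after the
# softmax, so they can never be sampled.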
input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
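# ar_mask selects which positions are generated: where it is 1 the sampled token
# t_next replaces the input, where it is 0 the original input token is kept.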
+ def record_attention(self, v=True):
+     for m in self.modules():
+         if isinstance(m, QKVAttention):
+             m.record_attention = v
+
+ def retrieve_attention(self):
+     a = []
+     for m in self.modules():
+         if isinstance(m, QKVAttention):
+             a.append(m.a)
+     return a
+
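# These hooks toggle attention recording on every QKVAttention module and then
# collect the stored attention matrices (m.a), presumably for later
# visualization or analysis.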
y2 = torch.randn_like(y1)
for s in range(x.size(1)):
    z = model(BracketedSequence(x, s, 1))
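# Consistency check: y2 starts as noise and is rebuilt one position at a time;
# presumably y1 was computed in a single full-sequence pass, and the two should
# match if the caching logic is correct.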