return self.x[:, self.first : self.first + self.nb]
def complete(self):
- return self.first == 0 and self.nb == x.size(1)
+ return self.first == 0 and self.nb == self.x.size(1)
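# Illustrative sketch (not part of the patch; assumes first/nb default to the
# full range when omitted, as in the single-argument call further down):
# a BracketedSequence carries the full tensor x plus the sub-range
# [first, first + nb) currently being processed, so slice() returns only the
# active columns and complete() tells whether the whole sequence is covered.
#
#   bs = BracketedSequence(torch.zeros(2, 10, dtype=torch.int64))
#   bs.slice().shape   # torch.Size([2, 10]); bs.complete() is True
#   bs = BracketedSequence(bs.x, first=3, nb=1)
#   bs.slice().shape   # torch.Size([2, 1]);  bs.complete() is False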
######################################################################
class QKVAttention(nn.Module):
def __init__(
- self, dim_in, dim_qk, dim_v, nb_heads=1, causal=False, attention_dropout=0.0
+ self,
+ dim_in,
+ dim_qk,
+ dim_v,
+ nb_heads=1,
+ causal=False,
+ attention_dropout=0.0,
):
super().__init__()
self.causal = causal
self.attention_dropout = attention_dropout
+ self.record_attention = False
self.w_q = randw(nb_heads, dim_qk, dim_in)
self.w_k = randw(nb_heads, dim_qk, dim_in)
)
a = a.softmax(dim=3)
+
+ if self.record_attention:
+ self.a = a
+
a = F.dropout(a, self.attention_dropout, self.training)
y = torch.einsum(
m.weight.fill_(1.0)
def forward(self, bs):
+ # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
bs = self.embedding(bs)
bs = self.trunk(bs)
# unchanged.
def masked_inplace_autoregression(
- self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+ self,
+ input,
+ ar_mask,
+ seq_logproba,
+ temperature=1.0,
+ deterministic_synthesis=False,
+ forbidden_tokens=None,
+ forced_biases=None,
):
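# columns (positions) where at least one sequence in the batch must be generated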
to_generate = (ar_mask.sum(0) > 0).nonzero()
+
if to_generate.min() > 0:
self(
BracketedSequence(input, 0, to_generate.min())
) # Needed to initialize the model's cache
for s in range(to_generate.min(), to_generate.max() + 1):
output = self(BracketedSequence(input, s, 1)).x
+
logits = output[:, s]
+
+ logits = (logits / temperature).log_softmax(dim=-1)
+
if forbidden_tokens is not None:
logits = logits.masked_fill(forbidden_tokens, float("-inf"))
+
+ if forced_biases is not None:
+ logits = logits + forced_biases[None, :]
+
if deterministic_synthesis:
- t_next = logits.argmax(1)
+ t_next = logits.argmax(-1)
else:
dist = torch.distributions.categorical.Categorical(logits=logits)
t_next = dist.sample()
+
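+ # accumulate the log-probability of the chosen token, one value per sequence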
+ all_n = torch.arange(t_next.size(0))
+ seq_logproba += logits[all_n, t_next]
+
input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
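# Illustrative usage sketch (the tensors, prompt length, and sizes below are
# assumptions, not taken from the patch): positions with ar_mask == 1 are
# generated in place, the others are left untouched, and seq_logproba
# accumulates one log-probability per sequence.
#
#   input = torch.zeros(4, 16, dtype=torch.int64)
#   ar_mask = (torch.arange(16) >= 8).long()[None, :].expand(4, -1)
#   seq_logproba = torch.zeros(4)
#   model.masked_inplace_autoregression(
#       input, ar_mask, seq_logproba, temperature=1.0, deterministic_synthesis=True
#   )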
+ def record_attention(self, v=True):
+ for m in self.modules():
+ if isinstance(m, QKVAttention):
+ m.record_attention = v
+
+ def retrieve_attention(self):
+ a = []
+ for m in self.modules():
+ if isinstance(m, QKVAttention):
+ a.append(m.a)
+ return a
+
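# Illustrative usage sketch: toggle attention recording around a forward pass
# and collect one attention tensor per QKVAttention module (the shape is an
# assumption, roughly (N, nb_heads, T, T) for causal self-attention):
#
#   model.record_attention(True)
#   model(BracketedSequence(x))
#   attention_maps = model.retrieve_attention()
#   model.record_attention(False)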
######################################################################
dim_keys=2,
dim_hidden=2,
nb_heads=2,
- nb_blocks=1,
+ nb_blocks=2,
dropout=0.1,
causal=True,
)
model.eval()
-
y1 = model(BracketedSequence(x)).x
y2 = torch.randn_like(y1)
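# recompute the output one position at a time through the cache; it should match y1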
for s in range(x.size(1)):