return self.x[:, self.first : self.first + self.nb]
def complete(self):
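+ # True iff the bracket spans the whole stored sequence.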
- return self.first == 0 and self.nb == x.size(1)
+ return self.first == 0 and self.nb == self.x.size(1)
######################################################################
m.weight.fill_(1.0)
def forward(self, bs):
+ # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
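+ # F.pad(x, (1, -1)) shifts the token dimension right by one: it prepends a
+ # zero column and drops the last, so position t is predicted from tokens < t.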
bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
bs = self.embedding(bs)
bs = self.trunk(bs)
# unchanged.
def masked_inplace_autoregression(
- self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+ self,
+ input,
+ ar_mask,
+ temperature=1.0,
+ deterministic_synthesis=False,
+ forbidden_tokens=None,
+ forced_biases=None,
):
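+ # temperature divides the logits before sampling; forbidden_tokens is a
+ # boolean mask of vocabulary entries to exclude; forced_biases is a
+ # per-token additive bias applied at every step. sum_logits accumulates
+ # the log-probability of the sampled tokens.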
+ sum_logits = 0
to_generate = (ar_mask.sum(0) > 0).nonzero()
if to_generate.min() > 0:
self(
BracketedSequence(input, 0, to_generate.min())
) # Needed to initialize the model's cache
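+ # Generate one position per step; thanks to the cache, each forward pass
+ # only has to process the single new column.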
for s in range(to_generate.min(), to_generate.max() + 1):
output = self(BracketedSequence(input, s, 1)).x
- logits = output[:, s]
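+ # Temperature scaling: T < 1 sharpens the distribution, T > 1 flattens it.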
+ logits = output[:, s] / temperature
if forbidden_tokens is not None:
logits = logits.masked_fill(forbidden_tokens, float("-inf"))
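+ # forced_biases, if given, holds one additive bias per vocabulary entry,
+ # broadcast over the batch at every step.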
+ if forced_biases is not None:
+ logits = logits + forced_biases[None, :]
if deterministic_synthesis:
t_next = logits.argmax(1)
else:
dist = torch.distributions.categorical.Categorical(logits=logits)
t_next = dist.sample()
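+ # Accumulate the log-probability of the tokens just sampled, summed
+ # over the batch.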
+ sum_logits += logits.log_softmax(dim=-1)[
+ torch.arange(t_next.size(0)), t_next
+ ].sum()
input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
+ return sum_logits
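+
+ # Hypothetical usage sketch (B, T, prompt_len, model are assumptions, not
+ # names from this file): sample the masked suffix of each sequence at
+ # temperature 0.8 and recover the total log-prob of what was generated:
+ #
+ #   ar_mask = (torch.arange(T) >= prompt_len).long()[None, :].expand(B, T).clone()
+ #   log_p = model.masked_inplace_autoregression(input, ar_mask, temperature=0.8)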
+
def record_attention(self, v=True):
for m in self.modules():
if isinstance(m, QKVAttention):