        self,
        input,
        ar_mask,
+       seq_logproba,
+       temperature=1.0,
        deterministic_synthesis=False,
        forbidden_tokens=None,
        forced_biases=None,
    ):
        to_generate = (ar_mask.sum(0) > 0).nonzero()
+
        if to_generate.min() > 0:
            self(
                BracketedSequence(input, 0, to_generate.min())
            )  # Needed to initialize the model's cache

        for s in range(to_generate.min(), to_generate.max() + 1):
            output = self(BracketedSequence(input, s, 1)).x
+
            logits = output[:, s]
+
+           # rescale with the temperature and renormalize into log-probabilities
+           logits = (logits / temperature).log_softmax(dim=-1)
+
            if forbidden_tokens is not None:
                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
+
            if forced_biases is not None:
                logits = logits + forced_biases[None, :]
+
            if deterministic_synthesis:
-               t_next = logits.argmax(1)
+               t_next = logits.argmax(-1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
+
+           all_n = torch.arange(t_next.size(0))
+           # logits[all_n, t_next] is already the per-sequence log-proba of the
+           # chosen token, of shape (N,); summing over dim -1 would collapse it
+           # to a scalar, so it is accumulated without reduction
+           seq_logproba += logits[all_n, t_next]
+
            # update input in place, but only at the positions flagged in ar_mask
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
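
# Usage sketch (an assumption, not part of this patch): if the enclosing method
# is a MyGPT-style masked_inplace_autoregression, the new arguments could be
# driven as below. `model`, `input`, and `ar_mask` are hypothetical caller-side
# names; the key point is that seq_logproba is a per-sequence (N,) float
# accumulator and temperature rescales the logits before sampling.
#
#   seq_logproba = torch.zeros(input.size(0), device=input.device)
#   model.masked_inplace_autoregression(
#       input,
#       ar_mask,
#       seq_logproba,
#       temperature=1.0,
#       deterministic_synthesis=False,
#   )
#   # seq_logproba now holds, per sequence, the sum of the log-probas of the
#   # tokens generated at the masked positions
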
def record_attention(self, v=True):