Update: fix BracketedSequence.complete() to reference self.x, and extend MyGPT.masked_inplace_autoregression with a sampling temperature, a per-sequence log-probability accumulator (seq_logproba), and optional forced_biases.
[culture.git] / mygpt.py
index 0400b48..7047849 100755 (executable)
--- a/mygpt.py
+++ b/mygpt.py
@@ -46,7 +46,7 @@ class BracketedSequence:
         return self.x[:, self.first : self.first + self.nb]
 
     def complete(self):
-        return self.first == 0 and self.nb == x.size(1)
+        return self.first == 0 and self.nb == self.x.size(1)
 
 
 ######################################################################
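Aside on the first hunk: the old complete() referenced a free variable x and would raise NameError when called; the fix binds it to the instance attribute. For context, a reconstructed sketch of the BracketedSequence contract as it can be inferred from this file (the constructor below is an assumption for illustration, not part of the diff):

    import torch

    class BracketedSequence:
        # x holds the full (N, T) token tensor; positions in
        # [first, first + nb) are the ones whose values are meaningful.
        def __init__(self, x, first=0, nb=None):
            self.x = x
            self.first = first
            self.nb = x.size(1) if nb is None else nb

        def slice(self):
            return self.x[:, self.first : self.first + self.nb]

        def complete(self):
            # True when the bracket covers the whole sequence
            return self.first == 0 and self.nb == self.x.size(1)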
@@ -264,6 +264,7 @@ class MyGPT(nn.Module):
                     m.weight.fill_(1.0)
 
     def forward(self, bs):
+        # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
         bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
         bs = self.embedding(bs)
         bs = self.trunk(bs)
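In forward, F.pad(bs.x, (1, -1)) right-shifts the tokens along the time dimension (one zero prepended, last column dropped), so the logits at position t are conditioned only on tokens strictly before t. A quick standalone check of that padding idiom:

    import torch
    import torch.nn.functional as F

    # (1, -1) on the last dimension: pad one zero on the left and trim one
    # element on the right, i.e. the standard next-token shift.
    x = torch.tensor([[11, 12, 13, 14]])
    assert F.pad(x, (1, -1)).tolist() == [[0, 11, 12, 13]]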
@@ -275,23 +276,43 @@ class MyGPT(nn.Module):
     # unchanged.
 
     def masked_inplace_autoregression(
-        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
+        self,
+        input,
+        ar_mask,
+        seq_logproba,
+        temperature=1.0,
+        deterministic_synthesis=False,
+        forbidden_tokens=None,
+        forced_biases=None,
     ):
         to_generate = (ar_mask.sum(0) > 0).nonzero()
+
         if to_generate.min() > 0:
             self(
                 BracketedSequence(input, 0, to_generate.min())
             )  # Needed to initialize the model's cache
         for s in range(to_generate.min(), to_generate.max() + 1):
             output = self(BracketedSequence(input, s, 1)).x
+
             logits = output[:, s]
+
+            logits = (logits / temperature).log_softmax(dim=-1)
+
             if forbidden_tokens is not None:
                 logits = logits.masked_fill(forbidden_tokens, float("-inf"))
+
+            if forced_biases is not None:
+                logits = logits + forced_biases[None, :]
+
             if deterministic_synthesis:
-                t_next = logits.argmax(1)
+                t_next = logits.argmax(-1)
             else:
                 dist = torch.distributions.categorical.Categorical(logits=logits)
                 t_next = dist.sample()
+
+            all_n = torch.arange(t_next.size(0))
+            seq_logproba += logits[all_n, t_next]  # per-sequence log-prob of the sampled token
+
             input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
 
     def record_attention(self, v=True):
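
For reference, a usage sketch of the extended signature. This is illustrative only: the constructor arguments, tensor shapes, and token values are assumptions, not part of this diff. seq_logproba is filled in place with the summed log-probability of the tokens sampled for each sequence, which can then be used to rank the generated sequences.

    import torch
    from mygpt import MyGPT

    # Hyper-parameters below are assumed for illustration.
    model = MyGPT(
        vocabulary_size=256,
        dim_model=64,
        dim_keys=16,
        dim_hidden=128,
        nb_heads=4,
        nb_blocks=2,
        causal=True,
    )
    model.eval()

    N, T = 4, 16
    input = torch.zeros(N, T, dtype=torch.int64)
    ar_mask = torch.zeros(N, T, dtype=torch.int64)
    ar_mask[:, T // 2 :] = 1  # regenerate the second half of each sequence
    seq_logproba = torch.zeros(N)  # accumulated in place, one value per sequence

    with torch.no_grad():
        model.masked_inplace_autoregression(
            input,
            ar_mask,
            seq_logproba,
            temperature=1.0,
            deterministic_synthesis=False,
        )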