Fix complete() to read self.x instead of an unbound x, record attention weights after the softmax rather than before, and remove masked_inplace_autoregression from MyGPT.
diff --git a/mygpt.py b/mygpt.py
index ac1c55e..7119c7a 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -46,7 +46,7 @@ class BracketedSequence:
         return self.x[:, self.first : self.first + self.nb]
 
     def complete(self):
-        return self.first == 0 and self.nb == x.size(1)
+        return self.first == 0 and self.nb == self.x.size(1)
 
 
 ######################################################################
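The deleted line compared against a bare x, which is unbound inside complete(); calling it would raise a NameError (or silently read a module-level x, if one happened to exist). The fix reads the tensor through self.x. Below is a minimal sketch of the class as it appears in these hunks; the __init__ defaults are an assumption, since the constructor is not shown in the diff:

import torch

class BracketedSequence:
    # Wraps a full sequence tensor x plus the bracket of nb positions
    # starting at first that is currently being processed.
    def __init__(self, x, first=None, nb=None):  # defaults assumed, not in diff
        self.x = x
        self.first = 0 if first is None else first
        self.nb = x.size(1) if nb is None else nb

    def slice(self):
        return self.x[:, self.first : self.first + self.nb]

    def complete(self):
        # Fixed: reference the instance attribute, not an unbound name.
        return self.first == 0 and self.nb == self.x.size(1)

bs = BracketedSequence(torch.zeros(2, 5, dtype=torch.long))
assert bs.complete()  # pre-fix this raised NameError: name 'x' is not defined
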
@@ -169,9 +169,6 @@ class QKVAttention(nn.Module):
             "nhtd,nhsd->nhts", q, self.cache_k[:, :, : bs_q.first + bs_q.nb]
         ) / math.sqrt(self.w_q.size(1))
 
-        if self.record_attention:
-            self.a = a
-
         if self.causal:
             if bs_q.first == 0:
                 self.cache_attzero = (
@@ -186,6 +183,10 @@ class QKVAttention(nn.Module):
             )
 
         a = a.softmax(dim=3)
+
+        if self.record_attention:
+            self.a = a
+
         a = F.dropout(a, self.attention_dropout, self.training)
 
         y = torch.einsum(
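These two hunks move the record_attention capture from the raw scores to just after the softmax (and before dropout), so self.a now holds normalized weights that sum to 1 over the key axis instead of unnormalized, possibly unmasked logits. A sketch of how the recorded tensor could be inspected after this change; model and input_ids are placeholder names, not from this diff:

model.record_attention(True)  # sets the flag on every QKVAttention module
with torch.no_grad():
    model(BracketedSequence(input_ids, 0, input_ids.size(1)))
for m in model.modules():
    if isinstance(m, QKVAttention):
        # m.a has shape (N, n_heads, T, T); after this change each row
        # is a proper distribution over the attended positions.
        print(m.a.sum(dim=3))  # ~1.0 everywhere
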
@@ -263,36 +264,13 @@ class MyGPT(nn.Module):
                     m.weight.fill_(1.0)
 
     def forward(self, bs):
+        # print(f"GENERATE {bs.first} {bs.first+bs.nb}")
         bs = BracketedSequence(F.pad(bs.x, (1, -1)), bs.first, bs.nb)
         bs = self.embedding(bs)
         bs = self.trunk(bs)
         bs = self.readout(bs)
         return bs
 
-    # ar_mask is a tensor with 0s and 1s, of same shape as input, with
-    # 1s where tokens should be generated. The others are kept
-    # unchanged.
-
-    def masked_inplace_autoregression(
-        self, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
-    ):
-        to_generate = (ar_mask.sum(0) > 0).nonzero()
-        if to_generate.min() > 0:
-            self(
-                BracketedSequence(input, 0, to_generate.min())
-            )  # Needed to initialize the model's cache
-        for s in range(to_generate.min(), to_generate.max() + 1):
-            output = self(BracketedSequence(input, s, 1)).x
-            logits = output[:, s]
-            if forbidden_tokens is not None:
-                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
-            if deterministic_synthesis:
-                t_next = logits.argmax(1)
-            else:
-                dist = torch.distributions.categorical.Categorical(logits=logits)
-                t_next = dist.sample()
-            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
-
     def record_attention(self, v=True):
         for m in self.modules():
             if isinstance(m, QKVAttention):
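
The method removed here performed in-place autoregressive sampling: prime the per-layer caches on the longest fixed prefix, then generate one position at a time, writing sampled tokens only where ar_mask is 1. Note that forward() shifts the input right by one token via F.pad(bs.x, (1, -1)), so the logits at position s predict the token at position s, which is why the loop samples output[:, s] back into input[:, s]. The diff does not show where the method went; if it now lives outside the model, a free-function version would look roughly like this (same logic as the deleted lines, with the model passed explicitly):

import torch

def masked_inplace_autoregression(
    model, input, ar_mask, forbidden_tokens=None, deterministic_synthesis=False
):
    # ar_mask has the same shape as input: 1 where tokens must be
    # generated, 0 where the input is kept unchanged.
    to_generate = (ar_mask.sum(0) > 0).nonzero()
    if to_generate.min() > 0:
        # Prime the model's caches on the fixed prefix.
        model(BracketedSequence(input, 0, to_generate.min()))
    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x
        logits = output[:, s]
        if forbidden_tokens is not None:
            logits = logits.masked_fill(forbidden_tokens, float("-inf"))
        if deterministic_synthesis:
            t_next = logits.argmax(1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()
        # Overwrite only the masked positions; keep the rest unchanged.
        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]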