Update.
index 3e63567..7047849 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -279,13 +279,12 @@ class MyGPT(nn.Module):
         self,
         input,
         ar_mask,
+        seq_logproba,
         temperature=1.0,
         deterministic_synthesis=False,
         forbidden_tokens=None,
         forced_biases=None,
     ):
-        sum_logits = 0
-
         to_generate = (ar_mask.sum(0) > 0).nonzero()
 
         if to_generate.min() > 0:
@@ -297,7 +296,7 @@ class MyGPT(nn.Module):
 
             logits = output[:, s]
 
-            logits = logits.log_softmax(dim=1) / temperature
+            logits = (logits / temperature).log_softmax(dim=-1)
 
             if forbidden_tokens is not None:
                 logits = logits.masked_fill(forbidden_tokens, float("-inf"))
@@ -306,17 +305,15 @@ class MyGPT(nn.Module):
                 logits = logits + forced_biases[None, :]
 
             if deterministic_synthesis:
-                t_next = logits.argmax(1)
+                t_next = logits.argmax(-1)
             else:
                 dist = torch.distributions.categorical.Categorical(logits=logits)
                 t_next = dist.sample()
-                sum_logits += logits.log_softmax(dim=1)[
-                    torch.arange(t_next.size(0)), t_next
-                ].sum()
 
-            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
+            all_n = torch.arange(t_next.size(0))
+            seq_logproba += logits[all_n, t_next].sum(dim=-1)
 
-        return sum_logits
+            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
 
     def record_attention(self, v=True):
         for m in self.modules():
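
For context: the change in the second hunk is not a pure reordering. Dividing log-probabilities by the temperature after log_softmax does not yield a normalized distribution; the temperature has to be applied to the raw logits before normalization. A minimal standalone sketch of the difference (plain PyTorch, illustrative values):

    import torch

    logits = torch.tensor([[2.0, 0.5, -1.0]])
    temperature = 0.7

    old = logits.log_softmax(dim=1) / temperature     # before this commit
    new = (logits / temperature).log_softmax(dim=-1)  # after this commit

    print(old.exp().sum(dim=-1))  # != 1.0: not valid log-probabilities
    print(new.exp().sum(dim=-1))  # 1.0 up to float error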
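The third hunk also changes how sampled-token log-probabilities are accumulated: the scalar sum_logits (and its return value) is replaced by a seq_logproba tensor that the caller passes in and that is updated in place, keeping one running log-probability per sequence. A sketch of the indexing pattern for a single step, with illustrative shapes (note that the gather logits[all_n, t_next] already yields one value per sequence):

    import torch

    N, V = 4, 10                   # batch size, vocabulary size (illustrative)
    seq_logproba = torch.zeros(N)  # caller-owned accumulator, one entry per sequence

    logits = torch.randn(N, V).log_softmax(dim=-1)  # log-probas for one position
    dist = torch.distributions.categorical.Categorical(logits=logits)
    t_next = dist.sample()                          # shape (N,)

    all_n = torch.arange(t_next.size(0))
    seq_logproba += logits[all_n, t_next]           # log-proba of each sampled token

    print(seq_logproba.shape)  # torch.Size([4]): per sequence, where sum_logits was one scalar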