Update: rename summed_logits to seq_logproba and pass it as a per-sequence buffer through masked_inplace_autoregression; rename class World to QuizzMachine; replace desired_average_logits with min_ave_seq_logproba in create_c_quizzes.
[culture.git] / tasks.py
index 43f7d53..50ded2c 100755
--- a/tasks.py
+++ b/tasks.py
@@ -22,7 +22,7 @@ def masked_inplace_autoregression(
     batch_size,
     input,
     ar_mask,
-    summed_logits,
+    seq_logproba,
     temperature,
     deterministic_synthesis,
     forbidden_tokens=None,
@@ -32,7 +32,11 @@ def masked_inplace_autoregression(
 ):
     assert input.size() == ar_mask.size()
 
-    batches = zip(input.split(batch_size), ar_mask.split(batch_size))
+    batches = zip(
+        input.split(batch_size),
+        ar_mask.split(batch_size),
+        seq_logproba.split(batch_size),
+    )
 
     if progress_bar_desc is not None:
         batches = tqdm.tqdm(
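The batching above relies on Tensor.split returning views of the original storage, so the in-place generation performed on each batch also fills the full input and seq_logproba tensors owned by the caller. A minimal sketch of that pattern, with made-up sizes:

import torch

# Made-up sizes, only to illustrate the split/zip batching pattern.
nb, seq_len, batch_size = 6, 4, 2

input = torch.zeros(nb, seq_len, dtype=torch.long)
ar_mask = torch.ones(nb, seq_len, dtype=torch.long)
seq_logproba = torch.empty(nb)

batches = zip(
    input.split(batch_size),
    ar_mask.split(batch_size),
    seq_logproba.split(batch_size),
)

for input_b, ar_mask_b, seq_logproba_b in batches:
    # split() yields views, so writing in place here also fills the
    # full seq_logproba (and input) tensors defined above.
    seq_logproba_b[...] = -1.0

assert (seq_logproba == -1.0).all()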
@@ -46,11 +50,11 @@ def masked_inplace_autoregression(
         t = model.training
         model.eval()
 
-        for input, ar_mask in batches:
+        for input, ar_mask, seq_logproba in batches:
             model.masked_inplace_autoregression(
                 input=input,
                 ar_mask=ar_mask,
-                summed_logits=summed_logits,
+                seq_logproba=seq_logproba,
                 temperature=temperature,
                 deterministic_synthesis=deterministic_synthesis,
                 forbidden_tokens=forbidden_tokens,
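The model-side masked_inplace_autoregression is not part of this diff; the sketch below only illustrates, under that assumption, the bookkeeping a seq_logproba buffer typically receives: the log-probability of each sampled token is added, in place, to its sequence's running total.

import torch

def toy_masked_inplace_autoregression(input, ar_mask, seq_logproba, temperature=1.0):
    # Toy stand-in for the model call: logits come from a fixed random
    # table instead of a transformer and there is no real left-to-right
    # conditioning; only the seq_logproba accounting is the point here.
    vocab_size = 16
    logits_table = torch.randn(input.size(1), vocab_size)

    for t in range(input.size(1)):
        logits = logits_table[t].expand(input.size(0), -1) / temperature
        logproba = logits.log_softmax(dim=-1)
        sampled = torch.distributions.Categorical(logits=logits).sample()
        generate_here = ar_mask[:, t] == 1
        input[:, t] = torch.where(generate_here, sampled, input[:, t])
        # Accumulate, per sequence, the log-probability of the token that
        # was actually generated; positions kept from the prompt add nothing.
        seq_logproba += generate_here * logproba.gather(1, input[:, t, None]).squeeze(1)

In the caller, the buffer is zeroed (seq_logproba[...] = 0) before any pass whose accumulated value is actually used, as in create_c_quizzes further down.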
@@ -81,7 +85,7 @@ class Task:
 import world
 
 
-class World(Task):
+class QuizzMachine(Task):
     def save_image(self, input, result_dir, filename, logger):
         img = world.seq2img(input.to("cpu"), self.height, self.width)
         image_name = os.path.join(result_dir, filename)
@@ -178,13 +182,14 @@ class World(Task):
             input = input[:nmax]
             ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
+            seq_logproba = torch.empty(input.size(0), device=self.device)
 
             masked_inplace_autoregression(
                 model=model,
                 batch_size=self.batch_size,
                 input=result,
                 ar_mask=ar_mask,
-                summed_logits=None,
+                seq_logproba=seq_logproba,
                 temperature=1.0,
                 deterministic_synthesis=deterministic_synthesis,
                 progress_bar_desc=None,
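make_ar_mask is defined elsewhere in the class and does not appear in this diff. A hypothetical version, assuming each quiz is serialized as a prompt followed by an answer of fixed length, would put 1 on the positions to regenerate, so that result = input.clone() * (1 - ar_mask) blanks exactly those tokens:

import torch

def make_ar_mask(input, answer_len=10):
    # Hypothetical layout: the last answer_len tokens are the part to be
    # regenerated. The real mask depends on how the quizzes are serialized,
    # which this diff does not show.
    ar_mask = torch.zeros_like(input)
    ar_mask[:, -answer_len:] = 1
    return ar_mask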
@@ -218,13 +223,14 @@ class World(Task):
         input = self.test_w_quizzes[:96]
         ar_mask = self.make_ar_mask(input)
         result = input.clone() * (1 - ar_mask)
+        seq_logproba = torch.empty(input.size(0), device=self.device)
 
         masked_inplace_autoregression(
             model=model,
             batch_size=self.batch_size,
             input=result,
             ar_mask=ar_mask,
-            summed_logits=None,
+            seq_logproba=seq_logproba,
             temperature=1.0,
             deterministic_synthesis=deterministic_synthesis,
             progress_bar_desc=None,
@@ -262,7 +268,7 @@ class World(Task):
         nb,
         model,
         other_models,
-        desired_average_logits=None,
+        min_ave_seq_logproba,
     ):
         ###############################################################
         # Generate quizzes with model
@@ -272,41 +278,41 @@ class World(Task):
         )
 
         ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
-        summed_logits = torch.empty(nb, device=self.device)
+        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
 
         temperature = 1
         d_temperature = 1
 
         while True:
-            summed_logits[...] = 0
+            seq_logproba[...] = 0
 
             masked_inplace_autoregression(
                 model=model,
                 batch_size=self.batch_size,
                 input=c_quizzes,
                 ar_mask=ar_mask,
-                summed_logits=summed_logits,
+                seq_logproba=seq_logproba,
                 temperature=temperature,
                 deterministic_synthesis=False,
                 progress_bar_desc="sampling c_quizzes",
                 device=self.device,
             )
 
-            average_logits = summed_logits.mean()
+            ave_seq_logproba = seq_logproba.mean()
 
-            logger(f"{average_logits=} {desired_average_logits=}")
+            logger(f"{ave_seq_logproba=} {min_ave_seq_logproba=}")
 
-            if desired_average_logits is None:
+            if min_ave_seq_logproba is None:
                 break
 
             # Oh man that's ugly
-            if average_logits < desired_average_logits * 1.1:
+            if ave_seq_logproba < min_ave_seq_logproba * 1.1:
                 if d_temperature > 0:
-                    d_temperature *= -0.5
+                    d_temperature *= -1 / 3
                 temperature += d_temperature
-            elif average_logits > desired_average_logits:
+            elif ave_seq_logproba > min_ave_seq_logproba:
                 if d_temperature < 0:
-                    d_temperature *= -0.5
+                    d_temperature *= -1 / 3
                 temperature += d_temperature
             else:
                 break
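Stripped of the sampling pass, the loop above is a small line search on temperature: the step direction flips, and its magnitude shrinks by a factor of three, each time ave_seq_logproba crosses the target band between min_ave_seq_logproba * 1.1 and min_ave_seq_logproba (both negative). A self-contained sketch with a toy stand-in for the sampler; max_steps is a safety guard added for the sketch only:

def adjust_temperature(
    ave_seq_logproba_at, min_ave_seq_logproba, temperature=1.0, d_temperature=1.0, max_steps=100
):
    # ave_seq_logproba_at: callable temperature -> average sequence
    # log-probability, standing in for the sampling pass in the diff.
    for _ in range(max_steps):
        ave_seq_logproba = ave_seq_logproba_at(temperature)
        if min_ave_seq_logproba is None:
            break
        if ave_seq_logproba < min_ave_seq_logproba * 1.1:
            # Too improbable: step toward lower temperature, shrinking the
            # step whenever the direction flips.
            if d_temperature > 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        elif ave_seq_logproba > min_ave_seq_logproba:
            # Too probable: step toward higher temperature.
            if d_temperature < 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        else:
            break
    return temperature, ave_seq_logproba

# Toy usage: pretend the average log-probability drops linearly with
# temperature; the search settles inside the band around -80.
temperature, ave_seq_logproba = adjust_temperature(lambda T: -50.0 * T, -80.0)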
@@ -326,6 +332,7 @@ class World(Task):
         )
 
         ar_mask = self.make_ar_mask(c_quizzes)
+        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
 
         ###############################################################
         # Check how many of the other models can solve them in both
@@ -341,7 +348,7 @@ class World(Task):
                 batch_size=self.batch_size,
                 input=result,
                 ar_mask=ar_mask,
-                summed_logits=None,
+                seq_logproba=seq_logproba,
                 temperature=1.0,
                 deterministic_synthesis=True,
                 progress_bar_desc="solving c_quizzes",
@@ -357,7 +364,7 @@ class World(Task):
                 batch_size=self.batch_size,
                 input=reverse_result,
                 ar_mask=ar_mask,
-                summed_logits=None,
+                seq_logproba=seq_logproba,
                 temperature=1.0,
                 deterministic_synthesis=True,
                 progress_bar_desc="solving reversed c_quizzes",
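The comparison that turns these two deterministic passes into an entry of nb_correct sits between the hunks shown here. Under the assumption that a quiz is credited to a model only when every masked token is reproduced exactly in both orientations, a per-model check could look like the hypothetical helper below (reverse_c_quizzes, the reversed reference, is not shown in this diff either):

def count_fully_solved(result, reverse_result, c_quizzes, reverse_c_quizzes, ar_mask):
    # Hypothetical check: a model gets credit for a quiz only if all the
    # masked tokens match in both the forward and the reversed orientation.
    n_masked = ar_mask.sum(dim=1)
    forward_ok = ((result == c_quizzes).long() * ar_mask).sum(dim=1) == n_masked
    reverse_ok = ((reverse_result == reverse_c_quizzes).long() * ar_mask).sum(dim=1) == n_masked
    return (forward_ok & reverse_ok).long()

One such 0/1 vector per other model, stacked and summed over the model dimension, would yield the per-quiz counts that the torch.cat(nb_correct, dim=0).sum(dim=0) line at the end of the method aggregates.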
@@ -372,9 +379,4 @@ class World(Task):
 
         nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)
 
-        # filename = os.path.join(result_dir, "correct_{n_epoch:04d}.dat")
-        # with open(filename, "w") as f:
-        # for k in nb_correct:
-        # f.write(f"{k}\n")
-
-        return c_quizzes, nb_correct, summed_logits.mean()
+        return c_quizzes, nb_correct, seq_logproba.mean()