Update.
diff --git a/tasks.py b/tasks.py
index 5153836..845b5b3 100755
--- a/tasks.py
+++ b/tasks.py
@@ -5,7 +5,7 @@
 
 # Written by Francois Fleuret <francois@fleuret.org>
 
-import math, os, tqdm
+import math, os, tqdm, warnings
 
 import torch, torchvision
 
@@ -27,6 +27,7 @@ def masked_inplace_autoregression(
     ar_mask,
     deterministic_synthesis,
     forbidden_tokens=None,
+    logit_biases=None,
     progress_bar_desc="autoregression",
     device=torch.device("cpu"),
 ):
@@ -48,7 +49,11 @@ def masked_inplace_autoregression(
 
         for input, ar_mask in batches:
             model.masked_inplace_autoregression(
-                input, ar_mask, forbidden_tokens, deterministic_synthesis
+                input,
+                ar_mask,
+                deterministic_synthesis,
+                forbidden_tokens,
+                logit_biases,
             )
 
         model.train(t)
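
The call site now forwards a logit_biases argument to
model.masked_inplace_autoregression, whose body is not part of this
diff. As a minimal sketch of what such a bias typically does during
sampling (the function below and its name are assumptions, not
repository code): an additive term on the logits before the softmax.

    import torch

    # Sketch: logits is (batch, vocab_size); logit_biases, if given, is
    # broadcastable to it and is added before sampling, up- or
    # down-weighting individual tokens.
    def sample_with_bias(logits, logit_biases=None, deterministic=False):
        if logit_biases is not None:
            logits = logits + logit_biases
        if deterministic:
            return logits.argmax(dim=-1)
        probs = torch.softmax(logits, dim=-1)
        return torch.multinomial(probs, num_samples=1).squeeze(-1)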
@@ -1862,10 +1867,10 @@ class QMLP(Task):
 
 ######################################################################
 
-import escape
+import greed
 
 
-class Escape(Task):
+class Greed(Task):
     def __init__(
         self,
         nb_train_samples,
@@ -1885,15 +1890,20 @@ class Escape(Task):
         self.height = height
         self.width = width
 
-        states, actions, rewards = escape.generate_episodes(
+        states, actions, rewards = greed.generate_episodes(
             nb_train_samples + nb_test_samples, height, width, T, nb_walls
         )
-        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
+        seq = greed.episodes2seq(states, actions, rewards)
         # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
 
-        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+        self.state_len = self.height * self.width
+        self.index_lookahead_reward = 0
+        self.index_states = 1
+        self.index_action = self.state_len + 1
+        self.index_reward = self.state_len + 2
+        self.it_len = self.state_len + 3  # lookahead_reward / state / action / reward
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
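
The constructor above fixes the per-iteration token layout: each
iteration occupies it_len = height * width + 3 tokens, ordered as
lookahead_reward, then the flattened state, then action, then reward.
A small illustration of the index arithmetic (field_position is a
hypothetical helper, not in the diff):

    # Maps (iteration, field) to an absolute position in the sequence,
    # mirroring the offsets defined in __init__ above.
    def field_position(iteration, field, height, width):
        state_len = height * width
        offsets = {
            "lookahead_reward": 0,
            "state": 1,
            "action": state_len + 1,
            "reward": state_len + 2,
        }
        return iteration * (state_len + 3) + offsets[field]

    assert field_position(0, "reward", 4, 6) == 4 * 6 + 2
    assert field_position(2, "state", 4, 6) == 2 * (4 * 6 + 3) + 1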
@@ -1905,21 +1915,24 @@ class Escape(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
+            t = torch.arange(batch.size(1), device=batch.device)[None, :]
+            u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+            lr_mask = (t <= u).long() * (
+                t % self.it_len == self.index_lookahead_reward
+            ).long()
+
+            batch = lr_mask * greed.lookahead_reward2code(2) + (1 - lr_mask) * batch
             yield batch
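
The new masking in batches() draws a random time u for every sequence
and overwrites each lookahead_reward slot at position t <= u with the
UNKNOWN code, so the model sees training examples both with and
without a visible lookahead reward. A toy rerun of the same mask
(it_len and the UNKNOWN code value are made-up numbers here; the diff
uses greed.lookahead_reward2code(2)):

    import torch

    it_len, index_lookahead_reward, UNKNOWN = 5, 0, 9
    batch = torch.arange(20).reshape(2, 10)  # two sequences, two iterations
    t = torch.arange(batch.size(1))[None, :]
    u = torch.randint(batch.size(1), (batch.size(0), 1))
    lr_mask = (t <= u).long() * (t % it_len == index_lookahead_reward).long()
    batch = lr_mask * UNKNOWN + (1 - lr_mask) * batch
    # positions 0 and 5 of each row become UNKNOWN when they fall at or
    # before that row's u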
 
     def vocabulary_size(self):
-        return self.nb_codes
+        return greed.nb_codes
 
     def thinking_autoregression(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
-        t = torch.arange(result.size(1), device=result.device)[None, :]
-
-        state_len = self.height * self.width
-        it_len = state_len + 3  # state / action / reward / lookahead_reward
+        snapshots = []
 
-        def ar(result, ar_mask):
+        def ar(result, ar_mask, logit_biases=None):
             ar_mask = ar_mask.expand_as(result)
             result *= 1 - ar_mask
             masked_inplace_autoregression(
@@ -1927,56 +1940,67 @@ class Escape(Task):
                 self.batch_size,
                 result,
                 ar_mask,
-                deterministic_synthesis,
+                deterministic_synthesis=deterministic_synthesis,
+                logit_biases=logit_biases,
                 device=self.device,
                 progress_bar_desc=None,
             )
+            warnings.warn("keeping thinking snapshots", RuntimeWarning)
+            snapshots.append(result[:10].detach().clone())
 
         # Generate iteration after iteration
 
+        result = self.test_input[:250].clone()
+        # Erase all the content but that of the first iteration
+        result[:, self.it_len :] = -1
+        # Set the lookahead_reward of the first iteration to UNKNOWN
+        result[:, self.index_lookahead_reward] = greed.lookahead_reward2code(2)
+
+        t = torch.arange(result.size(1), device=result.device)[None, :]
+
         for u in tqdm.tqdm(
-            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+            range(0, result.size(1), self.it_len),
+            desc="thinking",
         ):
-            # Put the lookahead reward to either 0 or -1 for the
-            # current iteration, with a proba that depends with the
-            # sequence index, so that we have diverse examples, sample
-            # the next state
-            s = -(
-                torch.rand(result.size(0), device=result.device)
-                <= torch.linspace(0, 1, result.size(0), device=result.device)
+            # Generate the next state but keep the initial one; the
+            # lookahead_reward of previous iterations is set to
+            # UNKNOWN
+            if u > 0:
+                result[
+                    :, u + self.index_lookahead_reward
+                ] = greed.lookahead_reward2code(2)
+                ar_mask = (t >= u + self.index_states).long() * (
+                    t < u + self.index_states + self.state_len
+                ).long()
+                ar(result, ar_mask)
+
+            # Generate the action and reward with lookahead_reward set to +1
+            result[:, u + self.index_lookahead_reward] = greed.lookahead_reward2code(1)
+            ar_mask = (t >= u + self.index_action).long() * (
+                t <= u + self.index_reward
             ).long()
-            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
-            ar_mask = (t >= u).long() * (t < u + state_len).long()
             ar(result, ar_mask)
 
-            # Put the lookahead reward to +1 for the current
-            # iteration, sample the action and reward
-            s = 1
-            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
-            ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
-            ar(result, ar_mask)
+            # Set the lookahead_reward to UNKNOWN for the next iterations
+            result[:, u + self.index_lookahead_reward] = greed.lookahead_reward2code(2)
 
-            # Fix the previous lookahead rewards in a consistant state
-            for v in range(0, u, it_len):
-                # Extract the rewards
-                r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
-                r = r - escape.first_rewards_code - 1
-                r = r.clamp(min=-1, max=1)  # the reward is predicted hence can be weird
-                a = r.min(dim=1).values
-                b = r.max(dim=1).values
-                s = (a < 0).long() * a + (a >= 0).long() * b
-                result[:, v + state_len + 2] = (
-                    s + 1 + escape.first_lookahead_rewards_code
-                )
+        filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            for n in range(10):
+                for s in snapshots:
+                    lr, s, a, r = greed.seq2episodes(
+                        s[n : n + 1], self.height, self.width
+                    )
+                    str = greed.episodes2str(
+                        lr, s, a, r, unicode=True, ansi_colors=True
+                    )
+                    f.write(str)
+                f.write("\n\n")
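
Read as control flow, the rewritten loop is a plan-by-conditioning
scheme: regenerate the state with the lookahead reward hidden, force
the lookahead reward to +1 while sampling the action and reward, then
hide it again so later iterations cannot condition on it. A
pseudocode-level sketch (thinking_loop, regen and the two code
constants are illustrative names, not repository code):

    # regen(result, first, length) stands in for the masked
    # autoregression over the span [first, first + length).
    def thinking_loop(result, task, CODE_PLUS, CODE_UNKNOWN, regen):
        for u in range(0, result.size(1), task.it_len):
            if u > 0:
                # regenerate the state, lookahead reward hidden
                result[:, u + task.index_lookahead_reward] = CODE_UNKNOWN
                regen(result, u + task.index_states, task.state_len)
            # sample action and reward under an optimistic lookahead reward
            result[:, u + task.index_lookahead_reward] = CODE_PLUS
            regen(result, u + task.index_action, 2)
            result[:, u + task.index_lookahead_reward] = CODE_UNKNOWN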
 
         # Saving the generated sequences
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
-        )
+        lr, s, a, r = greed.seq2episodes(result, self.height, self.width)
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -1990,12 +2014,12 @@ class Escape(Task):
 
         # Saving the ground truth
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        lr, s, a, r = greed.seq2episodes(
+            result,
+            self.height,
+            self.width,
         )
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -2022,12 +2046,12 @@ class Escape(Task):
 
         # Saving the generated sequences
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        lr, s, a, r = greed.seq2episodes(
+            result,
+            self.height,
+            self.width,
         )
+        str = greed.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f: