Oups
diff --git a/tasks.py b/tasks.py
index 1d967f9..c0ad5ff 100755
--- a/tasks.py
+++ b/tasks.py
@@ -5,7 +5,7 @@
 
 # Written by Francois Fleuret <francois@fleuret.org>
 
-import math, os, tqdm
+import math, os, tqdm, warnings
 
 import torch, torchvision
 
@@ -63,7 +63,7 @@ def masked_inplace_autoregression(
 
 
 class Task:
-    def batches(self, split="train"):
+    def batches(self, split="train", nb_to_use=-1, desc=None):
         pass
 
     def vocabulary_size(self):
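Note: the base-class change above widens the batches() interface so callers can cap how many samples are used and label the progress bar. Below is a minimal sketch of a conforming override, patterned on the Greed task further down; the truncation and default-desc logic are an assumption, not lines from this commit.

import torch, tqdm

# Sketch of a conforming batches() override (an assumption, not code
# from this commit): nb_to_use > 0 caps the number of samples, desc
# labels the tqdm progress bar.
class ToyTask(Task):
    def __init__(self, nb_samples=100, seq_len=8, batch_size=10):
        self.batch_size = batch_size
        self.train_input = torch.randint(10, (nb_samples, seq_len))
        self.test_input = torch.randint(10, (nb_samples, seq_len))

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch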
@@ -489,7 +489,7 @@ class PicoCLVR(Task):
         self.train_input = self.tensorize(self.train_descr)
         self.test_input = self.tensorize(self.test_descr)
 
-    def batches(self, split="train"):
+    def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
         for batch in tqdm.tqdm(
@@ -1685,7 +1685,7 @@ class Grid(Task):
         self.t_nul = self.token2id["#"]
         self.t_true = self.token2id["true"]
         self.t_false = self.token2id["false"]
-        self.t_pipe = self.token2id["|"]
+        self.t_pipe = self.token2id["|"]
 
         # Tokenize the train and test sets
         self.train_input = self.str2tensor(self.train_descr)
@@ -1694,7 +1694,7 @@ class Grid(Task):
             None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
         )
 
-    def batches(self, split="train"):
+    def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
         for batch in tqdm.tqdm(
@@ -1823,7 +1823,7 @@ class QMLP(Task):
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
-    def batches(self, split="train"):
+    def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
         for batch in tqdm.tqdm(
@@ -1867,10 +1867,10 @@ class QMLP(Task):
 
 ######################################################################
 
-import escape
+import greed
 
 
-class Escape(Task):
+class Greed(Task):
     def __init__(
         self,
         nb_train_samples,
@@ -1880,6 +1880,7 @@ class Escape(Task):
         width,
         T,
         nb_walls,
+        nb_coins,
         logger=None,
         device=torch.device("cpu"),
     ):
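Note: for reference, a hypothetical instantiation of the renamed task with the new nb_coins argument; the values below are illustrative assumptions, not defaults taken from this repository's training script.

import torch

# Illustrative values only; these are assumptions, not the settings
# used by the repository's main.py.
task = Greed(
    nb_train_samples=25000,
    nb_test_samples=1000,
    batch_size=25,
    height=5,
    width=7,
    T=25,
    nb_walls=5,
    nb_coins=2,
    device=torch.device("cpu"),
)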
@@ -1887,17 +1888,28 @@ class Escape(Task):
 
         self.batch_size = batch_size
         self.device = device
-        self.height = height
-        self.width = width
 
-        states, actions, rewards = escape.generate_episodes(
-            nb_train_samples + nb_test_samples, height, width, T, nb_walls
+        self.world = greed.GreedWorld(height, width, T, nb_walls, nb_coins)
+
+        states, actions, rewards = self.world.generate_episodes(
+            nb_train_samples + nb_test_samples
         )
-        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
-        # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+        seq = self.world.episodes2seq(states, actions, rewards)
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
 
+    def wipe_lookahead_rewards(self, batch):
+        t = torch.arange(batch.size(1), device=batch.device)[None, :]
+        u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+        lr_mask = (t <= u).long() * (
+            t % self.world.it_len == self.world.index_lookahead_reward
+        ).long()
+
+        return (
+            lr_mask * self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
+            + (1 - lr_mask) * batch
+        )
+
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
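Note: the masking in wipe_lookahead_rewards relies on broadcasting: t is a 1 x T row of positions, u a B x 1 column of per-sequence random cutoffs, so (t <= u) expands to a B x T matrix that is 1 up to each sequence's own cutoff. A self-contained toy of the same pattern follows; it_len, index_lookahead_reward, and the UNKNOWN code are made-up stand-ins for the values greed.GreedWorld provides.

import torch

# Toy rerun of the wipe_lookahead_rewards masking; the layout constants
# are made-up stand-ins for the greed.GreedWorld attributes.
B, T = 4, 12
it_len, index_lookahead_reward, unknown_code = 4, 2, 9
batch = torch.randint(5, (B, T))

t = torch.arange(T)[None, :]   # 1 x T positions
u = torch.randint(T, (B, 1))   # B x 1 per-sequence cutoffs
lr_mask = (t <= u).long() * (t % it_len == index_lookahead_reward).long()

# Lookahead-reward slots at or before each cutoff become UNKNOWN; every
# other token is kept as-is.
wiped = lr_mask * unknown_code + (1 - lr_mask) * batch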
@@ -1908,24 +1920,15 @@ class Escape(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
-            yield batch
+            yield self.wipe_lookahead_rewards(batch)
 
     def vocabulary_size(self):
-        return escape.nb_codes
+        return self.world.nb_codes
 
     def thinking_autoregression(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
-        t = torch.arange(result.size(1), device=result.device)[None, :]
-
-        state_len = self.height * self.width
-        index_action = state_len
-        index_reward = state_len + 1
-        index_lookahead_reward = state_len + 2
-        it_len = state_len + 3  # state / action / reward / lookahead_reward
-
-        result[:, it_len:] = -1
+        snapshots = []
 
         def ar(result, ar_mask, logit_biases=None):
             ar_mask = ar_mask.expand_as(result)
@@ -1940,61 +1943,68 @@ class Escape(Task):
                 device=self.device,
                 progress_bar_desc=None,
             )
+            warnings.warn("keeping thinking snapshots", RuntimeWarning)
+            snapshots.append(result[:100].detach().clone())
 
         # Generate iteration after iteration
 
-        optimistic_bias = result.new_zeros(escape.nb_codes, device=result.device)
-        optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1)
-        optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
+        result = self.test_input[:250].clone()
+        # Erase all the content but that of the first iteration
+        result[:, self.world.it_len :] = -1
+        # Set the lookahead_reward of the first iteration to UNKNOWN
+        result[:, self.world.index_lookahead_reward] = self.world.lookahead_reward2code(
+            greed.REWARD_UNKNOWN
+        )
 
-        snapshots = []
+        t = torch.arange(result.size(1), device=result.device)[None, :]
 
         for u in tqdm.tqdm(
-            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+            range(0, result.size(1), self.world.it_len),
+            desc="thinking",
         ):
-            # Re-generate the lookahead_reward pessimistically in the
-            # previous iterations
-            ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
-            ar(result, ar_mask, logit_biases=-optimistic_bias)
-            snapshots.append(result[:10].detach().clone())
-
-            # Generate the state
-            ar_mask = (t >= u).long() * (t < u + state_len).long()
+            # Generate the next state but keep the initial one; the
+            # lookahead_rewards of the previous iterations are set to
+            # UNKNOWN
+            if u > 0:
+                result[
+                    :, u + self.world.index_lookahead_reward
+                ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
+                ar_mask = (t >= u + self.world.index_states).long() * (
+                    t < u + self.world.index_states + self.world.state_len
+                ).long()
+                ar(result, ar_mask)
+
+            # Generate the action and reward with the lookahead_reward set to +1
+            result[
+                :, u + self.world.index_lookahead_reward
+            ] = self.world.lookahead_reward2code(greed.REWARD_PLUS)
+            ar_mask = (t >= u + self.world.index_reward).long() * (
+                t <= u + self.world.index_action
+            ).long()
             ar(result, ar_mask)
-            snapshots.append(result[:10].detach().clone())
-
-            # Re-generate the lookahead_reward optimistically in the
-            # previous iterations
-            ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
-            ar(result, ar_mask, logit_biases=optimistic_bias)
-            snapshots.append(result[:10].detach().clone())
 
-            # Generate the action and reward
-            ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
-            ar(result, ar_mask)
-            snapshots.append(result[:10].detach().clone())
+            # Set the lookahead_reward to UNKNOWN for the next iterations
+            result[
+                :, u + self.world.index_lookahead_reward
+            ] = self.world.lookahead_reward2code(greed.REWARD_UNKNOWN)
 
         filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
-            for n in range(10):
+            for n in range(snapshots[0].size(0)):
                 for s in snapshots:
-                    s, a, r, lr = escape.seq2episodes(
-                        s[n : n + 1], self.height, self.width, lookahead=True
+                    lr, s, a, r = self.world.seq2episodes(
+                        s[n : n + 1],
                     )
-                    str = escape.episodes2str(
-                        s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+                    str = self.world.episodes2str(
+                        lr, s, a, r, unicode=True, ansi_colors=True
                     )
                     f.write(str)
                 f.write("\n\n")
 
         # Saving the generated sequences
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
-        )
+        lr, s, a, r = self.world.seq2episodes(result)
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
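Note: to make the two generation windows in the thinking loop concrete, here is a runnable toy with a made-up per-iteration layout of [lookahead_reward, reward, action, state x 3]; the real offsets come from greed.GreedWorld and may differ.

import torch

# Toy of the two masks used per thinking iteration; the layout below
# ([lookahead_reward, reward, action, state x 3]) is an assumption.
index_lookahead_reward, index_reward, index_action, index_states = 0, 1, 2, 3
state_len, it_len = 3, 6
t = torch.arange(3 * it_len)[None, :]

u = it_len  # offset of the second iteration
state_mask = (t >= u + index_states).long() * (
    t < u + index_states + state_len
).long()
ar_mask = (t >= u + index_reward).long() * (t <= u + index_action).long()

print(state_mask.nonzero()[:, 1].tolist())  # [9, 10, 11]: the state slots
print(ar_mask.nonzero()[:, 1].tolist())     # [7, 8]: reward and action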
@@ -2004,16 +2014,14 @@ class Escape(Task):
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:250].clone()
+        result = self.wipe_lookahead_rewards(self.test_input[:250].clone())
 
         # Saving the ground truth
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        lr, s, a, r = self.world.seq2episodes(
+            result,
         )
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f:
@@ -2023,8 +2031,7 @@ class Escape(Task):
         # Re-generating from the first frame
 
         ar_mask = (
-            torch.arange(result.size(1), device=result.device)
-            >= self.height * self.width + 3
+            torch.arange(result.size(1), device=result.device) >= self.world.it_len
         ).long()[None, :]
         ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
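Note: the ar_mask built above keeps exactly the first it_len tokens (one full iteration) as the prompt and marks everything after for regeneration; a quick self-contained check, with a made-up it_len.

import torch

# Quick check of the prompt/regeneration split; it_len is a made-up
# stand-in for self.world.it_len.
it_len, seq_len = 4, 12
ar_mask = (torch.arange(seq_len) >= it_len).long()[None, :]
assert ar_mask[0, :it_len].sum() == 0  # first iteration kept as prompt
assert ar_mask[0, it_len:].min() == 1  # the rest will be regenerated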
@@ -2040,12 +2047,10 @@ class Escape(Task):
 
         # Saving the generated sequences
 
-        s, a, r, lr = escape.seq2episodes(
-            result, self.height, self.width, lookahead=True
-        )
-        str = escape.episodes2str(
-            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        lr, s, a, r = self.world.seq2episodes(
+            result,
         )
+        str = self.world.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
 
         filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
         with open(filename, "w") as f: