+
+import os, warnings
+
+import torch, tqdm
+
+import escape
+
+# Task and masked_inplace_autoregression are assumed to be defined
+# earlier in this file
+
+
+class Escape(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ height,
+ width,
+ T,
+ nb_walls,
+ logger=None,
+ device=torch.device("cpu"),
+ ):
+ super().__init__()
+
+ self.batch_size = batch_size
+ self.device = device
+ self.height = height
+ self.width = width
+
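+        # Generate all the episodes up front, then split the resulting
+        # token sequences into a train and a test set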
+ states, actions, rewards = escape.generate_episodes(
+ nb_train_samples + nb_test_samples, height, width, T, nb_walls
+ )
+ seq = escape.episodes2seq(states, actions, rewards)
+ # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+ self.train_input = seq[:nb_train_samples].to(self.device)
+ self.test_input = seq[nb_train_samples:].to(self.device)
+
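+        # Offsets of the lookahead_reward, state, action and reward
+        # fields inside one iteration of it_len tokens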
+ self.state_len = self.height * self.width
+ self.index_lookahead_reward = 0
+ self.index_states = 1
+ self.index_action = self.state_len + 1
+ self.index_reward = self.state_len + 2
+ self.it_len = self.state_len + 3 # lookahead_reward / state / action / reward
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
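+            # For every sequence pick a random cutoff u, and overwrite
+            # the lookahead_reward tokens located at positions <= u with
+            # the UNKNOWN code (lookahead_reward2code(2))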
+ t = torch.arange(batch.size(1), device=batch.device)[None, :]
+ u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+ lr_mask = (t <= u).long() * (
+ t % self.it_len == self.index_lookahead_reward
+ ).long()
+
+ batch = lr_mask * escape.lookahead_reward2code(2) + (1 - lr_mask) * batch
+ yield batch
+
+ def vocabulary_size(self):
+ return escape.nb_codes
+
+ def thinking_autoregression(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ ):
+ snapshots = []
+
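+        # Regenerate in-place the positions of result selected by
+        # ar_mask, and keep a snapshot of the first ten sequences at
+        # every call for later inspection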
+ def ar(result, ar_mask, logit_biases=None):
+ ar_mask = ar_mask.expand_as(result)
+ result *= 1 - ar_mask
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis=deterministic_synthesis,
+ logit_biases=logit_biases,
+ device=self.device,
+ progress_bar_desc=None,
+ )
+ warnings.warn("keeping thinking snapshots", RuntimeWarning)
+ snapshots.append(result[:10].detach().clone())
+
+ # Generate iteration after iteration
+
+ result = self.test_input[:250].clone()
+ # Erase all the content but that of the first iteration
+ result[:, self.it_len :] = -1
+        # Set the lookahead_reward of the first iteration to UNKNOWN
+ result[:, self.index_lookahead_reward] = escape.lookahead_reward2code(2)
+
+ t = torch.arange(result.size(1), device=result.device)[None, :]
+
+ for u in tqdm.tqdm(
+ range(0, result.size(1), self.it_len),
+ desc="thinking",
+ ):
+            # Generate the next state but keep the initial one. The
+            # lookahead_rewards of the previous iterations are set to
+            # UNKNOWN
+ if u > 0:
+ result[
+ :, u + self.index_lookahead_reward
+ ] = escape.lookahead_reward2code(2)
+ ar_mask = (t >= u + self.index_states).long() * (
+ t < u + self.index_states + self.state_len
+ ).long()
+ ar(result, ar_mask)
+
+            # Generate the action and the reward with the
+            # lookahead_reward set to +1
+ result[:, u + self.index_lookahead_reward] = escape.lookahead_reward2code(1)
+ ar_mask = (t >= u + self.index_action).long() * (
+ t <= u + self.index_reward
+ ).long()
+ ar(result, ar_mask)
+
+ # Set the lookahead_reward to UNKNOWN for the next iterations
+ result[:, u + self.index_lookahead_reward] = escape.lookahead_reward2code(2)
+
+ filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+            for n in range(10):
+                for snapshot in snapshots:
+                    lr, s, a, r = escape.seq2episodes(
+                        snapshot[n : n + 1], self.height, self.width
+                    )
+                    seq_str = escape.episodes2str(
+                        lr, s, a, r, unicode=True, ansi_colors=True
+                    )
+                    f.write(seq_str)
+                    f.write("\n\n")
+
+ # Saving the generated sequences
+
+ lr, s, a, r = escape.seq2episodes(result, self.height, self.width)
+        seq_str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+
+ filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+            f.write(seq_str)
+ logger(f"wrote {filename}")
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ ):
+ result = self.test_input[:250].clone()
+
+ # Saving the ground truth
+
+ lr, s, a, r = escape.seq2episodes(
+ result,
+ self.height,
+ self.width,
+ )
+        seq_str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+
+ filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+            f.write(seq_str)
+ logger(f"wrote {filename}")
+
+ # Re-generating from the first frame
+
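+        # The mask selects everything after the first iteration; the
+        # first iteration is kept as the conditioning prompt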
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device) >= self.it_len
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
+
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ )
+
+ # Saving the generated sequences
+
+ lr, s, a, r = escape.seq2episodes(
+ result,
+ self.height,
+ self.width,
+ )
+        seq_str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
+
+ filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+            f.write(seq_str)
+ logger(f"wrote {filename}")
+
+ self.thinking_autoregression(
+ n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
+ )
+
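+# A minimal usage sketch, kept as a comment. The parameter values are
+# hypothetical, and it assumes the escape module plus a model compatible
+# with masked_inplace_autoregression:
+#
+#   task = Escape(
+#       nb_train_samples=25000,
+#       nb_test_samples=1000,
+#       batch_size=25,
+#       height=5,
+#       width=7,
+#       T=25,
+#       nb_walls=2,
+#   )
+#
+#   for batch in task.batches(split="train"):
+#       ...  # train the model on the masked sequences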
+
+######################################################################