states, actions, rewards = escape.generate_episodes(
nb_train_samples + nb_test_samples, height, width, T
)
- seq = escape.episodes2seq(states, actions, rewards)
+ seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
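+ # (optional) keep only the middle third of each sequence: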
+ # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
self.train_input = seq[:nb_train_samples].to(self.device)
self.test_input = seq[nb_train_samples:].to(self.device)
self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
- # if logger is not None:
- # for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
- # logger(f"train_sequences {self.problem.seq2str(s)}")
- # a = "".join(["01"[x.item()] for x in a])
- # logger(f" {a}")
-
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
input = self.train_input if split == "train" else self.test_input
def vocabulary_size(self):
return self.nb_codes
+ def thinking_autoregression(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ ):
+ result = self.test_input[:100].clone()
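+ # t[0, i] = i, the position of every token, used to build the autoregression masks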
+ t = torch.arange(result.size(1), device=result.device)[None, :]
+
+ state_len = self.height * self.width
+ it_len = state_len + 3 # state / action / reward / lookahead_reward
+
+ def ar(result, ar_mask):
+ ar_mask = ar_mask.expand_as(result)
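+ # zero out the tokens to generate, then let the model predict them in place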
+ result *= 1 - ar_mask
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ progress_bar_desc=None,
+ )
+
+ # Generate iteration after iteration
+
+ for u in tqdm.tqdm(
+ range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+ ):
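+ # u indexes the first state token of the current iteration;
+ # u - 1 holds the lookahead reward that conditions it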
+ # Put the lookahead reward to -1 for the current iteration,
+ # sample the next state
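+ # (a lookahead reward s in {-1, 0, 1} is encoded as the token
+ # first_lookahead_rewards_code + s + 1)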
+ s = -1
+ result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
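+ # regenerate only the state tokens of the current iteration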
+ ar_mask = (t >= u).long() * (t < u + state_len).long()
+ ar(result, ar_mask)
+
+ # Put the lookahead reward to +1 for the current
+ # iteration, sample the action and reward
+ s = 1
+ result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
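+ # regenerate only the action and reward tokens of the current iteration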
+ ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
+ ar(result, ar_mask)
+
+ # Make the previous lookahead rewards consistent with the sampled rewards
+ for v in range(0, u, it_len):
+ # Extract the rewards
+ r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
+ r = r - escape.first_lookahead_rewards_code - 1
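+ # if any subsequent reward is negative, propagate it;
+ # otherwise keep the best subsequent reward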
+ a = r.min(dim=1).values
+ b = r.max(dim=1).values
+ s = (a < 0).long() * a + (a >= 0).long() * b
+ result[:, v + state_len + 2] = (
+ s + 1 + escape.first_lookahead_rewards_code
+ )
+
+ # Saving the generated sequences
+
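+ # decode the token sequences back into episodes for pretty-printing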
+ s, a, r, lr = escape.seq2episodes(
+ result, self.height, self.width, lookahead=True
+ )
+ str = escape.episodes2str(
+ s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+ )
+
+ filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+ f.write(str)
+ logger(f"wrote {filename}")
+
def produce_results(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
):
result = self.test_input[:100].clone()
+
+ # Saving the ground truth
+
+ s, a, r, lr = escape.seq2episodes(
+ result, self.height, self.width, lookahead=True
+ )
+ str = escape.episodes2str(
+ s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+ )
+
+ filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+ f.write(str)
+ logger(f"wrote {filename}")
+
+ # Re-generating from the first frame
+
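+ # keep the first iteration (state / action / reward / lookahead reward) as the prompt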
ar_mask = (
torch.arange(result.size(1), device=result.device)
- > self.height * self.width + 2
+ >= self.height * self.width + 3
).long()[None, :]
ar_mask = ar_mask.expand_as(result)
result *= 1 - ar_mask  # out of paranoia, clear every token that will be regenerated
masked_inplace_autoregression(
    model,
    self.batch_size,
    result,
    ar_mask,
    deterministic_synthesis,
    device=self.device,
)
- s, a, r = escape.seq2episodes(result, self.height, self.width)
- str = escape.episodes2str(s, a, r, unicode=True, ansi_colors=True)
+ # Saving the generated sequences
+
+ s, a, r, lr = escape.seq2episodes(
+ result, self.height, self.width, lookahead=True
+ )
+ str = escape.episodes2str(
+ s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+ )
filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
with open(filename, "w") as f:
f.write(str)
logger(f"wrote {filename}")
+ self.thinking_autoregression(
+ n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
+ )
+
######################################################################