- # Fix the previous lookahead rewards in a consistent state
- for v in range(0, u, it_len):
-     # Extract the rewards
-     r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
-     r = r - escape.first_lookahead_rewards_code - 1
-     a = r.min(dim=1).values
-     b = r.max(dim=1).values
-     s = (a < 0).long() * a + (a >= 0).long() * b
-     result[:, v + state_len + 2] = (
-         s + 1 + escape.first_lookahead_rewards_code
-     )
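+ # The lookahead rewards are now re-sampled by the model itself, with a
+ # logit bias towards optimistic outcomes, instead of being recomputed
+ # deterministically from the rewards already present in the sequence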
+ # Re-generate the lookahead_reward
+ ar_mask = (t % self.it_len == self.index_lookahead_reward).long() * (
+     t <= u + self.index_lookahead_reward
+ ).long()
+ ar(result, ar_mask, logit_biases=optimistic_bias)
+
+ # Generate the action and reward
+ ar_mask = (t >= u + self.index_action).long() * (
+     t <= u + self.index_reward
+ ).long()
+ ar(result, ar_mask)
+
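+ # Render the episodes of each snapshot and dump them to a text file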
+ filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+     for n in range(10):
+         for snapshot in snapshots:
+             lr, s, a, r = escape.seq2episodes(
+                 snapshot[n : n + 1], self.height, self.width
+             )
+             episodes_str = escape.episodes2str(
+                 lr, s, a, r, unicode=True, ansi_colors=True
+             )
+             f.write(episodes_str)
+         f.write("\n\n")