- for u in range(itl, result.size(1) - itl + 1, itl):
-     print(f"{itl=} {u=} {result.size(1)=}")
-     result[:, u - 1] = (-1) + 1 + escape.first_lookahead_rewards_code
-     ar_mask = (t >= u).long() * (t < u + self.height * self.width).long()
-     ar_mask = ar_mask[None, :]
-     ar_mask = ar_mask.expand_as(result)
-     result *= 1 - ar_mask
-     ar()
-     result[:, u - 1] = (1) + 1 + escape.first_lookahead_rewards_code
-     ar_mask = (t >= self.height * self.width).long() * (
-         t < self.height * self.width + 2
-     ).long()
-     ar_mask = ar_mask[None, :]
-     ar_mask = ar_mask.expand_as(result)
-     result *= 1 - ar_mask
-     ar()
+ # Generate iteration after iteration
+
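+ # Logit biases for the two lookahead-reward codes: adding math.log(1e1)
+ # to a logit multiplies that code's unnormalized probability by 10, so
+ # sampling with +optimistic_bias favors the +1 reward code ten-fold and
+ # disfavors the -1 code ten-fold (and conversely with -optimistic_bias).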
+ optimistic_bias = result.new_zeros(self.nb_codes, device=result.device)
+ optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1)
+ optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
+
+ for u in tqdm.tqdm(
+     range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+ ):
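+     # Layout assumption implied by the indices used below: each
+     # it_len-token iteration holds the state_len state tokens plus the
+     # lookahead_reward, action and reward codes at the fixed offsets
+     # index_lookahead_reward, index_action and index_reward.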
+     # Re-generate the lookahead_reward of the previous iterations
+     # pessimistically
+     ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+     ar(result, ar_mask, logit_biases=-optimistic_bias)
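+     # (Net effect, as implied by the ordering: the state below is
+     # generated while the history carries pessimistic rewards, and the
+     # action and reward further down after the history has been
+     # rewritten optimistically.)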
+
+     # Generate the state
+     ar_mask = (t >= u).long() * (t < u + state_len).long()
+     ar(result, ar_mask)
+
+     # Re-generate the lookahead_reward of the previous iterations
+     # optimistically
+     ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+     ar(result, ar_mask, logit_biases=optimistic_bias)
+
+     # Generate the action and reward
+     ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
+     ar(result, ar_mask)
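+
+ # For reference, a minimal sketch of what the ar() helper used above is
+ # assumed to do; the model call and sampling details here are
+ # assumptions, not the actual implementation:
+ #
+ #     def ar(result, ar_mask, logit_biases=None):
+ #         # Re-sample in place, left to right, only the positions where
+ #         # ar_mask is 1; the other tokens are kept as-is and condition
+ #         # the model unchanged.
+ #         for s in range(result.size(1)):
+ #             if ar_mask[s] == 0:
+ #                 continue
+ #             logits = model(result[:, : s + 1])[:, -1]
+ #             if logit_biases is not None:
+ #                 logits = logits + logit_biases
+ #             dist = torch.distributions.Categorical(logits=logits)
+ #             result[:, s] = dist.sample()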