range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
):
# Put the lookahead reward to either 0 or -1 for the
- # current iteration, sample the next state
- s = -(torch.rand(result.size(0), device=result.device) < 0.2).long()
+ # current iteration, with a probability that depends on the
+ # sequence index, so that we get diverse examples, then
+ # sample the next state
+ s = -(
+ torch.rand(result.size(0), device=result.device)
+ <= torch.linspace(0, 1, result.size(0), device=result.device)
+ ).long()
result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
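+ # generate the state tokens of this iteration autoregressively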
ar_mask = (t >= u).long() * (t < u + state_len).long()
ar(result, ar_mask)
# Extract the rewards
r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
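+ # map the reward token codes back to values in {-1, 0, 1}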
r = r - escape.first_rewards_code - 1
+ r = r.clamp(min=-1, max=1) # the reward is predicted, hence may fall outside the valid range
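+ # keep the worst reward if any iteration went negative, otherwise
+ # keep the best one, e.g. [0, -1, 1] -> -1 and [0, 1, 1] -> 1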
a = r.min(dim=1).values
b = r.max(dim=1).values
s = (a < 0).long() * a + (a >= 0).long() * b