):
# Set the lookahead reward to either 0 or -1 for the
# current iteration, then sample the next state (see the sketch below)
- s = -1 # (torch.rand(result.size(0), device = result.device) < 0.2).long()
+ s = -(torch.rand(result.size(0), device=result.device) < 0.2).long()
result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
ar_mask = (t >= u).long() * (t < u + state_len).long()
ar(result, ar_mask)
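For reference, a minimal, self-contained sketch of what the changed line does: instead of a constant `-1`, the lookahead reward is now sampled per sequence, being `-1` with probability 0.2 and `0` otherwise, then shifted into the reward-token range before the masked positions are filled in autoregressively. The values of `batch_size`, `seq_len`, `p_negative`, `first_lookahead_rewards_code`, `u`, and `state_len` below are illustrative placeholders, not the ones used in the original code.

```python
import torch

# Illustrative placeholder values (assumptions, not from the original code)
batch_size, seq_len = 4, 12
p_negative = 0.2
first_lookahead_rewards_code = 10
u, state_len = 5, 3

result = torch.zeros(batch_size, seq_len, dtype=torch.long)

# With probability 0.2 the sampled lookahead reward is -1, otherwise 0
s = -(torch.rand(result.size(0)) < p_negative).long()

# Shift the reward in {-1, 0, +1} into the token range starting at the code base:
# -1 -> code, 0 -> code + 1, +1 -> code + 2
result[:, u - 1] = s + 1 + first_lookahead_rewards_code

# Mask selecting the state_len positions starting at u, which the
# autoregressive generation step is then asked to fill in
t = torch.arange(seq_len)[None, :]
ar_mask = (t >= u).long() * (t < u + state_len).long()
```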
def produce_results(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
):
- result = self.test_input[:100].clone()
+ result = self.test_input[:250].clone()
# Saving the ground truth