######################################################################
# NOTE(review): this span is an UNRESOLVED unified-diff hunk, not runnable
# Python — '-' lines are the old version, '+' lines the replacement, and the
# unmarked context lines have lost their diff prefix and body indentation.
# The change being applied here: promote the hard-coded wall count (3) to a
# new `nb_walls` parameter with default 3, which is backward-compatible.
# The rest of generate_episodes (action/reward generation and the return of
# (states, actions, rewards) that __main__ unpacks) is elided by the diff —
# do not treat this span as the whole function.
-def generate_episodes(nb, height=6, width=6, T=10):
+def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3):
# One random score per grid cell; top row, bottom row, and rightmost column
# are zeroed so argmax never places a wall there.
# NOTE(review): the leftmost column (rnd[:, :, 0]) is NOT zeroed here —
# presumably handled in lines elided by the diff; verify against full file.
rnd = torch.rand(nb, height, width)
rnd[:, 0, :] = 0
rnd[:, -1, :] = 0
rnd[:, :, -1] = 0
wall = 0
# Each iteration turns the current per-episode argmax cell into a one-hot
# grid (via comparison against a flat index range) and accumulates it.
-    for k in range(3):
+    for k in range(nb_walls):
wall = wall + (
rnd.flatten(1).argmax(dim=1)[:, None]
== torch.arange(rnd.flatten(1).size(1))[None, :]
).long().reshape(rnd.size())
+
# Zero out chosen cells so the next argmax picks a different cell, then
# replicate the wall layout across all T time steps of the episode.
rnd = rnd * (1 - wall.clamp(max=1))
states = wall[:, None, :, :].expand(-1, T, -1, -1).clone()
######################################################################
if __name__ == "__main__":
    # Smoke test: generate random episodes, encode them as token sequences,
    # decode them back, and print a human-readable rendering.
    # (Diff markers resolved to the new '+' side: an explicit nb_walls=5 is
    # now passed to generate_episodes instead of relying on its default.)
    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
    states, actions, rewards = generate_episodes(nb, height, width, T, nb_walls)
    # Round-trip through the sequence representation with a lookahead of T.
    seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
    s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
    print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))