From 62ad2378c60cdf322c0111279bd45fbef8365fc2 Mon Sep 17 00:00:00 2001
From: François Fleuret
Date: Sun, 24 Mar 2024 23:10:53 +0100
Subject: [PATCH] Update.

---
 escape.py |  9 +++++----
 main.py   |  7 +++++--
 tasks.py  | 13 +++++++------
 3 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/escape.py b/escape.py
index 1c1bc20..43843f0 100755
--- a/escape.py
+++ b/escape.py
@@ -25,7 +25,7 @@ nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
 ######################################################################


-def generate_episodes(nb, height=6, width=6, T=10):
+def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3):
     rnd = torch.rand(nb, height, width)
     rnd[:, 0, :] = 0
     rnd[:, -1, :] = 0
@@ -33,11 +33,12 @@ def generate_episodes(nb, height=6, width=6, T=10):
     rnd[:, :, -1] = 0

     wall = 0
-    for k in range(3):
+    for k in range(nb_walls):
         wall = wall + (
             rnd.flatten(1).argmax(dim=1)[:, None]
             == torch.arange(rnd.flatten(1).size(1))[None, :]
         ).long().reshape(rnd.size())
+
         rnd = rnd * (1 - wall.clamp(max=1))

     states = wall[:, None, :, :].expand(-1, T, -1, -1).clone()
@@ -280,8 +281,8 @@ def episodes2str(
 ######################################################################

 if __name__ == "__main__":
-    nb, height, width, T = 25, 5, 7, 25
-    states, actions, rewards = generate_episodes(nb, height, width, T)
+    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
+    states, actions, rewards = generate_episodes(nb, height, width, T, nb_walls)
     seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
     s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
     print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))
diff --git a/main.py b/main.py
index 2edfa14..e855b06 100755
--- a/main.py
+++ b/main.py
@@ -178,12 +178,14 @@ parser.add_argument("--mixing_deterministic_start", action="store_true", default
 ##############################
 # escape options

-parser.add_argument("--escape_height", type=int, default=4)
+parser.add_argument("--escape_height", type=int, default=5)

-parser.add_argument("--escape_width", type=int, default=6)
+parser.add_argument("--escape_width", type=int, default=7)

 parser.add_argument("--escape_T", type=int, default=25)

+parser.add_argument("--escape_nb_walls", type=int, default=5)
+
 ######################################################################

 args = parser.parse_args()
@@ -622,6 +624,7 @@ elif args.task == "escape":
         height=args.escape_height,
         width=args.escape_width,
         T=args.escape_T,
+        nb_walls=args.escape_nb_walls,
         logger=log_string,
         device=device,
     )
diff --git a/tasks.py b/tasks.py
index 829eb24..56c2b0f 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1874,6 +1874,7 @@ class Escape(Task):
         height,
         width,
         T,
+        nb_walls,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1885,7 +1886,7 @@ class Escape(Task):
         self.width = width

         states, actions, rewards = escape.generate_episodes(
-            nb_train_samples + nb_test_samples, height, width, T
+            nb_train_samples + nb_test_samples, height, width, T, nb_walls
         )
         seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
         # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
@@ -1912,7 +1913,7 @@
     def thinking_autoregression(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
     ):
-        result = self.test_input[:100].clone()
+        result = self.test_input[:250].clone()
         t = torch.arange(result.size(1), device=result.device)[None, :]
         state_len = self.height * self.width

@@ -1936,9 +1937,9 @@
         for u in tqdm.tqdm(
             range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
         ):
-            # Put the lookahead reward to -1 for the current iteration,
-            # sample the next state
-            s = -1
+            # Put the lookahead reward to either 0 or -1 for the
+            # current iteration, sample the next state
+            s = -1  # (torch.rand(result.size(0), device = result.device) < 0.2).long()
             result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
             ar_mask = (t >= u).long() * (t < u + state_len).long()
             ar(result, ar_mask)
@@ -1954,7 +1955,7 @@ class Escape(Task):
             for v in range(0, u, it_len):
                 # Extract the rewards
                 r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
-                r = r - escape.first_lookahead_rewards_code - 1
+                r = r - escape.first_rewards_code - 1
                 a = r.min(dim=1).values
                 b = r.max(dim=1).values
                 s = (a < 0).long() * a + (a >= 0).long() * b
--
2.39.5
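
A minimal sketch (not part of the commit) of exercising the new nb_walls
parameter directly, mirroring the updated __main__ block of escape.py with
the same values as the new defaults; it assumes escape.py and torch are
importable:

    import escape

    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
    # Generate episodes with 5 walls instead of the previously hard-coded 3.
    states, actions, rewards = escape.generate_episodes(nb, height, width, T, nb_walls)
    seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
    s, a, r, lr = escape.seq2episodes(seq, height, width, lookahead=True)
    print(escape.episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))

Equivalently, through the options added or updated in main.py (assuming the
task is selected with main.py's --task option):

    ./main.py --task escape --escape_height 5 --escape_width 7 --escape_T 25 --escape_nb_walls 5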