From 09952eb1ee41e279a1cb7797d2de997c6bcaa5af Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Mon, 25 Mar 2024 17:08:26 +0100
Subject: [PATCH] Update.

---
 escape.py | 145 +++++++++++++++++++++---------------------------------
 tasks.py  |  27 ++++++++--
 2 files changed, 80 insertions(+), 92 deletions(-)

diff --git a/escape.py b/escape.py
index 6f4af35..f2a1662 100755
--- a/escape.py
+++ b/escape.py
@@ -11,13 +11,13 @@ from torch.nn import functional as F
 
 ######################################################################
 
-nb_state_codes = 4
+nb_states_codes = 4
 nb_actions_codes = 5
 nb_rewards_codes = 3
 nb_lookahead_rewards_codes = 3
 
-first_state_code = 0
-first_actions_code = first_state_code + nb_state_codes
+first_states_code = 0
+first_actions_code = first_states_code + nb_states_codes
 first_rewards_code = first_actions_code + nb_actions_codes
 first_lookahead_rewards_code = first_rewards_code + nb_rewards_codes
 nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
@@ -25,8 +25,16 @@ nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
 ######################################################################
 
 
+def state2code(r):
+    return r + first_states_code
+
+
+def code2state(r):
+    return r - first_states_code
+
+
 def action2code(r):
-    return first_actions_code + r
+    return r + first_actions_code
 
 
 def code2action(r):
@@ -34,7 +42,7 @@ def code2action(r):
 
 
 def reward2code(r):
-    return first_rewards_code + r + 1
+    return r + 1 + first_rewards_code
 
 
 def code2reward(r):
@@ -42,7 +50,7 @@ def code2reward(r):
 
 
 def lookahead_reward2code(r):
-    return first_lookahead_rewards_code + r + 1
+    return r + 1 + first_lookahead_rewards_code
 
 
 def code2lookahead_reward(r):
@@ -133,68 +141,39 @@ def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3):
 ######################################################################
 
 
-def episodes2seq(states, actions, rewards, lookahead_delta=None):
-    states = states.flatten(2) + first_state_code
-    actions = actions[:, :, None] + first_actions_code
-
-    if lookahead_delta is not None:
-        a = rewards.new_zeros(rewards.size())
-        b = rewards.new_zeros(rewards.size())
-        for t in range(a.size(1) - 1):
-            a[:, t] = rewards[:, t + 1 :].min(dim=-1).values
-            b[:, t] = rewards[:, t + 1 :].max(dim=-1).values
-        s = (a < 0).long() * a + (a >= 0).long() * b
-        lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code
-
-    r = rewards[:, :, None]
-    rewards = (r + 1) + first_rewards_code
-
-    # assert (
-    # states.min() >= first_state_code
-    # and states.max() < first_state_code + nb_state_codes
-    # )
-    # assert (
-    # actions.min() >= first_actions_code
-    # and actions.max() < first_actions_code + nb_actions_codes
-    # )
-    # assert (
-    # rewards.min() >= first_rewards_code
-    # and rewards.max() < first_rewards_code + nb_rewards_codes
-    # )
-
-    if lookahead_delta is None:
-        return torch.cat([states, actions, rewards], dim=2).flatten(1)
-    else:
-        # assert (
-        # lookahead_rewards.min() >= first_lookahead_rewards_code
-        # and lookahead_rewards.max()
-        # < first_lookahead_rewards_code + nb_lookahead_rewards_codes
-        # )
-        return torch.cat([states, actions, rewards, lookahead_rewards], dim=2).flatten(
-            1
-        )
-
-
-def seq2episodes(seq, height, width, lookahead=False):
-    seq = seq.reshape(seq.size(0), -1, height * width + (3 if lookahead else 2))
-    states = seq[:, :, : height * width] - first_state_code
+def episodes2seq(states, actions, rewards):
+    neg = rewards.new_zeros(rewards.size())
+    pos = rewards.new_zeros(rewards.size())
+    for t in range(neg.size(1) - 1):
+        neg[:, t] = rewards[:, t:].min(dim=-1).values
+        pos[:, t] = rewards[:, t:].max(dim=-1).values
+    s = (neg < 0).long() * neg + (neg >= 0).long() * pos
+
+    return torch.cat(
+        [
+            lookahead_reward2code(s[:, :, None]),
+            state2code(states.flatten(2)),
+            action2code(actions[:, :, None]),
+            reward2code(rewards[:, :, None]),
+        ],
+        dim=2,
+    ).flatten(1)
+
+
+def seq2episodes(seq, height, width):
+    seq = seq.reshape(seq.size(0), -1, height * width + 3)
+    lookahead_rewards = code2lookahead_reward(seq[:, :, 0])
+    states = code2state(seq[:, :, 1 : height * width + 1])
     states = states.reshape(states.size(0), states.size(1), height, width)
-    actions = seq[:, :, height * width] - first_actions_code
-    rewards = seq[:, :, height * width + 1] - first_rewards_code - 1
-
-    if lookahead:
-        lookahead_rewards = (
-            seq[:, :, height * width + 2] - first_lookahead_rewards_code - 1
-        )
-        return states, actions, rewards, lookahead_rewards
-    else:
-        return states, actions, rewards
+    actions = code2action(seq[:, :, height * width + 1])
+    rewards = code2reward(seq[:, :, height * width + 2])
+    return lookahead_rewards, states, actions, rewards
 
 
 def seq2str(seq):
     def token2str(t):
-        if t >= first_state_code and t < first_state_code + nb_state_codes:
-            return " #@$"[t - first_state_code]
+        if t >= first_states_code and t < first_states_code + nb_states_codes:
+            return " #@$"[t - first_states_code]
         elif t >= first_actions_code and t < first_actions_code + nb_actions_codes:
             return "ISNEW"[t - first_actions_code]
         elif t >= first_rewards_code and t < first_rewards_code + nb_rewards_codes:
@@ -214,7 +193,7 @@ def seq2str(seq):
 
 
 def episodes2str(
-    states, actions, rewards, lookahead_rewards=None, unicode=False, ansi_colors=False
+    lookahead_rewards, states, actions, rewards, unicode=False, ansi_colors=False
 ):
     if unicode:
         symbols = "·█@$"
@@ -263,27 +242,17 @@ def episodes2str(
             + sb_lr
         )
 
-        if lookahead_rewards is None:
-            result += (
-                vert
-                + vert.join([status_bar(a, r) for a, r in zip(actions[n], rewards[n])])
-                + vert
-                + "\n"
-            )
-        else:
-            result += (
-                vert
-                + vert.join(
-                    [
-                        status_bar(a, r, lr)
-                        for a, r, lr in zip(
-                            actions[n], rewards[n], lookahead_rewards[n]
-                        )
-                    ]
-                )
-                + vert
-                + "\n"
+        result += (
+            vert
+            + vert.join(
+                [
+                    status_bar(a, r, lr)
+                    for a, r, lr in zip(actions[n], rewards[n], lookahead_rewards[n])
+                ]
             )
+            + vert
+            + "\n"
+        )
 
         result += hline
 
@@ -297,11 +266,11 @@
 ######################################################################
 
 if __name__ == "__main__":
-    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
+    nb, height, width, T, nb_walls = 5, 5, 7, 20, 5
     states, actions, rewards = generate_episodes(nb, height, width, T, nb_walls)
-    seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
-    s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
-    print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))
+    seq = episodes2seq(states, actions, rewards)
+    lr, s, a, r = seq2episodes(seq, height, width)
+    print(episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
     # print()
     # for s in seq2str(seq):
     #     print(s)
diff --git a/tasks.py b/tasks.py
index 29f1e5a..1d967f9 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1898,8 +1898,6 @@ class Escape(Task):
         self.train_input = seq[:nb_train_samples].to(self.device)
         self.test_input = seq[nb_train_samples:].to(self.device)
 
-        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
-
     def batches(self, split="train", nb_to_use=-1, desc=None):
{"train", "test"} input = self.train_input if split == "train" else self.test_input @@ -1913,7 +1911,7 @@ class Escape(Task): yield batch def vocabulary_size(self): - return self.nb_codes + return escape.nb_codes def thinking_autoregression( self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000 @@ -1927,6 +1925,8 @@ class Escape(Task): index_lookahead_reward = state_len + 2 it_len = state_len + 3 # state / action / reward / lookahead_reward + result[:, it_len:] = -1 + def ar(result, ar_mask, logit_biases=None): ar_mask = ar_mask.expand_as(result) result *= 1 - ar_mask @@ -1943,10 +1943,12 @@ class Escape(Task): # Generate iteration after iteration - optimistic_bias = result.new_zeros(self.nb_codes, device=result.device) + optimistic_bias = result.new_zeros(escape.nb_codes, device=result.device) optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1) optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1) + snapshots = [] + for u in tqdm.tqdm( range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking" ): @@ -1954,19 +1956,36 @@ class Escape(Task): # previous iterations ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long() ar(result, ar_mask, logit_biases=-optimistic_bias) + snapshots.append(result[:10].detach().clone()) # Generate the state ar_mask = (t >= u).long() * (t < u + state_len).long() ar(result, ar_mask) + snapshots.append(result[:10].detach().clone()) # Re-generate the lookahead_reward optimistically in the # previous iterations ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long() ar(result, ar_mask, logit_biases=optimistic_bias) + snapshots.append(result[:10].detach().clone()) # Generate the action and reward ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long() ar(result, ar_mask) + snapshots.append(result[:10].detach().clone()) + + filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt") + with open(filename, "w") as f: + for n in range(10): + for s in snapshots: + s, a, r, lr = escape.seq2episodes( + s[n : n + 1], self.height, self.width, lookahead=True + ) + str = escape.episodes2str( + s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True + ) + f.write(str) + f.write("\n\n") # Saving the generated sequences -- 2.39.5