######################################################################
-nb_state_codes = 4
+nb_states_codes = 4
nb_actions_codes = 5
nb_rewards_codes = 3
nb_lookahead_rewards_codes = 3
-first_state_code = 0
-first_actions_code = first_state_code + nb_state_codes
+first_states_code = 0
+first_actions_code = first_states_code + nb_states_codes
first_rewards_code = first_actions_code + nb_actions_codes
first_lookahead_rewards_code = first_rewards_code + nb_rewards_codes
nb_codes = first_lookahead_rewards_code + nb_lookahead_rewards_codes
######################################################################
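For reference, these constants tile the token vocabulary into four contiguous ranges. A quick sketch of the resulting layout (illustrative only, not part of the module):

# Illustrative: the code ranges implied by the constants above.
#   states            [ 0,  4)
#   actions           [ 4,  9)
#   rewards           [ 9, 12)
#   lookahead rewards [12, 15)
assert first_actions_code == 4
assert first_rewards_code == 9
assert first_lookahead_rewards_code == 12
assert nb_codes == 15  # the ranges cover [0, nb_codes) without gaps or overlaps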
+def state2code(r):
+    return r + first_states_code
+
+
+def code2state(r):
+    return r - first_states_code
+
+
def action2code(r):
-    return first_actions_code + r
+    return r + first_actions_code


def code2action(r):
    return r - first_actions_code


def reward2code(r):
-    return first_rewards_code + r + 1
+    return r + 1 + first_rewards_code


def code2reward(r):
    return r - first_rewards_code - 1


def lookahead_reward2code(r):
-    return first_lookahead_rewards_code + r + 1
+    return r + 1 + first_lookahead_rewards_code


def code2lookahead_reward(r):
    return r - first_lookahead_rewards_code - 1
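Each encoder has an inverse, and the rewards in {-1, 0, 1} are shifted by one so they land on non-negative codes. A minimal round-trip check (illustrative, written with plain ints; the module applies the same arithmetic to tensors):

assert code2state(state2code(3)) == 3
assert code2action(action2code(4)) == 4
assert code2reward(reward2code(-1)) == -1
assert reward2code(-1) == first_rewards_code  # -1 maps to the lowest reward code
assert lookahead_reward2code(1) == first_lookahead_rewards_code + 2
assert code2lookahead_reward(lookahead_reward2code(0)) == 0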
######################################################################
-def episodes2seq(states, actions, rewards, lookahead_delta=None):
-    states = states.flatten(2) + first_state_code
-    actions = actions[:, :, None] + first_actions_code
-
-    if lookahead_delta is not None:
-        a = rewards.new_zeros(rewards.size())
-        b = rewards.new_zeros(rewards.size())
-        for t in range(a.size(1) - 1):
-            a[:, t] = rewards[:, t + 1 :].min(dim=-1).values
-            b[:, t] = rewards[:, t + 1 :].max(dim=-1).values
-        s = (a < 0).long() * a + (a >= 0).long() * b
-        lookahead_rewards = (1 + s[:, :, None]) + first_lookahead_rewards_code
-
-    r = rewards[:, :, None]
-    rewards = (r + 1) + first_rewards_code
-
-    # assert (
-    #     states.min() >= first_state_code
-    #     and states.max() < first_state_code + nb_state_codes
-    # )
-    # assert (
-    #     actions.min() >= first_actions_code
-    #     and actions.max() < first_actions_code + nb_actions_codes
-    # )
-    # assert (
-    #     rewards.min() >= first_rewards_code
-    #     and rewards.max() < first_rewards_code + nb_rewards_codes
-    # )
-
-    if lookahead_delta is None:
-        return torch.cat([states, actions, rewards], dim=2).flatten(1)
-    else:
-        # assert (
-        #     lookahead_rewards.min() >= first_lookahead_rewards_code
-        #     and lookahead_rewards.max()
-        #     < first_lookahead_rewards_code + nb_lookahead_rewards_codes
-        # )
-        return torch.cat([states, actions, rewards, lookahead_rewards], dim=2).flatten(
-            1
-        )
-
-
-def seq2episodes(seq, height, width, lookahead=False):
-    seq = seq.reshape(seq.size(0), -1, height * width + (3 if lookahead else 2))
-    states = seq[:, :, : height * width] - first_state_code
+def episodes2seq(states, actions, rewards):
+    # Lookahead reward at step t: the worst reward from t onward if any of
+    # them is negative, otherwise the best; the final step is left at 0.
+    neg = rewards.new_zeros(rewards.size())
+    pos = rewards.new_zeros(rewards.size())
+    for t in range(neg.size(1) - 1):
+        neg[:, t] = rewards[:, t:].min(dim=-1).values
+        pos[:, t] = rewards[:, t:].max(dim=-1).values
+    s = (neg < 0).long() * neg + (neg >= 0).long() * pos
+
+    # Each step is encoded as [lookahead_reward, state (H*W tokens), action, reward]
+    return torch.cat(
+        [
+            lookahead_reward2code(s[:, :, None]),
+            state2code(states.flatten(2)),
+            action2code(actions[:, :, None]),
+            reward2code(rewards[:, :, None]),
+        ],
+        dim=2,
+    ).flatten(1)
+
+
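The pessimistic/optimistic selection of `s` deserves a spelled-out example: if any reward from step t onward is negative, `s[:, t]` is that minimum, otherwise it is the maximum, and the last step stays at zero. A toy run with made-up rewards (illustrative, not from the module):

import torch

rewards = torch.tensor([[0, 0, -1, 0, 1, 0]])
neg = rewards.new_zeros(rewards.size())
pos = rewards.new_zeros(rewards.size())
for t in range(neg.size(1) - 1):
    neg[:, t] = rewards[:, t:].min(dim=-1).values
    pos[:, t] = rewards[:, t:].max(dim=-1).values
s = (neg < 0).long() * neg + (neg >= 0).long() * pos
assert s.tolist() == [[-1, -1, -1, 1, 1, 0]]  # -1 while a failure lies ahead, then +1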
+def seq2episodes(seq, height, width):
+    seq = seq.reshape(seq.size(0), -1, height * width + 3)
+    lookahead_rewards = code2lookahead_reward(seq[:, :, 0])
+    states = code2state(seq[:, :, 1 : height * width + 1])
    states = states.reshape(states.size(0), states.size(1), height, width)
-    actions = seq[:, :, height * width] - first_actions_code
-    rewards = seq[:, :, height * width + 1] - first_rewards_code - 1
-
-    if lookahead:
-        lookahead_rewards = (
-            seq[:, :, height * width + 2] - first_lookahead_rewards_code - 1
-        )
-        return states, actions, rewards, lookahead_rewards
-    else:
-        return states, actions, rewards
+    actions = code2action(seq[:, :, height * width + 1])
+    rewards = code2reward(seq[:, :, height * width + 2])
+    return lookahead_rewards, states, actions, rewards
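A sanity check worth keeping in mind: with the new layout, encoding then decoding round-trips the episode tensors exactly. A sketch assuming the two functions and constants above are in scope, and the shapes used elsewhere in the module (states of shape (N, T, H, W), actions and rewards of shape (N, T)):

import torch

N, T, height, width = 1, 4, 2, 3
states = torch.randint(nb_states_codes, (N, T, height, width))
actions = torch.randint(nb_actions_codes, (N, T))
rewards = torch.randint(nb_rewards_codes, (N, T)) - 1  # values in {-1, 0, 1}
lr, s, a, r = seq2episodes(episodes2seq(states, actions, rewards), height, width)
assert (s == states).all() and (a == actions).all() and (r == rewards).all()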
def seq2str(seq):
    def token2str(t):
-        if t >= first_state_code and t < first_state_code + nb_state_codes:
-            return " #@$"[t - first_state_code]
+        if t >= first_states_code and t < first_states_code + nb_states_codes:
+            return " #@$"[t - first_states_code]
        elif t >= first_actions_code and t < first_actions_code + nb_actions_codes:
            return "ISNEW"[t - first_actions_code]
        elif t >= first_rewards_code and t < first_rewards_code + nb_rewards_codes:
def episodes2str(
-    states, actions, rewards, lookahead_rewards=None, unicode=False, ansi_colors=False
+    lookahead_rewards, states, actions, rewards, unicode=False, ansi_colors=False
):
    if unicode:
        symbols = "·█@$"
+                + sb_lr
            )
-        if lookahead_rewards is None:
-            result += (
-                vert
-                + vert.join([status_bar(a, r) for a, r in zip(actions[n], rewards[n])])
-                + vert
-                + "\n"
-            )
-        else:
-            result += (
-                vert
-                + vert.join(
-                    [
-                        status_bar(a, r, lr)
-                        for a, r, lr in zip(
-                            actions[n], rewards[n], lookahead_rewards[n]
-                        )
-                    ]
-                )
-                + vert
-                + "\n"
+        result += (
+            vert
+            + vert.join(
+                [
+                    status_bar(a, r, lr)
+                    for a, r, lr in zip(actions[n], rewards[n], lookahead_rewards[n])
+                ]
            )
+            + vert
+            + "\n"
+        )
        result += hline
######################################################################
if __name__ == "__main__":
-    nb, height, width, T, nb_walls = 25, 5, 7, 25, 5
+    nb, height, width, T, nb_walls = 5, 5, 7, 20, 5
    states, actions, rewards = generate_episodes(nb, height, width, T, nb_walls)
-    seq = episodes2seq(states, actions, rewards, lookahead_delta=T)
-    s, a, r, lr = seq2episodes(seq, height, width, lookahead=True)
-    print(episodes2str(s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True))
+    seq = episodes2seq(states, actions, rewards)
+    lr, s, a, r = seq2episodes(seq, height, width)
+    print(episodes2str(lr, s, a, r, unicode=True, ansi_colors=True))
    # print()
    # for s in seq2str(seq):
    #     print(s)
        self.train_input = seq[:nb_train_samples].to(self.device)
        self.test_input = seq[nb_train_samples:].to(self.device)
-        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
-
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            yield batch
    def vocabulary_size(self):
-        return self.nb_codes
+        return escape.nb_codes
    def thinking_autoregression(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
-        index_lookahead_reward = state_len + 2
-        it_len = state_len + 3  # state / action / reward / lookahead_reward
+        index_lookahead_reward = 0
+        it_len = state_len + 3  # lookahead_reward / state / action / reward
+        result[:, it_len:] = -1
+
        def ar(result, ar_mask, logit_biases=None):
            ar_mask = ar_mask.expand_as(result)
            result *= 1 - ar_mask
        # Generate iteration after iteration
-        optimistic_bias = result.new_zeros(self.nb_codes, device=result.device)
+        optimistic_bias = result.new_zeros(escape.nb_codes, device=result.device)
        optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1)
        optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
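Adding this bias to the logits before the softmax rescales the odds of the lookahead-reward tokens: a shift of `math.log(1e1)` multiplies the unnormalized probability of the +1 token by 10 and divides that of the -1 token by 10. A self-contained illustration of the effect (the uniform logits here are assumed purely for the demonstration, not taken from the model):

import math

import torch

nb_codes = 15                          # matches the escape module's layout above
optimistic_bias = torch.zeros(nb_codes)
optimistic_bias[12] = -math.log(1e1)   # escape.lookahead_reward2code(-1) == 12
optimistic_bias[14] = math.log(1e1)    # escape.lookahead_reward2code(1) == 14

logits = torch.zeros(nb_codes)         # a uniform model, for illustration only
probs = torch.softmax(logits + optimistic_bias, dim=-1)
assert probs[14] / probs[12] > 99      # odds ratio shifted by a factor of 100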
+        snapshots = []
+
        for u in tqdm.tqdm(
            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
        ):
            # Generate the lookahead_reward pessimistically in the
            # previous iterations
            ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
            ar(result, ar_mask, logit_biases=-optimistic_bias)
+            snapshots.append(result[:10].detach().clone())
            # Generate the state
            ar_mask = (t >= u).long() * (t < u + state_len).long()
            ar(result, ar_mask)
+            snapshots.append(result[:10].detach().clone())
            # Re-generate the lookahead_reward optimistically in the
            # previous iterations
            ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
            ar(result, ar_mask, logit_biases=optimistic_bias)
+            snapshots.append(result[:10].detach().clone())
            # Generate the action and reward
            ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
            ar(result, ar_mask)
+            snapshots.append(result[:10].detach().clone())
+
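The `ar_mask` expressions above are pure index arithmetic on the position tensor `t`: `(t % it_len == index_lookahead_reward)` selects the lookahead slot of every iteration, and the `(t < u)` / `(t >= u)` factors restrict that to past or current iterations. A toy check of the pattern (illustrative sizes; the real `it_len` is `state_len + 3`):

import torch

it_len, index_lookahead_reward = 5, 0  # toy sizes
t = torch.arange(3 * it_len)[None, :]
u = 2 * it_len                         # currently generating the third iteration
ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
assert ar_mask[0].nonzero().flatten().tolist() == [0, 5]  # past lookahead slots only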
+        filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            for n in range(10):
+                for snapshot in snapshots:
+                    lr, s, a, r = escape.seq2episodes(
+                        snapshot[n : n + 1], self.height, self.width
+                    )
+                    txt = escape.episodes2str(
+                        lr, s, a, r, unicode=True, ansi_colors=True
+                    )
+                    f.write(txt)
+                f.write("\n\n")
        # Saving the generated sequences