nb_states_codes = 5
nb_actions_codes = 5
nb_rewards_codes = 3
-nb_lookahead_rewards_codes = 3
+nb_lookahead_rewards_codes = 4 # stands for -1, 0, +1, and UNKNOWN
first_states_code = 0
first_actions_code = first_states_code + nb_states_codes
def lookahead_reward2code(r):
+ # -1, 0, +1 or 2 for UNKNOWN
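+ #   i.e. the resulting token codes are first_lookahead_rewards_code + {0, 1, 2, 3}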
return r + 1 + first_lookahead_rewards_code
######################################################################
-def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3, nb_coins=3):
+def generate_episodes(nb, height=6, width=6, T=10, nb_walls=3, nb_coins=2):
rnd = torch.rand(nb, height, width)
rnd[:, 0, :] = 0
rnd[:, -1, :] = 0
t >= first_lookahead_rewards_code
and t < first_lookahead_rewards_code + nb_lookahead_rewards_codes
):
- return "n.p"[t - first_lookahead_rewards_code]
+ return "n.pU"[t - first_lookahead_rewards_code]
else:
return "?"
self.train_input = seq[:nb_train_samples].to(self.device)
self.test_input = seq[nb_train_samples:].to(self.device)
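+
+ # Token layout of one iteration of an episode:
+ # [ lookahead_reward | state (height x width tokens) | action | reward ]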
+ self.state_len = self.height * self.width
+ self.index_lookahead_reward = 0
+ self.index_states = 1
+ self.index_action = self.state_len + 1
+ self.index_reward = self.state_len + 2
+ self.it_len = self.state_len + 3 # lookahead_reward / state / action / reward
+
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
input = self.train_input if split == "train" else self.test_input
for batch in tqdm.tqdm(
input.split(self.batch_size), dynamic_ncols=True, desc=desc
):
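+ # Hide the lookahead-reward tokens up to a random position u in each sequence
+ # by overwriting them with the UNKNOWN code (escape.lookahead_reward2code(2))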
+ t = torch.arange(batch.size(1), device=batch.device)[None, :]
+ u = torch.randint(batch.size(1), (batch.size(0), 1), device=batch.device)
+ lr_mask = (t <= u).long() * (
+ t % self.it_len == self.index_lookahead_reward
+ ).long()
+
+ batch = lr_mask * escape.lookahead_reward2code(2) + (1 - lr_mask) * batch
yield batch
def vocabulary_size(self):
result = self.test_input[:250].clone()
t = torch.arange(result.size(1), device=result.device)[None, :]
- state_len = self.height * self.width
- index_lookahead_reward = 0
- index_states = 1
- index_action = state_len + 1
- index_reward = state_len + 2
- it_len = state_len + 3 # lookahead_reward / state / action / reward
-
- result[:, it_len:] = -1
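+ # erase everything after the first iteration; it is re-generated step by step below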
+ result[:, self.it_len :] = -1
snapshots = []
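+ # Logit bias of log(10) on the "+1" lookahead-reward code, used below when the
+ # lookahead reward is re-generated, to favor optimistic predictions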
optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
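+ # "Thinking" loop: for every iteration of length it_len, autoregressively
+ # (1) fill in the lookahead-reward tokens up to the current position,
+ # (2) generate the state, (3) re-generate the lookahead reward under the
+ # optimistic bias, and (4) generate the action and reward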
for u in tqdm.tqdm(
- range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+ range(self.it_len, result.size(1) - self.it_len + 1, self.it_len),
+ desc="thinking",
):
- lr, _, _, _ = escape.seq2episodes(result[:, :u], self.height, self.width)
-
# Generate the lookahead_reward
- ar_mask = (t % it_len == index_lookahead_reward).long() * (
- t <= u + index_lookahead_reward
+ ar_mask = (t % self.it_len == self.index_lookahead_reward).long() * (
+ t <= u + self.index_lookahead_reward
).long()
ar(result, ar_mask)
# Generate the state
- ar_mask = (t >= u + index_states).long() * (
- t < u + index_states + state_len
+ ar_mask = (t >= u + self.index_states).long() * (
+ t < u + self.index_states + self.state_len
).long()
ar(result, ar_mask)
# Re-generate the lookahead_reward
- ar_mask = (t % it_len == index_lookahead_reward).long() * (
- t <= u + index_lookahead_reward
+ ar_mask = (t % self.it_len == self.index_lookahead_reward).long() * (
+ t <= u + self.index_lookahead_reward
).long()
ar(result, ar_mask, logit_biases=optimistic_bias)
# Generate the action and reward
- ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
+ ar_mask = (t >= u + self.index_action).long() * (
+ t <= u + self.index_reward
+ ).long()
ar(result, ar_mask)
filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")