ar_mask,
deterministic_synthesis,
forbidden_tokens=None,
+ logit_biases=None,
progress_bar_desc="autoregression",
device=torch.device("cpu"),
):
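+ # logit_biases: optional per-token additive bias applied to the logits at
+ # sampling time, used to make selected tokens more or less likely to be generated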
for input, ar_mask in batches:
model.masked_inplace_autoregression(
- input, ar_mask, forbidden_tokens, deterministic_synthesis
+ input,
+ ar_mask,
+ deterministic_synthesis,
+ forbidden_tokens,
+ logit_biases,
)
model.train(t)
self.train_input = seq[:nb_train_samples].to(self.device)
self.test_input = seq[nb_train_samples:].to(self.device)
- self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
-
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
input = self.train_input if split == "train" else self.test_input
yield batch
def vocabulary_size(self):
- return self.nb_codes
+ return escape.nb_codes
def thinking_autoregression(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
t = torch.arange(result.size(1), device=result.device)[None, :]
state_len = self.height * self.width
+ index_action = state_len
+ index_reward = state_len + 1
+ index_lookahead_reward = state_len + 2
it_len = state_len + 3 # state / action / reward / lookahead_reward
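+ # illustrative sizes: a 4x7 grid gives state_len = 28, so the action, reward
+ # and lookahead_reward sit at offsets 28, 29, 30 and it_len = 31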
- def ar(result, ar_mask):
+ result[:, it_len:] = -1
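+ # erase everything past the first iteration; the loop below re-generates it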
+
+ def ar(result, ar_mask, logit_biases=None):
ar_mask = ar_mask.expand_as(result)
result *= 1 - ar_mask
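+ # zero the positions to be re-generated before the autoregression fills them in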
masked_inplace_autoregression(
model,
self.batch_size,
result,
ar_mask,
- deterministic_synthesis,
+ deterministic_synthesis=deterministic_synthesis,
+ logit_biases=logit_biases,
device=self.device,
progress_bar_desc=None,
)
# Generate iteration after iteration
+ # build the bias as a float tensor: result holds long token ids, so
+ # new_zeros would inherit that dtype and truncate the log values below
+ optimistic_bias = torch.zeros(escape.nb_codes, device=result.device)
+ optimistic_bias[escape.lookahead_reward2code(-1)] = -math.log(1e1)
+ optimistic_bias[escape.lookahead_reward2code(1)] = math.log(1e1)
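+ # a bias of +/-log(10) scales the odds of the +1 / -1 lookahead-reward tokens by a factor of 10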
+
+ snapshots = []
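+ # snapshots of the first 10 sequences, taken after every generation step and dumped to a file below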
+
for u in tqdm.tqdm(
range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
):
- # Put the lookahead reward to either 0 or -1 for the
- # current iteration, sample the next state
- s = -1 # (torch.rand(result.size(0), device = result.device) < 0.2).long()
- result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+ # Re-generate the lookahead rewards of all previous iterations
+ # with a pessimistic bias
+ ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+ ar(result, ar_mask, logit_biases=-optimistic_bias)
+ snapshots.append(result[:10].detach().clone())
+
+ # Generate the state
ar_mask = (t >= u).long() * (t < u + state_len).long()
ar(result, ar_mask)
+ snapshots.append(result[:10].detach().clone())
+
+ # Re-generate the lookahead rewards of all previous iterations
+ # with an optimistic bias
+ ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+ ar(result, ar_mask, logit_biases=optimistic_bias)
+ snapshots.append(result[:10].detach().clone())
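+ # the optimistic lookahead rewards steer the upcoming action/reward sampling toward success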
- # Put the lookahead reward to +1 for the current
- # iteration, sample the action and reward
- s = 1
- result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
- ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
+ # Generate the action and reward
+ ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
ar(result, ar_mask)
+ snapshots.append(result[:10].detach().clone())
- # Fix the previous lookahead rewards in a consistant state
- for v in range(0, u, it_len):
- # Extract the rewards
- r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
- r = r - escape.first_rewards_code - 1
- a = r.min(dim=1).values
- b = r.max(dim=1).values
- s = (a < 0).long() * a + (a >= 0).long() * b
- result[:, v + state_len + 2] = (
- s + 1 + escape.first_lookahead_rewards_code
- )
+ filename = os.path.join(result_dir, f"test_thinking_compute_{n_epoch:04d}.txt")
+ with open(filename, "w") as f:
+ for n in range(10):
for snapshot in snapshots:
s, a, r, lr = escape.seq2episodes(
snapshot[n : n + 1], self.height, self.width
)
+ str = escape.episodes2str(
+ lr, s, a, r, unicode=True, ansi_colors=True
+ )
+ f.write(str)
+ f.write("\n\n")
# Saving the generated sequences
- s, a, r, lr = escape.seq2episodes(
- result, self.height, self.width, lookahead=True
- )
- str = escape.episodes2str(
- s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
- )
+ s, a, r, lr = escape.seq2episodes(result, self.height, self.width)
+ str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
with open(filename, "w") as f:
def produce_results(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
):
- result = self.test_input[:100].clone()
+ result = self.test_input[:250].clone()
# Saving the ground truth
s, a, r, lr = escape.seq2episodes(
- result, self.height, self.width, lookahead=True
- )
- str = escape.episodes2str(
- s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+ result,
+ self.height,
+ self.width,
)
+ str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
with open(filename, "w") as f:
# Saving the generated sequences
s, a, r, lr = escape.seq2episodes(
- result, self.height, self.width, lookahead=True
- )
- str = escape.episodes2str(
- s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+ result,
+ self.height,
+ self.width,
)
+ str = escape.episodes2str(lr, s, a, r, unicode=True, ansi_colors=True)
filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
with open(filename, "w") as f: