ar_mask,
deterministic_synthesis,
forbidden_tokens=None,
+ logit_biases=None,
progress_bar_desc="autoregression",
device=torch.device("cpu"),
):
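+ # `logit_biases` is an optional additive bias over the vocabulary logits
+ # (assumed to be applied to the logits before sampling); it is simply
+ # forwarded to the model's masked_inplace_autoregression, which must accept
+ # the extra argument.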
for input, ar_mask in batches:
model.masked_inplace_autoregression(
- input, ar_mask, forbidden_tokens, deterministic_synthesis
+ input,
+ ar_mask,
+ deterministic_synthesis,
+ forbidden_tokens,
+ logit_biases,
)
model.train(t)
height,
width,
T,
+ nb_walls,
logger=None,
device=torch.device("cpu"),
):
self.width = width
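+ # Episodes are now generated with length T directly (instead of 3 * T);
+ # nb_walls is assumed to control how many wall cells each generated grid
+ # contains (new last argument to generate_episodes).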
states, actions, rewards = escape.generate_episodes(
- nb_train_samples + nb_test_samples, height, width, 3 * T
+ nb_train_samples + nb_test_samples, height, width, T, nb_walls
)
seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
- seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+ # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
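+ # The middle-third crop is no longer needed: with episodes of length T
+ # (rather than 3 * T), the full sequence is kept.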
self.train_input = seq[:nb_train_samples].to(self.device)
self.test_input = seq[nb_train_samples:].to(self.device)
def thinking_autoregression(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
):
- result = self.test_input[:100].clone()
- t = torch.arange(result.size(1), device=result.device)
- itl = self.height * self.width + 3
+ result = self.test_input[:250].clone()
+ t = torch.arange(result.size(1), device=result.device)[None, :]
- def ar():
+ state_len = self.height * self.width
+ index_action = state_len
+ index_reward = state_len + 1
+ index_lookahead_reward = state_len + 2
+ it_len = state_len + 3 # state / action / reward / lookahead_reward
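+ # e.g. with height=4 and width=5: state_len=20, so within each iteration
+ # tokens 0..19 are the state, token 20 the action, 21 the reward,
+ # 22 the lookahead_reward, and it_len=23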
+
+ def ar(result, ar_mask, logit_biases=None):
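+ # Helper: zero out the positions selected by ar_mask and regenerate them
+ # in place, forwarding the optional logit_biases to masked_inplace_autoregression.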
+ ar_mask = ar_mask.expand_as(result)
+ result *= 1 - ar_mask
masked_inplace_autoregression(
model,
self.batch_size,
result,
ar_mask,
- deterministic_synthesis,
+ deterministic_synthesis=deterministic_synthesis,
+ logit_biases=logit_biases,
device=self.device,
+ progress_bar_desc=None,
)
- for u in range(itl, result.size(1) - itl + 1, itl):
- print(f"{itl=} {u=} {result.size(1)=}")
- result[:, u - 1] = (-1) + 1 + escape.first_lookahead_rewards_code
- ar_mask = (t >= u).long() * (t < u + self.height * self.width).long()
- ar_mask = ar_mask[None, :]
- ar_mask = ar_mask.expand_as(result)
- result *= 1 - ar_mask
- ar()
- result[:, u - 1] = (1) + 1 + escape.first_lookahead_rewards_code
- ar_mask = (t >= self.height * self.width).long() * (
- t < self.height * self.width + 2
- ).long()
- ar_mask = ar_mask[None, :]
- ar_mask = ar_mask.expand_as(result)
- result *= 1 - ar_mask
- ar()
+ # Generate iteration after iteration
+
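+ # Biases on the two lookahead_reward codes, assuming logit_biases is added
+ # to the logits before sampling: +optimistic_bias makes the "+1" code ~10x
+ # more likely and the "-1" code ~10x less likely; -optimistic_bias does the
+ # reverse.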
+ optimistic_bias = torch.zeros(self.nb_codes, device=result.device)  # float, so the log values below are not truncated
+ optimistic_bias[(-1) + escape.first_lookahead_rewards_code + 1] = math.log(1e-1)
+ optimistic_bias[(1) + escape.first_lookahead_rewards_code + 1] = math.log(1e1)
+
+ for u in tqdm.tqdm(
+ range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+ ):
+ # Re-generate the lookahead_rewards of all previous iterations, biased pessimistically
+ ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+ ar(result, ar_mask, logit_biases=-optimistic_bias)
+
+ # Generate the state
+ ar_mask = (t >= u).long() * (t < u + state_len).long()
+ ar(result, ar_mask)
+
+ # Re-generate the lookahead_rewards of all previous iterations, biased optimistically
+ ar_mask = (t < u).long() * (t % it_len == index_lookahead_reward).long()
+ ar(result, ar_mask, logit_biases=optimistic_bias)
+
+ # Generate the action and reward
+ ar_mask = (t >= u + index_action).long() * (t <= u + index_reward).long()
+ ar(result, ar_mask)
# Saving the generated sequences
def produce_results(
self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
):
- result = self.test_input[:100].clone()
+ result = self.test_input[:250].clone()
# Saving the ground truth