X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=51538366383be455f160ab6158392627ecea5190;hb=19ec7f3e4030ddece2647983dcf1bed5eb0d9544;hp=d21e2648466a606b3067fc680feb7305a6b95781;hpb=8ea809c43242d3a2e063692105919a86c3f6fe6b;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index d21e264..5153836 100755
--- a/tasks.py
+++ b/tasks.py
@@ -99,7 +99,6 @@ class TaskFromFile(Task):
         ).to("cpu")
 
         if shuffle:
-            print("SHUFFLING!")
             i = torch.randperm(input.size(0))
             input = input[i].contiguous()
             pred_mask = pred_mask[i].contiguous()
@@ -1862,3 +1861,182 @@ class QMLP(Task):
 
 
 ######################################################################
+
+import escape
+
+
+class Escape(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        height,
+        width,
+        T,
+        nb_walls,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = height
+        self.width = width
+
+        states, actions, rewards = escape.generate_episodes(
+            nb_train_samples + nb_test_samples, height, width, T, nb_walls
+        )
+        seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
+        # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
+        self.train_input = seq[:nb_train_samples].to(self.device)
+        self.test_input = seq[nb_train_samples:].to(self.device)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def thinking_autoregression(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:250].clone()
+        t = torch.arange(result.size(1), device=result.device)[None, :]
+
+        state_len = self.height * self.width
+        it_len = state_len + 3  # state / action / reward / lookahead_reward
+
+        def ar(result, ar_mask):
+            ar_mask = ar_mask.expand_as(result)
+            result *= 1 - ar_mask
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                device=self.device,
+                progress_bar_desc=None,
+            )
+
+        # Generate iteration after iteration
+
+        for u in tqdm.tqdm(
+            range(it_len, result.size(1) - it_len + 1, it_len), desc="thinking"
+        ):
+            # Set the lookahead reward to either 0 or -1 for the
+            # current iteration, with a probability that depends on
+            # the sequence index so that we get diverse examples,
+            # then sample the next state
+            s = -(
+                torch.rand(result.size(0), device=result.device)
+                <= torch.linspace(0, 1, result.size(0), device=result.device)
+            ).long()
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u).long() * (t < u + state_len).long()
+            ar(result, ar_mask)
+
+            # Set the lookahead reward to +1 for the current
+            # iteration, then sample the action and the reward
+            s = 1
+            result[:, u - 1] = s + 1 + escape.first_lookahead_rewards_code
+            ar_mask = (t >= u + state_len).long() * (t < u + state_len + 2).long()
+            ar(result, ar_mask)
+
+            # Rewrite the previous lookahead rewards so they are consistent
+            for v in range(0, u, it_len):
+                # Extract the rewards
+                r = result[:, range(v + state_len + 1 + it_len, u + it_len - 1, it_len)]
+                r = r - escape.first_rewards_code - 1
+                r = r.clamp(min=-1, max=1)  # predicted rewards can fall out of range
+                a = r.min(dim=1).values
+                b = r.max(dim=1).values
+                s = (a < 0).long() * a + (a >= 0).long() * b
+                result[:, v + state_len + 2] = (
+                    s + 1 + escape.first_lookahead_rewards_code
+                )
+
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        episodes_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(episodes_str)
+        logger(f"wrote {filename}")
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        result = self.test_input[:250].clone()
+
+        # Saving the ground truth
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        episodes_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(episodes_str)
+        logger(f"wrote {filename}")
+
+        # Re-generating from the first frame
+
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            >= self.height * self.width + 3
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
+        result *= 1 - ar_mask  # paranoia: zero the tokens to be regenerated
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        # Saving the generated sequences
+
+        s, a, r, lr = escape.seq2episodes(
+            result, self.height, self.width, lookahead=True
+        )
+        episodes_str = escape.episodes2str(
+            s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
+        )
+
+        filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
+        with open(filename, "w") as f:
+            f.write(episodes_str)
+        logger(f"wrote {filename}")
+
+        self.thinking_autoregression(
+            n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
+        )
+
+
+######################################################################
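
######################################################################

A minimal usage sketch for the new Escape task, assuming the constructor
signature introduced above and that tasks.py and escape.py from
picoclvr.git are importable. The sample counts, maze geometry, horizon T
and wall count are illustrative values, not taken from the repository.

    import torch

    import tasks

    # hypothetical hyper-parameters; only the argument names come from
    # the patch above
    task = tasks.Escape(
        nb_train_samples=25000,
        nb_test_samples=1000,
        batch_size=25,
        height=5,
        width=7,
        T=25,
        nb_walls=3,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )

    print(f"vocabulary size: {task.vocabulary_size()}")

    # each batch is a (batch_size, seq_len) LongTensor of token codes,
    # one episode per row; a training loop would run the model's
    # forward/backward pass here
    for input in task.batches(split="train"):
        pass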