3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
10 import torch, torchvision
13 from torch.nn import functional as F
15 from mygpt import BracketedSequence
17 # from graph import save_attention_image
18 save_attention_image = None
20 ######################################################################
23 def masked_inplace_autoregression(
28 deterministic_synthesis,
29 forbidden_tokens=None,
30 progress_bar_desc="autoregression",
31 device=torch.device("cpu"),
33 assert input.size() == ar_mask.size()
35 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
37 if progress_bar_desc is not None:
41 desc=progress_bar_desc,
42 total=(input.size(0) + batch_size - 1) // batch_size,
45 with torch.autograd.no_grad():
49 for input, ar_mask in batches:
50 model.masked_inplace_autoregression(
51 input, ar_mask, forbidden_tokens, deterministic_synthesis
57 ######################################################################
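# Typical use of masked_inplace_autoregression (a sketch; the elided
# positional parameters are, judging from the keyword arguments above and
# the call sites below, the model, the batch size, the sequence tensor and
# the autoregression mask):
#
#   result = input.clone()
#   result *= 1 - ar_mask          # blank the positions to be generated
#   masked_inplace_autoregression(
#       model, batch_size, result, ar_mask, deterministic_synthesis
#   )
#
# input and ar_mask must have the same shape; positions where ar_mask is 1
# are re-sampled in place, batch by batch, the others form the prompt.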
61 def batches(self, split="train"):
64 def vocabulary_size(self):
68 self, n_epoch, model, result_dir, logger, deterministic_synthesis
73 class TaskFromFile(Task):
74 def tensorize(self, pairs, shuffle):
75 len_max = max([len(x[0]) for x in pairs])
81 [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
89 pred_mask = torch.cat(
93 [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
102 i = torch.randperm(input.size(0))
103 input = input[i].contiguous()
104 pred_mask = pred_mask[i].contiguous()
106 return input, pred_mask
108 # Trim all the tensors in the tuple z to remove as many padding tokens as
109 # possible on the left and right of the first tensor. If z is a tuple, all
110 # its elements are trimmed according to the trimming of the first one.
111 def trim(self, z, token="#"):
112 n = self.char2id[token]
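# The one-liner below pads one extra padding column on each side, flags the
# columns made only of padding tokens, and cumulatively counts the others;
# a and b then delimit the span of columns that still contain at least one
# real token, so the slice [:, a:b] drops the all-padding margins. For
# instance, with "#" as padding,
#   [[#, #, a, b, #],           [[#, a, b],
#    [#, c, d, #, #]]  becomes   [c, d, #]].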
115 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
116 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
117 return tuple([t[:, a:b] for t in z])
119 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
120 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
131 device=torch.device("cpu"),
133 self.batch_size = batch_size
136 def read_file(filename, nb=-1):
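# Each sample occupies two consecutive lines: the token sequence itself, then
# a mask string of the same length over {0, 1, 2} marking which positions are
# to be generated (non-zero) and which of those are scored (2), as enforced
# by the asserts below and used in produce_results.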
138 with open(filename, "r") as f:
140 sequence = f.readline().strip()
143 pred_mask = f.readline().strip()
144 assert len(sequence) == len(pred_mask)
145 assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
146 pairs.append((sequence, pred_mask))
152 assert len(pairs) == nb
156 train_pairs = read_file(train_filename, nb_train_samples)
157 test_pairs = read_file(test_filename, nb_test_samples)
159 symbols = ["#"] + list(
160 set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
162 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
163 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
165 self.train_input, self.train_pred_masks = self.tensorize(
166 train_pairs, shuffle=shuffle
168 self.test_input, self.test_pred_masks = self.tensorize(
169 test_pairs, shuffle=shuffle
172 def batches(self, split="train", nb_to_use=-1, desc=None):
173 assert split in {"train", "test"}
174 input = self.train_input if split == "train" else self.test_input
176 input = input[:nb_to_use]
178 desc = f"epoch-{split}"
179 for batch in tqdm.tqdm(
180 input.split(self.batch_size), dynamic_ncols=True, desc=desc
182 yield self.trim(batch).to(self.device)
184 def vocabulary_size(self):
185 return len(self.char2id)
187 def tensor2str(self, t):
188 return ["".join([self.id2char[x.item()] for x in s]) for s in t]
191 self, n_epoch, model, result_dir, logger, deterministic_synthesis
193 correct = self.trim(self.test_input[:1000]).to(self.device)
194 result = correct.clone()
195 pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
196 ar_mask = (pred_mask > 0).long()
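# Blank every position the model has to generate, so that nothing from the
# ground truth can leak into the prompt.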
197 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
199 logger(f"----------------------------------------------------------")
201 for e in self.tensor2str(result[:50]):
202 logger(f"test_before {e}")
204 masked_inplace_autoregression(
209 deterministic_synthesis,
213 logger(f"----------------------------------------------------------")
215 for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
216 logger(f"test_after {e}")
217 logger(f"correct {c}")
219 logger(f"----------------------------------------------------------")
221 err_mask = (pred_mask == 2).long()
222 nb_total = err_mask.sum().item()
223 nb_correct = ((correct == result).long() * err_mask).sum().item()
225 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
226 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
242 device=torch.device("cpu"),
247 self.batch_size = batch_size
249 self.problem = problem
251 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
254 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
258 self.train_input, self.train_ar_mask = self.train_input.to(
260 ), self.train_ar_mask.to(device)
261 self.test_input, self.test_ar_mask = self.test_input.to(
263 ), self.test_ar_mask.to(device)
265 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
267 # A bit of paranoia never hurts
268 assert self.nb_codes <= max_nb_codes
269 assert self.train_input.min() >= 0
270 assert self.test_input.min() >= 0
271 assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
276 assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
282 if logger is not None:
283 for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
284 logger(f"train_sequences {self.problem.seq2str(s)}")
285 a = "".join(["01"[x.item()] for x in a])
288 def batches(self, split="train", nb_to_use=-1, desc=None):
289 assert split in {"train", "test"}
290 input = self.train_input if split == "train" else self.test_input
292 input = input[:nb_to_use]
294 desc = f"epoch-{split}"
295 for batch in tqdm.tqdm(
296 input.split(self.batch_size), dynamic_ncols=True, desc=desc
300 def vocabulary_size(self):
304 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
306 def compute_accuracy(input, ar_mask, logger=None):
307 input, ar_mask = input[:nmax], ar_mask[:nmax]
308 result = input.clone() * (1 - ar_mask)
310 masked_inplace_autoregression(
315 deterministic_synthesis,
316 progress_bar_desc=None,
320 log_ground_truth = ar_mask.min() == 0
322 if logger is not None:
323 for sp, st in zip(result[:10], input[:10]):
325 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
329 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
332 nb_total, nb_correct = self.problem.compute_nb_correct(
333 input, ar_mask, result
336 # nb_total = ar_mask.sum().item()
337 # nb_correct = ((result == input).long() * ar_mask).sum().item()
339 return nb_total, nb_correct
341 train_nb_total, train_nb_correct = compute_accuracy(
342 self.train_input, self.train_ar_mask
346 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
349 test_nb_total, test_nb_correct = compute_accuracy(
350 self.test_input, self.test_ar_mask, logger
354 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
357 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
359 if save_attention_image is not None:
361 ns = torch.randint(self.test_input.size(0), (1,)).item()
362 input = self.test_input[ns : ns + 1].clone()
364 with torch.autograd.no_grad():
367 # model.record_attention(True)
368 model(BracketedSequence(input))
370 # ram = model.retrieve_attention()
371 # model.record_attention(False)
373 # tokens_output = [c for c in self.problem.seq2str(input[0])]
374 # tokens_input = ["n/a"] + tokens_output[:-1]
375 # for n_head in range(ram[0].size(1)):
376 # filename = os.path.join(
377 # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
379 # attention_matrices = [m[0, n_head] for m in ram]
380 # save_attention_image(
384 # attention_matrices,
386 ##min_total_attention=0.9,
390 # logger(f"wrote {filename}")
393 ######################################################################
398 class PicoCLVR(Task):
399 # Make a tensor from a list of strings
400 def tensorize(self, descr):
401 token_descr = [s.strip().split(" ") for s in descr]
402 l = max([len(s) for s in token_descr])
403 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
404 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
405 return torch.tensor(id_descr, device=self.device)
407 # Make a list of strings from a tensor
408 def detensorize(self, x):
409 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
411 # Trim all the tensors in the tuple z to remove as many padding tokens as
412 # possible on the left and right of the first tensor. If z is a tuple, all
413 # its elements are trimmed according to the trimming of the first one.
414 def trim(self, z, token="<nul>"):
415 n = self.token2id[token]
418 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
419 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
420 return tuple([t[:, a:b] for t in z])
422 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
423 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
426 ######################
437 device=torch.device("cpu"),
443 def generate_descr(nb, cache_suffix, pruner):
444 return picoclvr.generate(
454 self.batch_size = batch_size
456 self.pruner_train = pruner_train
457 self.pruner_eval = pruner_eval
459 if logger is not None:
461 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
464 self.train_descr = generate_descr(
465 nb_train_samples, "train", pruner=self.pruner_train
467 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
469 # Build the tokenizer
470 tokens = {"<nul>", "<img>"}
471 for d in [self.train_descr, self.test_descr]:
473 for t in s.strip().split(" "):
475 # make this set a sorted list to get the same tensors given
477 tokens = list(tokens)
479 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
480 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
481 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
483 # Tokenize the train and test sets
484 self.train_input = self.tensorize(self.train_descr)
485 self.test_input = self.tensorize(self.test_descr)
487 def batches(self, split="train"):
488 assert split in {"train", "test"}
489 input = self.train_input if split == "train" else self.test_input
490 for batch in tqdm.tqdm(
491 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
493 yield self.trim(batch)
495 def vocabulary_size(self):
496 return len(self.token2id)
498 def compute_missing_properties(
499 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
501 acc_nb_requested_properties = []
502 acc_nb_missing_properties = []
505 for input in tqdm.tqdm(
506 self.test_input.split(self.batch_size),
508 desc=f"test-properties",
510 result = input.clone()
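# Mask everything from the first <img> token onward: the textual description
# is kept as the prompt, and the image part is blanked and regenerated.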
511 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
512 result = (1 - ar_mask) * result + ar_mask * self.t_nul
513 masked_inplace_autoregression(
518 deterministic_synthesis,
519 progress_bar_desc=None,
523 result_descr = self.detensorize(result)
524 np = picoclvr.nb_properties(
530 nb_requested_properties, _, nb_missing_properties = zip(*np)
531 acc_nb_requested_properties += nb_requested_properties
532 acc_nb_missing_properties += nb_missing_properties
533 acc_nb_results += len(result_descr)
535 nb_requested_properties = sum(acc_nb_requested_properties)
536 nb_missing_properties = sum(acc_nb_missing_properties)
538 prefix = "" if pruner is None else "pruned_"
539 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
541 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
544 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
548 f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
551 ######################################################################
554 self, n_epoch, model, result_dir, logger, deterministic_synthesis
556 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
558 if self.pruner_eval is not None:
559 self.compute_missing_properties(n_epoch, model, self.pruner_eval)
561 nb_tokens_to_generate = self.height * self.width + 3
566 for primer_descr in [
567 "red above green <sep> green top <sep> blue right of red",
568 "there is red <sep> there is yellow <sep> there is blue",
569 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
570 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
572 primer += [primer_descr + " <img>"] * nb_per_primer
574 result = self.tensorize(primer)
575 fill = result.new_full(
576 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
578 result = torch.cat((result, fill), 1)
579 ar_mask = (result == self.t_nul).long()
580 masked_inplace_autoregression(
585 deterministic_synthesis,
588 result_descr = self.detensorize(result)
590 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
592 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
593 acc_nb_results = len(result_descr)
595 nb_requested_properties = sum(acc_nb_requested_properties)
596 nb_missing_properties = sum(acc_nb_missing_properties)
599 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
601 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
604 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
607 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
611 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
615 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
621 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
622 torchvision.utils.save_image(
623 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
625 logger(f"wrote {image_name}")
628 ######################################################################
633 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
637 self.nb_train_samples = nb_train_samples
638 self.nb_test_samples = nb_test_samples
639 self.batch_size = batch_size
641 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
642 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
643 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
644 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
646 def batches(self, split="train", nb_to_use=-1, desc=None):
647 assert split in {"train", "test"}
648 input = self.train_input if split == "train" else self.test_input
650 input = input[:nb_to_use]
652 desc = f"epoch-{split}"
653 for batch in tqdm.tqdm(
654 input.split(self.batch_size), dynamic_ncols=True, desc=desc
658 def vocabulary_size(self):
662 self, n_epoch, model, result_dir, logger, deterministic_synthesis
664 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
665 ar_mask = torch.full_like(results, 1)
666 masked_inplace_autoregression(
671 deterministic_synthesis,
674 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
675 torchvision.utils.save_image(
676 1 - results.reshape(-1, 1, 28, 28) / 255.0,
681 logger(f"wrote {image_name}")
684 ######################################################################
690 def map2seq(self, *m):
691 return torch.cat([x.flatten(1) for x in m], 1)
693 def seq2map(self, s):
694 s = s.reshape(s.size(0), -1, self.height, self.width)
695 return (s[:, k] for k in range(s.size(1)))
705 device=torch.device("cpu"),
709 self.batch_size = batch_size
714 train_mazes, train_paths, _ = maze.create_maze_data(
719 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
721 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
723 test_mazes, test_paths, _ = maze.create_maze_data(
728 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
730 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
732 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
734 def batches(self, split="train", nb_to_use=-1, desc=None):
735 assert split in {"train", "test"}
736 input = self.train_input if split == "train" else self.test_input
738 input = input[:nb_to_use]
740 desc = f"epoch-{split}"
741 for batch in tqdm.tqdm(
742 input.split(self.batch_size), dynamic_ncols=True, desc=desc
746 def vocabulary_size(self):
750 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
752 nb_total, nb_correct = 0, 0
754 self.width * self.height,
755 self.width * self.height,
760 for input in self.batches(split, nb_to_use):
761 result = input.clone()
762 ar_mask = result.new_zeros(result.size())
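# The first height*width tokens encode the maze itself and serve as the
# prompt; only the trailing path tokens are blanked and regenerated.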
763 ar_mask[:, self.height * self.width :] = 1
764 result *= 1 - ar_mask
765 masked_inplace_autoregression(
770 deterministic_synthesis,
771 progress_bar_desc=None,
774 mazes, paths = self.seq2map(result)
775 path_correctness = maze.path_correctness(mazes, paths)
776 nb_correct += path_correctness.long().sum()
777 nb_total += mazes.size(0)
779 optimal_path_lengths = (
780 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
782 predicted_path_lengths = (
783 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
785 optimal_path_lengths = optimal_path_lengths[path_correctness]
786 predicted_path_lengths = predicted_path_lengths[path_correctness]
787 count[optimal_path_lengths, predicted_path_lengths] += 1
793 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
796 return nb_total, nb_correct, count
799 self, n_epoch, model, result_dir, logger, deterministic_synthesis
801 train_nb_total, train_nb_correct, count = self.compute_error(
805 deterministic_synthesis=deterministic_synthesis,
808 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
811 test_nb_total, test_nb_correct, count = self.compute_error(
815 deterministic_synthesis=deterministic_synthesis,
818 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
821 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
823 if count is not None:
824 proportion_optimal = count.diagonal().sum().float() / count.sum()
825 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
827 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
829 for i in range(count.size(0)):
830 for j in range(count.size(1)):
831 eol = " " if j < count.size(1) - 1 else "\n"
832 f.write(f"{count[i,j]}{eol}")
834 input = self.test_input[:48]
835 result = input.clone()
836 ar_mask = result.new_zeros(result.size())
837 ar_mask[:, self.height * self.width :] = 1
838 result *= 1 - ar_mask
839 masked_inplace_autoregression(
844 deterministic_synthesis,
848 mazes, paths = self.seq2map(input)
849 _, predicted_paths = self.seq2map(result)
851 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
856 predicted_paths=predicted_paths,
857 path_correct=maze.path_correctness(mazes, predicted_paths),
858 path_optimal=maze.path_optimality(paths, predicted_paths),
860 logger(f"wrote {filename}")
863 ######################################################################
880 device=torch.device("cpu"),
884 self.batch_size = batch_size
888 self.prompt_length = prompt_length
890 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
899 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
909 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
911 def batches(self, split="train", nb_to_use=-1, desc=None):
912 assert split in {"train", "test"}
913 input = self.train_input if split == "train" else self.test_input
915 input = input[:nb_to_use]
917 desc = f"epoch-{split}"
918 for batch in tqdm.tqdm(
919 input.split(self.batch_size), dynamic_ncols=True, desc=desc
923 def vocabulary_size(self):
927 self, n_epoch, model, result_dir, logger, deterministic_synthesis
929 def compute_nb_correct(input, prior_visits):
930 result = input.clone()
931 i = torch.arange(result.size(1), device=result.device)[None, :]
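# Keep the first prompt_length steps (two tokens per step) untouched and
# regenerate only the even-indexed token of every later step.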
933 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
937 result *= 1 - ar_mask
939 masked_inplace_autoregression(
944 deterministic_synthesis,
948 nb_total = ((prior_visits > 0) * ar_mask).sum()
950 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
952 return nb_total, nb_correct
954 test_nb_total, test_nb_correct = compute_nb_correct(
955 self.test_input[:1000], self.test_prior_visits[:1000]
959 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
962 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
965 ######################################################################
981 fraction_values_for_train=None,
982 device=torch.device("cpu"),
986 self.batch_size = batch_size
987 self.nb_steps = nb_steps
988 self.nb_stacks = nb_stacks
989 self.nb_digits = nb_digits
992 if fraction_values_for_train is None:
993 values_for_train = None
994 values_for_test = None
996 all = torch.randperm(10**nb_digits)
997 nb_for_train = int(all.size(0) * fraction_values_for_train)
998 values_for_train = all[:nb_for_train]
999 values_for_test = all[nb_for_train:]
1001 self.train_input, self.train_stack_counts = stack.generate_sequences(
1010 self.test_input, self.test_stack_counts = stack.generate_sequences(
1019 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
1020 counts = self.test_stack_counts.flatten()[i.flatten()]
1021 counts = F.one_hot(counts).sum(0)
1022 logger(f"test_pop_stack_counts {counts}")
1024 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1026 def batches(self, split="train", nb_to_use=-1, desc=None):
1027 assert split in {"train", "test"}
1028 input = self.train_input if split == "train" else self.test_input
1030 input = input[:nb_to_use]
1032 desc = f"epoch-{split}"
1033 for batch in tqdm.tqdm(
1034 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1038 def vocabulary_size(self):
1039 return self.nb_codes
1041 def produce_results(
1042 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1044 def compute_nb_correct(input):
1045 result = input.clone()
1046 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1047 ar_mask = (result != input).long()
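# remove_popped_values blanks the values returned by the pop operations, so
# the mask (result != input) selects exactly the tokens the model must
# predict.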
1048 masked_inplace_autoregression(
1053 deterministic_synthesis,
1057 errors = ((result != input).long() * ar_mask).reshape(
1058 -1, 1 + self.nb_digits
1060 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
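# Tokens are grouped per value (one marker plus nb_digits digit tokens); a
# popped value is counted as correct only if none of its tokens is wrong.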
1062 nb_total = ar_mask.max(1).values.sum()
1063 nb_correct = nb_total - errors.max(1).values.sum()
1065 return nb_total, nb_correct
1067 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
1070 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1073 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1075 ##############################################################
1076 # Log a few generated sequences
1077 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1078 result = input.clone()
1079 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1080 ar_mask = (result != input).long()
1082 # for n in range(result.size(0)):
1084 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1087 masked_inplace_autoregression(
1092 deterministic_synthesis,
1096 for n in range(result.size(0)):
1098 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1100 ##############################################################
1103 ######################################################################
1109 def tensorize(self, sequences):
1110 len_max = max([len(x) for x in sequences])
1116 self.token2id[str(c)]
1117 for c in s + ["<nul>"] * (len_max - len(s))
1126 def seq2str(self, seq):
1127 return " ".join([self.id2token[i] for i in seq])
1134 nb_starting_values=3,
1140 device=torch.device("cpu"),
1144 self.batch_size = batch_size
1145 self.device = device
1146 self.no_prog = no_prog
1150 nb_starting_values=nb_starting_values,
1151 nb_result_values_max=4 * nb_starting_values,
1152 max_input=max_input,
1156 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1161 nb_starting_values=nb_starting_values,
1162 nb_result_values_max=4 * nb_starting_values,
1163 max_input=max_input,
1167 for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1171 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1173 val_max = max([x if type(x) is int else 0 for x in symbols])
1174 symbols = list(filter(lambda x: type(x) is str, symbols))
1176 symbols += [str(n) for n in range(val_max + 1)]
1177 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1178 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1180 self.t_nul = self.token2id["<nul>"]
1181 self.t_input = self.token2id["<in>"]
1182 self.t_output = self.token2id["<out>"]
1183 self.t_prog = self.token2id["<prg>"]
1184 self.t_end = self.token2id["<end>"]
1186 self.train_input = self.tensorize(train_sequences)
1187 self.test_input = self.tensorize(test_sequences)
1190 # Excise the program from every train and test example
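# For each sequence, p is the position of the (last) <prg> token; everything
# up to and including it is kept, an <end> token is written right after it,
# and the remainder is filled with <nul>.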
1191 k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
1195 ((self.train_input == self.t_prog).long() * k)
1196 .max(1, keepdim=True)
1199 self.train_input = (
1200 self.train_input * (k <= p).long()
1201 + self.t_end * (k == p + 1).long()
1202 + self.t_nul * (k > p + 1).long()
1204 k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
1208 ((self.test_input == self.t_prog).long() * k)
1209 .max(1, keepdim=True)
1213 self.test_input * (k <= p).long()
1214 + self.t_end * (k == p + 1).long()
1215 + self.t_nul * (k > p + 1).long()
1218 if logger is not None:
1219 logger(f"value_max {val_max}")
1220 for x in self.train_input[:25]:
1221 end = (x != self.t_nul).nonzero().max().item() + 1
1222 seq = [self.id2token[i.item()] for i in x[:end]]
1224 logger(f"example_seq {s}")
1226 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1228 def batches(self, split="train", nb_to_use=-1, desc=None):
1229 assert split in {"train", "test"}
1230 input = self.train_input if split == "train" else self.test_input
1232 input = input[:nb_to_use]
1234 desc = f"epoch-{split}"
1235 for batch in tqdm.tqdm(
1236 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1238 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1239 batch = batch[:, :last].to(self.device)
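# Trim each batch to the last column that contains a non-<nul> token, plus a
# small safety margin, before moving it to the device.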
1242 def vocabulary_size(self):
1243 return self.nb_codes
1245 def produce_results(
1246 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1248 # --------------------------------------------------------------------
1249 def compute_nb_errors_prog(input, nb_to_log=0):
1250 result = input.clone()
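# s flags the <prg> tokens; the clamped "cumsum minus s" switches the mask on
# strictly after the first <prg>, so the program body is blanked and must be
# regenerated from the input/output examples that precede it.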
1251 s = (result == self.t_prog).long()
1252 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1253 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1255 masked_inplace_autoregression(
1260 deterministic_synthesis,
1264 sum_nb_total, sum_nb_errors = 0, 0
1265 for one_input, one_result in zip(input, result):
1266 seq = [self.id2token[i.item()] for i in one_result]
1267 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1269 sum_nb_errors += 0 if nb_errors == 0 else 1
1271 gt_seq = [self.id2token[i.item()] for i in one_input]
1272 _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1273 gt_prog = " ".join([str(x) for x in gt_prog])
1274 prog = " ".join([str(x) for x in prog])
1275 comment = "*" if nb_errors == 0 else "-"
1276 logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1277 for start_stack, target_stack, result_stack, correct in stacks:
1278 comment = "*" if correct else "-"
1279 start_stack = " ".join([str(x) for x in start_stack])
1280 target_stack = " ".join([str(x) for x in target_stack])
1281 result_stack = " ".join([str(x) for x in result_stack])
1283 f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1287 return sum_nb_total, sum_nb_errors
1289 # --------------------------------------------------------------------
1290 def compute_nb_errors_output(input, nb_to_log=0):
1291 result = input.clone()
1292 k = torch.arange(result.size(1), device=result.device)[None, :]
1294 ((result == self.t_output) * k).max(dim=1, keepdim=True).values
1297 ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
1299 ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
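# i.e. regenerate only the tokens strictly between the last <out> marker and
# the <prg> marker: the output the program is supposed to produce.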
1300 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1302 masked_inplace_autoregression(
1307 deterministic_synthesis,
1311 sum_nb_total, sum_nb_errors = 0, 0
1312 for one_input, one_result, i, j in zip(
1313 input, result, last_output_idx, first_prog_idx
1315 seq = [self.id2token[i.item()] for i in one_result]
1317 correct = (one_input - one_result).abs().max() == 0
1318 sum_nb_errors += 0 if correct else 1
1321 self.id2token[i.item()] for i in one_result[i : j + 1]
1324 self.id2token[i.item()] for i in one_input[i : j + 1]
1326 comment = "*" if correct else "-"
1327 result_stack = " ".join([str(x) for x in result_stack])
1328 target_stack = " ".join([str(x) for x in target_stack])
1330 f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
1334 return sum_nb_total, sum_nb_errors
1336 # --------------------------------------------------------------------
1338 if not self.no_prog:
1339 test_nb_total, test_nb_errors = compute_nb_errors_prog(
1340 self.test_input[:1000].to(self.device), nb_to_log=10
1344 f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1347 logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
1349 test_nb_total, test_nb_errors = compute_nb_errors_output(
1350 self.test_input[:1000].to(self.device), nb_to_log=10
1354 f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1357 if save_attention_image is None:
1358 logger("no save_attention_image (is pycairo installed?)")
1360 ns = torch.randint(self.test_input.size(0), (1,)).item()
1361 input = self.test_input[ns : ns + 1].clone()
1362 last = (input != self.t_nul).max(0).values.nonzero().max() + 3
1363 input = input[:, :last].to(self.device)
1365 with torch.autograd.no_grad():
1368 model.record_attention(True)
1369 model(BracketedSequence(input))
1371 ram = model.retrieve_attention()
1372 model.record_attention(False)
1374 tokens_output = [self.id2token[i.item()] for i in input[0]]
1375 tokens_input = ["n/a"] + tokens_output[:-1]
1376 for n_head in range(ram[0].size(1)):
1377 filename = os.path.join(
1378 result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
1380 attention_matrices = [m[0, n_head] for m in ram]
1381 save_attention_image(
1387 # min_total_attention=0.9,
1391 logger(f"wrote {filename}")
1394 ######################################################################
1401 def tensorize(self, sequences):
1402 len_max = max([len(x) for x in sequences])
1407 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1424 device=torch.device("cpu"),
1428 self.batch_size = batch_size
1429 self.device = device
1431 train_sequences = expr.generate_sequences(
1433 nb_variables=nb_variables,
1434 length=sequence_length,
1435 operand_max=operand_max,
1436 result_max=result_max,
1439 test_sequences = expr.generate_sequences(
1441 nb_variables=nb_variables,
1442 length=sequence_length,
1443 operand_max=operand_max,
1444 result_max=result_max,
1447 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1450 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1451 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1453 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1455 self.train_input = self.tensorize(train_sequences)
1456 self.test_input = self.tensorize(test_sequences)
1458 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1460 def batches(self, split="train", nb_to_use=-1, desc=None):
1461 assert split in {"train", "test"}
1462 input = self.train_input if split == "train" else self.test_input
1464 input = input[:nb_to_use]
1466 desc = f"epoch-{split}"
1467 for batch in tqdm.tqdm(
1468 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1470 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1471 batch = batch[:, :last]
1474 def vocabulary_size(self):
1475 return self.nb_codes
1477 def seq2str(self, s):
1478 return "".join([self.id2char[k.item()] for k in s])
1480 def produce_results(
1486 deterministic_synthesis,
1489 def compute_nb_correct(input):
1490 result = input.clone()
1491 s = (result == self.space).long()
1492 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1493 result = (1 - ar_mask) * result + ar_mask * self.filler
1494 masked_inplace_autoregression(
1499 deterministic_synthesis,
1503 nb_total = input.size(0)
1504 nb_correct = (input == result).long().min(1).values.sum()
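# A sequence counts as correct only if every single token matches the ground
# truth.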
1506 #######################################################################
1507 # Compute predicted vs. true variable values
1509 nb_delta = torch.zeros(5, dtype=torch.int64)
1512 values_input = expr.extract_results([self.seq2str(s) for s in input])
1513 values_result = expr.extract_results([self.seq2str(s) for s in result])
1515 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1517 with open(filename, "w") as f:
1518 for i, r in zip(values_input, values_result):
1519 for n, vi in i.items():
1521 f.write(f"{vi} {-1 if vr is None else vr}\n")
1523 if vr is None or vr < 0:
1527 if d >= nb_delta.size(0):
1532 ######################################################################
1534 return nb_total, nb_correct, nb_delta, nb_missed
1541 ) = compute_nb_correct(self.test_input[:10000])
1544 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1547 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1549 nb_total = test_nb_delta.sum() + test_nb_missed
1550 for d in range(test_nb_delta.size(0)):
1552 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1555 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1558 ##############################################################
1559 # Log a few generated sequences
1560 if input_file is None:
1561 input = self.test_input[:10]
1563 with open(input_file, "r") as f:
1564 sequences = [e.strip() for e in f.readlines()]
1565 sequences = [s + " " + "#" * 50 for s in sequences]
1566 input = self.tensorize(sequences)
1568 result = input.clone()
1569 s = (result == self.space).long()
1570 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1571 result = (1 - ar_mask) * result + ar_mask * self.filler
1573 for n in range(result.size(0)):
1574 logger(f"test_before {self.seq2str(result[n])}")
1576 masked_inplace_autoregression(
1581 deterministic_synthesis,
1585 correct = (1 - ar_mask) * self.space + ar_mask * input
1586 for n in range(result.size(0)):
1587 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1588 logger(f"test_after {self.seq2str(result[n])} {comment}")
1589 logger(f"truth {self.seq2str(correct[n])}")
1590 ##############################################################
1593 ######################################################################
1599 # Make a tensor from a list of strings
1600 def str2tensor(self, descr):
1601 token_descr = [s.strip().split(" ") for s in descr]
1602 l = max([len(s) for s in token_descr])
1603 token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
1604 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
1605 return torch.tensor(id_descr, device=self.device)
1607 # Make a list of strings from a tensor
1608 def tensor2str(self, x):
1609 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
1611 # Trim all the tensors in the tuple z to remove as many padding tokens as
1612 # possible on the left and right of the first tensor. If z is a tuple, all
1613 # its elements are trimmed according to the trimming of the first one.
1614 def trim(self, z, token="#"):
1615 n = self.token2id[token]
1616 if type(z) == tuple:
1618 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1619 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1620 return tuple([t[:, a:b] for t in z])
1622 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1623 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1626 ######################
1636 device=torch.device("cpu"),
1640 self.device = device
1641 self.batch_size = batch_size
1642 self.grid_factory = grid.GridFactory(size=size)
1643 self.fraction_play = fraction_play
1645 if logger is not None:
1647 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1650 self.train_descr = self.grid_factory.generate_samples(
1651 nb=nb_train_samples,
1652 fraction_play=fraction_play,
1653 progress_bar=lambda r: tqdm.tqdm(r),
1656 self.test_descr = self.grid_factory.generate_samples(
1657 nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
1660 if fraction_play > 0:
1661 self.play_descr = self.grid_factory.generate_samples(
1662 nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
1665 self.play_descr = []
1667 # Build the tokenizer
1669 for d in [self.train_descr, self.test_descr, self.play_descr]:
1671 for t in s.strip().split(" "):
1673 # make this set a sorted list to get the same tensors given
1675 tokens = list(tokens)
1677 tokens = ["#"] + tokens
1678 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
1679 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
1680 self.t_nul = self.token2id["#"]
1681 self.t_true = self.token2id["true"]
1682 self.t_false = self.token2id["false"]
1683 self.t_pipe = self.token2id["|"]
1685 # Tokenize the train and test sets
1686 self.train_input = self.str2tensor(self.train_descr)
1687 self.test_input = self.str2tensor(self.test_descr)
1689 None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
1692 def batches(self, split="train"):
1693 assert split in {"train", "test"}
1694 input = self.train_input if split == "train" else self.test_input
1695 for batch in tqdm.tqdm(
1696 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1698 yield self.trim(batch)
1700 def vocabulary_size(self):
1701 return len(self.token2id)
1703 def produce_results(
1704 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1706 correct = self.test_input[:1000]
1707 result = correct.clone()
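# Only the "true" / "false" answer tokens are regenerated; the rest of the
# grid description stays as the prompt.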
1708 ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
1709 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1711 logger(f"----------------------------------------------------------")
1713 for e in self.tensor2str(result[:10]):
1714 logger(f"test_before {e}")
1716 masked_inplace_autoregression(
1721 deterministic_synthesis,
1725 logger(f"----------------------------------------------------------")
1727 for e in self.tensor2str(result[:10]):
1728 logger(f"test_after {e}")
1730 logger(f"----------------------------------------------------------")
1732 nb_total = ar_mask.sum().item()
1733 nb_correct = ((correct == result).long() * ar_mask).sum().item()
1735 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
1736 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
1738 if self.play_input is not None:
1739 result = self.play_input.clone()
1740 ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
1741 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1743 logger(f"----------------------------------------------------------")
1745 for e in self.tensor2str(result[:10]):
1746 logger(f"play_before {e}")
1748 masked_inplace_autoregression(
1753 deterministic_synthesis,
1757 logger(f"----------------------------------------------------------")
1759 for e in self.tensor2str(result[:10]):
1760 logger(f"play_after {e}")
1762 logger(f"----------------------------------------------------------")
1765 ######################################################################
1771 ######################
1780 device=torch.device("cpu"),
1784 self.device = device
1785 self.batch_size = batch_size
1786 self.nb_samples_per_mlp = 256
1788 if logger is not None:
1790 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1793 seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
1794 nb_mlps=nb_train_samples + nb_test_samples,
1795 nb_samples=self.nb_samples_per_mlp,
1799 nb_mlps_per_batch=1024,
1802 self.train_input = seq[:nb_train_samples]
1803 self.train_q_test_set = q_test_set[:nb_train_samples]
1804 self.train_ref_test_errors = test_error[:nb_train_samples]
1805 self.test_input = seq[nb_train_samples:]
1806 self.test_q_test_set = q_test_set[nb_train_samples:]
1807 self.test_ref_test_errors = test_error[nb_train_samples:]
1809 filename = os.path.join(result_dir, f"train_errors_ref.dat")
1810 with open(filename, "w") as f:
1811 for e in self.train_ref_test_errors:
1814 filename = os.path.join(result_dir, f"test_errors_ref.dat")
1815 with open(filename, "w") as f:
1816 for e in self.test_ref_test_errors:
1819 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1821 def batches(self, split="train"):
1822 assert split in {"train", "test"}
1823 input = self.train_input if split == "train" else self.test_input
1824 for batch in tqdm.tqdm(
1825 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1829 def vocabulary_size(self):
1830 return self.nb_codes
1832 def produce_results(
1833 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1835 correct = self.test_input[:1000]
1836 result = correct.clone()
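# The prompt is the quantized training set of the MLP (presumably three
# tokens per sample, plus a separator, given the slicing below); everything
# after it, i.e. the quantized MLP parameters, is blanked and regenerated.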
1838 torch.arange(result.size(1), device=result.device)
1839 > self.nb_samples_per_mlp * 3 + 1
1841 ar_mask = ar_mask.expand_as(result)
1842 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1844 masked_inplace_autoregression(
1849 deterministic_synthesis,
1853 q_train_set = result[:, : self.nb_samples_per_mlp * 3]
1854 q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
1855 error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
1857 filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
1858 with open(filename, "w") as f:
1859 for e in error_test:
1863 ######################################################################
1878 device=torch.device("cpu"),
1882 self.batch_size = batch_size
1883 self.device = device
1884 self.height = height
1887 states, actions, rewards = escape.generate_episodes(
1888 nb_train_samples + nb_test_samples, height, width, T
1890 seq = escape.episodes2seq(states, actions, rewards, lookahead_delta=T)
1891 # seq = seq[:, seq.size(1) // 3 : 2 * seq.size(1) // 3]
1892 self.train_input = seq[:nb_train_samples].to(self.device)
1893 self.test_input = seq[nb_train_samples:].to(self.device)
1895 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1897 def batches(self, split="train", nb_to_use=-1, desc=None):
1898 assert split in {"train", "test"}
1899 input = self.train_input if split == "train" else self.test_input
1901 input = input[:nb_to_use]
1903 desc = f"epoch-{split}"
1904 for batch in tqdm.tqdm(
1905 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1909 def vocabulary_size(self):
1910 return self.nb_codes
1912 def thinking_autoregression(
1913 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
1915 result = self.test_input[:100].clone()
1916 t = torch.arange(result.size(1), device=result.device)
1917 state_len = self.height * self.width
1918 iteration_len = state_len + 3
1921 masked_inplace_autoregression(
1926 deterministic_synthesis,
1931 iteration_len, result.size(1) - iteration_len + 1, iteration_len
1933 # Put a lookahead reward to -1, sample the next state
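# Lookahead rewards appear to be encoded as reward + 1 +
# escape.first_lookahead_rewards_code, hence the -1 and +1 offsets used here
# and in the next block.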
1934 result[:, u - 1] = (-1) + 1 + escape.first_lookahead_rewards_code
1935 ar_mask = (t >= u).long() * (t < u + state_len).long()
1936 ar_mask = ar_mask[None, :]
1937 ar_mask = ar_mask.expand_as(result)
1938 result *= 1 - ar_mask
1941 # Put a lookahead reward to +1, sample the action and reward
1942 result[:, u - 1] = (1) + 1 + escape.first_lookahead_rewards_code
1943 ar_mask = (t >= state_len).long() * (t < state_len + 2).long()
1944 ar_mask = ar_mask[None, :]
1945 ar_mask = ar_mask.expand_as(result)
1946 result *= 1 - ar_mask
1949 # Saving the generated sequences
1951 s, a, r, lr = escape.seq2episodes(
1952 result, self.height, self.width, lookahead=True
1954 str = escape.episodes2str(
1955 s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
1958 filename = os.path.join(result_dir, f"test_thinking_seq_{n_epoch:04d}.txt")
1959 with open(filename, "w") as f:
1961 logger(f"wrote {filename}")
1963 def produce_results(
1964 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
1966 result = self.test_input[:100].clone()
1968 # Saving the ground truth
1970 s, a, r, lr = escape.seq2episodes(
1971 result, self.height, self.width, lookahead=True
1973 str = escape.episodes2str(
1974 s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
1977 filename = os.path.join(result_dir, f"test_true_seq_{n_epoch:04d}.txt")
1978 with open(filename, "w") as f:
1980 logger(f"wrote {filename}")
1982 # Re-generating from the first frame
1985 torch.arange(result.size(1), device=result.device)
1986 >= self.height * self.width + 3
1988 ar_mask = ar_mask.expand_as(result)
1989 result *= 1 - ar_mask # paraaaaanoiaaaaaaa
1991 masked_inplace_autoregression(
1996 deterministic_synthesis,
2000 # Saving the generated sequences
2002 s, a, r, lr = escape.seq2episodes(
2003 result, self.height, self.width, lookahead=True
2005 str = escape.episodes2str(
2006 s, a, r, lookahead_rewards=lr, unicode=True, ansi_colors=True
2009 filename = os.path.join(result_dir, f"test_seq_{n_epoch:04d}.txt")
2010 with open(filename, "w") as f:
2012 logger(f"wrote {filename}")
2014 self.thinking_autoregression(
2015 n_epoch, model, result_dir, logger, deterministic_synthesis, nmax
2019 ######################################################################