3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
10 import torch, torchvision
13 from torch.nn import functional as F
15 from mygpt import BracketedSequence
17 # from graph import save_attention_image
18 save_attention_image = None
20 ######################################################################
23 def masked_inplace_autoregression(
28 deterministic_synthesis,
29 forbidden_tokens=None,
30 progress_bar_desc="autoregression",
31 device=torch.device("cpu"),
33 assert input.size() == ar_mask.size()
35 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
37 if progress_bar_desc is not None:
41 desc=progress_bar_desc,
42 total=(input.size(0) + batch_size - 1) // batch_size,
45 with torch.autograd.no_grad():
49 for input, ar_mask in batches:
50 model.masked_inplace_autoregression(
51 input, ar_mask, forbidden_tokens, deterministic_synthesis
57 ######################################################################
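# Illustrative sketch, not part of the original pipeline: the ar_mask
# convention used by the callers of masked_inplace_autoregression() in this
# file. Positions where ar_mask == 1 are blanked and then regenerated token by
# token by the model; positions where ar_mask == 0 are kept as the prompt. The
# helper name and the constant stand-in for the model's prediction are
# hypothetical, chosen only to show the mechanics.


def _demo_ar_mask_convention():
    input = torch.tensor([[5, 7, 9, 3, 2]])
    ar_mask = torch.tensor([[0, 0, 1, 1, 1]])  # keep the first two tokens fixed
    result = input.clone() * (1 - ar_mask)  # blank the positions to predict
    for t in range(result.size(1)):
        if ar_mask[0, t] == 1:
            result[0, t] = 0  # a real model would sample from its logits here
    return result  # -> tensor([[5, 7, 0, 0, 0]])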
61 def batches(self, split="train"):
64 def vocabulary_size(self):
68 self, n_epoch, model, result_dir, logger, deterministic_synthesis
73 class TaskFromFile(Task):
74 def tensorize(self, pairs):
75 len_max = max([len(x[0]) for x in pairs])
81 [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
89 pred_mask = torch.cat(
93 [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
101 return input, pred_mask
103 # Trim z to remove as many padding-token columns as possible from its left
104 # and right. If z is a tuple, all its elements are trimmed according to the
105 # trimming computed for its first tensor.
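# The implementation below pads one column of the padding token on each side,
# flags the columns that contain only padding across the whole batch, and uses
# a cumsum over the remaining columns to locate the first and last columns
# holding at least one real symbol; everything outside that range is dropped.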
106 def trim(self, z, token="#"):
107 n = self.char2id[token]
110 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
111 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
112 return tuple([t[:, a:b] for t in z])
114 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
115 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
124 device=torch.device("cpu"),
126 self.batch_size = batch_size
130 with open(filename, "r") as f:
131 for _ in range(nb_train_samples + nb_test_samples):
132 sequence = f.readline().strip()
133 pred_mask = f.readline().strip()
134 assert len(sequence) == len(pred_mask)
135 assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
136 pairs.append((sequence, pred_mask))
138 symbols = ["#"] + list(set("".join([x[0] for x in pairs])) - set(["#"]))
139 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
140 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
142 self.train_input, self.train_pred_masks = self.tensorize(
143 pairs[:nb_train_samples]
145 self.test_input, self.test_pred_masks = self.tensorize(pairs[nb_train_samples:])
147 assert self.train_input.size(0) == nb_train_samples
148 assert self.test_input.size(0) == nb_test_samples
150 def batches(self, split="train", nb_to_use=-1, desc=None):
151 assert split in {"train", "test"}
152 input = self.train_input if split == "train" else self.test_input
154 input = input[:nb_to_use]
156 desc = f"epoch-{split}"
157 for batch in tqdm.tqdm(
158 input.split(self.batch_size), dynamic_ncols=True, desc=desc
160 yield self.trim(batch).to(self.device)
162 def vocabulary_size(self):
163 return len(self.char2id)
165 def tensor2str(self, t):
166 return ["".join([self.id2char[x.item()] for x in s]) for s in t]
169 self, n_epoch, model, result_dir, logger, deterministic_synthesis
171 correct = self.trim(self.test_input[:1000]).to(self.device)
172 result = correct.clone()
173 pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
174 ar_mask = (pred_mask > 0).long()
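# pred_mask takes values in {0, 1, 2}: positions > 0 are blanked and
# regenerated by the model, and only positions equal to 2 are counted in the
# accuracy (see err_mask below).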
175 result *= 1 - ar_mask # paranoia: blank the masked positions so nothing leaks from the ground truth
177 logger(f"----------------------------------------------------------")
179 for e in self.tensor2str(result[:10]):
180 logger(f"test_before {e}")
182 masked_inplace_autoregression(
187 deterministic_synthesis,
191 logger(f"----------------------------------------------------------")
193 for e, c in zip(self.tensor2str(result[:10]), self.tensor2str(correct[:10])):
194 logger(f"test_after {e}")
195 logger(f"correct {c}")
197 logger(f"----------------------------------------------------------")
199 err_mask = (pred_mask == 2).long()
200 nb_total = err_mask.sum().item()
201 nb_correct = ((correct == result).long() * err_mask).sum().item()
203 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
204 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
220 device=torch.device("cpu"),
225 self.batch_size = batch_size
227 self.problem = problem
229 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
232 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
236 self.train_input, self.train_ar_mask = self.train_input.to(
238 ), self.train_ar_mask.to(device)
239 self.test_input, self.test_ar_mask = self.test_input.to(
241 ), self.test_ar_mask.to(device)
243 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
245 # A bit of paranoia never hurts
246 assert self.nb_codes <= max_nb_codes
247 assert self.train_input.min() >= 0
248 assert self.test_input.min() >= 0
249 assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
254 assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
260 if logger is not None:
261 for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
262 logger(f"train_sequences {self.problem.seq2str(s)}")
263 a = "".join(["01"[x.item()] for x in a])
266 def batches(self, split="train", nb_to_use=-1, desc=None):
267 assert split in {"train", "test"}
268 input = self.train_input if split == "train" else self.test_input
270 input = input[:nb_to_use]
272 desc = f"epoch-{split}"
273 for batch in tqdm.tqdm(
274 input.split(self.batch_size), dynamic_ncols=True, desc=desc
278 def vocabulary_size(self):
282 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
284 def compute_accuracy(input, ar_mask, logger=None):
285 input, ar_mask = input[:nmax], ar_mask[:nmax]
286 result = input.clone() * (1 - ar_mask)
288 masked_inplace_autoregression(
293 deterministic_synthesis,
294 progress_bar_desc=None,
298 log_ground_truth = ar_mask.min() == 0
300 if logger is not None:
301 for sp, st in zip(result[:10], input[:10]):
303 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
307 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
310 nb_total, nb_correct = self.problem.compute_nb_correct(
311 input, ar_mask, result
314 # nb_total = ar_mask.sum().item()
315 # nb_correct = ((result == input).long() * ar_mask).sum().item()
317 return nb_total, nb_correct
319 train_nb_total, train_nb_correct = compute_accuracy(
320 self.train_input, self.train_ar_mask
324 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
327 test_nb_total, test_nb_correct = compute_accuracy(
328 self.test_input, self.test_ar_mask, logger
332 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
335 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
337 if save_attention_image is not None:
339 ns = torch.randint(self.test_input.size(0), (1,)).item()
340 input = self.test_input[ns : ns + 1].clone()
342 with torch.autograd.no_grad():
345 # model.record_attention(True)
346 model(BracketedSequence(input))
348 # ram = model.retrieve_attention()
349 # model.record_attention(False)
351 # tokens_output = [c for c in self.problem.seq2str(input[0])]
352 # tokens_input = ["n/a"] + tokens_output[:-1]
353 # for n_head in range(ram[0].size(1)):
354 # filename = os.path.join(
355 # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
357 # attention_matrices = [m[0, n_head] for m in ram]
358 # save_attention_image(
362 # attention_matrices,
364 # min_total_attention=0.9,
368 # logger(f"wrote {filename}")
371 ######################################################################
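# Illustrative sketch, not part of the original code: the padding-trim trick
# used by the various trim() methods in this file, shown on a toy batch in
# which the value 9 plays the role of the padding token. The helper name is
# hypothetical.


def _demo_trim_trick():
    x = torch.tensor([[9, 9, 1, 2, 9], [9, 3, 4, 9, 9]])
    n = 9
    i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
    a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
    return x[:, a:b]  # -> tensor([[9, 1, 2], [3, 4, 9]])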
376 class PicoCLVR(Task):
377 # Make a tensor from a list of strings
378 def tensorize(self, descr):
379 token_descr = [s.strip().split(" ") for s in descr]
380 l = max([len(s) for s in token_descr])
381 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
382 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
383 return torch.tensor(id_descr, device=self.device)
385 # Make a list of strings from a tensor
386 def detensorize(self, x):
387 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
389 # Trim z to remove as many padding-token columns as possible from its left
390 # and right. If z is a tuple, all its elements are trimmed according to the
391 # trimming computed for its first tensor.
392 def trim(self, z, token="<nul>"):
393 n = self.token2id[token]
396 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
397 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
398 return tuple([t[:, a:b] for t in z])
400 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
401 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
404 ######################
415 device=torch.device("cpu"),
421 def generate_descr(nb, cache_suffix, pruner):
422 return picoclvr.generate(
432 self.batch_size = batch_size
434 self.pruner_train = pruner_train
435 self.pruner_eval = pruner_eval
437 if logger is not None:
439 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
442 self.train_descr = generate_descr(
443 nb_train_samples, "train", pruner=self.pruner_train
445 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
447 # Build the tokenizer
448 tokens = {"<nul>", "<img>"}
449 for d in [self.train_descr, self.test_descr]:
451 for t in s.strip().split(" "):
453 # make this set a sorted list to get the same tensors given the same descriptions
455 tokens = list(tokens)
457 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
458 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
459 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
461 # Tokenize the train and test sets
462 self.train_input = self.tensorize(self.train_descr)
463 self.test_input = self.tensorize(self.test_descr)
465 def batches(self, split="train"):
466 assert split in {"train", "test"}
467 input = self.train_input if split == "train" else self.test_input
468 for batch in tqdm.tqdm(
469 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
471 yield self.trim(batch)
473 def vocabulary_size(self):
474 return len(self.token2id)
476 def compute_missing_properties(
477 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
479 acc_nb_requested_properties = []
480 acc_nb_missing_properties = []
483 for input in tqdm.tqdm(
484 self.test_input.split(self.batch_size),
486 desc=f"test-properties",
488 result = input.clone()
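# Mask everything from the first <img> token onward: the textual property
# description before it is kept as the prompt, and the <img> marker plus the
# image tokens are blanked and regenerated.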
489 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
490 result = (1 - ar_mask) * result + ar_mask * self.t_nul
491 masked_inplace_autoregression(
496 deterministic_synthesis,
497 progress_bar_desc=None,
501 result_descr = self.detensorize(result)
502 np = picoclvr.nb_properties(
508 nb_requested_properties, _, nb_missing_properties = zip(*np)
509 acc_nb_requested_properties += nb_requested_properties
510 acc_nb_missing_properties += nb_missing_properties
511 acc_nb_results += len(result_descr)
513 nb_requested_properties = sum(acc_nb_requested_properties)
514 nb_missing_properties = sum(acc_nb_missing_properties)
516 prefix = "" if pruner is None else "pruned_"
517 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
519 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
522 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
526 f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
529 ######################################################################
532 self, n_epoch, model, result_dir, logger, deterministic_synthesis
534 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
536 if self.pruner_eval is not None:
537 self.compute_missing_properties(n_epoch, model, self.pruner_eval)
539 nb_tokens_to_generate = self.height * self.width + 3
544 for primer_descr in [
545 "red above green <sep> green top <sep> blue right of red",
546 "there is red <sep> there is yellow <sep> there is blue",
547 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
548 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
550 primer += [primer_descr + " <img>"] * nb_per_primer
552 result = self.tensorize(primer)
553 fill = result.new_full(
554 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
556 result = torch.cat((result, fill), 1)
557 ar_mask = (result == self.t_nul).long()
558 masked_inplace_autoregression(
563 deterministic_synthesis,
566 result_descr = self.detensorize(result)
568 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
570 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
571 acc_nb_results = len(result_descr)
573 nb_requested_properties = sum(acc_nb_requested_properties)
574 nb_missing_properties = sum(acc_nb_missing_properties)
577 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
579 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
582 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
585 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
589 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
593 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
599 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
600 torchvision.utils.save_image(
601 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
603 logger(f"wrote {image_name}")
606 ######################################################################
611 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
615 self.nb_train_samples = nb_train_samples
616 self.nb_test_samples = nb_test_samples
617 self.batch_size = batch_size
619 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
620 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
621 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
622 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
624 def batches(self, split="train", nb_to_use=-1, desc=None):
625 assert split in {"train", "test"}
626 input = self.train_input if split == "train" else self.test_input
628 input = input[:nb_to_use]
630 desc = f"epoch-{split}"
631 for batch in tqdm.tqdm(
632 input.split(self.batch_size), dynamic_ncols=True, desc=desc
636 def vocabulary_size(self):
640 self, n_epoch, model, result_dir, logger, deterministic_synthesis
642 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
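# The mask below is all ones, so every one of the 28 * 28 positions is
# generated: the model free-runs and synthesizes 64 complete images from
# scratch.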
643 ar_mask = torch.full_like(results, 1)
644 masked_inplace_autoregression(
649 deterministic_synthesis,
652 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
653 torchvision.utils.save_image(
654 1 - results.reshape(-1, 1, 28, 28) / 255.0,
659 logger(f"wrote {image_name}")
662 ######################################################################
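# Illustrative sketch, not part of the original code: how a maze map and a
# path map, both of shape (N, H, W), are packed into one flat token sequence
# of length 2 * H * W and recovered, mirroring map2seq() / seq2map() below.
# The helper name and the toy shapes are hypothetical.


def _demo_map2seq_roundtrip():
    N, H, W = 2, 3, 4
    maze_map = torch.zeros(N, H, W, dtype=torch.int64)
    path_map = torch.ones(N, H, W, dtype=torch.int64)
    seq = torch.cat([maze_map.flatten(1), path_map.flatten(1)], 1)  # (N, 2*H*W)
    maps = seq.reshape(N, -1, H, W)  # (N, 2, H, W)
    return maps[:, 0], maps[:, 1]  # the recovered maze and path maps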
668 def map2seq(self, *m):
669 return torch.cat([x.flatten(1) for x in m], 1)
671 def seq2map(self, s):
672 s = s.reshape(s.size(0), -1, self.height, self.width)
673 return (s[:, k] for k in range(s.size(1)))
683 device=torch.device("cpu"),
687 self.batch_size = batch_size
692 train_mazes, train_paths, _ = maze.create_maze_data(
697 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
699 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
701 test_mazes, test_paths, _ = maze.create_maze_data(
706 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
708 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
710 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
712 def batches(self, split="train", nb_to_use=-1, desc=None):
713 assert split in {"train", "test"}
714 input = self.train_input if split == "train" else self.test_input
716 input = input[:nb_to_use]
718 desc = f"epoch-{split}"
719 for batch in tqdm.tqdm(
720 input.split(self.batch_size), dynamic_ncols=True, desc=desc
724 def vocabulary_size(self):
728 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
730 nb_total, nb_correct = 0, 0
732 self.width * self.height,
733 self.width * self.height,
738 for input in self.batches(split, nb_to_use):
739 result = input.clone()
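# In these sequences the first height * width tokens encode the maze map and
# the remaining height * width tokens encode the path; only the path part is
# blanked and regenerated.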
740 ar_mask = result.new_zeros(result.size())
741 ar_mask[:, self.height * self.width :] = 1
742 result *= 1 - ar_mask
743 masked_inplace_autoregression(
748 deterministic_synthesis,
749 progress_bar_desc=None,
752 mazes, paths = self.seq2map(result)
753 path_correctness = maze.path_correctness(mazes, paths)
754 nb_correct += path_correctness.long().sum()
755 nb_total += mazes.size(0)
757 optimal_path_lengths = (
758 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
760 predicted_path_lengths = (
761 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
763 optimal_path_lengths = optimal_path_lengths[path_correctness]
764 predicted_path_lengths = predicted_path_lengths[path_correctness]
765 count[optimal_path_lengths, predicted_path_lengths] += 1
771 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
774 return nb_total, nb_correct, count
777 self, n_epoch, model, result_dir, logger, deterministic_synthesis
779 train_nb_total, train_nb_correct, count = self.compute_error(
783 deterministic_synthesis=deterministic_synthesis,
786 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
789 test_nb_total, test_nb_correct, count = self.compute_error(
793 deterministic_synthesis=deterministic_synthesis,
796 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
799 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
801 if count is not None:
802 proportion_optimal = count.diagonal().sum().float() / count.sum()
803 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
805 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
807 for i in range(count.size(0)):
808 for j in range(count.size(1)):
809 eol = " " if j < count.size(1) - 1 else "\n"
810 f.write(f"{count[i,j]}{eol}")
812 input = self.test_input[:48]
813 result = input.clone()
814 ar_mask = result.new_zeros(result.size())
815 ar_mask[:, self.height * self.width :] = 1
816 result *= 1 - ar_mask
817 masked_inplace_autoregression(
822 deterministic_synthesis,
826 mazes, paths = self.seq2map(input)
827 _, predicted_paths = self.seq2map(result)
829 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
834 predicted_paths=predicted_paths,
835 path_correct=maze.path_correctness(mazes, predicted_paths),
836 path_optimal=maze.path_optimality(paths, predicted_paths),
838 logger(f"wrote {filename}")
841 ######################################################################
858 device=torch.device("cpu"),
862 self.batch_size = batch_size
866 self.prompt_length = prompt_length
868 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
877 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
887 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
889 def batches(self, split="train", nb_to_use=-1, desc=None):
890 assert split in {"train", "test"}
891 input = self.train_input if split == "train" else self.test_input
893 input = input[:nb_to_use]
895 desc = f"epoch-{split}"
896 for batch in tqdm.tqdm(
897 input.split(self.batch_size), dynamic_ncols=True, desc=desc
901 def vocabulary_size(self):
905 self, n_epoch, model, result_dir, logger, deterministic_synthesis
907 def compute_nb_correct(input, prior_visits):
908 result = input.clone()
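# The mask below selects every second position (the even indices) from index
# 2 * prompt_length onward; those positions are blanked and regenerated, and
# the accuracy is measured only where the snake revisits an already-seen cell
# (prior_visits > 0).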
909 i = torch.arange(result.size(1), device=result.device)[None, :]
911 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
915 result *= 1 - ar_mask
917 masked_inplace_autoregression(
922 deterministic_synthesis,
926 nb_total = ((prior_visits > 0) * ar_mask).sum()
928 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
930 return nb_total, nb_correct
932 test_nb_total, test_nb_correct = compute_nb_correct(
933 self.test_input[:1000], self.test_prior_visits[:1000]
937 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
940 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
943 ######################################################################
959 fraction_values_for_train=None,
960 device=torch.device("cpu"),
964 self.batch_size = batch_size
965 self.nb_steps = nb_steps
966 self.nb_stacks = nb_stacks
967 self.nb_digits = nb_digits
970 if fraction_values_for_train is None:
971 values_for_train = None
972 values_for_test = None
974 all = torch.randperm(10**nb_digits)
975 nb_for_train = int(all.size(0) * fraction_values_for_train)
976 values_for_train = all[:nb_for_train]
977 values_for_test = all[nb_for_train:]
979 self.train_input, self.train_stack_counts = stack.generate_sequences(
988 self.test_input, self.test_stack_counts = stack.generate_sequences(
997 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
998 counts = self.test_stack_counts.flatten()[i.flatten()]
999 counts = F.one_hot(counts).sum(0)
1000 logger(f"test_pop_stack_counts {counts}")
1002 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1004 def batches(self, split="train", nb_to_use=-1, desc=None):
1005 assert split in {"train", "test"}
1006 input = self.train_input if split == "train" else self.test_input
1008 input = input[:nb_to_use]
1010 desc = f"epoch-{split}"
1011 for batch in tqdm.tqdm(
1012 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1016 def vocabulary_size(self):
1017 return self.nb_codes
1019 def produce_results(
1020 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1022 def compute_nb_correct(input):
1023 result = input.clone()
1024 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
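# remove_popped_values() overwrites the digits produced by the pop
# operations, so (result != input) marks exactly the positions the model has
# to regenerate.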
1025 ar_mask = (result != input).long()
1026 masked_inplace_autoregression(
1031 deterministic_synthesis,
1035 errors = ((result != input).long() * ar_mask).reshape(
1036 -1, 1 + self.nb_digits
1038 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
1040 nb_total = ar_mask.max(1).values.sum()
1041 nb_correct = nb_total - errors.max(1).values.sum()
1043 return nb_total, nb_correct
1045 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
1048 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1051 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1053 ##############################################################
1054 # Log a few generated sequences
1055 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1056 result = input.clone()
1057 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1058 ar_mask = (result != input).long()
1060 # for n in range(result.size(0)):
1062 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1065 masked_inplace_autoregression(
1070 deterministic_synthesis,
1074 for n in range(result.size(0)):
1076 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1078 ##############################################################
1081 ######################################################################
1087 def tensorize(self, sequences):
1088 len_max = max([len(x) for x in sequences])
1094 self.token2id[str(c)]
1095 for c in s + ["<nul>"] * (len_max - len(s))
1104 def seq2str(self, seq):
1105 return " ".join([self.id2token[i] for i in seq])
1112 nb_starting_values=3,
1118 device=torch.device("cpu"),
1122 self.batch_size = batch_size
1123 self.device = device
1124 self.no_prog = no_prog
1128 nb_starting_values=nb_starting_values,
1129 nb_result_values_max=4 * nb_starting_values,
1130 max_input=max_input,
1134 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1139 nb_starting_values=nb_starting_values,
1140 nb_result_values_max=4 * nb_starting_values,
1141 max_input=max_input,
1145 for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1149 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1151 val_max = max([x if type(x) is int else 0 for x in symbols])
1152 symbols = list(filter(lambda x: type(x) is str, symbols))
1154 symbols += [str(n) for n in range(val_max + 1)]
1155 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1156 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1158 self.t_nul = self.token2id["<nul>"]
1159 self.t_input = self.token2id["<in>"]
1160 self.t_output = self.token2id["<out>"]
1161 self.t_prog = self.token2id["<prg>"]
1162 self.t_end = self.token2id["<end>"]
1164 self.train_input = self.tensorize(train_sequences)
1165 self.test_input = self.tensorize(test_sequences)
1168 # Excise the program from every train and test example
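# p is the position of the <prg> token in each sequence: everything up to and
# including <prg> is kept, the following position becomes <end>, and the rest
# is filled with <nul>.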
1169 k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
1173 ((self.train_input == self.t_prog).long() * k)
1174 .max(1, keepdim=True)
1177 self.train_input = (
1178 self.train_input * (k <= p).long()
1179 + self.t_end * (k == p + 1).long()
1180 + self.t_nul * (k > p + 1).long()
1182 k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
1186 ((self.test_input == self.t_prog).long() * k)
1187 .max(1, keepdim=True)
1191 self.test_input * (k <= p).long()
1192 + self.t_end * (k == p + 1).long()
1193 + self.t_nul * (k > p + 1).long()
1196 if logger is not None:
1197 logger(f"value_max {val_max}")
1198 for x in self.train_input[:25]:
1199 end = (x != self.t_nul).nonzero().max().item() + 1
1200 seq = [self.id2token[i.item()] for i in x[:end]]
1202 logger(f"example_seq {s}")
1204 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1206 def batches(self, split="train", nb_to_use=-1, desc=None):
1207 assert split in {"train", "test"}
1208 input = self.train_input if split == "train" else self.test_input
1210 input = input[:nb_to_use]
1212 desc = f"epoch-{split}"
1213 for batch in tqdm.tqdm(
1214 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1216 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1217 batch = batch[:, :last].to(self.device)
1220 def vocabulary_size(self):
1221 return self.nb_codes
1223 def produce_results(
1224 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1226 # --------------------------------------------------------------------
1227 def compute_nb_errors_prog(input, nb_to_log=0):
1228 result = input.clone()
1229 s = (result == self.t_prog).long()
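# The mask is 1 strictly after the first <prg> token, so the input/output
# examples are kept as the prompt and the program itself is blanked and
# regenerated.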
1230 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1231 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1233 masked_inplace_autoregression(
1238 deterministic_synthesis,
1242 sum_nb_total, sum_nb_errors = 0, 0
1243 for one_input, one_result in zip(input, result):
1244 seq = [self.id2token[i.item()] for i in one_result]
1245 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1247 sum_nb_errors += 0 if nb_errors == 0 else 1
1249 gt_seq = [self.id2token[i.item()] for i in one_input]
1250 _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1251 gt_prog = " ".join([str(x) for x in gt_prog])
1252 prog = " ".join([str(x) for x in prog])
1253 comment = "*" if nb_errors == 0 else "-"
1254 logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1255 for start_stack, target_stack, result_stack, correct in stacks:
1256 comment = "*" if correct else "-"
1257 start_stack = " ".join([str(x) for x in start_stack])
1258 target_stack = " ".join([str(x) for x in target_stack])
1259 result_stack = " ".join([str(x) for x in result_stack])
1261 f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1265 return sum_nb_total, sum_nb_errors
1267 # --------------------------------------------------------------------
1268 def compute_nb_errors_output(input, nb_to_log=0):
1269 result = input.clone()
1270 k = torch.arange(result.size(1), device=result.device)[None, :]
1272 ((result == self.t_output) * k).max(dim=1, keepdim=True).values
1275 ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
1277 ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
1278 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1280 masked_inplace_autoregression(
1285 deterministic_synthesis,
1289 sum_nb_total, sum_nb_errors = 0, 0
1290 for one_input, one_result, i, j in zip(
1291 input, result, last_output_idx, first_prog_idx
1293 seq = [self.id2token[i.item()] for i in one_result]
1295 correct = (one_input - one_result).abs().max() == 0
1296 sum_nb_errors += 0 if correct else 1
1299 self.id2token[i.item()] for i in one_result[i : j + 1]
1302 self.id2token[i.item()] for i in one_input[i : j + 1]
1304 comment = "*" if correct else "-"
1305 result_stack = " ".join([str(x) for x in result_stack])
1306 target_stack = " ".join([str(x) for x in target_stack])
1308 f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
1312 return sum_nb_total, sum_nb_errors
1314 # --------------------------------------------------------------------
1316 if not self.no_prog:
1317 test_nb_total, test_nb_errors = compute_nb_errors_prog(
1318 self.test_input[:1000].to(self.device), nb_to_log=10
1322 f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1325 logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
1327 test_nb_total, test_nb_errors = compute_nb_errors_output(
1328 self.test_input[:1000].to(self.device), nb_to_log=10
1332 f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1335 if save_attention_image is None:
1336 logger("no save_attention_image (is pycairo installed?)")
1338 ns = torch.randint(self.test_input.size(0), (1,)).item()
1339 input = self.test_input[ns : ns + 1].clone()
1340 last = (input != self.t_nul).max(0).values.nonzero().max() + 3
1341 input = input[:, :last].to(self.device)
1343 with torch.autograd.no_grad():
1346 model.record_attention(True)
1347 model(BracketedSequence(input))
1349 ram = model.retrieve_attention()
1350 model.record_attention(False)
1352 tokens_output = [self.id2token[i.item()] for i in input[0]]
1353 tokens_input = ["n/a"] + tokens_output[:-1]
1354 for n_head in range(ram[0].size(1)):
1355 filename = os.path.join(
1356 result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
1358 attention_matrices = [m[0, n_head] for m in ram]
1359 save_attention_image(
1365 # min_total_attention=0.9,
1369 logger(f"wrote {filename}")
1372 ######################################################################
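# Illustrative sketch, not part of the original code: the cumsum trick used in
# several produce_results() methods (RPL programs, expressions) to build an
# ar_mask that is 1 strictly after the first occurrence of a separator token,
# here the value 7. The helper name is hypothetical.


def _demo_mask_after_first_separator():
    x = torch.tensor([[3, 5, 7, 2, 4, 7, 1]])
    s = (x == 7).long()
    ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
    return ar_mask  # -> tensor([[0, 0, 0, 1, 1, 1, 1]])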
1379 def tensorize(self, sequences):
1380 len_max = max([len(x) for x in sequences])
1385 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1402 device=torch.device("cpu"),
1406 self.batch_size = batch_size
1407 self.device = device
1409 train_sequences = expr.generate_sequences(
1411 nb_variables=nb_variables,
1412 length=sequence_length,
1413 operand_max=operand_max,
1414 result_max=result_max,
1417 test_sequences = expr.generate_sequences(
1419 nb_variables=nb_variables,
1420 length=sequence_length,
1421 operand_max=operand_max,
1422 result_max=result_max,
1425 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1428 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1429 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1431 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1433 self.train_input = self.tensorize(train_sequences)
1434 self.test_input = self.tensorize(test_sequences)
1436 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1438 def batches(self, split="train", nb_to_use=-1, desc=None):
1439 assert split in {"train", "test"}
1440 input = self.train_input if split == "train" else self.test_input
1442 input = input[:nb_to_use]
1444 desc = f"epoch-{split}"
1445 for batch in tqdm.tqdm(
1446 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1448 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1449 batch = batch[:, :last]
1452 def vocabulary_size(self):
1453 return self.nb_codes
1455 def seq2str(self, s):
1456 return "".join([self.id2char[k.item()] for k in s])
1458 def produce_results(
1464 deterministic_synthesis,
1467 def compute_nb_correct(input):
1468 result = input.clone()
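# Same cumsum trick as in the RPL task: mask every position strictly after
# the first space character, keeping the part before it as the prompt and
# regenerating the rest.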
1469 s = (result == self.space).long()
1470 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1471 result = (1 - ar_mask) * result + ar_mask * self.filler
1472 masked_inplace_autoregression(
1477 deterministic_synthesis,
1481 nb_total = input.size(0)
1482 nb_correct = (input == result).long().min(1).values.sum()
1484 #######################################################################
1485 # Compute predicted vs. true variable values
1487 nb_delta = torch.zeros(5, dtype=torch.int64)
1490 values_input = expr.extract_results([self.seq2str(s) for s in input])
1491 values_result = expr.extract_results([self.seq2str(s) for s in result])
1493 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1495 with open(filename, "w") as f:
1496 for i, r in zip(values_input, values_result):
1497 for n, vi in i.items():
1499 f.write(f"{vi} {-1 if vr is None else vr}\n")
1501 if vr is None or vr < 0:
1505 if d >= nb_delta.size(0):
1510 ######################################################################
1512 return nb_total, nb_correct, nb_delta, nb_missed
1519 ) = compute_nb_correct(self.test_input[:10000])
1522 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1525 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1527 nb_total = test_nb_delta.sum() + test_nb_missed
1528 for d in range(test_nb_delta.size(0)):
1530 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1533 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1536 ##############################################################
1537 # Log a few generated sequences
1538 if input_file is None:
1539 input = self.test_input[:10]
1541 with open(input_file, "r") as f:
1542 sequences = [e.strip() for e in f.readlines()]
1543 sequences = [s + " " + "#" * 50 for s in sequences]
1544 input = self.tensorize(sequences)
1546 result = input.clone()
1547 s = (result == self.space).long()
1548 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1549 result = (1 - ar_mask) * result + ar_mask * self.filler
1551 for n in range(result.size(0)):
1552 logger(f"test_before {self.seq2str(result[n])}")
1554 masked_inplace_autoregression(
1559 deterministic_synthesis,
1563 correct = (1 - ar_mask) * self.space + ar_mask * input
1564 for n in range(result.size(0)):
1565 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1566 logger(f"test_after {self.seq2str(result[n])} {comment}")
1567 logger(f"truth {self.seq2str(correct[n])}")
1568 ##############################################################
1571 ######################################################################
1577 # Make a tensor from a list of strings
1578 def str2tensor(self, descr):
1579 token_descr = [s.strip().split(" ") for s in descr]
1580 l = max([len(s) for s in token_descr])
1581 token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
1582 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
1583 return torch.tensor(id_descr, device=self.device)
1585 # Make a list of strings from a tensor
1586 def tensor2str(self, x):
1587 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
1589 # Trim z to remove as many padding-token columns as possible from its left
1590 # and right. If z is a tuple, all its elements are trimmed according to the
1591 # trimming computed for its first tensor.
1592 def trim(self, z, token="#"):
1593 n = self.token2id[token]
1594 if type(z) == tuple:
1596 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1597 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1598 return tuple([t[:, a:b] for t in z])
1600 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1601 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1604 ######################
1614 device=torch.device("cpu"),
1618 self.device = device
1619 self.batch_size = batch_size
1620 self.grid_factory = grid.GridFactory(size=size)
1621 self.fraction_play = fraction_play
1623 if logger is not None:
1625 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1628 self.train_descr = self.grid_factory.generate_samples(
1629 nb=nb_train_samples,
1630 fraction_play=fraction_play,
1631 progress_bar=lambda r: tqdm.tqdm(r),
1634 self.test_descr = self.grid_factory.generate_samples(
1635 nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
1638 if fraction_play > 0:
1639 self.play_descr = self.grid_factory.generate_samples(
1640 nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
1643 self.play_descr = []
1645 # Build the tokenizer
1647 for d in [self.train_descr, self.test_descr, self.play_descr]:
1649 for t in s.strip().split(" "):
1651 # make this set a sorted list to get the same tensors given the same descriptions
1653 tokens = list(tokens)
1655 tokens = ["#"] + tokens
1656 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
1657 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
1658 self.t_nul = self.token2id["#"]
1659 self.t_true = self.token2id["true"]
1660 self.t_false = self.token2id["false"]
1661 self.t_pipe = self.token2id["|"]
1663 # Tokenize the train and test sets
1664 self.train_input = self.str2tensor(self.train_descr)
1665 self.test_input = self.str2tensor(self.test_descr)
1667 None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
1670 def batches(self, split="train"):
1671 assert split in {"train", "test"}
1672 input = self.train_input if split == "train" else self.test_input
1673 for batch in tqdm.tqdm(
1674 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1676 yield self.trim(batch)
1678 def vocabulary_size(self):
1679 return len(self.token2id)
1681 def produce_results(
1682 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1684 correct = self.test_input[:1000]
1685 result = correct.clone()
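# Only the true/false answer tokens are blanked and regenerated; the rest of
# the grid description is kept as the prompt.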
1686 ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
1687 result *= 1 - ar_mask # paranoia: blank the masked positions so nothing leaks from the ground truth
1689 logger(f"----------------------------------------------------------")
1691 for e in self.tensor2str(result[:10]):
1692 logger(f"test_before {e}")
1694 masked_inplace_autoregression(
1699 deterministic_synthesis,
1703 logger(f"----------------------------------------------------------")
1705 for e in self.tensor2str(result[:10]):
1706 logger(f"test_after {e}")
1708 logger(f"----------------------------------------------------------")
1710 nb_total = ar_mask.sum().item()
1711 nb_correct = ((correct == result).long() * ar_mask).sum().item()
1713 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
1714 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
1716 if self.play_input is not None:
1717 result = self.play_input.clone()
1718 ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
1719 result *= 1 - ar_mask # paranoia: blank the masked positions so nothing leaks from the ground truth
1721 logger(f"----------------------------------------------------------")
1723 for e in self.tensor2str(result[:10]):
1724 logger(f"play_before {e}")
1726 masked_inplace_autoregression(
1731 deterministic_synthesis,
1735 logger(f"----------------------------------------------------------")
1737 for e in self.tensor2str(result[:10]):
1738 logger(f"play_after {e}")
1740 logger(f"----------------------------------------------------------")
1743 ######################################################################
1749 ######################
1758 device=torch.device("cpu"),
1762 self.device = device
1763 self.batch_size = batch_size
1764 self.nb_samples_per_mlp = 256
1766 if logger is not None:
1768 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1771 seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
1772 nb_mlps=nb_train_samples + nb_test_samples,
1773 nb_samples=self.nb_samples_per_mlp,
1777 nb_mlps_per_batch=1024,
1780 self.train_input = seq[:nb_train_samples]
1781 self.train_q_test_set = q_test_set[:nb_train_samples]
1782 self.train_ref_test_errors = test_error[:nb_train_samples]
1783 self.test_input = seq[nb_train_samples:]
1784 self.test_q_test_set = q_test_set[nb_train_samples:]
1785 self.test_ref_test_errors = test_error[nb_train_samples:]
1787 filename = os.path.join(result_dir, f"train_errors_ref.dat")
1788 with open(filename, "w") as f:
1789 for e in self.train_ref_test_errors:
1792 filename = os.path.join(result_dir, f"test_errors_ref.dat")
1793 with open(filename, "w") as f:
1794 for e in self.test_ref_test_errors:
1797 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1799 def batches(self, split="train"):
1800 assert split in {"train", "test"}
1801 input = self.train_input if split == "train" else self.test_input
1802 for batch in tqdm.tqdm(
1803 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1807 def vocabulary_size(self):
1808 return self.nb_codes
1810 def produce_results(
1811 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1813 correct = self.test_input[:1000]
1814 result = correct.clone()
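# Positions up to and including index nb_samples_per_mlp * 3 + 1, which cover
# the quantized training set of each MLP, are kept as the prompt; the
# positions after that, which encode the quantized MLP parameters, are blanked
# and regenerated (see the q_train_set / q_params slicing below).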
1816 torch.arange(result.size(1), device=result.device)
1817 > self.nb_samples_per_mlp * 3 + 1
1819 ar_mask = ar_mask.expand_as(result)
1820 result *= 1 - ar_mask # paranoia: blank the masked positions so nothing leaks from the ground truth
1822 masked_inplace_autoregression(
1827 deterministic_synthesis,
1831 q_train_set = result[:, : self.nb_samples_per_mlp * 3]
1832 q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
1833 error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
1835 filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
1836 with open(filename, "w") as f:
1837 for e in error_test:
1841 ######################################################################