# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import os, tqdm

import torch, torchvision
from torch.nn import functional as F

from mygpt import BracketedSequence

# Problem/world generators used by the task classes below
import picoclvr, maze, snake, stack, expr, rpl, qmlp, grid

# save_attention_image requires pycairo; fall back to None when it is missing
try:
    from graph import save_attention_image
except ImportError:
    save_attention_image = None
######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )


######################################################################
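# The actual token-by-token generation is delegated to the model. As an
# illustration only -- this is a hedged sketch, NOT the mygpt implementation --
# a model-side masked_inplace_autoregression is expected to behave roughly as
# follows: for every position where ar_mask is 1, compute the logits given the
# tokens to the left, pick a token (greedy or sampled), and write it back into
# `input` before moving on to the next position.


def _sketch_model_side_autoregression(logits_fn, input, ar_mask, deterministic):
    # logits_fn(x) is assumed to return logits of shape (N, T, vocabulary_size)
    for t in range(input.size(1)):
        if ar_mask[:, t].any():
            logits = logits_fn(input)[:, t]
            if deterministic:
                new_tokens = logits.argmax(-1)
            else:
                new_tokens = torch.distributions.Categorical(logits=logits).sample()
            input[:, t] = ar_mask[:, t] * new_tokens + (1 - ar_mask[:, t]) * input[:, t]


######################################################################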
class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


class SandBox(Task):
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.problem = problem

        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
            nb_train_samples
        )
        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
            nb_test_samples
        )

        self.train_input, self.train_ar_mask = self.train_input.to(
            device
        ), self.train_ar_mask.to(device)
        self.test_input, self.test_ar_mask = self.test_input.to(
            device
        ), self.test_ar_mask.to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

        # A bit of paranoia never hurts
        assert self.nb_codes <= max_nb_codes
        assert self.train_input.min() >= 0
        assert self.test_input.min() >= 0
        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {(0,), (1,), (0, 1)}
        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {(0,), (1,), (0, 1)}

        if logger is not None:
            for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
                logger(f"train_sequences {self.problem.seq2str(s)}")
                a = "".join(["01"[x.item()] for x in a])
                logger(f"                {a}")
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, ar_mask, logger=None):
            input, ar_mask = input[:nmax], ar_mask[:nmax]
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
            )

            log_ground_truth = ar_mask.min() == 0

            if logger is not None:
                for sp, st in zip(result[:10], input[:10]):
                    logger(f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}")
                    if log_ground_truth:
                        logger(f"               {n_epoch} ground truth {self.problem.seq2str(st)}")

            nb_total, nb_correct = self.problem.compute_nb_correct(
                input, ar_mask, result
            )

            # nb_total = ar_mask.sum().item()
            # nb_correct = ((result == input).long() * ar_mask).sum().item()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(
            self.train_input, self.train_ar_mask
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(
            self.test_input, self.test_ar_mask, logger
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        if save_attention_image is None:
            logger("no save_attention_image (is pycairo installed?)")
        else:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()

            with torch.autograd.no_grad():
                # model.record_attention(True)
                model(BracketedSequence(input))
                # ram = model.retrieve_attention()
                # model.record_attention(False)

            # tokens_output = [c for c in self.problem.seq2str(input[0])]
            # tokens_input = ["n/a"] + tokens_output[:-1]
            # for n_head in range(ram[0].size(1)):
            #     filename = os.path.join(
            #         result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
            #     )
            #     attention_matrices = [m[0, n_head] for m in ram]
            #     save_attention_image(
            #         attention_matrices,
            #         # min_total_attention=0.9,
            #     )
            #     logger(f"wrote {filename}")


######################################################################
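# Hedged sketch (not part of the original code): the SandBox task above only
# assumes that `problem` provides generate_sequences(), seq2str() and
# compute_nb_correct(). A minimal toy problem respecting that interface could
# look like the class below; its name and details are illustrative assumptions.


class _ToyCopyProblem:
    # Sequences are [prefix | copy of prefix]; the model must predict the copy.
    def __init__(self, len_prefix=5, nb_symbols=10):
        self.len_prefix, self.nb_symbols = len_prefix, nb_symbols

    def generate_sequences(self, nb):
        prefix = torch.randint(self.nb_symbols, (nb, self.len_prefix))
        sequences = torch.cat([prefix, prefix], dim=1)
        ar_mask = torch.cat([torch.zeros_like(prefix), torch.ones_like(prefix)], dim=1)
        return sequences, ar_mask

    def seq2str(self, seq):
        return " ".join(str(x.item()) for x in seq)

    def compute_nb_correct(self, input, ar_mask, result):
        nb_total = ar_mask.sum().item()
        nb_correct = ((result == input).long() * ar_mask).sum().item()
        return nb_total, nb_correct


######################################################################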
class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
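    # Illustrative round trip (added for exposition, never called): every
    # description is padded to the longest one with "<nul>" tokens, so
    # detensorize(tensorize(d)) returns the original strings, possibly with
    # trailing "<nul>" appended to the shorter ones.
    def _tensorize_roundtrip_example(self):
        d = ["red above green", "there is blue <sep> blue top"]
        x = self.tensorize(d)  # shape (2, 6), first row padded with <nul>
        return self.detensorize(x)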
    # Trim all the tensors in the tuple z to remove as many <nul> tokens as
    # possible on the left and the right of the first tensor. If z is a tuple,
    # all its elements are trimmed according to the trimming computed for the
    # first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
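    # Worked example (added for exposition), assuming "<nul>" has id 0:
    #     x = torch.tensor([[0, 0, 5, 7, 0], [0, 3, 0, 0, 0]])
    #     self.trim((x,))  # -> (tensor([[0, 5, 7], [3, 0, 0]]),)
    # Only the leading and trailing all-<nul> columns are removed; interior
    # <nul> tokens are kept so that the rows stay aligned.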
    ######################

        device=torch.device("cpu"),
    ):
        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(

        self.batch_size = batch_size
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given the same
        # descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            desc="test-properties",
        ):
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
                result_descr, height=self.height, width=self.width, pruner=pruner
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )
        logger(
            f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
        )
    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3

        primer = []
        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")
######################################################################


class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device

        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
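    # For exposition (added comment and unused helper): each MNIST image is
    # flattened into a sequence of 28 * 28 = 784 tokens whose values are the
    # raw pixel intensities 0..255, so the vocabulary the model sees is at
    # most 256 symbols and generation produces an image pixel by pixel.
    def _sequence_layout_example(self):
        seq = self.train_input[0]  # shape (784,), values in [0, 255]
        image = seq.reshape(28, 28)  # back to a 2d image
        return seq.size(0), int(seq.max()), image.shape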
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return 256

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
        )
        logger(f"wrote {image_name}")
######################################################################


class Maze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
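    # Illustrative round trip (added for exposition, never called): map2seq
    # flattens each height x width map and concatenates them along the token
    # dimension, and seq2map undoes it.
    def _map_seq_roundtrip_example(self):
        m = torch.randint(10, (2, self.height, self.width))
        p = torch.randint(10, (2, self.height, self.width))
        s = self.map2seq(m, p)  # shape (2, 2 * height * width)
        m2, p2 = self.seq2map(s)
        assert (m2 == m).all() and (p2 == p).all()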
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        count = count[
            : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
        ]

        return nb_total, nb_correct, count
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            model,
            "train",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            model,
            "test",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        logger(f"wrote {filename}")
######################################################################


class Snake(Task):
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
            ar_mask = torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0).long()
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            nb_total = ((prior_visits > 0) * ar_mask).sum()

            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct
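        # Illustration (added comment): with prompt_length = 3, positions
        # 0..5 form the prompt and are never regenerated; beyond that only
        # the even positions 6, 8, 10, ... are masked and predicted, while
        # the odd ones are kept from the ground truth.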
        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")


######################################################################


class Stack(Task):
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]

        self.train_input, self.train_stack_counts = stack.generate_sequences(
        self.test_input, self.test_stack_counts = stack.generate_sequences(

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct
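        # Illustration (added comment): the reshape above assumes the sequence
        # packs each value as one marker token followed by nb_digits digit
        # tokens, so reshaping the per-token error mask to (-1, 1 + nb_digits)
        # groups every value with its digits; a value only counts as correct
        # when none of its masked tokens is wrong, hence the max over dim 1.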
        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################
######################################################################


class RPL(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [self.token2id[str(c)] for c in s + ["<nul>"] * (len_max - len(s))]
                for s in sequences
            ],
            device=self.device,
        )

    def seq2str(self, seq):
        return " ".join([self.id2token[i] for i in seq])

        nb_starting_values=3,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.no_prog = no_prog

                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")

                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")

        symbols = list(
            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
        )
        val_max = max([x if type(x) is int else 0 for x in symbols])
        symbols = list(filter(lambda x: type(x) is str, symbols))
        symbols += [str(n) for n in range(val_max + 1)]
        self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])

        self.t_nul = self.token2id["<nul>"]
        self.t_input = self.token2id["<in>"]
        self.t_output = self.token2id["<out>"]
        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)
        # Excise the program from every train and test example
        k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
            None, :
        ]
        p = ((self.train_input == self.t_prog).long() * k).max(1, keepdim=True).values
        self.train_input = (
            self.train_input * (k <= p).long()
            + self.t_end * (k == p + 1).long()
            + self.t_nul * (k > p + 1).long()
        )

        k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
            None, :
        ]
        p = ((self.test_input == self.t_prog).long() * k).max(1, keepdim=True).values
        self.test_input = (
            self.test_input * (k <= p).long()
            + self.t_end * (k == p + 1).long()
            + self.t_nul * (k > p + 1).long()
        )
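        # Worked example (added for exposition, with a schematic made-up row):
        #     <in> 3 1 <out> 4 2 <prg> dup add <end> <nul> ...
        # With k = 0, 1, 2, ... the positions and p the position of <prg>
        # (the largest k where the token is <prg>), the three masks keep
        # everything up to and including <prg>, overwrite position p + 1
        # with <end>, and fill the rest with <nul>:
        #     <in> 3 1 <out> 4 2 <prg> <end> <nul> <nul> ...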
        if logger is not None:
            logger(f"value_max {val_max}")
            for x in self.train_input[:25]:
                end = (x != self.t_nul).nonzero().max().item() + 1
                seq = [self.id2token[i.item()] for i in x[:end]]
                s = " ".join(seq)
                logger(f"example_seq {s}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
            batch = batch[:, :last].to(self.device)
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        # --------------------------------------------------------------------
        def compute_nb_errors_prog(input, nb_to_log=0):
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result in zip(input, result):
                seq = [self.id2token[i.item()] for i in one_result]
                nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                sum_nb_total += 1
                sum_nb_errors += 0 if nb_errors == 0 else 1
                if nb_to_log > 0:
                    gt_seq = [self.id2token[i.item()] for i in one_input]
                    _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                    gt_prog = " ".join([str(x) for x in gt_prog])
                    prog = " ".join([str(x) for x in prog])
                    comment = "*" if nb_errors == 0 else "-"
                    logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
                    for start_stack, target_stack, result_stack, correct in stacks:
                        comment = "*" if correct else "-"
                        start_stack = " ".join([str(x) for x in start_stack])
                        target_stack = " ".join([str(x) for x in target_stack])
                        result_stack = " ".join([str(x) for x in result_stack])
                        logger(
                            f"  {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
                        )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------
        def compute_nb_errors_output(input, nb_to_log=0):
            result = input.clone()
            k = torch.arange(result.size(1), device=result.device)[None, :]
            last_output_idx = (
                ((result == self.t_output) * k).max(dim=1, keepdim=True).values
            )
            first_prog_idx = (
                ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
            )
            ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result, i, j in zip(
                input, result, last_output_idx, first_prog_idx
            ):
                seq = [self.id2token[i.item()] for i in one_result]
                sum_nb_total += 1
                correct = (one_input - one_result).abs().max() == 0
                sum_nb_errors += 0 if correct else 1
                if nb_to_log > 0:
                    result_stack = [
                        self.id2token[i.item()] for i in one_result[i : j + 1]
                    ]
                    target_stack = [
                        self.id2token[i.item()] for i in one_input[i : j + 1]
                    ]
                    comment = "*" if correct else "-"
                    result_stack = " ".join([str(x) for x in result_stack])
                    target_stack = " ".join([str(x) for x in target_stack])
                    logger(
                        f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
                    )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------

        if not self.no_prog:
            test_nb_total, test_nb_errors = compute_nb_errors_prog(
                self.test_input[:1000].to(self.device), nb_to_log=10
            )
            logger(
                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
            )
            logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")

        test_nb_total, test_nb_errors = compute_nb_errors_output(
            self.test_input[:1000].to(self.device), nb_to_log=10
        )
        logger(
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        )
        if save_attention_image is None:
            logger("no save_attention_image (is pycairo installed?)")
        else:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()
            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
            input = input[:, :last].to(self.device)

            with torch.autograd.no_grad():
                model.record_attention(True)
                model(BracketedSequence(input))
                ram = model.retrieve_attention()
                model.record_attention(False)

            tokens_output = [self.id2token[i.item()] for i in input[0]]
            tokens_input = ["n/a"] + tokens_output[:-1]
            for n_head in range(ram[0].size(1)):
                filename = os.path.join(
                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
                )
                attention_matrices = [m[0, n_head] for m in ram]
                save_attention_image(
                    filename,
                    tokens_input,
                    tokens_output,
                    attention_matrices,
                    # min_total_attention=0.9,
                )
                logger(f"wrote {filename}")
######################################################################


class Expr(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [[self.char2id[c] for c in s + "#" * (len_max - len(s))] for s in sequences],
            device=self.device,
        )

        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, input_file=None
    ):
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()
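            # Added note: (input == result) is a per-token match mask, so
            # .min(1) is 1 for a row only when every position matches; the
            # sum therefore counts sequences that are exactly right, not
            # individual tokens.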
            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed

        test_nb_total, test_nb_correct, test_nb_delta, test_nb_missed = compute_nb_correct(
            self.test_input[:10000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )
        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after  {self.seq2str(result[n])} {comment}")
            logger(f"truth       {self.seq2str(correct[n])}")
        ##############################################################
######################################################################


class Grid(Task):
    # Make a tensor from a list of strings
    def str2tensor(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def tensor2str(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim all the tensors in the tuple z to remove as many "#" tokens as
    # possible on the left and the right of the first tensor. If z is a tuple,
    # all its elements are trimmed according to the trimming computed for the
    # first one.
    def trim(self, z, token="#"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
    ######################

        device=torch.device("cpu"),
    ):
        self.device = device
        self.batch_size = batch_size
        self.grid_factory = grid.GridFactory(size=size)

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = self.grid_factory.generate_samples(
            nb_train_samples, lambda r: tqdm.tqdm(r)
        )
        self.test_descr = self.grid_factory.generate_samples(
            nb_test_samples, lambda r: tqdm.tqdm(r)
        )

        # Build the tokenizer
        tokens = set()
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given the same
        # descriptions
        tokens = list(tokens)
        tokens.sort()
        tokens = ["#"] + tokens
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_nul = self.token2id["#"]
        self.t_true = self.token2id["true"]
        self.t_false = self.token2id["false"]

        # Tokenize the train and test sets
        self.train_input = self.str2tensor(self.train_descr)
        self.test_input = self.str2tensor(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        correct = self.test_input[:1000]
        result = correct.clone()
        ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa

        logger("----------------------------------------------------------")

        for e in self.tensor2str(result[:10]):
            logger(f"test_before {e}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        logger("----------------------------------------------------------")

        for e in self.tensor2str(result[:10]):
            logger(f"test_after  {e}")

        logger("----------------------------------------------------------")

        nb_total = ar_mask.sum().item()
        nb_correct = ((correct == result).long() * ar_mask).sum().item()

        logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
        logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
######################################################################


class QMLP(Task):
    ######################

        device=torch.device("cpu"),
    ):
        self.device = device
        self.batch_size = batch_size
        self.nb_samples_per_mlp = 256

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
            nb_mlps=nb_train_samples + nb_test_samples,
            nb_samples=self.nb_samples_per_mlp,
            nb_mlps_per_batch=1024,
        )

        self.train_input = seq[:nb_train_samples]
        self.train_q_test_set = q_test_set[:nb_train_samples]
        self.train_ref_test_errors = test_error[:nb_train_samples]
        self.test_input = seq[nb_train_samples:]
        self.test_q_test_set = q_test_set[nb_train_samples:]
        self.test_ref_test_errors = test_error[nb_train_samples:]

        filename = os.path.join(result_dir, "train_errors_ref.dat")
        with open(filename, "w") as f:
            for e in self.train_ref_test_errors:
                f.write(f"{e}\n")

        filename = os.path.join(result_dir, "test_errors_ref.dat")
        with open(filename, "w") as f:
            for e in self.test_ref_test_errors:
                f.write(f"{e}\n")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        correct = self.test_input[:1000]
        result = correct.clone()
        ar_mask = (
            torch.arange(result.size(1), device=result.device)
            > self.nb_samples_per_mlp * 3 + 1
        ).long()
        ar_mask = ar_mask.expand_as(result)
        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
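        # Added note: each of the nb_samples_per_mlp training points occupies
        # 3 tokens (presumably two quantized input coordinates and a label),
        # so the first nb_samples_per_mlp * 3 tokens describe the MLP's
        # training set, one separator token follows, and the remainder holds
        # the quantized MLP parameters that qmlp.evaluate_q_params() decodes
        # and scores on the held-out q_test_set.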
        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
        with open(filename, "w") as f:
            for e in error_test:
                f.write(f"{e}\n")


######################################################################