# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/
# Written by Francois Fleuret <francois@fleuret.org>
import torch, torchvision
from torch.nn import functional as F
from mygpt import BracketedSequence
# from graph import save_attention_image
save_attention_image = None
######################################################################
def masked_inplace_autoregression(
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
    assert input.size() == ar_mask.size()
    batches = zip(input.split(batch_size), ar_mask.split(batch_size))
    if progress_bar_desc is not None:
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
    with torch.autograd.no_grad():
        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
######################################################################
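# Illustrative usage sketch, not part of the original file: the helper above
# regenerates, token by token, the positions of `input` where `ar_mask` is 1
# and leaves the other positions untouched. Assuming the elided leading
# parameters are (model, batch_size, input, ar_mask), as the body suggests, a
# hypothetical call could look like:
#
#     input = torch.zeros(4, 16, dtype=torch.int64)  # 4 sequences of 16 tokens
#     ar_mask = torch.zeros_like(input)
#     ar_mask[:, 8:] = 1  # keep the first half as a prompt, regenerate the rest
#     masked_inplace_autoregression(
#         model,
#         batch_size=4,
#         input=input,
#         ar_mask=ar_mask,
#         deterministic_synthesis=False,
#         device=input.device,
#     )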
    def batches(self, split="train"):
    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
class TaskFromFile(Task):
    def tensorize(self, pairs):
        len_max = max([len(x[0]) for x in pairs])
                        [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
        pred_mask = torch.cat(
                        [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
        return input, pred_mask
    # Trim z to remove as many tokens as possible from the left and right,
    # based on the content of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed for the first.
    def trim(self, z, token="#"):
        n = self.char2id[token]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
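    # Illustrative example, not part of the original code: with "#" as the
    # padding token, a batch such as
    #
    #     # # a b #
    #     # c d # #
    #
    # has only its first and last columns made entirely of padding, so trim()
    # keeps columns 1..3 and returns
    #
    #     # a b
    #     c d #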
        device=torch.device("cpu"),
        self.batch_size = batch_size
        def read_file(filename, nb=-1):
            with open(filename, "r") as f:
                    sequence = f.readline().strip()
                    pred_mask = f.readline().strip()
                    assert len(sequence) == len(pred_mask)
                    assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
                    pairs.append((sequence, pred_mask))
                assert len(pairs) == nb
        train_pairs = read_file(train_filename, nb_train_samples)
        test_pairs = read_file(test_filename, nb_test_samples)
        symbols = ["#"] + list(
            set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
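        # Illustrative note, not part of the original code: since "#" is
        # prepended to the symbol list, the padding character always gets id 0.
        # For sequences over {"a", "b"}, symbols would be ["#", "a", "b"]
        # (up to set ordering), giving e.g. char2id == {"#": 0, "a": 1, "b": 2}.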
        self.train_input, self.train_pred_masks = self.tensorize(train_pairs)
        self.test_input, self.test_pred_masks = self.tensorize(test_pairs)
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            yield self.trim(batch).to(self.device)
    def vocabulary_size(self):
        return len(self.char2id)
    def tensor2str(self, t):
        return ["".join([self.id2char[x.item()] for x in s]) for s in t]
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        correct = self.trim(self.test_input[:1000]).to(self.device)
        result = correct.clone()
        pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
        ar_mask = (pred_mask > 0).long()
        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
        logger(f"----------------------------------------------------------")
        for e in self.tensor2str(result[:50]):
            logger(f"test_before {e}")
        masked_inplace_autoregression(
            deterministic_synthesis,
        logger(f"----------------------------------------------------------")
        for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
            logger(f"test_after {e}")
            logger(f"correct {c}")
        logger(f"----------------------------------------------------------")
        err_mask = (pred_mask == 2).long()
        nb_total = err_mask.sum().item()
        nb_correct = ((correct == result).long() * err_mask).sum().item()
        logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
        logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
        device=torch.device("cpu"),
        self.batch_size = batch_size
        self.problem = problem
        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
        self.train_input, self.train_ar_mask = self.train_input.to(
        ), self.train_ar_mask.to(device)
        self.test_input, self.test_ar_mask = self.test_input.to(
        ), self.test_ar_mask.to(device)
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
        # A bit of paranoia never hurts
        assert self.nb_codes <= max_nb_codes
        assert self.train_input.min() >= 0
        assert self.test_input.min() >= 0
        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
        if logger is not None:
            for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
                logger(f"train_sequences {self.problem.seq2str(s)}")
                a = "".join(["01"[x.item()] for x in a])
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
        def compute_accuracy(input, ar_mask, logger=None):
            input, ar_mask = input[:nmax], ar_mask[:nmax]
            result = input.clone() * (1 - ar_mask)
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,
            log_ground_truth = ar_mask.min() == 0
            if logger is not None:
                for sp, st in zip(result[:10], input[:10]):
                        f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
                            f" {n_epoch} ground truth {self.problem.seq2str(st)}"
            nb_total, nb_correct = self.problem.compute_nb_correct(
                input, ar_mask, result
            # nb_total = ar_mask.sum().item()
            # nb_correct = ((result == input).long() * ar_mask).sum().item()
            return nb_total, nb_correct
        train_nb_total, train_nb_correct = compute_accuracy(
            self.train_input, self.train_ar_mask
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        test_nb_total, test_nb_correct = compute_accuracy(
            self.test_input, self.test_ar_mask, logger
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        if save_attention_image is not None:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()
            with torch.autograd.no_grad():
                # model.record_attention(True)
                model(BracketedSequence(input))
                # ram = model.retrieve_attention()
                # model.record_attention(False)
            # tokens_output = [c for c in self.problem.seq2str(input[0])]
            # tokens_input = ["n/a"] + tokens_output[:-1]
            # for n_head in range(ram[0].size(1)):
            #     filename = os.path.join(
            #         result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
            #     attention_matrices = [m[0, n_head] for m in ram]
            #     save_attention_image(
            #         attention_matrices,
            ##min_total_attention=0.9,
            #     logger(f"wrote {filename}")
class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)
    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim z to remove as many tokens as possible from the left and right,
    # based on the content of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed for the first.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
    ######################
        device=torch.device("cpu"),
        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(
        self.batch_size = batch_size
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval
        if logger is not None:
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
                for t in s.strip().split(" "):
        # make this set a sorted list to get the same tensors given
        tokens = list(tokens)
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
            yield self.trim(batch)
    def vocabulary_size(self):
        return len(self.token2id)
    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            desc=f"test-properties",
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
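            # The cumulative sum above makes ar_mask equal to 1 from the first
            # <img> token onward, so the whole image part of each sequence is
            # nulled out and then regenerated autoregressively below.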
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,
            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)
        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)
        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
            f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
    ######################################################################
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )
        nb_tokens_to_generate = self.height * self.width + 3
        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
            primer += [primer_descr + " <img>"] * nb_per_primer
        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            deterministic_synthesis,
        result_descr = self.detensorize(result)
        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)
        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        logger(f"wrote {image_name}")
######################################################################
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            deterministic_synthesis,
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
        logger(f"wrote {image_name}")
######################################################################
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)
    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
        device=torch.device("cpu"),
        self.batch_size = batch_size
        train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
        test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
    def vocabulary_size(self):
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
        nb_total, nb_correct = 0, 0
            self.width * self.height,
            self.width * self.height,
        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)
            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1
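            # Restricted to the mazes whose predicted path is correct, `count`
            # accumulates a 2D histogram indexed by (optimal path length,
            # predicted path length); its diagonal therefore counts the
            # predictions that are also optimal.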
            : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
        return nb_total, nb_correct, count
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        train_nb_total, train_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        test_nb_total, test_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")
        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            deterministic_synthesis,
        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)
        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        logger(f"wrote {filename}")
######################################################################
        device=torch.device("cpu"),
        self.batch_size = batch_size
        self.prompt_length = prompt_length
        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
            result *= 1 - ar_mask
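            # With the mask built above, only the even positions from index
            # 2 * prompt_length onward are blanked and regenerated; the odd
            # positions and the whole prompt are left untouched.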
            masked_inplace_autoregression(
                deterministic_synthesis,
            nb_total = ((prior_visits > 0) * ar_mask).sum()
            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
            return nb_total, nb_correct
        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
######################################################################
        fraction_values_for_train=None,
        device=torch.device("cpu"),
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits
        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]
        self.train_input, self.train_stack_counts = stack.generate_sequences(
        self.test_input, self.test_stack_counts = stack.generate_sequences(
        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                deterministic_synthesis,
            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
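            # Each stack value spans 1 + nb_digits tokens, so errors and
            # ar_mask are folded into groups of that size; a value is counted
            # as correct only if none of its tokens differs from the reference.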
            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()
            return nb_total, nb_correct
        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()
        # for n in range(result.size(0)):
        #     f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        masked_inplace_autoregression(
            deterministic_synthesis,
        for n in range(result.size(0)):
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        ##############################################################
######################################################################
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
                            self.token2id[str(c)]
                            for c in s + ["<nul>"] * (len_max - len(s))
    def seq2str(self, seq):
        return " ".join([self.id2token[i] for i in seq])
        nb_starting_values=3,
        device=torch.device("cpu"),
        self.batch_size = batch_size
        self.device = device
        self.no_prog = no_prog
                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
        val_max = max([x if type(x) is int else 0 for x in symbols])
        symbols = list(filter(lambda x: type(x) is str, symbols))
        symbols += [str(n) for n in range(val_max + 1)]
        self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])
        self.t_nul = self.token2id["<nul>"]
        self.t_input = self.token2id["<in>"]
        self.t_output = self.token2id["<out>"]
        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]
        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)
            # Excise the program from every train and test example
            k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
                ((self.train_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
            self.train_input = (
                self.train_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
            k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
                ((self.test_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
                self.test_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
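            # Illustrative note, not in the original code: p is the per-row
            # index of the <prg> token, so the three-term sums above keep every
            # token up to and including <prg>, write <end> right after it, and
            # pad the rest with <nul>. A schematic sequence such as
            #     <in> 1 2 <out> 3 4 <prg> dup add <end>
            # would, for instance, become
            #     <in> 1 2 <out> 3 4 <prg> <end> <nul> <nul>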
        if logger is not None:
            logger(f"value_max {val_max}")
            for x in self.train_input[:25]:
                end = (x != self.t_nul).nonzero().max().item() + 1
                seq = [self.id2token[i.item()] for i in x[:end]]
                logger(f"example_seq {s}")
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
            batch = batch[:, :last].to(self.device)
    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        # --------------------------------------------------------------------
        def compute_nb_errors_prog(input, nb_to_log=0):
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
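            # s marks the <prg> token(s), and (s.cumsum(dim=1) - s).clamp(...)
            # is 1 strictly after the first <prg>, so the program body is
            # blanked and regenerated while the prompt, including <prg> itself,
            # is kept.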
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                deterministic_synthesis,
            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result in zip(input, result):
                seq = [self.id2token[i.item()] for i in one_result]
                nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                sum_nb_errors += 0 if nb_errors == 0 else 1
                    gt_seq = [self.id2token[i.item()] for i in one_input]
                    _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                    gt_prog = " ".join([str(x) for x in gt_prog])
                    prog = " ".join([str(x) for x in prog])
                    comment = "*" if nb_errors == 0 else "-"
                    logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
                    for start_stack, target_stack, result_stack, correct in stacks:
                        comment = "*" if correct else "-"
                        start_stack = " ".join([str(x) for x in start_stack])
                        target_stack = " ".join([str(x) for x in target_stack])
                        result_stack = " ".join([str(x) for x in result_stack])
                            f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------
        def compute_nb_errors_output(input, nb_to_log=0):
            result = input.clone()
            k = torch.arange(result.size(1), device=result.device)[None, :]
                ((result == self.t_output) * k).max(dim=1, keepdim=True).values
                ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
            ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
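            # last_output_idx and first_prog_idx hold, per row, the index of
            # the last <out> token and of the <prg> token, so ar_mask selects
            # exactly the tokens strictly between them, i.e. the final output
            # values the model has to predict.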
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                deterministic_synthesis,
            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result, i, j in zip(
                input, result, last_output_idx, first_prog_idx
                seq = [self.id2token[i.item()] for i in one_result]
                correct = (one_input - one_result).abs().max() == 0
                sum_nb_errors += 0 if correct else 1
                        self.id2token[i.item()] for i in one_result[i : j + 1]
                        self.id2token[i.item()] for i in one_input[i : j + 1]
                    comment = "*" if correct else "-"
                    result_stack = " ".join([str(x) for x in result_stack])
                    target_stack = " ".join([str(x) for x in target_stack])
                        f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------
        if not self.no_prog:
            test_nb_total, test_nb_errors = compute_nb_errors_prog(
                self.test_input[:1000].to(self.device), nb_to_log=10
                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
            logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
        test_nb_total, test_nb_errors = compute_nb_errors_output(
            self.test_input[:1000].to(self.device), nb_to_log=10
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        if save_attention_image is None:
            logger("no save_attention_image (is pycairo installed?)")
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()
            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
            input = input[:, :last].to(self.device)
            with torch.autograd.no_grad():
                model.record_attention(True)
                model(BracketedSequence(input))
            ram = model.retrieve_attention()
            model.record_attention(False)
            tokens_output = [self.id2token[i.item()] for i in input[0]]
            tokens_input = ["n/a"] + tokens_output[:-1]
            for n_head in range(ram[0].size(1)):
                filename = os.path.join(
                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
                attention_matrices = [m[0, n_head] for m in ram]
                save_attention_image(
                    # min_total_attention=0.9,
                logger(f"wrote {filename}")
######################################################################
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
                        [self.char2id[c] for c in s + "#" * (len_max - len(s))]
        device=torch.device("cpu"),
        self.batch_size = batch_size
        self.device = device
        train_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        test_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
        self.filler, self.space = self.char2id["#"], self.char2id[" "]
        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
    def vocabulary_size(self):
        return self.nb_codes
    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])
    def produce_results(
        deterministic_synthesis,
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                deterministic_synthesis,
            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()
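            # min(1) over the per-token equality means a sequence counts as
            # correct only if every single token matches the reference, so
            # nb_correct is a count of exactly-solved expressions.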
            #######################################################################
            # Compute predicted vs. true variable values
            nb_delta = torch.zeros(5, dtype=torch.int64)
            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])
            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        f.write(f"{vi} {-1 if vr is None else vr}\n")
                        if vr is None or vr < 0:
                            if d >= nb_delta.size(0):
            ######################################################################
            return nb_total, nb_correct, nb_delta, nb_missed
        ) = compute_nb_correct(self.test_input[:10000])
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)
        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler
        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")
        masked_inplace_autoregression(
            deterministic_synthesis,
        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################
######################################################################
    # Make a tensor from a list of strings
    def str2tensor(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)
    # Make a list of strings from a tensor
    def tensor2str(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim z to remove as many tokens as possible from the left and right,
    # based on the content of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed for the first.
    def trim(self, z, token="#"):
        n = self.token2id[token]
        if type(z) == tuple:
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
    ######################
        device=torch.device("cpu"),
        self.device = device
        self.batch_size = batch_size
        self.grid_factory = grid.GridFactory(size=size)
        self.fraction_play = fraction_play
        if logger is not None:
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
        self.train_descr = self.grid_factory.generate_samples(
            nb=nb_train_samples,
            fraction_play=fraction_play,
            progress_bar=lambda r: tqdm.tqdm(r),
        self.test_descr = self.grid_factory.generate_samples(
            nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
        if fraction_play > 0:
            self.play_descr = self.grid_factory.generate_samples(
                nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
            self.play_descr = []
        # Build the tokenizer
        for d in [self.train_descr, self.test_descr, self.play_descr]:
                for t in s.strip().split(" "):
        # make this set a sorted list to get the same tensors given
        tokens = list(tokens)
        tokens = ["#"] + tokens
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_nul = self.token2id["#"]
        self.t_true = self.token2id["true"]
        self.t_false = self.token2id["false"]
        self.t_pipe = self.token2id["|"]
        # Tokenize the train and test sets
        self.train_input = self.str2tensor(self.train_descr)
        self.test_input = self.str2tensor(self.test_descr)
            None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
            yield self.trim(batch)
    def vocabulary_size(self):
        return len(self.token2id)
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        correct = self.test_input[:1000]
        result = correct.clone()
        ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
        logger(f"----------------------------------------------------------")
        for e in self.tensor2str(result[:10]):
            logger(f"test_before {e}")
        masked_inplace_autoregression(
            deterministic_synthesis,
        logger(f"----------------------------------------------------------")
        for e in self.tensor2str(result[:10]):
            logger(f"test_after {e}")
        logger(f"----------------------------------------------------------")
        nb_total = ar_mask.sum().item()
        nb_correct = ((correct == result).long() * ar_mask).sum().item()
        logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
        logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
        if self.play_input is not None:
            result = self.play_input.clone()
            ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
            result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
            logger(f"----------------------------------------------------------")
            for e in self.tensor2str(result[:10]):
                logger(f"play_before {e}")
            masked_inplace_autoregression(
                deterministic_synthesis,
            logger(f"----------------------------------------------------------")
            for e in self.tensor2str(result[:10]):
                logger(f"play_after {e}")
            logger(f"----------------------------------------------------------")
######################################################################
    ######################
        device=torch.device("cpu"),
        self.device = device
        self.batch_size = batch_size
        self.nb_samples_per_mlp = 256
        if logger is not None:
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
            nb_mlps=nb_train_samples + nb_test_samples,
            nb_samples=self.nb_samples_per_mlp,
            nb_mlps_per_batch=1024,
        self.train_input = seq[:nb_train_samples]
        self.train_q_test_set = q_test_set[:nb_train_samples]
        self.train_ref_test_errors = test_error[:nb_train_samples]
        self.test_input = seq[nb_train_samples:]
        self.test_q_test_set = q_test_set[nb_train_samples:]
        self.test_ref_test_errors = test_error[nb_train_samples:]
        filename = os.path.join(result_dir, f"train_errors_ref.dat")
        with open(filename, "w") as f:
            for e in self.train_ref_test_errors:
        filename = os.path.join(result_dir, f"test_errors_ref.dat")
        with open(filename, "w") as f:
            for e in self.test_ref_test_errors:
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        correct = self.test_input[:1000]
        result = correct.clone()
            torch.arange(result.size(1), device=result.device)
            > self.nb_samples_per_mlp * 3 + 1
        ar_mask = ar_mask.expand_as(result)
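        # Positions up to and including index nb_samples_per_mlp * 3 + 1 (the
        # quantized training set of each MLP, three tokens per sample, plus one
        # extra token) are kept as the prompt; the remaining positions, read
        # back as q_params below, are regenerated.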
        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
        masked_inplace_autoregression(
            deterministic_synthesis,
        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
        with open(filename, "w") as f:
            for e in error_test:
######################################################################