3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
10 import torch, torchvision
13 from torch.nn import functional as F
15 from mygpt import BracketedSequence
17 # from graph import save_attention_image
18 save_attention_image = None
20 ######################################################################
23 def masked_inplace_autoregression(
28 deterministic_synthesis,
29 forbidden_tokens=None,
30 progress_bar_desc="autoregression",
31 device=torch.device("cpu"),
33 assert input.size() == ar_mask.size()
35 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
37 if progress_bar_desc is not None:
41 desc=progress_bar_desc,
42 total=(input.size(0) + batch_size - 1) // batch_size,
45 with torch.autograd.no_grad():
49 for input, ar_mask in batches:
50 model.masked_inplace_autoregression(
51 input, ar_mask, forbidden_tokens, deterministic_synthesis
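# Editor's sketch of a typical call (illustrative, not from the original file;
# the leading parameters `model`, `batch_size`, `input` and `ar_mask` are not
# shown in this excerpt and their names are assumed here; `ar_mask` is 1 at the
# positions to generate and 0 at the positions kept from `input`):
#
#     result = input.clone()
#     result *= 1 - ar_mask  # blank out the positions to be generated
#     masked_inplace_autoregression(
#         model,
#         batch_size,
#         result,
#         ar_mask,
#         deterministic_synthesis=False,
#         device=device,
#     )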
57 ######################################################################
61 def batches(self, split="train"):
64 def vocabulary_size(self):
68 self, n_epoch, model, result_dir, logger, deterministic_synthesis
73 class TaskFromFile(Task):
74 def tensorize(self, pairs, shuffle):
75 len_max = max([len(x[0]) for x in pairs])
81 [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
89 pred_mask = torch.cat(
93 [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
103 i = torch.randperm(input.size(0))
104 input = input[i].contiguous()
105 pred_mask = pred_mask[i].contiguous()
107 return input, pred_mask
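# Editor's sketch of what tensorize consumes and produces (illustrative only):
#
#     pairs = [("abc", "012"), ("a", "1")]
#     # sequences are right-padded with "#" and masks with "0" up to the longest
#     # sequence, every character is mapped through self.char2id, and the result
#     # is a pair of LongTensors of shape (len(pairs), len_max).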
109 # Trim all the tensors in the tuple z by removing as many `token` columns as
110 # possible from the left and right of the first tensor. If z is a tuple, all
111 # its elements are trimmed according to the trimming computed for the first one.
112 def trim(self, z, token="#"):
113 n = self.char2id[token]
116 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
117 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
118 return tuple([t[:, a:b] for t in z])
120 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
121 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
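# Editor's example of the effect of trim (illustrative; "#" is the padding
# token, and str2tensor stands in for any helper building the padded batch):
#
#     batch = str2tensor(["##ab##", "#cd###"])
#     self.trim(batch)   # keeps only columns 1..3 -> "#ab", "cd#"
#
# i.e. columns made only of padding on every row of the (first) tensor are
# dropped from both ends; the other tensors of a tuple are cut at the same
# columns.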
132 device=torch.device("cpu"),
134 self.batch_size = batch_size
137 def read_file(filename, nb=-1):
139 with open(filename, "r") as f:
141 sequence = f.readline().strip()
144 pred_mask = f.readline().strip()
145 assert len(sequence) == len(pred_mask)
146 assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
147 pairs.append((sequence, pred_mask))
153 assert len(pairs) == nb
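# Editor's note on the expected file format (inferred from read_file; each
# sample is two lines of equal length, e.g.):
#
#     abcd
#     0012
#
# the first line is the token sequence, the second a per-character mask whose
# characters are "0" (given), "1" (to generate) or "2" (to generate and to
# score in produce_results, where err_mask = (pred_mask == 2)).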
157 train_pairs = read_file(train_filename, nb_train_samples)
158 test_pairs = read_file(test_filename, nb_test_samples)
160 symbols = ["#"] + list(
161 set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
163 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
164 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
166 self.train_input, self.train_pred_masks = self.tensorize(
167 train_pairs, shuffle=shuffle
169 self.test_input, self.test_pred_masks = self.tensorize(
170 test_pairs, shuffle=shuffle
173 def batches(self, split="train", nb_to_use=-1, desc=None):
174 assert split in {"train", "test"}
175 input = self.train_input if split == "train" else self.test_input
177 input = input[:nb_to_use]
179 desc = f"epoch-{split}"
180 for batch in tqdm.tqdm(
181 input.split(self.batch_size), dynamic_ncols=True, desc=desc
183 yield self.trim(batch).to(self.device)
185 def vocabulary_size(self):
186 return len(self.char2id)
188 def tensor2str(self, t):
189 return ["".join([self.id2char[x.item()] for x in s]) for s in t]
192 self, n_epoch, model, result_dir, logger, deterministic_synthesis
194 correct = self.trim(self.test_input[:1000]).to(self.device)
195 result = correct.clone()
196 pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
197 ar_mask = (pred_mask > 0).long()
198 result *= 1 - ar_mask # paranoia: blank out the tokens that will be generated
200 logger(f"----------------------------------------------------------")
202 for e in self.tensor2str(result[:50]):
203 logger(f"test_before {e}")
205 masked_inplace_autoregression(
210 deterministic_synthesis,
214 logger(f"----------------------------------------------------------")
216 for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
217 logger(f"test_after {e}")
218 logger(f"correct {c}")
220 logger(f"----------------------------------------------------------")
222 err_mask = (pred_mask == 2).long()
223 nb_total = err_mask.sum().item()
224 nb_correct = ((correct == result).long() * err_mask).sum().item()
226 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
227 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
243 device=torch.device("cpu"),
248 self.batch_size = batch_size
250 self.problem = problem
252 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
255 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
259 self.train_input, self.train_ar_mask = self.train_input.to(
261 ), self.train_ar_mask.to(device)
262 self.test_input, self.test_ar_mask = self.test_input.to(
264 ), self.test_ar_mask.to(device)
266 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
268 # A bit of paranoia never hurts
269 assert self.nb_codes <= max_nb_codes
270 assert self.train_input.min() >= 0
271 assert self.test_input.min() >= 0
272 assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
277 assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
283 if logger is not None:
284 for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
285 logger(f"train_sequences {self.problem.seq2str(s)}")
286 a = "".join(["01"[x.item()] for x in a])
289 def batches(self, split="train", nb_to_use=-1, desc=None):
290 assert split in {"train", "test"}
291 input = self.train_input if split == "train" else self.test_input
293 input = input[:nb_to_use]
295 desc = f"epoch-{split}"
296 for batch in tqdm.tqdm(
297 input.split(self.batch_size), dynamic_ncols=True, desc=desc
301 def vocabulary_size(self):
305 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
307 def compute_accuracy(input, ar_mask, logger=None):
308 input, ar_mask = input[:nmax], ar_mask[:nmax]
309 result = input.clone() * (1 - ar_mask)
311 masked_inplace_autoregression(
316 deterministic_synthesis,
317 progress_bar_desc=None,
321 log_ground_truth = ar_mask.min() == 0
323 if logger is not None:
324 for sp, st in zip(result[:10], input[:10]):
326 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
330 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
333 nb_total, nb_correct = self.problem.compute_nb_correct(
334 input, ar_mask, result
337 # nb_total = ar_mask.sum().item()
338 # nb_correct = ((result == input).long() * ar_mask).sum().item()
340 return nb_total, nb_correct
342 train_nb_total, train_nb_correct = compute_accuracy(
343 self.train_input, self.train_ar_mask
347 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
350 test_nb_total, test_nb_correct = compute_accuracy(
351 self.test_input, self.test_ar_mask, logger
355 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
358 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
360 if save_attention_image is not None:
362 ns = torch.randint(self.test_input.size(0), (1,)).item()
363 input = self.test_input[ns : ns + 1].clone()
365 with torch.autograd.no_grad():
368 # model.record_attention(True)
369 model(BracketedSequence(input))
371 # ram = model.retrieve_attention()
372 # model.record_attention(False)
374 # tokens_output = [c for c in self.problem.seq2str(input[0])]
375 # tokens_input = ["n/a"] + tokens_output[:-1]
376 # for n_head in range(ram[0].size(1)):
377 # filename = os.path.join(
378 # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
380 # attention_matrices = [m[0, n_head] for m in ram]
381 # save_attention_image(
385 # attention_matrices,
387 ##min_total_attention=0.9,
391 # logger(f"wrote {filename}")
394 ######################################################################
399 class PicoCLVR(Task):
400 # Make a tensor from a list of strings
401 def tensorize(self, descr):
402 token_descr = [s.strip().split(" ") for s in descr]
403 l = max([len(s) for s in token_descr])
404 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
405 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
406 return torch.tensor(id_descr, device=self.device)
408 # Make a list of strings from a tensor
409 def detensorize(self, x):
410 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
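# Editor's round-trip sketch (illustrative; assumes the tokenizer built in
# __init__ below):
#
#     x = self.tensorize(["red above green <sep> green top <img>"])
#     self.detensorize(x)   # -> ["red above green <sep> green top <img>"]
#
# shorter descriptions in the same batch come back with trailing "<nul>"
# tokens, which the trim method below removes.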
412 # Trim all the tensors in the tuple z by removing as many `token` columns as
413 # possible from the left and right of the first tensor. If z is a tuple, all
414 # its elements are trimmed according to the trimming computed for the first one.
415 def trim(self, z, token="<nul>"):
416 n = self.token2id[token]
419 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
420 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
421 return tuple([t[:, a:b] for t in z])
423 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
424 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
427 ######################
438 device=torch.device("cpu"),
444 def generate_descr(nb, cache_suffix, pruner):
445 return picoclvr.generate(
455 self.batch_size = batch_size
457 self.pruner_train = pruner_train
458 self.pruner_eval = pruner_eval
460 if logger is not None:
462 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
465 self.train_descr = generate_descr(
466 nb_train_samples, "train", pruner=self.pruner_train
468 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
470 # Build the tokenizer
471 tokens = {"<nul>", "<img>"}
472 for d in [self.train_descr, self.test_descr]:
474 for t in s.strip().split(" "):
476 # make this set a sorted list to get the same tensors given the same descriptions
478 tokens = list(tokens)
480 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
481 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
482 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
484 # Tokenize the train and test sets
485 self.train_input = self.tensorize(self.train_descr)
486 self.test_input = self.tensorize(self.test_descr)
488 def batches(self, split="train"):
489 assert split in {"train", "test"}
490 input = self.train_input if split == "train" else self.test_input
491 for batch in tqdm.tqdm(
492 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
494 yield self.trim(batch)
496 def vocabulary_size(self):
497 return len(self.token2id)
499 def compute_missing_properties(
500 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
502 acc_nb_requested_properties = []
503 acc_nb_missing_properties = []
506 for input in tqdm.tqdm(
507 self.test_input.split(self.batch_size),
509 desc=f"test-properties",
511 result = input.clone()
512 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
513 result = (1 - ar_mask) * result + ar_mask * self.t_nul
514 masked_inplace_autoregression(
519 deterministic_synthesis,
520 progress_bar_desc=None,
524 result_descr = self.detensorize(result)
525 np = picoclvr.nb_properties(
531 nb_requested_properties, _, nb_missing_properties = zip(*np)
532 acc_nb_requested_properties += nb_requested_properties
533 acc_nb_missing_properties += nb_missing_properties
534 acc_nb_results += len(result_descr)
536 nb_requested_properties = sum(acc_nb_requested_properties)
537 nb_missing_properties = sum(acc_nb_missing_properties)
539 prefix = "" if pruner is None else "pruned_"
540 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
542 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
545 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
549 f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
552 ######################################################################
555 self, n_epoch, model, result_dir, logger, deterministic_synthesis
557 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
559 if self.pruner_eval is not None:
560 self.compute_missing_properties(n_epoch, model, self.pruner_eval)
562 nb_tokens_to_generate = self.height * self.width + 3
567 for primer_descr in [
568 "red above green <sep> green top <sep> blue right of red",
569 "there is red <sep> there is yellow <sep> there is blue",
570 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
571 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
573 primer += [primer_descr + " <img>"] * nb_per_primer
575 result = self.tensorize(primer)
576 fill = result.new_full(
577 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
579 result = torch.cat((result, fill), 1)
580 ar_mask = (result == self.t_nul).long()
581 masked_inplace_autoregression(
586 deterministic_synthesis,
589 result_descr = self.detensorize(result)
591 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
593 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
594 acc_nb_results = len(result_descr)
596 nb_requested_properties = sum(acc_nb_requested_properties)
597 nb_missing_properties = sum(acc_nb_missing_properties)
600 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
602 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
605 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
608 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
612 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
616 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
622 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
623 torchvision.utils.save_image(
624 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
626 logger(f"wrote {image_name}")
629 ######################################################################
634 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
638 self.nb_train_samples = nb_train_samples
639 self.nb_test_samples = nb_test_samples
640 self.batch_size = batch_size
642 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
643 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
644 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
645 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
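# Editor's note: each MNIST image is flattened into a sequence of 28*28 = 784
# pixel tokens carrying the raw grey-level values 0..255 (presumably the
# vocabulary size returned below), so produce_results generates complete images
# unconditionally with an all-ones ar_mask.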
647 def batches(self, split="train", nb_to_use=-1, desc=None):
648 assert split in {"train", "test"}
649 input = self.train_input if split == "train" else self.test_input
651 input = input[:nb_to_use]
653 desc = f"epoch-{split}"
654 for batch in tqdm.tqdm(
655 input.split(self.batch_size), dynamic_ncols=True, desc=desc
659 def vocabulary_size(self):
663 self, n_epoch, model, result_dir, logger, deterministic_synthesis
665 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
666 ar_mask = torch.full_like(results, 1)
667 masked_inplace_autoregression(
672 deterministic_synthesis,
675 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
676 torchvision.utils.save_image(
677 1 - results.reshape(-1, 1, 28, 28) / 255.0,
682 logger(f"wrote {image_name}")
685 ######################################################################
691 def map2seq(self, *m):
692 return torch.cat([x.flatten(1) for x in m], 1)
694 def seq2map(self, s):
695 s = s.reshape(s.size(0), -1, self.height, self.width)
696 return (s[:, k] for k in range(s.size(1)))
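# Editor's sketch of the map <-> sequence convention (illustrative):
#
#     seq = self.map2seq(mazes, paths)       # (N, 2 * height * width)
#     mazes2, paths2 = self.seq2map(seq)     # each of shape (N, height, width)
#
# the first height*width tokens of a sequence encode the maze and the rest the
# path, which is why the evaluation code below only lets the model generate
# positions >= height*width.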
706 device=torch.device("cpu"),
710 self.batch_size = batch_size
715 train_mazes, train_paths, _ = maze.create_maze_data(
720 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
722 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
724 test_mazes, test_paths, _ = maze.create_maze_data(
729 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
731 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
733 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
735 def batches(self, split="train", nb_to_use=-1, desc=None):
736 assert split in {"train", "test"}
737 input = self.train_input if split == "train" else self.test_input
739 input = input[:nb_to_use]
741 desc = f"epoch-{split}"
742 for batch in tqdm.tqdm(
743 input.split(self.batch_size), dynamic_ncols=True, desc=desc
747 def vocabulary_size(self):
751 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
753 nb_total, nb_correct = 0, 0
755 self.width * self.height,
756 self.width * self.height,
761 for input in self.batches(split, nb_to_use):
762 result = input.clone()
763 ar_mask = result.new_zeros(result.size())
764 ar_mask[:, self.height * self.width :] = 1
765 result *= 1 - ar_mask
766 masked_inplace_autoregression(
771 deterministic_synthesis,
772 progress_bar_desc=None,
775 mazes, paths = self.seq2map(result)
776 path_correctness = maze.path_correctness(mazes, paths)
777 nb_correct += path_correctness.long().sum()
778 nb_total += mazes.size(0)
780 optimal_path_lengths = (
781 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
783 predicted_path_lengths = (
784 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
786 optimal_path_lengths = optimal_path_lengths[path_correctness]
787 predicted_path_lengths = predicted_path_lengths[path_correctness]
788 count[optimal_path_lengths, predicted_path_lengths] += 1
794 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
797 return nb_total, nb_correct, count
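# Editor's note: count[i, j] is intended to tally the correctly solved mazes of
# the evaluated split whose optimal path contains i path cells and whose
# predicted path contains j, so the diagonal mass corresponds to the
# proportion_optimal value logged in produce_results below.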
800 self, n_epoch, model, result_dir, logger, deterministic_synthesis
802 train_nb_total, train_nb_correct, count = self.compute_error(
806 deterministic_synthesis=deterministic_synthesis,
809 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
812 test_nb_total, test_nb_correct, count = self.compute_error(
816 deterministic_synthesis=deterministic_synthesis,
819 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
822 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
824 if count is not None:
825 proportion_optimal = count.diagonal().sum().float() / count.sum()
826 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
828 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
830 for i in range(count.size(0)):
831 for j in range(count.size(1)):
832 eol = " " if j < count.size(1) - 1 else "\n"
833 f.write(f"{count[i,j]}{eol}")
835 input = self.test_input[:48]
836 result = input.clone()
837 ar_mask = result.new_zeros(result.size())
838 ar_mask[:, self.height * self.width :] = 1
839 result *= 1 - ar_mask
840 masked_inplace_autoregression(
845 deterministic_synthesis,
849 mazes, paths = self.seq2map(input)
850 _, predicted_paths = self.seq2map(result)
852 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
857 predicted_paths=predicted_paths,
858 path_correct=maze.path_correctness(mazes, predicted_paths),
859 path_optimal=maze.path_optimality(paths, predicted_paths),
861 logger(f"wrote {filename}")
864 ######################################################################
881 device=torch.device("cpu"),
885 self.batch_size = batch_size
889 self.prompt_length = prompt_length
891 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
900 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
910 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
912 def batches(self, split="train", nb_to_use=-1, desc=None):
913 assert split in {"train", "test"}
914 input = self.train_input if split == "train" else self.test_input
916 input = input[:nb_to_use]
918 desc = f"epoch-{split}"
919 for batch in tqdm.tqdm(
920 input.split(self.batch_size), dynamic_ncols=True, desc=desc
924 def vocabulary_size(self):
928 self, n_epoch, model, result_dir, logger, deterministic_synthesis
930 def compute_nb_correct(input, prior_visits):
931 result = input.clone()
932 i = torch.arange(result.size(1), device=result.device)[None, :]
934 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
938 result *= 1 - ar_mask
940 masked_inplace_autoregression(
945 deterministic_synthesis,
949 nb_total = ((prior_visits > 0) * ar_mask).sum()
951 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
953 return nb_total, nb_correct
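# Editor's note: only even positions at or beyond 2 * prompt_length are
# generated (ar_mask above), and among those only the ones landing on cells
# that had already been visited (prior_visits > 0) are counted, presumably
# because the remaining positions cannot be deduced from the earlier part of
# the sequence.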
955 test_nb_total, test_nb_correct = compute_nb_correct(
956 self.test_input[:1000], self.test_prior_visits[:1000]
960 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
963 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
966 ######################################################################
982 fraction_values_for_train=None,
983 device=torch.device("cpu"),
987 self.batch_size = batch_size
988 self.nb_steps = nb_steps
989 self.nb_stacks = nb_stacks
990 self.nb_digits = nb_digits
993 if fraction_values_for_train is None:
994 values_for_train = None
995 values_for_test = None
997 all = torch.randperm(10**nb_digits)
998 nb_for_train = int(all.size(0) * fraction_values_for_train)
999 values_for_train = all[:nb_for_train]
1000 values_for_test = all[nb_for_train:]
1002 self.train_input, self.train_stack_counts = stack.generate_sequences(
1011 self.test_input, self.test_stack_counts = stack.generate_sequences(
1020 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
1021 counts = self.test_stack_counts.flatten()[i.flatten()]
1022 counts = F.one_hot(counts).sum(0)
1023 logger(f"test_pop_stack_counts {counts}")
1025 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1027 def batches(self, split="train", nb_to_use=-1, desc=None):
1028 assert split in {"train", "test"}
1029 input = self.train_input if split == "train" else self.test_input
1031 input = input[:nb_to_use]
1033 desc = f"epoch-{split}"
1034 for batch in tqdm.tqdm(
1035 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1039 def vocabulary_size(self):
1040 return self.nb_codes
1042 def produce_results(
1043 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1045 def compute_nb_correct(input):
1046 result = input.clone()
1047 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1048 ar_mask = (result != input).long()
1049 masked_inplace_autoregression(
1054 deterministic_synthesis,
1058 errors = ((result != input).long() * ar_mask).reshape(
1059 -1, 1 + self.nb_digits
1061 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
1063 nb_total = ar_mask.max(1).values.sum()
1064 nb_correct = nb_total - errors.max(1).values.sum()
1066 return nb_total, nb_correct
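# Editor's note: the reshape to (-1, 1 + self.nb_digits) groups each operation
# (its marker token plus nb_digits value tokens) into one row, so a pop is
# counted as correct only if every one of its regenerated digits is correct.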
1068 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
1071 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1074 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1076 ##############################################################
1077 # Log a few generated sequences
1078 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1079 result = input.clone()
1080 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1081 ar_mask = (result != input).long()
1083 # for n in range(result.size(0)):
1085 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1088 masked_inplace_autoregression(
1093 deterministic_synthesis,
1097 for n in range(result.size(0)):
1099 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1101 ##############################################################
1104 ######################################################################
1110 def tensorize(self, sequences):
1111 len_max = max([len(x) for x in sequences])
1117 self.token2id[str(c)]
1118 for c in s + ["<nul>"] * (len_max - len(s))
1127 def seq2str(self, seq):
1128 return " ".join([self.id2token[i] for i in seq])
1135 nb_starting_values=3,
1141 device=torch.device("cpu"),
1145 self.batch_size = batch_size
1146 self.device = device
1147 self.no_prog = no_prog
1151 nb_starting_values=nb_starting_values,
1152 nb_result_values_max=4 * nb_starting_values,
1153 max_input=max_input,
1157 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1162 nb_starting_values=nb_starting_values,
1163 nb_result_values_max=4 * nb_starting_values,
1164 max_input=max_input,
1168 for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1172 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1174 val_max = max([x if type(x) is int else 0 for x in symbols])
1175 symbols = list(filter(lambda x: type(x) is str, symbols))
1177 symbols += [str(n) for n in range(val_max + 1)]
1178 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1179 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1181 self.t_nul = self.token2id["<nul>"]
1182 self.t_input = self.token2id["<in>"]
1183 self.t_output = self.token2id["<out>"]
1184 self.t_prog = self.token2id["<prg>"]
1185 self.t_end = self.token2id["<end>"]
1187 self.train_input = self.tensorize(train_sequences)
1188 self.test_input = self.tensorize(test_sequences)
1191 # Excise the program from every train and test example
1192 k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
1196 ((self.train_input == self.t_prog).long() * k)
1197 .max(1, keepdim=True)
1200 self.train_input = (
1201 self.train_input * (k <= p).long()
1202 + self.t_end * (k == p + 1).long()
1203 + self.t_nul * (k > p + 1).long()
1205 k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
1209 ((self.test_input == self.t_prog).long() * k)
1210 .max(1, keepdim=True)
1214 self.test_input * (k <= p).long()
1215 + self.t_end * (k == p + 1).long()
1216 + self.t_nul * (k > p + 1).long()
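# Editor's sketch of the excision above (illustrative): with k the position
# index and p the position of the last <prg> token of each row,
#
#     x * (k <= p)            keeps everything up to and including <prg>,
#     t_end * (k == p + 1)    writes a single <end> right after it,
#     t_nul * (k > p + 1)     pads the remainder with <nul>,
#
# so the program body that followed <prg> is removed from the sequences.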
1219 if logger is not None:
1220 logger(f"value_max {val_max}")
1221 for x in self.train_input[:25]:
1222 end = (x != self.t_nul).nonzero().max().item() + 1
1223 seq = [self.id2token[i.item()] for i in x[:end]]
1225 logger(f"example_seq {s}")
1227 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1229 def batches(self, split="train", nb_to_use=-1, desc=None):
1230 assert split in {"train", "test"}
1231 input = self.train_input if split == "train" else self.test_input
1233 input = input[:nb_to_use]
1235 desc = f"epoch-{split}"
1236 for batch in tqdm.tqdm(
1237 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1239 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1240 batch = batch[:, :last].to(self.device)
1243 def vocabulary_size(self):
1244 return self.nb_codes
1246 def produce_results(
1247 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1249 # --------------------------------------------------------------------
1250 def compute_nb_errors_prog(input, nb_to_log=0):
1251 result = input.clone()
1252 s = (result == self.t_prog).long()
1253 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1254 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1256 masked_inplace_autoregression(
1261 deterministic_synthesis,
1265 sum_nb_total, sum_nb_errors = 0, 0
1266 for one_input, one_result in zip(input, result):
1267 seq = [self.id2token[i.item()] for i in one_result]
1268 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1270 sum_nb_errors += 0 if nb_errors == 0 else 1
1272 gt_seq = [self.id2token[i.item()] for i in one_input]
1273 _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1274 gt_prog = " ".join([str(x) for x in gt_prog])
1275 prog = " ".join([str(x) for x in prog])
1276 comment = "*" if nb_errors == 0 else "-"
1277 logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1278 for start_stack, target_stack, result_stack, correct in stacks:
1279 comment = "*" if correct else "-"
1280 start_stack = " ".join([str(x) for x in start_stack])
1281 target_stack = " ".join([str(x) for x in target_stack])
1282 result_stack = " ".join([str(x) for x in result_stack])
1284 f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1288 return sum_nb_total, sum_nb_errors
1290 # --------------------------------------------------------------------
1291 def compute_nb_errors_output(input, nb_to_log=0):
1292 result = input.clone()
1293 k = torch.arange(result.size(1), device=result.device)[None, :]
1295 ((result == self.t_output) * k).max(dim=1, keepdim=True).values
1298 ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
1300 ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
1301 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1303 masked_inplace_autoregression(
1308 deterministic_synthesis,
1312 sum_nb_total, sum_nb_errors = 0, 0
1313 for one_input, one_result, i, j in zip(
1314 input, result, last_output_idx, first_prog_idx
1316 seq = [self.id2token[i.item()] for i in one_result]
1318 correct = (one_input - one_result).abs().max() == 0
1319 sum_nb_errors += 0 if correct else 1
1322 self.id2token[i.item()] for i in one_result[i : j + 1]
1325 self.id2token[i.item()] for i in one_input[i : j + 1]
1327 comment = "*" if correct else "-"
1328 result_stack = " ".join([str(x) for x in result_stack])
1329 target_stack = " ".join([str(x) for x in target_stack])
1331 f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
1335 return sum_nb_total, sum_nb_errors
1337 # --------------------------------------------------------------------
1339 if not self.no_prog:
1340 test_nb_total, test_nb_errors = compute_nb_errors_prog(
1341 self.test_input[:1000].to(self.device), nb_to_log=10
1345 f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1348 logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
1350 test_nb_total, test_nb_errors = compute_nb_errors_output(
1351 self.test_input[:1000].to(self.device), nb_to_log=10
1355 f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1358 if save_attention_image is None:
1359 logger("no save_attention_image (is pycairo installed?)")
1361 ns = torch.randint(self.test_input.size(0), (1,)).item()
1362 input = self.test_input[ns : ns + 1].clone()
1363 last = (input != self.t_nul).max(0).values.nonzero().max() + 3
1364 input = input[:, :last].to(self.device)
1366 with torch.autograd.no_grad():
1369 model.record_attention(True)
1370 model(BracketedSequence(input))
1372 ram = model.retrieve_attention()
1373 model.record_attention(False)
1375 tokens_output = [self.id2token[i.item()] for i in input[0]]
1376 tokens_input = ["n/a"] + tokens_output[:-1]
1377 for n_head in range(ram[0].size(1)):
1378 filename = os.path.join(
1379 result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
1381 attention_matrices = [m[0, n_head] for m in ram]
1382 save_attention_image(
1388 # min_total_attention=0.9,
1392 logger(f"wrote {filename}")
1395 ######################################################################
1402 def tensorize(self, sequences):
1403 len_max = max([len(x) for x in sequences])
1408 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1425 device=torch.device("cpu"),
1429 self.batch_size = batch_size
1430 self.device = device
1432 train_sequences = expr.generate_sequences(
1434 nb_variables=nb_variables,
1435 length=sequence_length,
1436 operand_max=operand_max,
1437 result_max=result_max,
1440 test_sequences = expr.generate_sequences(
1442 nb_variables=nb_variables,
1443 length=sequence_length,
1444 operand_max=operand_max,
1445 result_max=result_max,
1448 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1451 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1452 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1454 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1456 self.train_input = self.tensorize(train_sequences)
1457 self.test_input = self.tensorize(test_sequences)
1459 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1461 def batches(self, split="train", nb_to_use=-1, desc=None):
1462 assert split in {"train", "test"}
1463 input = self.train_input if split == "train" else self.test_input
1465 input = input[:nb_to_use]
1467 desc = f"epoch-{split}"
1468 for batch in tqdm.tqdm(
1469 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1471 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1472 batch = batch[:, :last]
1475 def vocabulary_size(self):
1476 return self.nb_codes
1478 def seq2str(self, s):
1479 return "".join([self.id2char[k.item()] for k in s])
1481 def produce_results(
1487 deterministic_synthesis,
1490 def compute_nb_correct(input):
1491 result = input.clone()
1492 s = (result == self.space).long()
1493 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1494 result = (1 - ar_mask) * result + ar_mask * self.filler
1495 masked_inplace_autoregression(
1500 deterministic_synthesis,
1504 nb_total = input.size(0)
1505 nb_correct = (input == result).long().min(1).values.sum()
1507 #######################################################################
1508 # Compare predicted vs. true variable values
1510 nb_delta = torch.zeros(5, dtype=torch.int64)
1513 values_input = expr.extract_results([self.seq2str(s) for s in input])
1514 values_result = expr.extract_results([self.seq2str(s) for s in result])
1516 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1518 with open(filename, "w") as f:
1519 for i, r in zip(values_input, values_result):
1520 for n, vi in i.items():
1522 f.write(f"{vi} {-1 if vr is None else vr}\n")
1524 if vr is None or vr < 0:
1528 if d >= nb_delta.size(0):
1533 ######################################################################
1535 return nb_total, nb_correct, nb_delta, nb_missed
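# Editor's note (inferred from the surrounding code): nb_delta[d] counts test
# expressions whose predicted value differs from the true one by d (with larger
# errors presumably folded into the last bin), and nb_missed counts predictions
# that could not be parsed back into a value; both feed the error_value log
# lines below.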
1542 ) = compute_nb_correct(self.test_input[:10000])
1545 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1548 logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
1550 nb_total = test_nb_delta.sum() + test_nb_missed
1551 for d in range(test_nb_delta.size(0)):
1553 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1556 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1559 ##############################################################
1560 # Log a few generated sequences
1561 if input_file is None:
1562 input = self.test_input[:10]
1564 with open(input_file, "r") as f:
1565 sequences = [e.strip() for e in f.readlines()]
1566 sequences = [s + " " + "#" * 50 for s in sequences]
1567 input = self.tensorize(sequences)
1569 result = input.clone()
1570 s = (result == self.space).long()
1571 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1572 result = (1 - ar_mask) * result + ar_mask * self.filler
1574 for n in range(result.size(0)):
1575 logger(f"test_before {self.seq2str(result[n])}")
1577 masked_inplace_autoregression(
1582 deterministic_synthesis,
1586 correct = (1 - ar_mask) * self.space + ar_mask * input
1587 for n in range(result.size(0)):
1588 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1589 logger(f"test_after {self.seq2str(result[n])} {comment}")
1590 logger(f"truth {self.seq2str(correct[n])}")
1591 ##############################################################
1594 ######################################################################
1600 # Make a tensor from a list of strings
1601 def str2tensor(self, descr):
1602 token_descr = [s.strip().split(" ") for s in descr]
1603 l = max([len(s) for s in token_descr])
1604 token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
1605 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
1606 return torch.tensor(id_descr, device=self.device)
1608 # Make a list of strings from a tensor
1609 def tensor2str(self, x):
1610 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
1612 # Trim all the tensors in the tuple z by removing as many `token` columns as
1613 # possible from the left and right of the first tensor. If z is a tuple, all
1614 # its elements are trimmed according to the trimming computed for the first one.
1615 def trim(self, z, token="#"):
1616 n = self.token2id[token]
1617 if type(z) == tuple:
1619 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1620 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1621 return tuple([t[:, a:b] for t in z])
1623 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
1624 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
1627 ######################
1637 device=torch.device("cpu"),
1641 self.device = device
1642 self.batch_size = batch_size
1643 self.grid_factory = grid.GridFactory(size=size)
1644 self.fraction_play = fraction_play
1646 if logger is not None:
1648 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1651 self.train_descr = self.grid_factory.generate_samples(
1652 nb=nb_train_samples,
1653 fraction_play=fraction_play,
1654 progress_bar=lambda r: tqdm.tqdm(r),
1657 self.test_descr = self.grid_factory.generate_samples(
1658 nb=nb_test_samples, fraction_play=0.0, progress_bar=lambda r: tqdm.tqdm(r)
1661 if fraction_play > 0:
1662 self.play_descr = self.grid_factory.generate_samples(
1663 nb=25, fraction_play=1.0, progress_bar=lambda r: tqdm.tqdm(r)
1666 self.play_descr = []
1668 # Build the tokenizer
1670 for d in [self.train_descr, self.test_descr, self.play_descr]:
1672 for t in s.strip().split(" "):
1674 # make this set a sorted list to get the same tensors given the same descriptions
1676 tokens = list(tokens)
1678 tokens = ["#"] + tokens
1679 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
1680 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
1681 self.t_nul = self.token2id["#"]
1682 self.t_true = self.token2id["true"]
1683 self.t_false = self.token2id["false"]
1684 self.t_pipe = self.token2id["|"]
1686 # Tokenize the train and test sets
1687 self.train_input = self.str2tensor(self.train_descr)
1688 self.test_input = self.str2tensor(self.test_descr)
1690 None if len(self.play_descr) == 0 else self.str2tensor(self.play_descr)
1693 def batches(self, split="train"):
1694 assert split in {"train", "test"}
1695 input = self.train_input if split == "train" else self.test_input
1696 for batch in tqdm.tqdm(
1697 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1699 yield self.trim(batch)
1701 def vocabulary_size(self):
1702 return len(self.token2id)
1704 def produce_results(
1705 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1707 correct = self.test_input[:1000]
1708 result = correct.clone()
1709 ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
1710 result *= 1 - ar_mask # paranoia: blank out the tokens that will be generated
1712 logger(f"----------------------------------------------------------")
1714 for e in self.tensor2str(result[:10]):
1715 logger(f"test_before {e}")
1717 masked_inplace_autoregression(
1722 deterministic_synthesis,
1726 logger(f"----------------------------------------------------------")
1728 for e in self.tensor2str(result[:10]):
1729 logger(f"test_after {e}")
1731 logger(f"----------------------------------------------------------")
1733 nb_total = ar_mask.sum().item()
1734 nb_correct = ((correct == result).long() * ar_mask).sum().item()
1736 logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
1737 logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
1739 if self.play_input is not None:
1740 result = self.play_input.clone()
1741 ar_mask = (result == self.t_pipe).long().cumsum(dim=1).clamp(max=1)
1742 result *= 1 - ar_mask # paranoia: blank out the tokens that will be generated
1744 logger(f"----------------------------------------------------------")
1746 for e in self.tensor2str(result[:10]):
1747 logger(f"play_before {e}")
1749 masked_inplace_autoregression(
1754 deterministic_synthesis,
1758 logger(f"----------------------------------------------------------")
1760 for e in self.tensor2str(result[:10]):
1761 logger(f"play_after {e}")
1763 logger(f"----------------------------------------------------------")
1766 ######################################################################
1772 ######################
1781 device=torch.device("cpu"),
1785 self.device = device
1786 self.batch_size = batch_size
1787 self.nb_samples_per_mlp = 256
1789 if logger is not None:
1791 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
1794 seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
1795 nb_mlps=nb_train_samples + nb_test_samples,
1796 nb_samples=self.nb_samples_per_mlp,
1800 nb_mlps_per_batch=1024,
1803 self.train_input = seq[:nb_train_samples]
1804 self.train_q_test_set = q_test_set[:nb_train_samples]
1805 self.train_ref_test_errors = test_error[:nb_train_samples]
1806 self.test_input = seq[nb_train_samples:]
1807 self.test_q_test_set = q_test_set[nb_train_samples:]
1808 self.test_ref_test_errors = test_error[nb_train_samples:]
1810 filename = os.path.join(result_dir, f"train_errors_ref.dat")
1811 with open(filename, "w") as f:
1812 for e in self.train_ref_test_errors:
1815 filename = os.path.join(result_dir, f"test_errors_ref.dat")
1816 with open(filename, "w") as f:
1817 for e in self.test_ref_test_errors:
1820 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1822 def batches(self, split="train"):
1823 assert split in {"train", "test"}
1824 input = self.train_input if split == "train" else self.test_input
1825 for batch in tqdm.tqdm(
1826 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
1830 def vocabulary_size(self):
1831 return self.nb_codes
1833 def produce_results(
1834 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1836 correct = self.test_input[:1000]
1837 result = correct.clone()
1839 torch.arange(result.size(1), device=result.device)
1840 > self.nb_samples_per_mlp * 3 + 1
1842 ar_mask = ar_mask.expand_as(result)
1843 result *= 1 - ar_mask # paranoia: blank out the tokens that will be generated
1845 masked_inplace_autoregression(
1850 deterministic_synthesis,
1854 q_train_set = result[:, : self.nb_samples_per_mlp * 3]
1855 q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
1856 error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
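# Editor's note (inferred from the slicing above): the first
# 3 * nb_samples_per_mlp tokens of each sequence encode the quantized training
# set (presumably three tokens per sample); the tail of the sequence holds the
# quantized MLP parameters, which is the part the model is asked to generate
# (ar_mask above) and which evaluate_q_params scores against the held-out
# q_test_set.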
1858 filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
1859 with open(filename, "w") as f:
1860 for e in error_test:
1864 ######################################################################