# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
import os, tqdm

import torch, torchvision

from torch.nn import functional as F

from mygpt import BracketedSequence

# save_attention_image needs pycairo; fall back to None when it is unavailable
try:
    from graph import save_attention_image
except ImportError:
    save_attention_image = None

######################################################################
def masked_inplace_autoregression(
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,

    with torch.autograd.no_grad():

        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis


######################################################################
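# A minimal usage sketch (illustration only, not part of the original file).
# The helper regenerates, in place, the positions of `input` where `ar_mask`
# is 1 and leaves every other token untouched; the leading parameters elided
# from the signature above appear to be the model, the batch size, the input
# and the mask, since the body refers to `model`, `batch_size`, `input` and
# `ar_mask`. `some_task` and `prompt_length` below are hypothetical.
#
#     result = some_task.test_input[:32].clone()
#     ar_mask = torch.zeros_like(result)
#     ar_mask[:, prompt_length:] = 1   # predict everything after the prompt
#     result *= 1 - ar_mask            # blank out the part to generate
#     masked_inplace_autoregression(
#         model, batch_size, result, ar_mask,
#         deterministic_synthesis=True,
#         progress_bar_desc=None,
#     )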
    def batches(self, split="train"):

    def vocabulary_size(self):

        self, n_epoch, model, result_dir, logger, deterministic_synthesis
        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.problem = problem

        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
        self.test_input, self.test_ar_mask = self.problem.generate_sequences(

        self.train_input, self.train_ar_mask = self.train_input.to(
        ), self.train_ar_mask.to(device)
        self.test_input, self.test_ar_mask = self.test_input.to(
        ), self.test_ar_mask.to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

        # A bit of paranoia never hurts
            self.nb_codes <= max_nb_codes
            and self.train_input.min() >= 0
            and self.test_input.min() >= 0
            and tuple(self.train_ar_mask.unique()) == (0, 1)
            and tuple(self.test_ar_mask.unique()) == (0, 1)
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000

        def compute_accuracy(input, ar_mask, logger=None):
            input, ar_mask = input[:nmax], ar_mask[:nmax]
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            if logger is not None:
                for sp, st in zip(result[:10], input[:10]):
                        f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
                        f" {n_epoch} ground truth {self.problem.seq2str(st)}"

            nb_total = ar_mask.sum().item()
            nb_correct = ((result == input).long() * ar_mask).sum().item()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(
            self.train_input, self.train_ar_mask

            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"

        test_nb_total, test_nb_correct = compute_accuracy(
            self.test_input, self.test_ar_mask, logger

            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
        if save_attention_image is None:
            logger("no save_attention_image (is pycairo installed?)")
        else:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()

            with torch.autograd.no_grad():
                model.record_attention(True)
                model(BracketedSequence(input))
                ram = model.retrieve_attention()
                model.record_attention(False)
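            # ram is assumed to hold one attention tensor per layer, each of
            # shape (batch, head, ...): dimension 1 is indexed by head below,
            # and [0, n_head] selects the matrices of the single sampled sequence.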
            tokens_output = [c for c in self.problem.seq2str(input[0])]
            tokens_input = ["n/a"] + tokens_output[:-1]

            for n_head in range(ram[0].size(1)):
                filename = os.path.join(
                    result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"

                attention_matrices = [m[0, n_head] for m in ram]

                save_attention_image(
                    # min_total_attention=0.9,

                logger(f"wrote {filename}")
######################################################################
class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim all the tensors in the tuple z to remove as many tokens as possible
    # from the left and the right of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed for the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
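    # Worked example (illustration only, assuming "<nul>" maps to id 0): trim
    # drops the columns made only of padding on both edges and keeps every
    # column that contains at least one non-padding token.
    #
    #     x = torch.tensor([[0, 0, 5, 7, 0],
    #                       [0, 3, 0, 0, 0]])
    #     self.trim((x,))  ->  (tensor([[0, 5, 7],
    #                                   [3, 0, 0]]),)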
    ######################

        device=torch.device("cpu"),

        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(

        self.batch_size = batch_size

        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
        # make this set a sorted list to get the same tensors given the same descr
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
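        # Round-trip sketch (illustration only): with the vocabulary built
        # above, tensorize/detensorize are inverses up to right-padding with
        # "<nul>" tokens.
        #
        #     d = ["red above green <img>", "there is blue <img>"]
        #     t = self.tensorize(d)     # (2, max_len) tensor of token ids
        #     d2 = self.detensorize(t)  # same strings, right-padded with "<nul>"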
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None

        acc_nb_requested_properties = []
        acc_nb_missing_properties = []

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            desc=f"test-properties",
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
            f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
    ######################################################################

        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, pruner=self.pruner_eval
            )
        nb_tokens_to_generate = self.height * self.width + 3

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            deterministic_synthesis,

        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
            img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
                torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        logger(f"wrote {image_name}")
######################################################################

        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size

        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            deterministic_synthesis,

        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
        logger(f"wrote {image_name}")
######################################################################

    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))

        device=torch.device("cpu"),

        self.batch_size = batch_size

        train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False

        nb_total, nb_correct = 0, 0
            self.width * self.height,
            self.width * self.height,

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

            : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1

        return nb_total, nb_correct, count
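    # Note on the returned matrix: count[i, j] is the number of correctly
    # solved mazes whose optimal path length is i and whose predicted path
    # length is j, so its diagonal corresponds to predictions that are also
    # optimal (this is what proportion_optimal measures below).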
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        train_nb_total, train_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"

        test_nb_total, test_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            deterministic_synthesis,

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),

        logger(f"wrote {filename}")
######################################################################

        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
            result *= 1 - ar_mask

            masked_inplace_autoregression(
                deterministic_synthesis,

            nb_total = ((prior_visits > 0) * ar_mask).sum()
            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct
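        # Accuracy is measured only on the masked positions whose cell has
        # already been visited (prior_visits > 0), i.e. positions whose value
        # has already appeared earlier in the sequence.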
        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]

            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
######################################################################

        fraction_values_for_train=None,
        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]

        self.train_input, self.train_stack_counts = stack.generate_sequences(
        self.test_input, self.test_stack_counts = stack.generate_sequences(

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                deterministic_synthesis,

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct

        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"

        masked_inplace_autoregression(
            deterministic_synthesis,

        for n in range(result.size(0)):
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        ##############################################################
######################################################################

    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
                        self.token2id[str(c)]
                        for c in s + ["<nul>"] * (len_max - len(s))

    def seq2str(self, seq):
        return " ".join([self.id2token[i] for i in seq])
        nb_starting_values=3,
        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.no_prog = no_prog

                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
            for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")

                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
            for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")

            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
        val_max = max([x if type(x) is int else 0 for x in symbols])
        symbols = list(filter(lambda x: type(x) is str, symbols))
        symbols += [str(n) for n in range(val_max + 1)]
        self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])

        self.t_nul = self.token2id["<nul>"]
        self.t_input = self.token2id["<in>"]
        self.t_output = self.token2id["<out>"]
        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        # Excise the program from every train and test example
        k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
            ((self.train_input == self.t_prog).long() * k)
            .max(1, keepdim=True)
        self.train_input = (
            self.train_input * (k <= p).long()
            + self.t_end * (k == p + 1).long()
            + self.t_nul * (k > p + 1).long()
        k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
            ((self.test_input == self.t_prog).long() * k)
            .max(1, keepdim=True)
            self.test_input * (k <= p).long()
            + self.t_end * (k == p + 1).long()
            + self.t_nul * (k > p + 1).long()

        if logger is not None:
            logger(f"value_max {val_max}")
            for x in self.train_input[:25]:
                end = (x != self.t_nul).nonzero().max().item() + 1
                seq = [self.id2token[i.item()] for i in x[:end]]
                logger(f"example_seq {s}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
            batch = batch[:, :last].to(self.device)

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        # --------------------------------------------------------------------
        def compute_nb_errors_prog(input, nb_to_log=0):
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
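            # s.cumsum(1) - s is 0 up to and including the first <prg> token and
            # 1 afterwards, so ar_mask selects the tokens that follow the first
            # <prg> marker: the program part that is blanked out and regenerated.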
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                deterministic_synthesis,

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result in zip(input, result):
                seq = [self.id2token[i.item()] for i in one_result]
                nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                sum_nb_errors += 0 if nb_errors == 0 else 1
                gt_seq = [self.id2token[i.item()] for i in one_input]
                _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                gt_prog = " ".join([str(x) for x in gt_prog])
                prog = " ".join([str(x) for x in prog])
                comment = "*" if nb_errors == 0 else "-"
                logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
                for start_stack, target_stack, result_stack, correct in stacks:
                    comment = "*" if correct else "-"
                    start_stack = " ".join([str(x) for x in start_stack])
                    target_stack = " ".join([str(x) for x in target_stack])
                    result_stack = " ".join([str(x) for x in result_stack])
                        f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------
        def compute_nb_errors_output(input, nb_to_log=0):
            result = input.clone()
            k = torch.arange(result.size(1), device=result.device)[None, :]
                ((result == self.t_output) * k).max(dim=1, keepdim=True).values
                ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
            ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                deterministic_synthesis,

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result, i, j in zip(
                input, result, last_output_idx, first_prog_idx
                seq = [self.id2token[i.item()] for i in one_result]
                correct = (one_input - one_result).abs().max() == 0
                sum_nb_errors += 0 if correct else 1
                    self.id2token[i.item()] for i in one_result[i : j + 1]
                    self.id2token[i.item()] for i in one_input[i : j + 1]
                comment = "*" if correct else "-"
                result_stack = " ".join([str(x) for x in result_stack])
                target_stack = " ".join([str(x) for x in target_stack])
                    f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------

        if not self.no_prog:
            test_nb_total, test_nb_errors = compute_nb_errors_prog(
                self.test_input[:1000].to(self.device), nb_to_log=10
                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
            logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")

        test_nb_total, test_nb_errors = compute_nb_errors_output(
            self.test_input[:1000].to(self.device), nb_to_log=10
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        if save_attention_image is None:
            logger("no save_attention_image (is pycairo installed?)")
        else:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()
            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
            input = input[:, :last].to(self.device)

            with torch.autograd.no_grad():
                model.record_attention(True)
                model(BracketedSequence(input))
                ram = model.retrieve_attention()
                model.record_attention(False)

            tokens_output = [self.id2token[i.item()] for i in input[0]]
            tokens_input = ["n/a"] + tokens_output[:-1]
            for n_head in range(ram[0].size(1)):
                filename = os.path.join(
                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"

                attention_matrices = [m[0, n_head] for m in ram]

                save_attention_image(
                    # min_total_attention=0.9,

                logger(f"wrote {filename}")
######################################################################

    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
                    [self.char2id[c] for c in s + "#" * (len_max - len(s))]

        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,

        test_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])
    def produce_results(
        deterministic_synthesis,

        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                deterministic_synthesis,

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        f.write(f"{vi} {-1 if vr is None else vr}\n")
                        if vr is None or vr < 0:
                            if d >= nb_delta.size(0):

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed
        ) = compute_nb_correct(self.test_input[:10000])

            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"

        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        ##############################################################
        # Log a few generated sequences

        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            deterministic_synthesis,

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################
######################################################################

    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["#"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim all the tensors in the tuple z to remove as many tokens as possible
    # from the left and the right of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed for the first one.
    def trim(self, z, token="#"):
        n = self.token2id[token]
        if type(z) == tuple:
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
    ######################

        device=torch.device("cpu"),

        self.device = device
        self.batch_size = batch_size
        self.grid_factory = grid.GridFactory(size=size)

        if logger is not None:
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"

        self.train_descr = self.grid_factory.generate_samples(
            nb_train_samples, lambda r: tqdm.tqdm(r)
        self.test_descr = self.grid_factory.generate_samples(
            nb_test_samples, lambda r: tqdm.tqdm(r)

        # Build the tokenizer
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
        # make this set a sorted list to get the same tensors given the same descr
        tokens = list(tokens)
        tokens.sort()
        tokens = ["#"] + tokens
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_nul = self.token2id["#"]
        self.t_true = self.token2id["<true>"]
        self.t_false = self.token2id["<false>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

        correct = self.test_input[:1000]
        result = correct.clone()
        ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
        result *= 1 - ar_mask

        for e in self.detensorize(result[:10]):
            logger(f"test_before {e}")

        masked_inplace_autoregression(
            deterministic_synthesis,

        for e in self.detensorize(result[:10]):
            logger(f"test_after {e}")

        nb_total = ar_mask.sum().item()
        nb_correct = ((correct == result).long() * ar_mask).sum().item()

        logger(f"test_performance {nb_total=} {nb_correct=}")
        logger(f"main_test_accuracy {nb_correct / nb_total}")
######################################################################

        device=torch.device("cpu"),
        device_storage=torch.device("cpu"),

        self.batch_size = batch_size
        self.device = device

        ) = world.create_data_and_processors(
            nb_epochs=vqae_nb_epochs,
            device_storage=device_storage,

        train_frame_seq = self.frame2seq(train_frames).to(device_storage)
        test_frame_seq = self.frame2seq(test_frames).to(device_storage)

        nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
        nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1

        self.len_frame_seq = train_frame_seq.size(1)
        self.len_action_seq = train_action_seq.size(1)
        self.nb_codes = nb_frame_codes + nb_action_codes

        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)

        train_action_seq += nb_frame_codes
        self.train_input = torch.cat(
            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1

        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
        test_action_seq += nb_frame_codes
        self.test_input = torch.cat(
            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
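        # Each row of train_input/test_input is therefore laid out as
        #   [ first-frame tokens | action tokens (offset by nb_frame_codes) | second-frame tokens ]
        # so frame codes and action codes occupy disjoint ranges of the vocabulary.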
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
            input = input[:nb_to_use]
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
            yield batch.to(self.device)

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis

            2 * self.len_frame_seq + self.len_action_seq, device=self.device

        input = self.test_input[:64].to(self.device)
        result = input.clone()

            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)

        result *= 1 - ar_mask

        masked_inplace_autoregression(
            deterministic_synthesis,

        seq_start = input[:, : self.len_frame_seq]
        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]

            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1

        result = result.reshape(-1, result.size(-1))

        frames = self.seq2frame(result)
        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            frames.float() / (world.Box.nb_rgb_levels - 1),

        logger(f"wrote {image_name}")


######################################################################