# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm

import torch, torchvision

from torch.nn import functional as F

from mygpt import BracketedSequence

# Task-specific data generators shipped with this repository
import picoclvr, maze, snake, stack, expr, rpl, world

# Attention-map rendering is optional; disable it if graph.py is absent
try:
    from graph import save_attention_image
except ImportError:
    save_attention_image = None

######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )


######################################################################
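
# A hedged usage sketch (not in the original file): given prompts with
# the positions to generate flagged in ar_mask, the helper overwrites
# those positions in place, batch by batch, with a mygpt-style model
# exposing masked_inplace_autoregression:
#
#   input = prompts.clone()  # (N, T) token ids
#   input *= 1 - ar_mask     # blank out the span to generate
#   masked_inplace_autoregression(
#       model, batch_size=25, input=input, ar_mask=ar_mask,
#       deterministic_synthesis=False,
#   )
#   # input now holds the prompts with the masked span filled in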


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################


class Problem:
    def generate_sequences(self, nb):
        pass

    def seq2str(self, seq):
        return "[NOT IMPLEMENTED]"


class ProblemLevel0(Problem):
    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
        self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
        self.seq[:, len_prompt] = 10

    def generate_sequences(self, nb):
        sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
        ar_mask = (sequences == 10).long()
        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
        return sequences, ar_mask
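
# A worked illustration (not in the original) of the ar_mask idiom used
# throughout this file:
#
#   seq  = torch.tensor([[3, 7, 10, 4, 2]])
#   m    = (seq == 10).long()              # [[0, 0, 1, 0, 0]]
#   mask = (m.cumsum(1) - m).clamp(max=1)  # [[0, 0, 0, 1, 1]]
#
# Everything strictly after the first marker token (here 10) is flagged
# for generation; the prompt, marker included, is kept.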


class ProblemLevel1(Problem):
    def __init__(self, nb_operators=100, len_source=5, len_result=8):
        self.len_source = len_source
        self.len_result = len_result
        self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
        self.operators = F.one_hot(
            torch.rand(nb_operators, len_result, len_source).argmax(-1),
            num_classes=len_source,
        )

    def generate_sequences(self, nb):
        nb_operators = torch.randint(self.operators.size(0), (nb,))
        operators = self.operators[nb_operators]
        # decompose the operator id into its decimal digits
        nb_operators = (
            nb_operators[:, None]
            // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
        ) % 10
        marker1 = torch.full((nb, 1), 10)
        # source = torch.randint(10, (nb, self.len_source))
        source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
        marker2 = torch.full((nb, 1), 11)
        result = operators.bmm(source[:, :, None]).squeeze(-1)
        sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
        ar_mask = (sequences == 11).long()
        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
        return sequences, ar_mask

    def seq2str(self, seq):
        return "".join("0123456789|>"[x.item()] for x in seq)
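
# Illustration (hedged, not in the original): through seq2str's alphabet
# "0123456789|>", a sample with the default sizes reads like
# "042|13579>31959717" -- the operator id digits, the "|" marker (token
# 10), the source digits, the ">" marker (token 11), then the result
# digits, each of which copies one source digit since the operator rows
# are one-hot.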


class ProblemLevel2(Problem):
    def __init__(self, len_source=5, len_result=8):
        self.len_source = len_source
        self.len_result = len_result

    def generate_sequences(self, nb):
        operators = F.one_hot(
            torch.rand(nb, self.len_result, self.len_source).argmax(-1),
            num_classes=self.len_source,
        )
        source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
        # source1 = torch.randint(10, (nb, self.len_source))
        marker1 = torch.full((nb, 1), 10)
        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
        marker2 = torch.full((nb, 1), 11)
        source2 = torch.randint(10, (nb, self.len_source))
        marker3 = torch.full((nb, 1), 12)
        result2 = operators.bmm(source2[:, :, None]).squeeze(-1)

        sequences = torch.cat(
            (source1, marker1, result1, marker2, source2, marker3, result2), 1
        )
        ar_mask = (sequences == 12).long()
        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
        return sequences, ar_mask

    def seq2str(self, seq):
        return "".join("0123456789>|~"[x.item()] for x in seq)
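
# Hedged reading of ProblemLevel2: the operator is sampled per sequence
# and never encoded in the tokens, so the model sees
# (source1, result1, source2) and must produce result2, i.e. infer the
# selection matrix from one worked example and re-apply it.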


class ProblemAddition(Problem):
    def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
        self.nb_digits = nb_digits
        self.zero_padded = zero_padded
        self.inverted_result = inverted_result
        self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

    def tensorize(self, strings):
        len_max = max([len(x) for x in strings])
        return torch.tensor(
            [
                [self.char2id[c] for c in s + "$" * (len_max - len(s))]
                for s in strings
            ]
        )

    def generate_sequences(self, nb):
        sequences = []
        for _ in range(nb):
            a, b = torch.randint(10**self.nb_digits, (2,))
            c = a + b
            a, b, c = str(a.item()), str(b.item()), str(c.item())
            if self.zero_padded:
                a = "0" * (self.nb_digits - len(a)) + a
                b = "0" * (self.nb_digits - len(b)) + b
                c = "0" * (self.nb_digits + 1 - len(c)) + c
            if self.inverted_result:
                c = c[::-1]
            sequences.append(f"{a}+{b}={c}$")

        sequences = self.tensorize(sequences)
        ar_mask = (sequences == self.char2id["="]).long()
        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
        return sequences, ar_mask

    def seq2str(self, seq):
        return "".join(self.id2char[x.item()] for x in seq)
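
# Illustration (hedged, not in the original): with nb_digits=2 and no
# options set, a sample decodes as e.g. "58+7=65$"; with zero_padded=True
# it becomes "58+07=065$", and with inverted_result=True the sum digits
# are reversed ("58+7=56$"), aligning carry propagation with the
# generation order. Since ar_mask flags everything after "=", the model
# generates the sum and the trailing "$" padding.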


# class ProblemUnion(Problem):
#     problems = [ProblemByheart()]
#     nb_common_codes = 100
#
#     def generate_sequences(nb_samples):
#         problem_indexes = torch.randint(len(problems), (nb_samples,))
#         nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
#         print(f"{nb_samples_per_problem}")
#         all_seq = []
#         for nb, p in zip(nb_samples_per_problem, problems):
#             all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
#
#     for strain, stest in zip(train_seq, test_seq):
#         s = torch.cat((strain, stest), 0)


class SandBox(Task):
    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        logger=None,
        device=torch.device("cpu"),
        max_nb_codes=1024,
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device
        self.problem = problem

        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
            nb_train_samples
        )
        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
            nb_test_samples
        )

        self.train_input, self.train_ar_mask = self.train_input.to(
            device
        ), self.train_ar_mask.to(device)
        self.test_input, self.test_ar_mask = self.test_input.to(
            device
        ), self.test_ar_mask.to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

        # A bit of paranoia never hurts
        assert (
            self.nb_codes <= max_nb_codes
            and self.train_input.min() >= 0
            and self.test_input.min() >= 0
            and tuple(self.train_ar_mask.unique()) == (0, 1)
            and tuple(self.test_ar_mask.unique()) == (0, 1)
        )

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, ar_mask, logger=None):
            input, ar_mask = input[:nmax], ar_mask[:nmax]
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            if logger is not None:
                for sp, st in zip(result[:10], input[:10]):
                    logger(
                        f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
                    )
                    logger(
                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
                    )

            nb_total = ar_mask.sum().item()
            nb_correct = ((result == input).long() * ar_mask).sum().item()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(
            self.train_input, self.train_ar_mask
        )

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(
            self.test_input, self.test_ar_mask, logger
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################
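
# A hedged usage sketch (not in the original file) of how SandBox ties a
# Problem to the training loop; argument values are illustrative only:
#
#   task = SandBox(
#       ProblemAddition(nb_digits=2),
#       nb_train_samples=10000,
#       nb_test_samples=1000,
#       batch_size=25,
#   )
#   # train with next-token cross-entropy over task.batches("train"),
#   # then call task.produce_results(...) to measure masked accuracy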


class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]

    # Trim the tensors in z, removing as many <nul> tokens as possible
    # from the left and right of the first tensor. If z is a tuple, all
    # its elements are trimmed according to the trimming computed for
    # the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]

    ######################
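
    # Illustration (hedged, not in the original): trim drops the
    # all-<nul> columns shared at both ends, e.g. with n = <nul>
    #   x = [[n, a, b, n, n],
    #        [n, c, n, n, n]]
    #   self.trim(x)  ->  [[a, b],
    #                      [c, n]]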

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_colors=5,
        logger=None,
        device=torch.device("cpu"),
        pruner_train=None,
        pruner_eval=None,
    ):
        super().__init__()

        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(
                nb,
                height=self.height,
                width=self.width,
                nb_colors=nb_colors,
                pruner=pruner,
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given
        # the same descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc="test-properties",
        ):
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
                result_descr,
                height=self.height,
                width=self.width,
                pruner=pruner,
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3
        nb_per_primer = 8
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "primer_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        if img.dim() == 5:
            if img.size(1) == 1:
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
                        for x in img
                    ],
                    0,
                )

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################


class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        super().__init__()

        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device
        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        # gray levels 0..255
        return 256

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
        )
        logger(f"wrote {image_name}")
621 ######################################################################
627 def map2seq(self, *m):
628 return torch.cat([x.flatten(1) for x in m], 1)
630 def seq2map(self, s):
631 s = s.reshape(s.size(0), -1, self.height, self.width)
632 return (s[:, k] for k in range(s.size(1)))
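
    # Illustration (hedged, not in the original): map2seq flattens each
    # (N, height, width) map to (N, height * width) and concatenates them
    # along dim 1; seq2map is its inverse:
    #   seq = self.map2seq(mazes, paths)    # (N, 2 * height * width)
    #   mazes2, paths2 = self.seq2map(seq)  # two (N, height, width) tensors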

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        if count.max() == 0:
            count = None
        else:
            count = count[
                : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
            ]

        return nb_total, nb_correct, count

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            model,
            "train",
            nb_to_use=1000,
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            model,
            "test",
            nb_to_use=1000,
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
        maze.save_image(
            filename,
            mazes=mazes,
            target_paths=paths,
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        )
        logger(f"wrote {filename}")


######################################################################


class Snake(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_colors,
        length,
        prompt_length,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
            nb_train_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
            nb_test_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
            ar_mask = (
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                .long()
                .expand_as(result)
            )
            result *= 1 - ar_mask

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = ((prior_visits > 0) * ar_mask).sum()

            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct

        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )
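
    # Hedged reading of the mask above: test sequences interleave two
    # token streams, and only the even positions past the prompt (two
    # tokens per step) are regenerated. Accuracy is counted solely where
    # the snake revisits a cell (prior_visits > 0), i.e. where the token
    # is determined by what was already observed.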


######################################################################


class Stack(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        logger,
        nb_steps,
        nb_stacks,
        nb_digits,
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits
        self.device = device

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all_values = torch.randperm(10**nb_digits)
            nb_for_train = int(all_values.size(0) * fraction_values_for_train)
            values_for_train = all_values[:nb_for_train]
            values_for_test = all_values[nb_for_train:]

        self.train_input, self.train_stack_counts = stack.generate_sequences(
            nb_train_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_train,
            self.device,
        )

        self.test_input, self.test_stack_counts = stack.generate_sequences(
            nb_test_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_test,
            self.device,
        )

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
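
    # Hedged reading of the encoding: each operation takes 1 + nb_digits
    # tokens (one op token followed by the value's digits), and the odd
    # op tokens below 2 * nb_stacks appear to denote pops, which is what
    # the test_pop_stack_counts statistic above relies on.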

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct

        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################


######################################################################


class RPL(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [
                    self.token2id[str(c)]
                    for c in s + ["<nul>"] * (len_max - len(s))
                ]
                for s in sequences
            ]
        )

    def seq2str(self, seq):
        return " ".join([self.id2token[i] for i in seq])

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        nb_starting_values=3,
        max_input=9,
        no_prog=False,
        logger=None,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device
        self.no_prog = no_prog

        train_sequences = [
            rpl.generate(
                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            )
            for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
        ]

        test_sequences = [
            rpl.generate(
                nb_starting_values=nb_starting_values,
                nb_result_values_max=4 * nb_starting_values,
                max_input=max_input,
            )
            for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
        ]

        symbols = list(
            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
        )
        val_max = max([x if type(x) is int else 0 for x in symbols])
        symbols = list(filter(lambda x: type(x) is str, symbols))
        symbols.sort()
        symbols += [str(n) for n in range(val_max + 1)]
        self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])

        self.t_nul = self.token2id["<nul>"]
        self.t_input = self.token2id["<in>"]
        self.t_output = self.token2id["<out>"]
        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        if no_prog:
            # Excise the program from every train and test example
            k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
                None, :
            ]
            p = (
                ((self.train_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
                .values
            )
            self.train_input = (
                self.train_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
            )
            k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
                None, :
            ]
            p = (
                ((self.test_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
                .values
            )
            self.test_input = (
                self.test_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
            )

        if logger is not None:
            logger(f"value_max {val_max}")
            for x in self.train_input[:25]:
                end = (x != self.t_nul).nonzero().max().item() + 1
                seq = [self.id2token[i.item()] for i in x[:end]]
                s = " ".join(seq)
                logger(f"example_seq {s}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
            batch = batch[:, :last].to(self.device)
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        # --------------------------------------------------------------------
        def compute_nb_errors_prog(input, nb_to_log=0):
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result in zip(input, result):
                seq = [self.id2token[i.item()] for i in one_result]
                nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                sum_nb_total += 1
                sum_nb_errors += 0 if nb_errors == 0 else 1
                if nb_to_log > 0:
                    gt_seq = [self.id2token[i.item()] for i in one_input]
                    _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                    gt_prog = " ".join([str(x) for x in gt_prog])
                    prog = " ".join([str(x) for x in prog])
                    comment = "*" if nb_errors == 0 else "-"
                    logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
                    for start_stack, target_stack, result_stack, correct in stacks:
                        comment = "*" if correct else "-"
                        start_stack = " ".join([str(x) for x in start_stack])
                        target_stack = " ".join([str(x) for x in target_stack])
                        result_stack = " ".join([str(x) for x in result_stack])
                        logger(
                            f"  {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
                        )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors

        # --------------------------------------------------------------------
        def compute_nb_errors_output(input, nb_to_log=0):
            result = input.clone()
            k = torch.arange(result.size(1), device=result.device)[None, :]
            last_output_idx = (
                ((result == self.t_output) * k).max(dim=1, keepdim=True).values
            )
            first_prog_idx = (
                ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
            )
            ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result, i, j in zip(
                input, result, last_output_idx, first_prog_idx
            ):
                seq = [self.id2token[i.item()] for i in one_result]
                sum_nb_total += 1
                correct = (one_input - one_result).abs().max() == 0
                sum_nb_errors += 0 if correct else 1
                if nb_to_log > 0:
                    result_stack = [
                        self.id2token[i.item()] for i in one_result[i : j + 1]
                    ]
                    target_stack = [
                        self.id2token[i.item()] for i in one_input[i : j + 1]
                    ]
                    comment = "*" if correct else "-"
                    result_stack = " ".join([str(x) for x in result_stack])
                    target_stack = " ".join([str(x) for x in target_stack])
                    logger(
                        f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
                    )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors

        # --------------------------------------------------------------------

        if not self.no_prog:
            test_nb_total, test_nb_errors = compute_nb_errors_prog(
                self.test_input[:1000].to(self.device), nb_to_log=10
            )

            logger(
                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
            )

        test_nb_total, test_nb_errors = compute_nb_errors_output(
            self.test_input[:1000].to(self.device), nb_to_log=10
        )

        logger(
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        )

        if save_attention_image is not None:
            input = self.test_input[:10]
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            with torch.autograd.no_grad():
                t = model.training
                model.eval()
                model.record_attention(True)
                model(BracketedSequence(result))
                model.train(t)
                attention = model.retrieve_attention()
                model.record_attention(False)

            n_sample = 0  # render the attention maps of the first sample
            tokens_output = [self.id2token[i.item()] for i in result[n_sample]]
            tokens_input = ["n/a"] + tokens_output[:-1]
            for n_head in range(attention[0].size(1)):
                filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
                # The exact rendering options are elided in this excerpt;
                # the call is assumed to take the tokens, the recorded
                # attention, and the sample/head to draw.
                save_attention_image(
                    filename,
                    tokens_input,
                    tokens_output,
                    attention,
                    n_sample=n_sample,
                    n_head=n_head,
                )
                logger(f"wrote {filename}")


######################################################################
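
# Note (hedged, not in the original): record_attention / retrieve_attention
# are assumed to be mygpt hooks that buffer the attention maps of the next
# forward pass as a list, one entry per layer, of (N, n_heads, T, T)
# tensors -- which is what the save_attention_image loop above indexes
# by head.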


class Expr(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                for s in sequences
            ],
            device=self.device,
        )

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        nb_variables,
        sequence_length,
        operand_max,
        result_max,
        batch_size,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
        symbols.sort()

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])

    def produce_results(
        self,
        n_epoch,
        model,
        result_dir,
        logger,
        deterministic_synthesis,
        input_file=None,
    ):
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed

        (
            test_nb_total,
            test_nb_correct,
            test_nb_delta,
            test_nb_missed,
        ) = compute_nb_correct(self.test_input[:10000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after  {self.seq2str(result[n])} {comment}")
            logger(f"truth       {self.seq2str(correct[n])}")
        ##############################################################


######################################################################
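
# Note (hedged reading of the code above): in the Expr task everything
# after the first space character is regenerated, so the model must
# produce the values of the variables defined by the expression prefix.
# nb_delta histograms |predicted - true| per variable and nb_missed
# counts values absent or unparsable in the output.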


class World(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        vqae_nb_epochs,
        logger=None,
        device=torch.device("cpu"),
        device_storage=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device

        (
            train_frames,
            train_action_seq,
            test_frames,
            test_action_seq,
            self.frame2seq,
            self.seq2frame,
        ) = world.create_data_and_processors(
            nb_train_samples,
            nb_test_samples,
            nb_epochs=vqae_nb_epochs,
            logger=logger,
            device=device,
            device_storage=device_storage,
        )

        train_frame_seq = self.frame2seq(train_frames).to(device_storage)
        test_frame_seq = self.frame2seq(test_frames).to(device_storage)

        nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
        nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1

        self.len_frame_seq = train_frame_seq.size(1)
        self.len_action_seq = train_action_seq.size(1)
        self.nb_codes = nb_frame_codes + nb_action_codes

        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)

        train_action_seq += nb_frame_codes
        self.train_input = torch.cat(
            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
        )

        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
        test_action_seq += nb_frame_codes
        self.test_input = torch.cat(
            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
        )
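
    # Hedged reading of the layout built above: each example is
    #   [ frame_0 codes | action codes (offset by nb_frame_codes) | frame_1 codes ]
    # so frame and action tokens occupy disjoint id ranges within a
    # single vocabulary of size nb_frame_codes + nb_action_codes, and
    # produce_results regenerates only the second frame.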

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch.to(self.device)

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        k = torch.arange(
            2 * self.len_frame_seq + self.len_action_seq, device=self.device
        )[None, :]

        input = self.test_input[:64].to(self.device)
        result = input.clone()

        ar_mask = (
            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
        )
        result *= 1 - ar_mask

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        seq_start = input[:, : self.len_frame_seq]
        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]

        result = torch.cat(
            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
        )
        result = result.reshape(-1, result.size(-1))

        frames = self.seq2frame(result)
        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            frames.float() / (world.Box.nb_rgb_levels - 1),
            image_name,
        )
        logger(f"wrote {image_name}")


######################################################################
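
# A hedged end-to-end sketch (not in the original file): every Task above
# exposes the same three-method interface, so a training driver can stay
# generic. Names below (model, nb_epochs, ...) are placeholders:
#
#   task = Maze(
#       nb_train_samples=25000, nb_test_samples=1000, batch_size=25,
#       height=13, width=21, nb_walls=15, device=device,
#   )
#   vocabulary_size = task.vocabulary_size()  # sizes the model's embedding
#   for n_epoch in range(nb_epochs):
#       for input in task.batches(split="train"):
#           ...  # next-token cross-entropy on input, optimizer step
#       task.produce_results(n_epoch, model, result_dir, logger, True)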