3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
import math, os, tqdm

import torch, torchvision
13 from torch.nn import functional as F
15 ######################################################################
18 def masked_inplace_autoregression(
23 deterministic_synthesis,
24 forbidden_tokens=None,
25 progress_bar_desc="autoregression",
26 device=torch.device("cpu"),
28 assert input.size() == ar_mask.size()
30 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
32 if progress_bar_desc is not None:
36 desc=progress_bar_desc,
37 # total=input.size(0) // batch_size,
40 with torch.autograd.no_grad():
44 for input, ar_mask in batches:
45 model.masked_inplace_autoregression(
46 input, ar_mask, forbidden_tokens, deterministic_synthesis
52 ######################################################################
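# Note (added for clarity): masked_inplace_autoregression() regenerates, in
# place and batch by batch, the positions of `input` where `ar_mask` is 1,
# leaving the positions where it is 0 untouched; the actual sampling is
# delegated to model.masked_inplace_autoregression(). A hedged sketch of a
# typical prompt/completion mask (the argument order of the call is assumed,
# and the sizes are illustrative only):
#
#     seq = torch.randint(10, (4, 8))   # 4 sequences of 8 tokens
#     ar_mask = torch.zeros_like(seq)
#     ar_mask[:, 3:] = 1                # keep a 3-token prompt, generate the rest
#     masked_inplace_autoregression(
#         model, batch_size, seq, ar_mask, deterministic_synthesis
#     )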
56 def batches(self, split="train"):
59 def vocabulary_size(self):
63 self, n_epoch, model, result_dir, logger, deterministic_synthesis
68 ######################################################################
72 def generate_sequences(self, nb):
75 def seq2str(self, seq):
76 return "[NOT IMPLEMENTED]"
82 class ProblemLevel0(Problem):
83 def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
84 self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
85 self.seq[:, len_prompt] = 10
87 def generate_sequences(self, nb):
88 sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
89 ar_mask = (sequences == 10).long()
90 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
91 return sequences, ar_mask
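# Worked example (added for clarity, not in the original): with the marker
# token 10, a row such as
#     sequences = [ 3,  1,  4, 10,  7,  2]
# gives the indicator  [ 0,  0,  0,  1,  0,  0], and the cumsum trick above
# turns it into
#     ar_mask   = [ 0,  0,  0,  0,  1,  1]
# i.e. only the tokens strictly after the marker are generated; the prompt
# and the marker itself are kept as given.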
94 class ProblemLevel1(Problem):
95 def __init__(self, nb_operators=100, len_source=5, len_result=8):
96 self.len_source = len_source
97 self.len_result = len_result
98 self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
99 self.operators = F.one_hot(
100 torch.rand(nb_operators, len_result, len_source).argmax(-1),
101 num_classes=len_source,
104 def generate_sequences(self, nb):
105 nb_operators = torch.randint(self.operators.size(0), (nb,))
106 operators = self.operators[nb_operators]
108 nb_operators[:, None]
109 // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
111 marker1 = torch.full((nb, 1), 10)
112 # source = torch.randint(10, (nb, self.len_source))
113 source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
114 marker2 = torch.full((nb, 1), 11)
115 result = operators.bmm(source[:, :, None]).squeeze(-1)
116 sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
117 ar_mask = (sequences == 11).long()
118 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
119 return sequences, ar_mask
121 def seq2str(self, seq):
122 return "".join("0123456789|>"[x.item()] for x in seq)
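# Illustration (added, values are hypothetical): with the mapping above,
# token 10 prints as "|" and token 11 as ">", so a ProblemLevel1 sequence
# reads for instance
#     "042|31705>17035713"
# i.e. the operator index in decimal, "|", the source digits, ">", and the
# result digits; the ar_mask built in generate_sequences() selects everything
# after the ">".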
125 class ProblemLevel2(Problem):
126 def __init__(self, len_source=5, len_result=8):
127 self.len_source = len_source
128 self.len_result = len_result
130 def generate_sequences(self, nb):
131 operators = F.one_hot(
132 torch.rand(nb, self.len_result, self.len_source).argmax(-1),
133 num_classes=self.len_source,
135 source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
136 # source1 = torch.randint(10, (nb, self.len_source))
137 marker1 = torch.full((nb, 1), 10)
138 result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
139 marker2 = torch.full((nb, 1), 11)
140 source2 = torch.randint(10, (nb, self.len_source))
141 marker3 = torch.full((nb, 1), 12)
142 result2 = operators.bmm(source2[:, :, None]).squeeze(-1)
144 sequences = torch.cat(
145 (source1, marker1, result1, marker2, source2, marker3, result2), 1
147 ar_mask = (sequences == 12).long()
148 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
149 return sequences, ar_mask
151 def seq2str(self, seq):
152 return "".join("0123456789>|~"[x.item()] for x in seq)
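# Illustration (added, values are hypothetical): here token 10 prints as ">",
# 11 as "|" and 12 as "~", so a ProblemLevel2 sequence reads for instance
#     "31705>17035713|82641~26481628"
# i.e. source1, ">", result1, "|", source2, "~", result2, where the same
# one-hot operator maps source1 to result1 and source2 to result2; only the
# part after "~" is generated.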
158 class ProblemAddition(Problem):
159 def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
160 self.nb_digits = nb_digits
161 self.zero_padded = zero_padded
162 self.inverted_result = inverted_result
163 self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
164 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
166 def tensorize(self, strings):
167 len_max = max([len(x) for x in strings])
172 [self.char2id[c] for c in s + "$" * (len_max - len(s))]
180 def generate_sequences(self, nb):
183 a, b = torch.randint(10**self.nb_digits, (2,))
185 a, b, c = str(a.item()), str(b.item()), str(c.item())
187 a = "0" * (self.nb_digits - len(a)) + a
188 b = "0" * (self.nb_digits - len(b)) + b
189 c = "0" * (self.nb_digits + 1 - len(c)) + c
190 if self.inverted_result:
192 sequences.append(f"{a}+{b}={c}$")
194 sequences = self.tensorize(sequences)
195 ar_mask = (sequences == self.char2id["="]).long()
196 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
197 return sequences, ar_mask
199 def seq2str(self, seq):
200 return "".join(self.id2char[x.item()] for x in seq)
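# Worked example (added, hypothetical settings): with nb_digits=3 and
# zero_padded=True, the pair (12, 345) is encoded as the string
#     "012+345=0357$"
# padded with "$" up to the longest sample, and the ar_mask selects everything
# after the "=", i.e. the result digits and the trailing "$". With
# inverted_result the result digits are presumably written least-significant
# digit first.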
203 # class ProblemUnion(Problem):
204 # problems = [ProblemByheart()]
205 # nb_common_codes = 100
207 # def generate_sequences(nb_samples):
208 # problem_indexes = torch.randint(len(problems), (nb_samples,))
209 # nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
210 # print(f"{nb_samples_per_problem}")
212 # for nb, p in zip(nb_samples_per_problem, problems):
213 # all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
216 # for strain, stest in zip(train_seq, test_seq):
217 # s = torch.cat((strain, stest), 0)
230 device=torch.device("cpu"),
235 self.batch_size = batch_size
237 self.problem = problem
239 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
242 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
246 self.train_input, self.train_ar_mask = self.train_input.to(
248 ), self.train_ar_mask.to(device)
249 self.test_input, self.test_ar_mask = self.test_input.to(
251 ), self.test_ar_mask.to(device)
253 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
255 # A bit of paranoia never hurts
257 self.nb_codes <= max_nb_codes
258 and self.train_input.min() >= 0
259 and self.test_input.min() >= 0
260 and tuple(self.train_ar_mask.unique()) == (0, 1)
261 and tuple(self.test_ar_mask.unique()) == (0, 1)
264 def batches(self, split="train", nb_to_use=-1, desc=None):
265 assert split in {"train", "test"}
266 input = self.train_input if split == "train" else self.test_input
268 input = input[:nb_to_use]
270 desc = f"epoch-{split}"
271 for batch in tqdm.tqdm(
272 input.split(self.batch_size), dynamic_ncols=True, desc=desc
276 def vocabulary_size(self):
280 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
282 def compute_accuracy(input, ar_mask, logger=None):
283 input, ar_mask = input[:nmax], ar_mask[:nmax]
284 result = input.clone() * (1 - ar_mask)
286 masked_inplace_autoregression(
291 deterministic_synthesis,
292 progress_bar_desc=None,
296 if logger is not None:
297 for sp, st in zip(result[:10], input[:10]):
299 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
302 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
305 nb_total = ar_mask.sum().item()
306 nb_correct = ((result == input).long() * ar_mask).sum().item()
308 return nb_total, nb_correct
310 train_nb_total, train_nb_correct = compute_accuracy(
311 self.train_input, self.train_ar_mask
315 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
318 test_nb_total, test_nb_correct = compute_accuracy(
319 self.test_input, self.test_ar_mask, logger
323 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
327 ######################################################################
332 class PicoCLVR(Task):
333 # Make a tensor from a list of strings
334 def tensorize(self, descr):
335 token_descr = [s.strip().split(" ") for s in descr]
336 l = max([len(s) for s in token_descr])
337 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
338 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
339 return torch.tensor(id_descr, device=self.device)
341 # Make a list of strings from a tensor
342 def detensorize(self, x):
343 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim all the tensors in the tuple z, removing as many tokens as
    # possible from the left and the right of the first tensor. If z is a
    # tuple, all its elements are trimmed according to the trimming computed
    # for the first one.
348 def trim(self, z, token="<nul>"):
349 n = self.token2id[token]
352 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
353 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
354 return tuple([t[:, a:b] for t in z])
356 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
357 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
360 ######################
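    # Illustration (added): trim() drops the all-<nul> columns on the left
    # and right of the first tensor. With n the id of "<nul>", a batch such as
    #     [[n, 5, 7, n, n],
    #      [n, 2, n, 9, n]]
    # is cut down to its columns 1..3,
    #     [[5, 7, n],
    #      [2, n, 9]]
    # and, when z is a tuple, every tensor in it is sliced the same way.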
371 device=torch.device("cpu"),
377 def generate_descr(nb, cache_suffix, pruner):
378 return picoclvr.generate(
388 self.batch_size = batch_size
390 self.pruner_train = pruner_train
391 self.pruner_eval = pruner_eval
393 if logger is not None:
395 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
398 self.train_descr = generate_descr(
399 nb_train_samples, "train", pruner=self.pruner_train
401 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
403 # Build the tokenizer
404 tokens = {"<nul>", "<img>"}
405 for d in [self.train_descr, self.test_descr]:
407 for t in s.strip().split(" "):
        # Turn this set into a sorted list so that the same descriptions
        # always yield the same token ids and tensors
        tokens = list(tokens)
        tokens.sort()
413 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
414 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
415 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
417 # Tokenize the train and test sets
418 self.train_input = self.tensorize(self.train_descr)
419 self.test_input = self.tensorize(self.test_descr)
421 def batches(self, split="train"):
422 assert split in {"train", "test"}
423 input = self.train_input if split == "train" else self.test_input
424 for batch in tqdm.tqdm(
425 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
427 yield self.trim(batch)
429 def vocabulary_size(self):
430 return len(self.token2id)
432 def compute_missing_properties(
433 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
435 acc_nb_requested_properties = []
436 acc_nb_missing_properties = []
439 for input in tqdm.tqdm(
440 self.test_input.split(self.batch_size),
            desc="test-properties",
444 result = input.clone()
445 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
446 result = (1 - ar_mask) * result + ar_mask * self.t_nul
447 masked_inplace_autoregression(
452 deterministic_synthesis,
453 progress_bar_desc=None,
457 result_descr = self.detensorize(result)
458 np = picoclvr.nb_properties(
464 nb_requested_properties, _, nb_missing_properties = zip(*np)
465 acc_nb_requested_properties += nb_requested_properties
466 acc_nb_missing_properties += nb_missing_properties
467 acc_nb_results += len(result_descr)
469 nb_requested_properties = sum(acc_nb_requested_properties)
470 nb_missing_properties = sum(acc_nb_missing_properties)
472 prefix = "" if pruner is None else "pruned_"
473 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
475 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
478 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
481 ######################################################################
484 self, n_epoch, model, result_dir, logger, deterministic_synthesis
486 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
488 if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, pruner=self.pruner_eval
            )
491 nb_tokens_to_generate = self.height * self.width + 3
496 for primer_descr in [
497 "red above green <sep> green top <sep> blue right of red",
498 "there is red <sep> there is yellow <sep> there is blue",
499 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
500 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
502 primer += [primer_descr + " <img>"] * nb_per_primer
504 result = self.tensorize(primer)
505 fill = result.new_full(
506 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
508 result = torch.cat((result, fill), 1)
509 ar_mask = (result == self.t_nul).long()
510 masked_inplace_autoregression(
515 deterministic_synthesis,
518 result_descr = self.detensorize(result)
520 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
522 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
523 acc_nb_results = len(result_descr)
525 nb_requested_properties = sum(acc_nb_requested_properties)
526 nb_missing_properties = sum(acc_nb_missing_properties)
529 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
531 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
534 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
537 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
541 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
545 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
551 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
552 torchvision.utils.save_image(
553 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
555 logger(f"wrote {image_name}")
558 ######################################################################
563 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
569 self.batch_size = batch_size
571 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
572 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
573 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
574 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
576 def batches(self, split="train", nb_to_use=-1, desc=None):
577 assert split in {"train", "test"}
578 input = self.train_input if split == "train" else self.test_input
580 input = input[:nb_to_use]
582 desc = f"epoch-{split}"
583 for batch in tqdm.tqdm(
584 input.split(self.batch_size), dynamic_ncols=True, desc=desc
588 def vocabulary_size(self):
592 self, n_epoch, model, result_dir, logger, deterministic_synthesis
594 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
595 ar_mask = torch.full_like(results, 1)
596 masked_inplace_autoregression(
601 deterministic_synthesis,
604 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
605 torchvision.utils.save_image(
606 1 - results.reshape(-1, 1, 28, 28) / 255.0,
611 logger(f"wrote {image_name}")
614 ######################################################################
620 def map2seq(self, *m):
621 return torch.cat([x.flatten(1) for x in m], 1)
623 def seq2map(self, s):
624 s = s.reshape(s.size(0), -1, self.height, self.width)
625 return (s[:, k] for k in range(s.size(1)))
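    # Illustration (added): map2seq() flattens each (height x width) map and
    # concatenates them along the token dimension, and seq2map() undoes it.
    # With height=2, width=3 and two maps m0, m1 of shape (N, 2, 3):
    #     s = self.map2seq(m0, m1)    # shape (N, 12)
    #     r0, r1 = self.seq2map(s)    # each of shape (N, 2, 3), equal to m0, m1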
635 device=torch.device("cpu"),
639 self.batch_size = batch_size
644 train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
651 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
653 test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
660 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
662 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
664 def batches(self, split="train", nb_to_use=-1, desc=None):
665 assert split in {"train", "test"}
666 input = self.train_input if split == "train" else self.test_input
668 input = input[:nb_to_use]
670 desc = f"epoch-{split}"
671 for batch in tqdm.tqdm(
672 input.split(self.batch_size), dynamic_ncols=True, desc=desc
676 def vocabulary_size(self):
680 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
682 nb_total, nb_correct = 0, 0
684 self.width * self.height,
685 self.width * self.height,
690 for input in self.batches(split, nb_to_use):
691 result = input.clone()
692 ar_mask = result.new_zeros(result.size())
693 ar_mask[:, self.height * self.width :] = 1
694 result *= 1 - ar_mask
695 masked_inplace_autoregression(
700 deterministic_synthesis,
701 progress_bar_desc=None,
704 mazes, paths = self.seq2map(result)
705 path_correctness = maze.path_correctness(mazes, paths)
706 nb_correct += path_correctness.long().sum()
707 nb_total += mazes.size(0)
709 optimal_path_lengths = (
710 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
712 predicted_path_lengths = (
713 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
715 optimal_path_lengths = optimal_path_lengths[path_correctness]
716 predicted_path_lengths = predicted_path_lengths[path_correctness]
717 count[optimal_path_lengths, predicted_path_lengths] += 1
723 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
726 return nb_total, nb_correct, count
729 self, n_epoch, model, result_dir, logger, deterministic_synthesis
731 train_nb_total, train_nb_correct, count = self.compute_error(
735 deterministic_synthesis=deterministic_synthesis,
738 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
741 test_nb_total, test_nb_correct, count = self.compute_error(
745 deterministic_synthesis=deterministic_synthesis,
748 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
751 if count is not None:
752 proportion_optimal = count.diagonal().sum().float() / count.sum()
753 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
755 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
757 for i in range(count.size(0)):
758 for j in range(count.size(1)):
759 eol = " " if j < count.size(1) - 1 else "\n"
760 f.write(f"{count[i,j]}{eol}")
762 input = self.test_input[:48]
763 result = input.clone()
764 ar_mask = result.new_zeros(result.size())
765 ar_mask[:, self.height * self.width :] = 1
766 result *= 1 - ar_mask
767 masked_inplace_autoregression(
772 deterministic_synthesis,
776 mazes, paths = self.seq2map(input)
777 _, predicted_paths = self.seq2map(result)
779 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
784 predicted_paths=predicted_paths,
785 path_correct=maze.path_correctness(mazes, predicted_paths),
786 path_optimal=maze.path_optimality(paths, predicted_paths),
788 logger(f"wrote {filename}")
791 ######################################################################
808 device=torch.device("cpu"),
812 self.batch_size = batch_size
816 self.prompt_length = prompt_length
818 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
827 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
837 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
839 def batches(self, split="train", nb_to_use=-1, desc=None):
840 assert split in {"train", "test"}
841 input = self.train_input if split == "train" else self.test_input
843 input = input[:nb_to_use]
845 desc = f"epoch-{split}"
846 for batch in tqdm.tqdm(
847 input.split(self.batch_size), dynamic_ncols=True, desc=desc
851 def vocabulary_size(self):
855 self, n_epoch, model, result_dir, logger, deterministic_synthesis
857 def compute_nb_correct(input, prior_visits):
858 result = input.clone()
859 i = torch.arange(result.size(1), device=result.device)[None, :]
861 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
865 result *= 1 - ar_mask
867 masked_inplace_autoregression(
872 deterministic_synthesis,
876 nb_total = ((prior_visits > 0) * ar_mask).sum()
878 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
880 return nb_total, nb_correct
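        # Note (added): ar_mask above selects the even positions past the
        # 2 * prompt_length prefix, and nb_total / nb_correct further restrict
        # the count to positions with prior_visits > 0, presumably the grid
        # cells the snake has already seen and whose content is therefore
        # predictable from the prefix.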
882 test_nb_total, test_nb_correct = compute_nb_correct(
883 self.test_input[:1000], self.test_prior_visits[:1000]
887 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
891 ######################################################################
907 fraction_values_for_train=None,
908 device=torch.device("cpu"),
912 self.batch_size = batch_size
913 self.nb_steps = nb_steps
914 self.nb_stacks = nb_stacks
915 self.nb_digits = nb_digits
918 if fraction_values_for_train is None:
919 values_for_train = None
920 values_for_test = None
            all_values = torch.randperm(10**nb_digits)
            nb_for_train = int(all_values.size(0) * fraction_values_for_train)
            values_for_train = all_values[:nb_for_train]
            values_for_test = all_values[nb_for_train:]
927 self.train_input, self.train_stack_counts = stack.generate_sequences(
936 self.test_input, self.test_stack_counts = stack.generate_sequences(
945 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
946 counts = self.test_stack_counts.flatten()[i.flatten()]
947 counts = F.one_hot(counts).sum(0)
948 logger(f"test_pop_stack_counts {counts}")
950 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
952 def batches(self, split="train", nb_to_use=-1, desc=None):
953 assert split in {"train", "test"}
954 input = self.train_input if split == "train" else self.test_input
956 input = input[:nb_to_use]
958 desc = f"epoch-{split}"
959 for batch in tqdm.tqdm(
960 input.split(self.batch_size), dynamic_ncols=True, desc=desc
964 def vocabulary_size(self):
968 self, n_epoch, model, result_dir, logger, deterministic_synthesis
970 def compute_nb_correct(input):
971 result = input.clone()
972 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
973 ar_mask = (result != input).long()
974 masked_inplace_autoregression(
979 deterministic_synthesis,
983 errors = ((result != input).long() * ar_mask).reshape(
984 -1, 1 + self.nb_digits
986 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
988 nb_total = ar_mask.max(1).values.sum()
989 nb_correct = nb_total - errors.max(1).values.sum()
991 return nb_total, nb_correct
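        # Note (added): the predictions are scored per value, in groups of
        # 1 + nb_digits tokens (presumably an operation marker followed by the
        # digits of its value). nb_total counts the groups containing at least
        # one masked token, nb_correct those among them predicted without any
        # error, so a popped value only counts as correct if all of its digits
        # are right.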
993 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
996 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
999 ##############################################################
1000 # Log a few generated sequences
1001 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
1002 result = input.clone()
1003 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1004 ar_mask = (result != input).long()
1006 # for n in range(result.size(0)):
1008 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1011 masked_inplace_autoregression(
1016 deterministic_synthesis,
1020 for n in range(result.size(0)):
1022 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1024 ##############################################################
1027 ######################################################################
1033 def tensorize(self, sequences):
1034 len_max = max([len(x) for x in sequences])
1040 self.token2id[str(c)]
1041 for c in s + ["<nul>"] * (len_max - len(s))
1050 def seq2str(self, seq):
1051 return " ".join([self.id2token[i] for i in seq])
1058 nb_starting_values=3,
1063 device=torch.device("cpu"),
1067 self.batch_size = batch_size
1068 self.device = device
1072 nb_starting_values=nb_starting_values,
1073 max_input=max_input,
1077 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1082 nb_starting_values=nb_starting_values,
1083 max_input=max_input,
1087 for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1091 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1093 val_max = max([x if type(x) is int else 0 for x in symbols])
1094 symbols = list(filter(lambda x: type(x) is str, symbols))
1096 symbols += [str(n) for n in range(val_max + 1)]
1097 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1098 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1100 self.t_nul, self.t_prog = self.token2id["<nul>"], self.token2id["<prog>"]
1102 self.train_input = self.tensorize(train_sequences)
1103 self.test_input = self.tensorize(test_sequences)
1105 if logger is not None:
1106 logger(f"value_max {val_max}")
1107 for x in self.train_input[:25]:
1108 end = (x != self.t_nul).nonzero().max().item() + 1
1109 seq = [self.id2token[i.item()] for i in x[:end]]
1111 logger(f"example_seq {s}")
1113 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1115 def batches(self, split="train", nb_to_use=-1, desc=None):
1116 assert split in {"train", "test"}
1117 input = self.train_input if split == "train" else self.test_input
1119 input = input[:nb_to_use]
1121 desc = f"epoch-{split}"
1122 for batch in tqdm.tqdm(
1123 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1125 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1126 batch = batch[:, :last].to(self.device)
1129 def vocabulary_size(self):
1130 return self.nb_codes
1132 def produce_results(
1133 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1135 # --------------------------------------------------------------------
1136 def compute_nb_errors(input, nb_to_log=0):
1137 result = input.clone()
1138 s = (result == self.t_prog).long()
1139 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1140 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1142 masked_inplace_autoregression(
1147 deterministic_synthesis,
1151 sum_nb_total, sum_nb_errors = 0, 0
1152 for x, y in zip(input, result):
1153 seq = [self.id2token[i.item()] for i in y]
1154 nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
1156 sum_nb_errors += 0 if nb_errors == 0 else 1
1158 gt_seq = [self.id2token[i.item()] for i in x]
1159 _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
1160 gt_prog = " ".join([str(x) for x in gt_prog])
1161 prog = " ".join([str(x) for x in prog])
1162 comment = "*" if nb_errors == 0 else "-"
1163 logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
1164 for start_stack, target_stack, result_stack, correct in stacks:
1165 comment = "*" if correct else "-"
1166 start_stack = " ".join([str(x) for x in start_stack])
1167 target_stack = " ".join([str(x) for x in target_stack])
1168 result_stack = " ".join([str(x) for x in result_stack])
1170 f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
1174 return sum_nb_total, sum_nb_errors
1176 # --------------------------------------------------------------------
1178 test_nb_total, test_nb_errors = compute_nb_errors(
1179 self.test_input[:1000].to(self.device), nb_to_log=10
1183 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1187 ######################################################################
1194 def tensorize(self, sequences):
1195 len_max = max([len(x) for x in sequences])
1200 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1217 device=torch.device("cpu"),
1221 self.batch_size = batch_size
1222 self.device = device
1224 train_sequences = expr.generate_sequences(
1226 nb_variables=nb_variables,
1227 length=sequence_length,
1228 operand_max=operand_max,
1229 result_max=result_max,
1232 test_sequences = expr.generate_sequences(
1234 nb_variables=nb_variables,
1235 length=sequence_length,
1236 operand_max=operand_max,
1237 result_max=result_max,
1240 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1243 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1244 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1246 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1248 self.train_input = self.tensorize(train_sequences)
1249 self.test_input = self.tensorize(test_sequences)
1251 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1253 def batches(self, split="train", nb_to_use=-1, desc=None):
1254 assert split in {"train", "test"}
1255 input = self.train_input if split == "train" else self.test_input
1257 input = input[:nb_to_use]
1259 desc = f"epoch-{split}"
1260 for batch in tqdm.tqdm(
1261 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1263 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1264 batch = batch[:, :last]
1267 def vocabulary_size(self):
1268 return self.nb_codes
1270 def seq2str(self, s):
1271 return "".join([self.id2char[k.item()] for k in s])
1273 def produce_results(
1279 deterministic_synthesis,
1282 def compute_nb_correct(input):
1283 result = input.clone()
1284 s = (result == self.space).long()
1285 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1286 result = (1 - ar_mask) * result + ar_mask * self.filler
1287 masked_inplace_autoregression(
1292 deterministic_synthesis,
1296 nb_total = input.size(0)
1297 nb_correct = (input == result).long().min(1).values.sum()
1299 #######################################################################
            # Compute predicted vs. true variable values
1302 nb_delta = torch.zeros(5, dtype=torch.int64)
1305 values_input = expr.extract_results([self.seq2str(s) for s in input])
1306 values_result = expr.extract_results([self.seq2str(s) for s in result])
1308 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1310 with open(filename, "w") as f:
1311 for i, r in zip(values_input, values_result):
1312 for n, vi in i.items():
1314 f.write(f"{vi} {-1 if vr is None else vr}\n")
1316 if vr is None or vr < 0:
1320 if d >= nb_delta.size(0):
1325 ######################################################################
1327 return nb_total, nb_correct, nb_delta, nb_missed
1334 ) = compute_nb_correct(self.test_input[:10000])
1337 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1340 nb_total = test_nb_delta.sum() + test_nb_missed
1341 for d in range(test_nb_delta.size(0)):
1343 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1346 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1349 ##############################################################
1350 # Log a few generated sequences
1351 if input_file is None:
1352 input = self.test_input[:10]
1354 with open(input_file, "r") as f:
1355 sequences = [e.strip() for e in f.readlines()]
1356 sequences = [s + " " + "#" * 50 for s in sequences]
1357 input = self.tensorize(sequences)
1359 result = input.clone()
1360 s = (result == self.space).long()
1361 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1362 result = (1 - ar_mask) * result + ar_mask * self.filler
1364 for n in range(result.size(0)):
1365 logger(f"test_before {self.seq2str(result[n])}")
1367 masked_inplace_autoregression(
1372 deterministic_synthesis,
1376 correct = (1 - ar_mask) * self.space + ar_mask * input
1377 for n in range(result.size(0)):
1378 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1379 logger(f"test_after {self.seq2str(result[n])} {comment}")
1380 logger(f"truth {self.seq2str(correct[n])}")
1381 ##############################################################
1384 ######################################################################
1397 device=torch.device("cpu"),
1398 device_storage=torch.device("cpu"),
1402 self.batch_size = batch_size
1403 self.device = device
1412 ) = world.create_data_and_processors(
1417 nb_epochs=vqae_nb_epochs,
1420 device_storage=device_storage,
1423 train_frame_seq = self.frame2seq(train_frames).to(device_storage)
1424 test_frame_seq = self.frame2seq(test_frames).to(device_storage)
1426 nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
1427 nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
1429 self.len_frame_seq = train_frame_seq.size(1)
1430 self.len_action_seq = train_action_seq.size(1)
1431 self.nb_codes = nb_frame_codes + nb_action_codes
1433 train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
1435 train_action_seq += nb_frame_codes
1436 self.train_input = torch.cat(
1437 (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
1440 test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
1441 test_action_seq += nb_frame_codes
1442 self.test_input = torch.cat(
1443 (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
1446 def batches(self, split="train", nb_to_use=-1, desc=None):
1447 assert split in {"train", "test"}
1448 input = self.train_input if split == "train" else self.test_input
1450 input = input[:nb_to_use]
1452 desc = f"epoch-{split}"
1453 for batch in tqdm.tqdm(
1454 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1456 yield batch.to(self.device)
1458 def vocabulary_size(self):
1459 return self.nb_codes
1461 def produce_results(
1462 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1465 2 * self.len_frame_seq + self.len_action_seq, device=self.device
1468 input = self.test_input[:64].to(self.device)
1469 result = input.clone()
1472 (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
1474 result *= 1 - ar_mask
1476 masked_inplace_autoregression(
1481 deterministic_synthesis,
1485 seq_start = input[:, : self.len_frame_seq]
1486 seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
1487 seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
1490 (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
1492 result = result.reshape(-1, result.size(-1))
1494 frames = self.seq2frame(result)
1495 image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
1496 torchvision.utils.save_image(
1497 frames.float() / (world.Box.nb_rgb_levels - 1),
1503 logger(f"wrote {image_name}")
1506 ######################################################################