5 import torch, torchvision
8 from torch.nn import functional as F
10 ######################################################################
13 def masked_inplace_autoregression(
18 deterministic_synthesis,
19 forbidden_tokens=None,
20 progress_bar_desc="autoregression",
21 device=torch.device("cpu"),
23 assert input.size() == ar_mask.size()
25 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
27 if progress_bar_desc is not None:
31 desc=progress_bar_desc,
32 # total=input.size(0) // batch_size,
35 with torch.no_grad():
39 for input, ar_mask in batches:
40 model.masked_inplace_autoregression(
41 input, ar_mask, forbidden_tokens, deterministic_synthesis
47 ######################################################################
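# A minimal, never-called usage sketch of the helper above. It assumes that the
# elided leading parameters are model, batch_size, input and ar_mask (all used
# by name in the body), and that `model` is any object exposing the
# masked_inplace_autoregression() method invoked in the loop.


def _sketch_masked_inplace_autoregression(model, vocab_size=10):
    prompts = torch.randint(vocab_size, (4, 8))  # four rows of eight given tokens
    ar_mask = torch.zeros(4, 8, dtype=torch.int64)
    ar_mask[:, 4:] = 1  # regenerate only the last four positions of each row
    masked_inplace_autoregression(
        model=model,
        batch_size=2,
        input=prompts,
        ar_mask=ar_mask,
        deterministic_synthesis=True,
        progress_bar_desc=None,
    )
    return prompts  # completed in place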
51 def batches(self, split="train"):
54 def vocabulary_size(self):
58 self, n_epoch, model, result_dir, logger, deterministic_synthesis
63 ######################################################################
67 def generate_sequences(self, nb):
70 def seq2str(self, seq):
71 return "[NOT IMPLEMENTED]"
77 class ProblemLevel0(Problem):
78 def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
79 self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
80 self.seq[:, len_prompt] = 10
82 def generate_sequences(self, nb):
83 sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
84 ar_mask = (sequences == 10).long()
85 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
86 return sequences, ar_mask
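# How the mask is built: everything strictly after the first marker token (10)
# is flagged for generation, while the prompt and the marker itself are kept.
# On a single toy row:
#
#   seq                              [ 3, 1, 4, 10, 2, 7 ]
#   m = (seq == 10).long()           [ 0, 0, 0,  1, 0, 0 ]
#   m.cumsum(-1)                     [ 0, 0, 0,  1, 1, 1 ]
#   (m.cumsum(-1) - m).clamp(max=1)  [ 0, 0, 0,  0, 1, 1 ]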
89 class ProblemLevel1(Problem):
90 def __init__(self, nb_operators=100, len_source=5, len_result=8):
91 self.len_source = len_source
92 self.len_result = len_result
93 self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
94 self.operators = F.one_hot(
95 torch.rand(nb_operators, len_result, len_source).argmax(-1),
96 num_classes=len_source,
99 def generate_sequences(self, nb):
100 nb_operators = torch.randint(self.operators.size(0), (nb,))
101 operators = self.operators[nb_operators]
103 nb_operators[:, None]
104 // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
106 marker1 = torch.full((nb, 1), 10)
107 # source = torch.randint(10, (nb, self.len_source))
108 source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
109 marker2 = torch.full((nb, 1), 11)
110 result = operators.bmm(source[:, :, None]).squeeze(-1)
111 # print(f"{nb_operators.dtype=} {marker1.dtype=}")  # debug leftover
112 sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
113 # print(f"{sequences.size()=}")  # debug leftover
114 ar_mask = (sequences == 11).long()
115 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
116 return sequences, ar_mask
118 def seq2str(self, seq):
119 return "".join("0123456789|>"[x.item()] for x in seq)
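# Layout of a ProblemLevel1 sample, following the concatenation above: the
# operator index written with len_nb_operator decimal digits, the marker 10
# ("|"), the source digits, the marker 11 (">"), then the result. Since each
# row of an operator is one-hot, every result position simply copies one
# source digit, and only the tokens after ">" are regenerated at evaluation
# time.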
122 class ProblemLevel2(Problem):
123 def __init__(self, len_source=5, len_result=8):
124 self.len_source = len_source
125 self.len_result = len_result
127 def generate_sequences(self, nb):
128 operators = F.one_hot(
129 torch.rand(nb, self.len_result, self.len_source).argmax(-1),
130 num_classes=self.len_source,
132 source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
133 # source1 = torch.randint(10, (nb, self.len_source))
134 marker1 = torch.full((nb, 1), 10)
135 result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
136 marker2 = torch.full((nb, 1), 11)
137 source2 = torch.randint(10, (nb, self.len_source))
138 marker3 = torch.full((nb, 1), 12)
139 result2 = operators.bmm(source2[:, :, None]).squeeze(-1)
141 sequences = torch.cat(
142 (source1, marker1, result1, marker2, source2, marker3, result2), 1
144 ar_mask = (sequences == 12).long()
145 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
146 return sequences, ar_mask
148 def seq2str(self, seq):
149 return "".join("0123456789>|~"[x.item()] for x in seq)
155 class ProblemAddition(Problem):
156 def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
157 self.nb_digits = nb_digits
158 self.zero_padded = zero_padded
159 self.inverted_result = inverted_result
160 self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
161 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
163 def tensorize(self, strings):
164 len_max = max([len(x) for x in strings])
169 [self.char2id[c] for c in s + "$" * (len_max - len(s))]
177 def generate_sequences(self, nb):
180 a, b = torch.randint(10**self.nb_digits, (2,))
182 a, b, c = str(a.item()), str(b.item()), str(c.item())
184 a = "0" * (self.nb_digits - len(a)) + a
185 b = "0" * (self.nb_digits - len(b)) + b
186 c = "0" * (self.nb_digits + 1 - len(c)) + c
187 if self.inverted_result:
189 sequences.append(f"{a}+{b}={c}$")
191 sequences = self.tensorize(sequences)
192 ar_mask = (sequences == self.char2id["="]).long()
193 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
194 return sequences, ar_mask
196 def seq2str(self, seq):
197 return "".join(self.id2char[x.item()] for x in seq)
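# With nb_digits=2 and zero_padded set, a sample would read "12+34=046$": the
# operands are padded to nb_digits characters, the result to nb_digits + 1,
# and the mask derived from the position of "=" makes the model generate only
# the result digits and the trailing "$" padding.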
200 # class ProblemUnion(Problem):
201 # problems = [ProblemByheart()]
202 # nb_common_codes = 100
204 # def generate_sequences(nb_samples):
205 # problem_indexes = torch.randint(len(problems), (nb_samples,))
206 # nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
207 # print(f"{nb_samples_per_problem}")
209 # for nb, p in zip(nb_samples_per_problem, problems):
210 # all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
213 # for strain, stest in zip(train_seq, test_seq):
214 # s = torch.cat((strain, stest), 0)
227 device=torch.device("cpu"),
232 self.batch_size = batch_size
234 self.problem = problem
236 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
239 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
243 self.train_input, self.train_ar_mask = self.train_input.to(
245 ), self.train_ar_mask.to(device)
246 self.test_input, self.test_ar_mask = self.test_input.to(
248 ), self.test_ar_mask.to(device)
250 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
252 # A bit of paranoia never hurts
254 self.nb_codes <= max_nb_codes
255 and self.train_input.min() >= 0
256 and self.test_input.min() >= 0
257 and tuple(self.train_ar_mask.unique()) == (0, 1)
258 and tuple(self.test_ar_mask.unique()) == (0, 1)
261 def batches(self, split="train", nb_to_use=-1, desc=None):
262 assert split in {"train", "test"}
263 input = self.train_input if split == "train" else self.test_input
265 input = input[:nb_to_use]
267 desc = f"epoch-{split}"
268 for batch in tqdm.tqdm(
269 input.split(self.batch_size), dynamic_ncols=True, desc=desc
273 def vocabulary_size(self):
277 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
279 def compute_accuracy(input, ar_mask, logger=None):
280 input, ar_mask = input[:nmax], ar_mask[:nmax]
281 result = input.clone() * (1 - ar_mask)
283 masked_inplace_autoregression(
288 deterministic_synthesis,
289 progress_bar_desc=None,
293 if logger is not None:
294 for sp, st in zip(result[:10], input[:10]):
296 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
299 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
302 nb_total = ar_mask.sum().item()
303 nb_correct = ((result == input).long() * ar_mask).sum().item()
305 return nb_total, nb_correct
307 train_nb_total, train_nb_correct = compute_accuracy(
308 self.train_input, self.train_ar_mask
312 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
315 test_nb_total, test_nb_correct = compute_accuracy(
316 self.test_input, self.test_ar_mask, logger
320 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
324 ######################################################################
329 class PicoCLVR(Task):
330 # Make a tensor from a list of strings
331 def tensorize(self, descr):
332 token_descr = [s.strip().split(" ") for s in descr]
333 l = max([len(s) for s in token_descr])
334 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
335 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
336 return torch.tensor(id_descr, device=self.device)
338 # Make a list of strings from a tensor
339 def detensorize(self, x):
340 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
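# tensorize/detensorize are inverses up to padding: a description such as
# "red above green <sep> there is blue <img> ..." is split on spaces, padded
# with <nul> to the longest description of the batch, and mapped through
# token2id; detensorize maps the ids back and rejoins them with spaces.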
342 # Trim the tensors in z, removing on the left and right the columns that are
343 # entirely <nul> in the first tensor. If z is a tuple, all its elements are
344 # trimmed according to the trimming computed for the first one.
345 def trim(self, z, token="<nul>"):
346 n = self.token2id[token]
349 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
350 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
351 return tuple([t[:, a:b] for t in z])
353 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
354 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
357 ######################
368 device=torch.device("cpu"),
374 def generate_descr(nb, cache_suffix, pruner):
375 return picoclvr.generate(
385 self.batch_size = batch_size
387 self.pruner_train = pruner_train
388 self.pruner_eval = pruner_eval
390 if logger is not None:
392 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
395 self.train_descr = generate_descr(
396 nb_train_samples, "train", pruner=self.pruner_train
398 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
400 # Build the tokenizer
401 tokens = {"<nul>", "<img>"}
402 for d in [self.train_descr, self.test_descr]:
404 for t in s.strip().split(" "):
406 # make this set a sorted list to get the same tensors given the same descriptions
408 tokens = list(tokens)
410 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
411 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
412 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
414 # Tokenize the train and test sets
415 self.train_input = self.tensorize(self.train_descr)
416 self.test_input = self.tensorize(self.test_descr)
418 def batches(self, split="train"):
419 assert split in {"train", "test"}
420 input = self.train_input if split == "train" else self.test_input
421 for batch in tqdm.tqdm(
422 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
424 yield self.trim(batch)
426 def vocabulary_size(self):
427 return len(self.token2id)
429 def compute_missing_properties(
430 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
432 acc_nb_requested_properties = []
433 acc_nb_missing_properties = []
436 for input in tqdm.tqdm(
437 self.test_input.split(self.batch_size),
439 desc="test-properties",
441 result = input.clone()
442 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
443 result = (1 - ar_mask) * result + ar_mask * self.t_nul
444 masked_inplace_autoregression(
449 deterministic_synthesis,
450 progress_bar_desc=None,
454 result_descr = self.detensorize(result)
455 np = picoclvr.nb_properties(
461 nb_requested_properties, _, nb_missing_properties = zip(*np)
462 acc_nb_requested_properties += nb_requested_properties
463 acc_nb_missing_properties += nb_missing_properties
464 acc_nb_results += len(result_descr)
466 nb_requested_properties = sum(acc_nb_requested_properties)
467 nb_missing_properties = sum(acc_nb_missing_properties)
469 prefix = "" if pruner is None else "pruned_"
470 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
472 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
475 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
478 ######################################################################
481 self, n_epoch, model, result_dir, logger, deterministic_synthesis
483 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
485 if self.pruner_eval is not None:
486 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis, pruner=self.pruner_eval)
488 nb_tokens_to_generate = self.height * self.width + 3
493 for primer_descr in [
494 "red above green <sep> green top <sep> blue right of red",
495 "there is red <sep> there is yellow <sep> there is blue",
496 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
497 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
499 primer += [primer_descr + " <img>"] * nb_per_primer
501 result = self.tensorize(primer)
502 fill = result.new_full(
503 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
505 result = torch.cat((result, fill), 1)
506 ar_mask = (result == self.t_nul).long()
507 masked_inplace_autoregression(
512 deterministic_synthesis,
515 result_descr = self.detensorize(result)
517 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
519 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
520 acc_nb_results = len(result_descr)
522 nb_requested_properties = sum(acc_nb_requested_properties)
523 nb_missing_properties = sum(acc_nb_missing_properties)
526 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
528 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
531 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
534 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
538 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
542 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
548 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
549 torchvision.utils.save_image(
550 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
552 logger(f"wrote {image_name}")
555 ######################################################################
560 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
564 self.nb_train_samples = nb_train_samples
565 self.nb_test_samples = nb_test_samples
566 self.batch_size = batch_size
568 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
569 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
570 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
571 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
573 def batches(self, split="train", nb_to_use=-1, desc=None):
574 assert split in {"train", "test"}
575 input = self.train_input if split == "train" else self.test_input
577 input = input[:nb_to_use]
579 desc = f"epoch-{split}"
580 for batch in tqdm.tqdm(
581 input.split(self.batch_size), dynamic_ncols=True, desc=desc
585 def vocabulary_size(self):
589 self, n_epoch, model, result_dir, logger, deterministic_synthesis
591 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
592 ar_mask = torch.full_like(results, 1)
593 masked_inplace_autoregression(
598 deterministic_synthesis,
601 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
602 torchvision.utils.save_image(
603 1 - results.reshape(-1, 1, 28, 28) / 255.0,
608 logger(f"wrote {image_name}")
611 ######################################################################
617 def map2seq(self, *m):
618 return torch.cat([x.flatten(1) for x in m], 1)
620 def seq2map(self, s):
621 s = s.reshape(s.size(0), -1, self.height, self.width)
622 return (s[:, k] for k in range(s.size(1)))
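# A sample is the flattened maze map (height * width tokens) immediately
# followed by the flattened path map (another height * width tokens); map2seq
# builds that concatenation and seq2map splits it back into the two grids.
# Only the path half is regenerated at evaluation time (the ar_mask set from
# position height * width onward in compute_error / produce_results below).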
632 device=torch.device("cpu"),
636 self.batch_size = batch_size
641 train_mazes, train_paths, _ = maze.create_maze_data(
646 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
648 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
650 test_mazes, test_paths, _ = maze.create_maze_data(
655 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
657 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
659 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
661 def batches(self, split="train", nb_to_use=-1, desc=None):
662 assert split in {"train", "test"}
663 input = self.train_input if split == "train" else self.test_input
665 input = input[:nb_to_use]
667 desc = f"epoch-{split}"
668 for batch in tqdm.tqdm(
669 input.split(self.batch_size), dynamic_ncols=True, desc=desc
673 def vocabulary_size(self):
677 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
679 nb_total, nb_correct = 0, 0
681 self.width * self.height,
682 self.width * self.height,
687 for input in self.batches(split, nb_to_use):
688 result = input.clone()
689 ar_mask = result.new_zeros(result.size())
690 ar_mask[:, self.height * self.width :] = 1
691 result *= 1 - ar_mask
692 masked_inplace_autoregression(
697 deterministic_synthesis,
698 progress_bar_desc=None,
701 mazes, paths = self.seq2map(result)
702 path_correctness = maze.path_correctness(mazes, paths)
703 nb_correct += path_correctness.long().sum()
704 nb_total += mazes.size(0)
706 optimal_path_lengths = (
707 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
709 predicted_path_lengths = (
710 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
712 optimal_path_lengths = optimal_path_lengths[path_correctness]
713 predicted_path_lengths = predicted_path_lengths[path_correctness]
714 count.index_put_((optimal_path_lengths, predicted_path_lengths), count.new_ones(optimal_path_lengths.size()), accumulate=True)  # accumulate, so repeated (optimal, predicted) pairs are all counted
720 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
723 return nb_total, nb_correct, count
726 self, n_epoch, model, result_dir, logger, deterministic_synthesis
728 train_nb_total, train_nb_correct, count = self.compute_error(
732 deterministic_synthesis=deterministic_synthesis,
735 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
738 test_nb_total, test_nb_correct, count = self.compute_error(
742 deterministic_synthesis=deterministic_synthesis,
745 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
748 if count is not None:
749 proportion_optimal = count.diagonal().sum().float() / count.sum()
750 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
752 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
754 for i in range(count.size(0)):
755 for j in range(count.size(1)):
756 eol = " " if j < count.size(1) - 1 else "\n"
757 f.write(f"{count[i,j]}{eol}")
759 input = self.test_input[:48]
760 result = input.clone()
761 ar_mask = result.new_zeros(result.size())
762 ar_mask[:, self.height * self.width :] = 1
763 result *= 1 - ar_mask
764 masked_inplace_autoregression(
769 deterministic_synthesis,
773 mazes, paths = self.seq2map(input)
774 _, predicted_paths = self.seq2map(result)
776 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
781 predicted_paths=predicted_paths,
782 path_correct=maze.path_correctness(mazes, predicted_paths),
783 path_optimal=maze.path_optimality(paths, predicted_paths),
785 logger(f"wrote {filename}")
788 ######################################################################
805 device=torch.device("cpu"),
809 self.batch_size = batch_size
813 self.prompt_length = prompt_length
815 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
824 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
834 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
836 def batches(self, split="train", nb_to_use=-1, desc=None):
837 assert split in {"train", "test"}
838 input = self.train_input if split == "train" else self.test_input
840 input = input[:nb_to_use]
842 desc = f"epoch-{split}"
843 for batch in tqdm.tqdm(
844 input.split(self.batch_size), dynamic_ncols=True, desc=desc
848 def vocabulary_size(self):
852 self, n_epoch, model, result_dir, logger, deterministic_synthesis
854 def compute_nb_correct(input, prior_visits):
855 result = input.clone()
856 i = torch.arange(result.size(1), device=result.device)[None, :]
858 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
862 result *= 1 - ar_mask
864 masked_inplace_autoregression(
869 deterministic_synthesis,
873 nb_total = ((prior_visits > 0) * ar_mask).sum()
875 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
877 return nb_total, nb_correct
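# Only every other position, from twice the prompt length onward, is
# regenerated, and both nb_total and nb_correct are further restricted to the
# positions whose cell had already been visited (prior_visits > 0), i.e. to
# predictions that can in principle be deduced from the prompt.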
879 test_nb_total, test_nb_correct = compute_nb_correct(
880 self.test_input[:1000], self.test_prior_visits[:1000]
884 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
888 ######################################################################
904 fraction_values_for_train=None,
905 device=torch.device("cpu"),
909 self.batch_size = batch_size
910 self.nb_steps = nb_steps
911 self.nb_stacks = nb_stacks
912 self.nb_digits = nb_digits
915 if fraction_values_for_train is None:
916 values_for_train = None
917 values_for_test = None
919 all_values = torch.randperm(10**nb_digits)
920 nb_for_train = int(all_values.size(0) * fraction_values_for_train)
921 values_for_train = all_values[:nb_for_train]
922 values_for_test = all_values[nb_for_train:]
924 self.train_input, self.train_stack_counts = stack.generate_sequences(
933 self.test_input, self.test_stack_counts = stack.generate_sequences(
942 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
943 counts = self.test_stack_counts.flatten()[i.flatten()]
944 counts = F.one_hot(counts).sum(0)
945 logger(f"test_pop_stack_counts {counts}")
947 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
949 def batches(self, split="train", nb_to_use=-1, desc=None):
950 assert split in {"train", "test"}
951 input = self.train_input if split == "train" else self.test_input
953 input = input[:nb_to_use]
955 desc = f"epoch-{split}"
956 for batch in tqdm.tqdm(
957 input.split(self.batch_size), dynamic_ncols=True, desc=desc
961 def vocabulary_size(self):
965 self, n_epoch, model, result_dir, logger, deterministic_synthesis
967 def compute_nb_correct(input):
968 result = input.clone()
969 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
970 ar_mask = (result != input).long()
971 masked_inplace_autoregression(
976 deterministic_synthesis,
980 errors = ((result != input).long() * ar_mask).reshape(
981 -1, 1 + self.nb_digits
983 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
985 nb_total = ar_mask.max(1).values.sum()
986 nb_correct = nb_total - errors.max(1).values.sum()
988 return nb_total, nb_correct
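# stack.remove_popped_values blanks the popped values in the copy, so
# (result != input) marks exactly the digits to regenerate. Correctness is then
# counted per value rather than per token: masks and errors are reshaped to one
# row per value (its leading marker plus nb_digits digit tokens) and a popped
# value is credited only if none of its digits is wrong, hence the max over
# each row.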
990 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
993 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
996 ##############################################################
997 # Log a few generated sequences
998 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
999 result = input.clone()
1000 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
1001 ar_mask = (result != input).long()
1003 # for n in range(result.size(0)):
1005 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1008 masked_inplace_autoregression(
1013 deterministic_synthesis,
1017 for n in range(result.size(0)):
1019 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
1021 ##############################################################
1024 ######################################################################
1030 def tensorize(self, sequences):
1031 len_max = max([len(x) for x in sequences])
1037 self.token2id[str(c)]
1038 for c in s + ["<nul>"] * (len_max - len(s))
1052 device=torch.device("cpu"),
1056 self.batch_size = batch_size
1057 self.device = device
1061 for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
1064 rpl.generate() for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
1068 set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
1070 val_max = max([x if type(x) is int else 0 for x in symbols])
1071 symbols = list(filter(lambda x: type(x) is str, symbols))
1073 symbols += [str(n) for n in range(val_max + 1)]
1074 # print(f"{val_max=}")  # debug leftover
1075 self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
1076 self.id2token = dict([(n, c) for c, n in self.token2id.items()])
1078 self.t_nul, self.t_prog = self.token2id["<nul>"], self.token2id["<prog>"]
1080 self.train_input = self.tensorize(train_sequences)
1081 self.test_input = self.tensorize(test_sequences)
1083 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1085 def batches(self, split="train", nb_to_use=-1, desc=None):
1086 assert split in {"train", "test"}
1087 input = self.train_input if split == "train" else self.test_input
1089 input = input[:nb_to_use]
1091 desc = f"epoch-{split}"
1092 for batch in tqdm.tqdm(
1093 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1095 last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
1096 batch = batch[:, :last]
1099 def vocabulary_size(self):
1100 return self.nb_codes
1102 def produce_results(
1103 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1105 def compute_nb_errors(input, nb_to_log=0):
1106 result = input.clone()
1107 s = (result == self.t_prog).long()
1108 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1109 result = (1 - ar_mask) * result + ar_mask * self.t_nul
1111 masked_inplace_autoregression(
1116 deterministic_synthesis,
1121 for x in result[:nb_to_log]:
1122 s = " ".join([self.id2token[i.item()] for i in x])
1123 logger(f"check {n_epoch} {s}")
1124 nb_to_log -= min(nb_to_log, result.size(0))
1126 sum_nb_total, sum_nb_errors = 0, 0
1128 seq = [self.id2token[i.item()] for i in x]
1129 nb_total, nb_errors = rpl.check(seq)
1130 sum_nb_total += nb_total
1131 sum_nb_errors += nb_errors
1133 return sum_nb_total, sum_nb_errors
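# Everything strictly after the first <prog> token is blanked and regenerated,
# the generated sequences are detokenized, and rpl.check reports for each one
# how many checks were run and how many failed; the accuracy logged below is
# 1 - errors / total over the whole test set.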
1135 test_nb_total, test_nb_errors = compute_nb_errors(self.test_input, nb_to_log=10)
1138 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
1142 ######################################################################
1149 def tensorize(self, sequences):
1150 len_max = max([len(x) for x in sequences])
1155 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1172 device=torch.device("cpu"),
1176 self.batch_size = batch_size
1177 self.device = device
1179 train_sequences = expr.generate_sequences(
1181 nb_variables=nb_variables,
1182 length=sequence_length,
1183 operand_max=operand_max,
1184 result_max=result_max,
1187 test_sequences = expr.generate_sequences(
1189 nb_variables=nb_variables,
1190 length=sequence_length,
1191 operand_max=operand_max,
1192 result_max=result_max,
1195 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1198 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1199 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1201 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1203 self.train_input = self.tensorize(train_sequences)
1204 self.test_input = self.tensorize(test_sequences)
1206 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1208 def batches(self, split="train", nb_to_use=-1, desc=None):
1209 assert split in {"train", "test"}
1210 input = self.train_input if split == "train" else self.test_input
1212 input = input[:nb_to_use]
1214 desc = f"epoch-{split}"
1215 for batch in tqdm.tqdm(
1216 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1218 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1219 batch = batch[:, :last]
1222 def vocabulary_size(self):
1223 return self.nb_codes
1225 def seq2str(self, s):
1226 return "".join([self.id2char[k.item()] for k in s])
1228 def produce_results(
1234 deterministic_synthesis,
1237 def compute_nb_correct(input):
1238 result = input.clone()
1239 s = (result == self.space).long()
1240 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1241 result = (1 - ar_mask) * result + ar_mask * self.filler
1242 masked_inplace_autoregression(
1247 deterministic_synthesis,
1251 nb_total = input.size(0)
1252 nb_correct = (input == result).long().min(1).values.sum()
1254 #######################################################################
1255 # Compute predicted vs. true variable values
1257 nb_delta = torch.zeros(5, dtype=torch.int64)
1260 values_input = expr.extract_results([self.seq2str(s) for s in input])
1261 values_result = expr.extract_results([self.seq2str(s) for s in result])
1263 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1265 with open(filename, "w") as f:
1266 for i, r in zip(values_input, values_result):
1267 for n, vi in i.items():
1269 f.write(f"{vi} {-1 if vr is None else vr}\n")
1271 if vr is None or vr < 0:
1275 if d >= nb_delta.size(0):
1280 ######################################################################
1282 return nb_total, nb_correct, nb_delta, nb_missed
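# nb_delta histograms |predicted - true| over the variable values parsed back
# from the generated text, and nb_missed counts the variables whose value
# could not be parsed at all; both feed the error_value log lines below.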
1289 ) = compute_nb_correct(self.test_input[:10000])
1292 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1295 nb_total = test_nb_delta.sum() + test_nb_missed
1296 for d in range(test_nb_delta.size(0)):
1298 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1301 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1304 ##############################################################
1305 # Log a few generated sequences
1306 if input_file is None:
1307 input = self.test_input[:10]
1309 with open(input_file, "r") as f:
1310 sequences = [e.strip() for e in f.readlines()]
1311 sequences = [s + " " + "#" * 50 for s in sequences]
1312 input = self.tensorize(sequences)
1314 result = input.clone()
1315 s = (result == self.space).long()
1316 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1317 result = (1 - ar_mask) * result + ar_mask * self.filler
1319 for n in range(result.size(0)):
1320 logger(f"test_before {self.seq2str(result[n])}")
1322 masked_inplace_autoregression(
1327 deterministic_synthesis,
1331 correct = (1 - ar_mask) * self.space + ar_mask * input
1332 for n in range(result.size(0)):
1333 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1334 logger(f"test_after {self.seq2str(result[n])} {comment}")
1335 logger(f"truth {self.seq2str(correct[n])}")
1336 ##############################################################
1339 ######################################################################
1352 device=torch.device("cpu"),
1353 device_storage=torch.device("cpu"),
1357 self.batch_size = batch_size
1358 self.device = device
1367 ) = world.create_data_and_processors(
1372 nb_epochs=vqae_nb_epochs,
1375 device_storage=device_storage,
1378 train_frame_seq = self.frame2seq(train_frames).to(device_storage)
1379 test_frame_seq = self.frame2seq(test_frames).to(device_storage)
1381 nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
1382 nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
1384 self.len_frame_seq = train_frame_seq.size(1)
1385 self.len_action_seq = train_action_seq.size(1)
1386 self.nb_codes = nb_frame_codes + nb_action_codes
1388 train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
1390 train_action_seq += nb_frame_codes
1391 self.train_input = torch.cat(
1392 (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
1395 test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
1396 test_action_seq += nb_frame_codes
1397 self.test_input = torch.cat(
1398 (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
1401 def batches(self, split="train", nb_to_use=-1, desc=None):
1402 assert split in {"train", "test"}
1403 input = self.train_input if split == "train" else self.test_input
1405 input = input[:nb_to_use]
1407 desc = f"epoch-{split}"
1408 for batch in tqdm.tqdm(
1409 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1411 yield batch.to(self.device)
1413 def vocabulary_size(self):
1414 return self.nb_codes
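# Each sample is the tokenized first frame, the action tokens (shifted by
# nb_frame_codes so the two vocabularies do not collide), then the tokenized
# second frame; produce_results below regenerates only that second frame
# (positions from len_frame_seq + len_action_seq onward).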
1416 def produce_results(
1417 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1420 2 * self.len_frame_seq + self.len_action_seq, device=self.device
1423 input = self.test_input[:64].to(self.device)
1424 result = input.clone()
1427 (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
1429 result *= 1 - ar_mask
1431 masked_inplace_autoregression(
1436 deterministic_synthesis,
1440 seq_start = input[:, : self.len_frame_seq]
1441 seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
1442 seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
1445 (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
1447 result = result.reshape(-1, result.size(-1))
1449 frames = self.seq2frame(result)
1450 image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
1451 torchvision.utils.save_image(
1452 frames.float() / (world.Box.nb_rgb_levels - 1),
1458 logger(f"wrote {image_name}")
1461 ######################################################################