import os, tqdm

import torch, torchvision

from torch.nn import functional as F

# Task-specific helper modules from this repository
import picoclvr, maze, snake, stack, expr, world

######################################################################

def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=input.size(0) // batch_size,
        )

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )

        model.train(t)


######################################################################
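
# model.masked_inplace_autoregression is defined with the model itself and is
# not part of this file. A minimal sketch of the behavior assumed above, for
# illustration only: wherever ar_mask is 1, the token is resampled from the
# model's next-token distribution, left to right; every other position is
# left untouched.


def _sketch_masked_inplace_autoregression(model, input, ar_mask, deterministic):
    for s in range(input.size(1)):
        if ar_mask[:, s].any():
            # assumed model output: logits of shape (N, s + 1, vocabulary_size)
            logits = model(input[:, : s + 1])[:, -1]
            if deterministic:
                new_tokens = logits.argmax(-1)
            else:
                new_tokens = torch.distributions.Categorical(logits=logits).sample()
            input[:, s] = torch.where(ar_mask[:, s] == 1, new_tokens, input[:, s])


######################################################################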

class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################

class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        len_max = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (len_max - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
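
    # Round trip (editor's illustration, not in the original): descriptions
    # of unequal length are padded on the right with "<nul>" so they stack
    # into a single tensor, and the padding survives detensorize:
    #
    #   tensorize(["there is red", "blue top"])
    #     -> [[t(there), t(is),  t(red)],
    #         [t(blue),  t(top), t(<nul>)]]
    #   detensorize(...) -> ["there is red", "blue top <nul>"]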

    # Trim all the tensors in the tuple z to remove as many <nul> tokens as
    # possible from the left and the right of the first tensor. If z is a
    # tuple, all its elements are trimmed according to the trimming of the
    # first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
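
    # How the index arithmetic in trim() works (editor's illustration): pad
    # with one all-<nul> column on each side, mark each column with 1 if any
    # row holds a non-<nul> token, and cumulate. With n the id of "<nul>":
    #
    #   x     = [[n, n, 3, 4, n],
    #            [n, 2, 5, n, n]]
    #   marks = [0, 0, 1, 1, 1, 0, 0]      (on the padded, length T+2, axis)
    #   i     = [0, 0, 1, 2, 3, 3, 3]      (cumulative sum)
    #   a = 1 (last i == 0),  b = 4 (first i == i.max())
    #   x[:, 1:4] = [[n, 3, 4],
    #                [2, 5, n]]            only the all-<nul> border is cut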

    ######################

    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        logger=None, pruner_train=None, pruner_eval=None,
        device=torch.device("cpu"),
    ):
        def generate_descr(nb, cache_suffix, pruner):
            # arguments of the generator assumed from the fields used below
            return picoclvr.generate(
                nb, height=self.height, width=self.width, pruner=pruner
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # Make this set a sorted list to get the same tensors given the
        # same descriptions
        tokens = sorted(tokens)
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc="test-properties",
        ):
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
                result_descr, height=self.height, width=self.width, pruner=pruner
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch,
                model,
                logger,
                deterministic_synthesis,
                pruner=self.pruner_eval,
            )

        nb_tokens_to_generate = self.height * self.width + 3
        nb_per_primer = 8  # assumed value, how many samples to draw per primer
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "demo_"  # assumed label for the primed generations
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        if img.dim() == 5:  # assumed: (N, K, C, H, W) when several images per descr
            if img.size(1) == 1:
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
                        for x in img
                    ],
                    0,
                )

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################
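
# Prompt-then-fill is the generation pattern shared by the tasks below: keep
# a tokenized prompt, append a block of filler tokens, and mark exactly the
# positions to synthesize in ar_mask. A self-contained sketch; the token ids
# and sizes are made up for illustration:


def _demo_prompt_then_fill():
    t_nul = 0  # assumed filler token id
    prompt = torch.tensor([[5, 7, 9], [5, 8, 9]])  # two tokenized prompts
    fill = prompt.new_full((prompt.size(0), 4), t_nul)  # room for 4 new tokens
    result = torch.cat((prompt, fill), 1)
    ar_mask = (result == t_nul).long()  # 1 exactly where tokens are generated
    # masked_inplace_autoregression(model, batch_size, result, ar_mask, ...)
    return result, ar_mask


######################################################################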

class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device

        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return 256  # one token per possible pixel value

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,  # inverted: dark digits on white
            image_name,
        )
        logger(f"wrote {image_name}")


######################################################################
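
# For unconditional sampling there is no prompt at all: every position is
# masked, so the whole sequence is synthesized token by token and the initial
# (uninitialized) values never matter. Sketch of the setup used above:


def _demo_unconditional_setup(device=torch.device("cpu")):
    results = torch.empty(64, 28 * 28, device=device, dtype=torch.int64)
    ar_mask = torch.full_like(results, 1)  # generate all 784 pixel tokens
    return results, ar_mask


######################################################################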

class Maze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
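
    # Round trip (editor's illustration): with height = width = 2, map2seq
    # turns two (N, 2, 2) maps into one (N, 8) sequence, maze cells first and
    # path cells after, and seq2map slices and reshapes them back:
    #
    #   seq = map2seq(mazes, paths)     # (N, 8)
    #   mazes, paths = seq2map(seq)     # two (N, 2, 2) tensors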

    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        nb_walls, device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            # index_put_ with accumulate=True handles repeated
            # (optimal, predicted) pairs within a batch, which plain
            # advanced-indexing += would silently collapse
            count.index_put_(
                (optimal_path_lengths, predicted_path_lengths),
                torch.ones_like(optimal_path_lengths),
                accumulate=True,
            )

        if count.max() == 0:
            count = None  # no correct path at all: nothing to histogram
        else:
            count = count[
                : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
            ]

        return nb_total, nb_correct, count
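
    # count is a histogram over path lengths restricted to correctly solved
    # mazes: count[i, j] is the number of mazes whose optimal path uses i
    # path cells while the predicted (valid) path uses j. The diagonal holds
    # the optimal predictions; e.g. count[5, 7] == 3 means three mazes with
    # an optimal length of 5 were solved with valid but longer length-7
    # paths. The final slicing merely drops all-zero rows and columns.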

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            model,
            "train",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            model,
            "test",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
        # signature of the helper assumed from the keyword arguments below
        maze.save_image(
            filename,
            mazes=mazes,
            target_paths=paths,
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        )
        logger(f"wrote {filename}")


######################################################################

class Snake(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        nb_colors, length, prompt_length, device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device
        self.prompt_length = prompt_length

        # arguments of the generator assumed from the constructor parameters
        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
            nb_train_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
            nb_test_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
            ar_mask = (
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                .long()
                .expand_as(result)
            )
            result *= 1 - ar_mask

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = ((prior_visits > 0) * ar_mask).sum()
            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct
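
        # What the mask selects (editor's illustration): positions before
        # 2 * prompt_length are kept as the prompt; past it, every even
        # position is regenerated, i.e. one of the two interleaved token
        # streams (presumably the direction tokens):
        #
        #   i       : 0 1 2 3 ... 2p 2p+1 2p+2 2p+3 ...
        #   ar_mask : 0 0 0 0 ...  1    0    1    0 ...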

        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################

class Stack(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, logger,
        nb_steps, nb_stacks, nb_digits,
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits
        self.device = device

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all_values = torch.randperm(10**nb_digits)
            nb_for_train = int(all_values.size(0) * fraction_values_for_train)
            values_for_train = all_values[:nb_for_train]
            values_for_test = all_values[nb_for_train:]

        # arguments of the generator assumed from the constructor parameters
        self.train_input, self.train_stack_counts = stack.generate_sequences(
            nb_train_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_train,
            self.device,
        )

        self.test_input, self.test_stack_counts = stack.generate_sequences(
            nb_test_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_test,
            self.device,
        )

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct
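
        # Why the reshape (editor's note): each value occupies one marker
        # token plus nb_digits digit tokens, so grouping the flat masks into
        # rows of (1 + nb_digits) makes ar_mask.max(1) a per-value "was it
        # queried?" flag and errors.max(1) a per-value "was any of its tokens
        # wrong?" flag; accuracy is thus counted per popped value, not per
        # token.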

        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################


######################################################################

class Expr(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                for s in sequences
            ],
            device=self.device,
        )

    def __init__(
        self, nb_train_samples, nb_test_samples, nb_variables, sequence_length,
        operand_max, result_max, batch_size, device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
        symbols.sort()  # sorted so that runs generate identical tensors

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
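
    # The tokenizer is character-level (editor's illustration): every
    # distinct character of the corpus, plus the filler "#", gets an id, and
    # tensorize pads each string with "#" up to the longest one:
    #
    #   tensorize(["a=1", "bb=12"])
    #     -> [[c(a), c(=), c(1), c(#), c(#)],
    #         [c(b), c(b), c(=), c(1), c(2)]]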

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            # Cut the batch at the last column holding a non-filler token,
            # with a small margin, so that short batches cost less compute
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])

    def produce_results(
        self,
        n_epoch,
        model,
        result_dir,
        logger,
        deterministic_synthesis,
        input_file=None,
    ):
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed

        (
            test_nb_total,
            test_nb_correct,
            test_nb_delta,
            test_nb_missed,
        ) = compute_nb_correct(self.test_input[:10000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################


######################################################################
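
# The Expr masks above all use one cumulative-sum idiom: keep everything up
# to and including the first space, regenerate everything strictly after it.
# A self-contained check; the token ids are made up for illustration:


def _demo_mask_after_first_space():
    space = 9  # assumed id of " "
    x = torch.tensor([[4, 5, space, 6, 7, space, 8]])
    s = (x == space).long()
    ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
    # s       = [[0, 0, 1, 0, 0, 1, 0]]
    # cumsum  = [[0, 0, 1, 1, 1, 2, 2]]
    # ar_mask = [[0, 0, 0, 1, 1, 1, 1]]   1 strictly after the first space
    return ar_mask


######################################################################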

class World(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.batch_size = batch_size
        self.device = device

        # arguments and further outputs of the helper assumed from usage
        (
            self.train_input,
            self.test_input,
        ) = world.create_data_and_processors(
            nb_train_samples,
            nb_test_samples,
            device=device,
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################