# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import os, tqdm

import torch, torchvision

from torch.nn import functional as F

from mygpt import BracketedSequence

try:
    from graph import save_attention_image
except ImportError:
    save_attention_image = None

######################################################################
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )


######################################################################
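# The per-batch call above delegates to model.masked_inplace_autoregression(),
# defined in mygpt. As an illustration only, the sketch below shows the general
# idea of masked in-place autoregression with an assumed callable
# next_token_logits(x, t) (not part of this code base) returning the logits of
# the token at position t given the sequence x: positions where ar_mask is 1
# are filled left to right, the others are left untouched.
def sketch_masked_inplace_autoregression(
    next_token_logits, input, ar_mask, deterministic_synthesis=False
):
    for t in range(input.size(1)):
        logits = next_token_logits(input, t)  # (N, vocabulary_size), assumed interface
        if deterministic_synthesis:
            tokens = logits.argmax(dim=-1)
        else:
            tokens = torch.distributions.Categorical(logits=logits).sample()
        # overwrite only the masked positions, in place
        input[:, t] = ar_mask[:, t] * tokens + (1 - ar_mask[:, t]) * input[:, t]


######################################################################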
class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.problem = problem

        self.train_input, self.train_ar_mask = self.problem.generate_sequences(
            nb_train_samples
        )
        self.test_input, self.test_ar_mask = self.problem.generate_sequences(
            nb_test_samples
        )

        self.train_input, self.train_ar_mask = self.train_input.to(
            device
        ), self.train_ar_mask.to(device)
        self.test_input, self.test_ar_mask = self.test_input.to(
            device
        ), self.test_ar_mask.to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

        # A bit of paranoia never hurts
        assert (
            self.nb_codes <= max_nb_codes
            and self.train_input.min() >= 0
            and self.test_input.min() >= 0
            and tuple(self.train_ar_mask.unique()) == (0, 1)
            and tuple(self.test_ar_mask.unique()) == (0, 1)
        )
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, ar_mask, logger=None):
            input, ar_mask = input[:nmax], ar_mask[:nmax]
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
            )

            if logger is not None:
                for sp, st in zip(result[:10], input[:10]):
                    logger(
                        f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
                    )
                    logger(
                        f" {n_epoch} ground truth {self.problem.seq2str(st)}"
                    )

            nb_total = ar_mask.sum().item()
            nb_correct = ((result == input).long() * ar_mask).sum().item()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(
            self.train_input, self.train_ar_mask
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(
            self.test_input, self.test_ar_mask, logger
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################
import picoclvr


class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
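    # Round-trip illustration (not executed here, with a hypothetical
    # vocabulary {"<nul>": 0, "red": 1, "above": 2, "green": 3}):
    # tensorize(["red above green", "red"]) pads the second description with
    # "<nul>" and returns [[1, 2, 3], [1, 0, 0]]; detensorize() maps the ids
    # back and returns ["red above green", "red <nul> <nul>"].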
    # Trim all the tensors in the tuple z to remove as many tokens as
    # possible from the left and the right, based on the first tensor. If z
    # is a tuple, all its elements are trimmed according to the trimming
    # computed for the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
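    # Worked micro-example (illustration only, assuming "<nul>" has id 0): for
    # a batch [[0, 0, 5, 7, 0], [0, 3, 4, 0, 0]], the columns that contain
    # only 0 in every row are the first and the last one, so trim() returns
    # [[0, 5, 7], [3, 4, 0]]. Columns that are only partially padded are kept.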
    ######################

        device=torch.device("cpu"),
    ):
        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(

        self.batch_size = batch_size
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given the
        # same descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc=f"test-properties",
        ):
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
            )

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
                result_descr, height=self.height, width=self.width, pruner=pruner
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )
        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################
class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device

        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
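        # Each image is flattened into a sequence of 28 * 28 = 784 tokens,
        # one per pixel, with the raw grey levels 0-255 used directly as the
        # token values, so the model sees an MNIST digit as a plain
        # left-to-right, top-to-bottom sequence.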
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return 256

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
        )
        logger(f"wrote {image_name}")


######################################################################
import maze


class Maze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
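    # map2seq() flattens each (height, width) map into height * width tokens
    # and concatenates them along the sequence dimension; seq2map() is the
    # inverse and yields the individual maps back. For this task a sequence
    # is the concatenation of a maze map and a path map.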
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        count = count[
            : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
        ]

        return nb_total, nb_correct, count
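    # count is a 2D histogram restricted to the samples whose predicted path
    # is valid: count[i, j] is the number of mazes whose optimal path uses i
    # cells while the predicted path uses j cells, so the diagonal corresponds
    # to predictions that are also optimal.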
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            model,
            "train",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            model,
            "test",
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
        maze.save_image(
            filename,
            mazes=mazes,
            target_paths=paths,
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        )
        logger(f"wrote {filename}")


######################################################################
import snake


class Snake(Task):
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
            ar_mask = (
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                .long()
                .expand_as(result)
            )
            result *= 1 - ar_mask

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            nb_total = ((prior_visits > 0) * ar_mask).sum()
            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct
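        # The mask above selects every other position starting right after
        # the prompt (with prompt_length = 3, positions 6, 8, 10, ...), i.e.
        # one of the two interleaved token streams. Only masked positions
        # with prior_visits > 0 are scored, presumably the grid cells the
        # snake has already visited and whose content is thus predictable.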
        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################
import stack


class Stack(Task):
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]

        self.train_input, self.train_stack_counts = stack.generate_sequences(
        self.test_input, self.test_stack_counts = stack.generate_sequences(

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct
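        # A value in these sequences spans 1 + self.nb_digits consecutive
        # tokens, hence the reshape into rows of that width: a predicted
        # value counts as wrong as soon as any of its tokens differs from the
        # ground truth, and nb_total only counts the rows that were masked
        # (i.e. the popped values the model had to reconstruct).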
        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################


######################################################################
import rpl


class RPL(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [
                    self.token2id[str(c)]
                    for c in s + ["<nul>"] * (len_max - len(s))
                ]
                for s in sequences
            ]
        )

    def seq2str(self, seq):
        return " ".join([self.id2token[i] for i in seq])

        nb_starting_values=3,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device
        self.no_prog = no_prog

            nb_starting_values=nb_starting_values,
            nb_result_values_max=4 * nb_starting_values,
            for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")

            nb_starting_values=nb_starting_values,
            nb_result_values_max=4 * nb_starting_values,
            for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")

        symbols = list(
            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
        )
        val_max = max([x if type(x) is int else 0 for x in symbols])
        symbols = list(filter(lambda x: type(x) is str, symbols))
        symbols += [str(n) for n in range(val_max + 1)]
        self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])

        self.t_nul = self.token2id["<nul>"]
        self.t_input = self.token2id["<in>"]
        self.t_output = self.token2id["<out>"]
        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        if no_prog:
            # Excise the program from every train and test example
            k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
                None, :
            ]
            p = (
                ((self.train_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
                .values
            )
            self.train_input = (
                self.train_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
            )
            k = torch.arange(self.test_input.size(1), device=self.test_input.device)[
                None, :
            ]
            p = (
                ((self.test_input == self.t_prog).long() * k)
                .max(1, keepdim=True)
                .values
            )
            self.test_input = (
                self.test_input * (k <= p).long()
                + self.t_end * (k == p + 1).long()
                + self.t_nul * (k > p + 1).long()
            )
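            # Schematic example of the excision above: k holds the column
            # indices and p the position of the <prg> token in each row, so
            # for a hypothetical row [<in> 3 7 <out> 9 <prg> ...program...]
            # everything up to and including <prg> is kept, the next position
            # is overwritten with <end>, and the rest is filled with <nul>.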
        if logger is not None:
            logger(f"value_max {val_max}")
            for x in self.train_input[:25]:
                end = (x != self.t_nul).nonzero().max().item() + 1
                seq = [self.id2token[i.item()] for i in x[:end]]
                s = " ".join(seq)
                logger(f"example_seq {s}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
            batch = batch[:, :last].to(self.device)
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        # --------------------------------------------------------------------
        def compute_nb_errors_prog(input, nb_to_log=0):
            result = input.clone()
            s = (result == self.t_prog).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
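            # The cumsum trick above builds the autoregression mask: s is 1
            # exactly at the <prg> tokens, so s.cumsum(dim=1) - s is 0 up to
            # and including the first <prg> and 1 strictly after it. E.g. for
            # s = [0, 0, 1, 0, 0] the mask is [0, 0, 0, 1, 1]: the prompt is
            # kept and everything after <prg> is blanked out and regenerated.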
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result in zip(input, result):
                seq = [self.id2token[i.item()] for i in one_result]
                nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
                sum_nb_total += 1
                sum_nb_errors += 0 if nb_errors == 0 else 1
                if nb_to_log > 0:
                    gt_seq = [self.id2token[i.item()] for i in one_input]
                    _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                    gt_prog = " ".join([str(x) for x in gt_prog])
                    prog = " ".join([str(x) for x in prog])
                    comment = "*" if nb_errors == 0 else "-"
                    logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
                    for start_stack, target_stack, result_stack, correct in stacks:
                        comment = "*" if correct else "-"
                        start_stack = " ".join([str(x) for x in start_stack])
                        target_stack = " ".join([str(x) for x in target_stack])
                        result_stack = " ".join([str(x) for x in result_stack])
                        logger(
                            f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
                        )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------
        def compute_nb_errors_output(input, nb_to_log=0):
            result = input.clone()
            k = torch.arange(result.size(1), device=result.device)[None, :]
            last_output_idx = (
                ((result == self.t_output) * k).max(dim=1, keepdim=True).values
            )
            first_prog_idx = (
                ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
            )
            ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
            result = (1 - ar_mask) * result + ar_mask * self.t_nul

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            sum_nb_total, sum_nb_errors = 0, 0
            for one_input, one_result, i, j in zip(
                input, result, last_output_idx, first_prog_idx
            ):
                seq = [self.id2token[i.item()] for i in one_result]
                sum_nb_total += 1
                correct = (one_input - one_result).abs().max() == 0
                sum_nb_errors += 0 if correct else 1
                if nb_to_log > 0:
                    result_stack = [
                        self.id2token[i.item()] for i in one_result[i : j + 1]
                    ]
                    target_stack = [
                        self.id2token[i.item()] for i in one_input[i : j + 1]
                    ]
                    comment = "*" if correct else "-"
                    result_stack = " ".join([str(x) for x in result_stack])
                    target_stack = " ".join([str(x) for x in target_stack])
                    logger(
                        f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
                    )
                    nb_to_log -= 1

            return sum_nb_total, sum_nb_errors
        # --------------------------------------------------------------------

        if not self.no_prog:
            test_nb_total, test_nb_errors = compute_nb_errors_prog(
                self.test_input[:1000].to(self.device), nb_to_log=10
            )
            logger(
                f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
            )

        test_nb_total, test_nb_errors = compute_nb_errors_output(
            self.test_input[:1000].to(self.device), nb_to_log=10
        )
        logger(
            f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
        )

        if save_attention_image is not None:
            ns = torch.randint(self.test_input.size(0), (1,)).item()
            input = self.test_input[ns : ns + 1].clone()
            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
            input = input[:, :last].to(self.device)

            with torch.autograd.no_grad():
                model.record_attention(True)
                model(BracketedSequence(input))
                ram = model.retrieve_attention()
                model.record_attention(False)

            tokens_output = [self.id2token[i.item()] for i in input[0]]
            tokens_input = ["n/a"] + tokens_output[:-1]
            for n_head in range(ram[0].size(1)):
                filename = os.path.join(
                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
                )
                attention_matrices = [m[0, n_head] for m in ram]
                save_attention_image(
                    # min_total_attention=0.9,
                )
                logger(f"wrote {filename}")


######################################################################
import expr


class Expr(Task):
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                for s in sequences
            ]
        )

        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])
    def produce_results(
        self,
        n_epoch,
        model,
        result_dir,
        logger,
        deterministic_synthesis,
        input_file=None,
    ):
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
            )

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed
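        # nb_delta is a small histogram of |predicted - true| over the
        # variable values that could be recovered from the generated text
        # (nb_delta[0] counting exact matches), while nb_missed counts the
        # values that were absent, negative, or off by more than the
        # histogram can hold.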
        (
            test_nb_total,
            test_nb_correct,
            test_nb_delta,
            test_nb_missed,
        ) = compute_nb_correct(self.test_input[:10000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################


######################################################################
import world


class World(Task):
        device=torch.device("cpu"),
        device_storage=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        (
            train_frames,
            train_action_seq,
            test_frames,
            test_action_seq,
            self.frame2seq,
            self.seq2frame,
        ) = world.create_data_and_processors(
            nb_epochs=vqae_nb_epochs,
            device_storage=device_storage,
        )

        train_frame_seq = self.frame2seq(train_frames).to(device_storage)
        test_frame_seq = self.frame2seq(test_frames).to(device_storage)

        nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
        nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1

        self.len_frame_seq = train_frame_seq.size(1)
        self.len_action_seq = train_action_seq.size(1)
        self.nb_codes = nb_frame_codes + nb_action_codes
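        # Each example is built below as the concatenation of the tokens of a
        # starting frame, the action tokens (shifted by nb_frame_codes so the
        # two vocabularies do not collide), and the tokens of the resulting
        # frame, which is the part the model will be asked to generate.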
        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
        train_action_seq += nb_frame_codes
        self.train_input = torch.cat(
            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
        )

        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
        test_action_seq += nb_frame_codes
        self.test_input = torch.cat(
            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
        )

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch.to(self.device)

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        k = torch.arange(
            2 * self.len_frame_seq + self.len_action_seq, device=self.device
        )[None, :]

        input = self.test_input[:64].to(self.device)
        result = input.clone()

        ar_mask = (
            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
        )
        result *= 1 - ar_mask

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
        )

        seq_start = input[:, : self.len_frame_seq]
        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]

        result = torch.cat(
            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
        )
        result = result.reshape(-1, result.size(-1))

        frames = self.seq2frame(result)
        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            frames.float() / (world.Box.nb_rgb_levels - 1),
            image_name,
        )
        logger(f"wrote {image_name}")


######################################################################