5 import torch, torchvision
8 from torch.nn import functional as F
10 ######################################################################
13 def masked_inplace_autoregression(
18 deterministic_synthesis,
19 forbidden_tokens=None,
20 progress_bar_desc="autoregression",
21 device=torch.device("cpu"),
23 assert input.size() == ar_mask.size()
25 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
27 if progress_bar_desc is not None:
31 desc=progress_bar_desc,
32 # total=input.size(0) // batch_size,
35 with torch.autograd.no_grad():
39 for input, ar_mask in batches:
40 model.masked_inplace_autoregression(
41 input, ar_mask, forbidden_tokens, deterministic_synthesis
47 ######################################################################
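# The ar_mask convention used throughout this file: a tensor with the same
# shape as the input, holding 1 at the positions the model must generate
# autoregressively and 0 at the positions that are given. A minimal usage
# sketch (hypothetical task/model objects, and assuming the parameters
# elided above are model, batch_size, input, ar_mask):
#
#   input = task.test_input[:32].clone()
#   ar_mask = task.test_ar_mask[:32]
#   input *= 1 - ar_mask          # blank out the positions to predict
#   masked_inplace_autoregression(
#       model, 25, input, ar_mask, deterministic_synthesis=True,
#   )
#   # input now holds the model's completions at the masked positions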
51 def batches(self, split="train"):
54 def vocabulary_size(self):
58 self, n_epoch, model, result_dir, logger, deterministic_synthesis
63 ######################################################################
67 def generate_sequences(self, nb):
70 def seq2str(self, seq):
71 return "[NOT IMPLEMENTED]"
77 class ProblemLevel0(Problem):
78 def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
79 self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
80 self.seq[:, len_prompt] = 10
82 def generate_sequences(self, nb):
83 sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
84 ar_mask = (sequences == 10).long()
85 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
86 return sequences, ar_mask
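# A worked example of the mask idiom above, with token 10 as the
# prompt/result separator:
#
#   seq                        ->  [ 3, 7, 1, 10, 4, 2 ]
#   m = (seq == 10)            ->  [ 0, 0, 0,  1, 0, 0 ]
#   m.cumsum(1)                ->  [ 0, 0, 0,  1, 1, 1 ]
#   (cumsum - m).clamp(max=1)  ->  [ 0, 0, 0,  0, 1, 1 ]
#
# i.e. ar_mask selects everything strictly after the first separator,
# which is exactly the part the model has to generate.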
89 class ProblemLevel1(Problem):
90 def __init__(self, nb_operators=100, len_prompt=5, len_result=8):
91 self.len_prompt = len_prompt
92 self.len_result = len_result
93 self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
94 self.operators = F.one_hot(
95 torch.rand(nb_operators, len_result, len_prompt).argmax(-1),
96 num_classes=len_prompt,
99 def generate_sequences(self, nb):
100 a = self.len_nb_operator
101 b = a + 1 + self.len_prompt
102 sequences = torch.empty(nb, b + 1 + self.len_result, dtype=torch.int64)
103 nb_operators = torch.randint(self.operators.size(0), (nb,))
104 sequences[:, :a] = (nb_operators[:, None] // 10 ** torch.arange(a)) % 10
106 sequences[:, a + 1 : b] = torch.randint(10, (nb, b - a - 1))
109 o = self.operators[nb_operators]
110 p = sequences[:, a + 1 : b]
112 sequences[:, b + 1 :] = o.bmm(p[:, :, None]).squeeze(-1)
113 ar_mask = (sequences == 11).long()
114 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
115 return sequences, ar_mask
117 def seq2str(self, seq):
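# NOTE: relies on a token -> character table self.id2char, which is not
# defined in this class as written.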
118 return "".join(self.id2char[x.item()] for x in seq)
124 class ProblemAddition(Problem):
125 def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
126 self.nb_digits = nb_digits
127 self.zero_padded = zero_padded
128 self.inverted_result = inverted_result
129 self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
130 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
132 def tensorize(self, strings):
133 len_max = max([len(x) for x in strings])
138 [self.char2id[c] for c in s + "$" * (len_max - len(s))]
146 def generate_sequences(self, nb):
149 a, b = torch.randint(10**self.nb_digits, (2,))
151 a, b, c = str(a.item()), str(b.item()), str(c.item())
153 a = "0" * (self.nb_digits - len(a)) + a
154 b = "0" * (self.nb_digits - len(b)) + b
155 c = "0" * (self.nb_digits + 1 - len(c)) + c
156 if self.inverted_result:
158 sequences.append(f"{a}+{b}={c}$")
160 sequences = self.tensorize(sequences)
161 ar_mask = (sequences == self.char2id["="]).long()
162 ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
163 return sequences, ar_mask
165 def seq2str(self, seq):
166 return "".join(self.id2char[x.item()] for x in seq)
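# A hedged example of what generate_sequences produces, with nb_digits=3,
# zero_padded=True and inverted_result=False, and assuming the elided line
# computes c = a + b:
#
#   "042+517=0559$"
#
# ar_mask is then 1 for every character strictly after the '=', so only the
# result digits (and the trailing '$' padding) are generated by the model.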
169 # class ProblemUnion(Problem):
170 # problems = [ProblemByheart()]
171 # nb_common_codes = 100
173 # def generate_sequences(nb_samples):
174 # problem_indexes = torch.randint(len(problems), (nb_samples,))
175 # nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
176 # print(f"{nb_samples_per_problem}")
178 # for nb, p in zip(nb_samples_per_problem, problems):
179 # all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
182 # for strain, stest in zip(train_seq, test_seq):
183 # s = torch.cat((strain, stest), 0)
196 device=torch.device("cpu"),
201 self.batch_size = batch_size
203 self.problem = problem
205 self.train_input, self.train_ar_mask = self.problem.generate_sequences(
208 self.test_input, self.test_ar_mask = self.problem.generate_sequences(
212 self.train_input, self.train_ar_mask = self.train_input.to(
214 ), self.train_ar_mask.to(device)
215 self.test_input, self.test_ar_mask = self.test_input.to(
217 ), self.test_ar_mask.to(device)
219 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
221 # A bit of paranoia never hurts
223 self.nb_codes <= max_nb_codes
224 and self.train_input.min() >= 0
225 and self.test_input.min() >= 0
226 and tuple(self.train_ar_mask.unique()) == (0, 1)
227 and tuple(self.test_ar_mask.unique()) == (0, 1)
230 def batches(self, split="train", nb_to_use=-1, desc=None):
231 assert split in {"train", "test"}
232 input = self.train_input if split == "train" else self.test_input
234 input = input[:nb_to_use]
236 desc = f"epoch-{split}"
237 for batch in tqdm.tqdm(
238 input.split(self.batch_size), dynamic_ncols=True, desc=desc
242 def vocabulary_size(self):
246 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
248 def compute_accuracy(input, ar_mask, logger=None):
249 input, ar_mask = input[:nmax], ar_mask[:nmax]
250 result = input.clone() * (1 - ar_mask)
252 masked_inplace_autoregression(
257 deterministic_synthesis,
258 progress_bar_desc=None,
262 if logger is not None:
263 for sp, st in zip(result[:10], input[:10]):
265 f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
268 f" {n_epoch} ground truth {self.problem.seq2str(st)}"
271 nb_total = ar_mask.sum().item()
272 nb_correct = ((result == input).long() * ar_mask).sum().item()
274 return nb_total, nb_correct
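# nb_total counts the masked positions and nb_correct those whose
# regenerated token matches the ground truth, e.g. with
# ar_mask=[0,0,1,1], input=[5,2,7,9] and result=[5,2,7,3] this gives
# nb_total=2 and nb_correct=1.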
276 train_nb_total, train_nb_correct = compute_accuracy(
277 self.train_input, self.train_ar_mask
281 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
284 test_nb_total, test_nb_correct = compute_accuracy(
285 self.test_input, self.test_ar_mask, logger
289 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
293 ######################################################################
298 class PicoCLVR(Task):
299 # Make a tensor from a list of strings
300 def tensorize(self, descr):
301 token_descr = [s.strip().split(" ") for s in descr]
302 l = max([len(s) for s in token_descr])
303 token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
304 id_descr = [[self.token2id[u] for u in s] for s in token_descr]
305 return torch.tensor(id_descr, device=self.device)
307 # Make a list of strings from a tensor
308 def detensorize(self, x):
309 return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
311 # Trim z to remove as many padding tokens as possible on the left and on
312 # the right of the first tensor. If z is a tuple, all its elements are
313 # trimmed according to the trimming computed for the first one.
314 def trim(self, z, token="<nul>"):
315 n = self.token2id[token]
318 i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
319 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
320 return tuple([t[:, a:b] for t in z])
322 i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
323 a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
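# How the index arithmetic works: after padding one <nul> column on each
# side, a column scores 1 iff at least one row holds a non-<nul> token.
# The cumsum is still 0 over the leading all-<nul> columns and first
# reaches its maximum at the last informative column, so a:b spans exactly
# the informative columns (the extra pad column compensates the off-by-one
# when slicing the unpadded tensors).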
326 ######################
337 device=torch.device("cpu"),
343 def generate_descr(nb, cache_suffix, pruner):
344 return picoclvr.generate(
354 self.batch_size = batch_size
356 self.pruner_train = pruner_train
357 self.pruner_eval = pruner_eval
359 if logger is not None:
361 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
364 self.train_descr = generate_descr(
365 nb_train_samples, "train", pruner=self.pruner_train
367 self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
369 # Build the tokenizer
370 tokens = {"<nul>", "<img>"}
371 for d in [self.train_descr, self.test_descr]:
373 for t in s.strip().split(" "):
375 # make this set a sorted list to get the same tensors given the same descriptions
377 tokens = list(tokens)
379 self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
380 self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
381 self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]
383 # Tokenize the train and test sets
384 self.train_input = self.tensorize(self.train_descr)
385 self.test_input = self.tensorize(self.test_descr)
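# A small illustration (token ids are whatever enumerate() assigned above):
# tensorize right-pads every description with "<nul>" up to the longest one
# before mapping tokens to ids, and detensorize is its inverse, e.g.
#
#   self.tensorize(["there is red <img>",
#                   "red above green <sep> blue top <img>"])
#
# yields two rows of equal length, the first one ending with three <nul>.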
387 def batches(self, split="train"):
388 assert split in {"train", "test"}
389 input = self.train_input if split == "train" else self.test_input
390 for batch in tqdm.tqdm(
391 input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
393 yield self.trim(batch)
395 def vocabulary_size(self):
396 return len(self.token2id)
398 def compute_missing_properties(
399 self, n_epoch, model, logger, deterministic_synthesis, pruner=None
401 acc_nb_requested_properties = []
402 acc_nb_missing_properties = []
405 for input in tqdm.tqdm(
406 self.test_input.split(self.batch_size),
408 desc=f"test-properties",
410 result = input.clone()
411 ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
412 result = (1 - ar_mask) * result + ar_mask * self.t_nul
413 masked_inplace_autoregression(
418 deterministic_synthesis,
419 progress_bar_desc=None,
423 result_descr = self.detensorize(result)
424 np = picoclvr.nb_properties(
430 nb_requested_properties, _, nb_missing_properties = zip(*np)
431 acc_nb_requested_properties += nb_requested_properties
432 acc_nb_missing_properties += nb_missing_properties
433 acc_nb_results += len(result_descr)
435 nb_requested_properties = sum(acc_nb_requested_properties)
436 nb_missing_properties = sum(acc_nb_missing_properties)
438 prefix = "" if pruner is None else "pruned_"
439 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
441 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
444 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
447 ######################################################################
450 self, n_epoch, model, result_dir, logger, deterministic_synthesis
452 self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)
454 if self.pruner_eval is not None:
455 self.compute_missing_properties(n_epoch, model, self.pruner_eval)
457 nb_tokens_to_generate = self.height * self.width + 3
462 for primer_descr in [
463 "red above green <sep> green top <sep> blue right of red",
464 "there is red <sep> there is yellow <sep> there is blue",
465 "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
466 "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
468 primer += [primer_descr + " <img>"] * nb_per_primer
470 result = self.tensorize(primer)
471 fill = result.new_full(
472 result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
474 result = torch.cat((result, fill), 1)
475 ar_mask = (result == self.t_nul).long()
476 masked_inplace_autoregression(
481 deterministic_synthesis,
484 result_descr = self.detensorize(result)
486 np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
488 acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
489 acc_nb_results = len(result_descr)
491 nb_requested_properties = sum(acc_nb_requested_properties)
492 nb_missing_properties = sum(acc_nb_missing_properties)
495 logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
497 f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
500 f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
503 img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
507 img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
511 torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
517 image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
518 torchvision.utils.save_image(
519 img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
521 logger(f"wrote {image_name}")
524 ######################################################################
529 self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
533 self.nb_train_samples = nb_train_samples
534 self.nb_test_samples = nb_test_samples
535 self.batch_size = batch_size
537 data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
538 self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
539 data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
540 self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()
542 def batches(self, split="train", nb_to_use=-1, desc=None):
543 assert split in {"train", "test"}
544 input = self.train_input if split == "train" else self.test_input
546 input = input[:nb_to_use]
548 desc = f"epoch-{split}"
549 for batch in tqdm.tqdm(
550 input.split(self.batch_size), dynamic_ncols=True, desc=desc
554 def vocabulary_size(self):
558 self, n_epoch, model, result_dir, logger, deterministic_synthesis
560 results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
561 ar_mask = torch.full_like(results, 1)
562 masked_inplace_autoregression(
567 deterministic_synthesis,
570 image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
571 torchvision.utils.save_image(
572 1 - results.reshape(-1, 1, 28, 28) / 255.0,
577 logger(f"wrote {image_name}")
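# Each MNIST digit is treated as a raw sequence of 28 * 28 = 784 pixel
# values in [0, 255]; with ar_mask set to 1 everywhere, the call above
# samples complete images from scratch, one pixel token at a time.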
580 ######################################################################
586 def map2seq(self, *m):
587 return torch.cat([x.flatten(1) for x in m], 1)
589 def seq2map(self, s):
590 s = s.reshape(s.size(0), -1, self.height, self.width)
591 return (s[:, k] for k in range(s.size(1)))
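# Shape bookkeeping, as a quick sketch: map2seq flattens each
# height x width map and concatenates them along the token dimension,
# and seq2map undoes it, e.g.
#
#   s = self.map2seq(mazes, paths)     # (N, 2 * height * width)
#   mazes2, paths2 = self.seq2map(s)   # two (N, height, width) tensors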
601 device=torch.device("cpu"),
605 self.batch_size = batch_size
610 train_mazes, train_paths, _ = maze.create_maze_data(
615 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
617 self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
619 test_mazes, test_paths, _ = maze.create_maze_data(
624 progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
626 self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
628 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
630 def batches(self, split="train", nb_to_use=-1, desc=None):
631 assert split in {"train", "test"}
632 input = self.train_input if split == "train" else self.test_input
634 input = input[:nb_to_use]
636 desc = f"epoch-{split}"
637 for batch in tqdm.tqdm(
638 input.split(self.batch_size), dynamic_ncols=True, desc=desc
642 def vocabulary_size(self):
646 self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
648 nb_total, nb_correct = 0, 0
650 self.width * self.height,
651 self.width * self.height,
656 for input in self.batches(split, nb_to_use):
657 result = input.clone()
658 ar_mask = result.new_zeros(result.size())
659 ar_mask[:, self.height * self.width :] = 1
660 result *= 1 - ar_mask
661 masked_inplace_autoregression(
666 deterministic_synthesis,
667 progress_bar_desc=None,
670 mazes, paths = self.seq2map(result)
671 path_correctness = maze.path_correctness(mazes, paths)
672 nb_correct += path_correctness.long().sum()
673 nb_total += mazes.size(0)
675 optimal_path_lengths = (
676 (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
678 predicted_path_lengths = (
679 (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
681 optimal_path_lengths = optimal_path_lengths[path_correctness]
682 predicted_path_lengths = predicted_path_lengths[path_correctness]
683 count[optimal_path_lengths, predicted_path_lengths] += 1
689 : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
692 return nb_total, nb_correct, count
695 self, n_epoch, model, result_dir, logger, deterministic_synthesis
697 train_nb_total, train_nb_correct, count = self.compute_error(
701 deterministic_synthesis=deterministic_synthesis,
704 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
707 test_nb_total, test_nb_correct, count = self.compute_error(
711 deterministic_synthesis=deterministic_synthesis,
714 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
717 if count is not None:
718 proportion_optimal = count.diagonal().sum().float() / count.sum()
719 logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
721 os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
723 for i in range(count.size(0)):
724 for j in range(count.size(1)):
725 eol = " " if j < count.size(1) - 1 else "\n"
726 f.write(f"{count[i,j]}{eol}")
728 input = self.test_input[:48]
729 result = input.clone()
730 ar_mask = result.new_zeros(result.size())
731 ar_mask[:, self.height * self.width :] = 1
732 result *= 1 - ar_mask
733 masked_inplace_autoregression(
738 deterministic_synthesis,
742 mazes, paths = self.seq2map(input)
743 _, predicted_paths = self.seq2map(result)
745 filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
750 predicted_paths=predicted_paths,
751 path_correct=maze.path_correctness(mazes, predicted_paths),
752 path_optimal=maze.path_optimality(paths, predicted_paths),
754 logger(f"wrote {filename}")
757 ######################################################################
774 device=torch.device("cpu"),
778 self.batch_size = batch_size
782 self.prompt_length = prompt_length
784 self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
793 self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
803 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
805 def batches(self, split="train", nb_to_use=-1, desc=None):
806 assert split in {"train", "test"}
807 input = self.train_input if split == "train" else self.test_input
809 input = input[:nb_to_use]
811 desc = f"epoch-{split}"
812 for batch in tqdm.tqdm(
813 input.split(self.batch_size), dynamic_ncols=True, desc=desc
817 def vocabulary_size(self):
821 self, n_epoch, model, result_dir, logger, deterministic_synthesis
823 def compute_nb_correct(input, prior_visits):
824 result = input.clone()
825 i = torch.arange(result.size(1), device=result.device)[None, :]
827 torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
831 result *= 1 - ar_mask
833 masked_inplace_autoregression(
838 deterministic_synthesis,
842 nb_total = ((prior_visits > 0) * ar_mask).sum()
844 nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()
846 return nb_total, nb_correct
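# The mask keeps the even positions from index 2 * prompt_length on
# (e.g. with prompt_length=5: positions 10, 12, 14, ...); only those
# tokens are regenerated, and they are scored only where the cell had
# already been visited (prior_visits > 0).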
848 test_nb_total, test_nb_correct = compute_nb_correct(
849 self.test_input[:1000], self.test_prior_visits[:1000]
853 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
857 ######################################################################
873 fraction_values_for_train=None,
874 device=torch.device("cpu"),
878 self.batch_size = batch_size
879 self.nb_steps = nb_steps
880 self.nb_stacks = nb_stacks
881 self.nb_digits = nb_digits
884 if fraction_values_for_train is None:
885 values_for_train = None
886 values_for_test = None
888 all_values = torch.randperm(10**nb_digits)
889 nb_for_train = int(all_values.size(0) * fraction_values_for_train)
890 values_for_train = all_values[:nb_for_train]
891 values_for_test = all_values[nb_for_train:]
893 self.train_input, self.train_stack_counts = stack.generate_sequences(
902 self.test_input, self.test_stack_counts = stack.generate_sequences(
911 i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
912 counts = self.test_stack_counts.flatten()[i.flatten()]
913 counts = F.one_hot(counts).sum(0)
914 logger(f"test_pop_stack_counts {counts}")
916 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
918 def batches(self, split="train", nb_to_use=-1, desc=None):
919 assert split in {"train", "test"}
920 input = self.train_input if split == "train" else self.test_input
922 input = input[:nb_to_use]
924 desc = f"epoch-{split}"
925 for batch in tqdm.tqdm(
926 input.split(self.batch_size), dynamic_ncols=True, desc=desc
930 def vocabulary_size(self):
934 self, n_epoch, model, result_dir, logger, deterministic_synthesis
936 def compute_nb_correct(input):
937 result = input.clone()
938 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
939 ar_mask = (result != input).long()
940 masked_inplace_autoregression(
945 deterministic_synthesis,
949 errors = ((result != input).long() * ar_mask).reshape(
950 -1, 1 + self.nb_digits
952 ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
954 nb_total = ar_mask.max(1).values.sum()
955 nb_correct = nb_total - errors.max(1).values.sum()
957 return nb_total, nb_correct
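# Each value on the stack tape spans 1 + nb_digits tokens; a popped value
# counts as correct only if none of its tokens is wrong (hence the max over
# each block), and nb_total counts the blocks containing at least one
# masked position.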
959 test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
962 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
965 ##############################################################
966 # Log a few generated sequences
967 input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
968 result = input.clone()
969 stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
970 ar_mask = (result != input).long()
972 # for n in range(result.size(0)):
974 # f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
977 masked_inplace_autoregression(
982 deterministic_synthesis,
986 for n in range(result.size(0)):
988 f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
990 ##############################################################
993 ######################################################################
1000 def tensorize(self, sequences):
1001 len_max = max([len(x) for x in sequences])
1006 [self.char2id[c] for c in s + "#" * (len_max - len(s))]
1023 device=torch.device("cpu"),
1027 self.batch_size = batch_size
1028 self.device = device
1030 train_sequences = expr.generate_sequences(
1032 nb_variables=nb_variables,
1033 length=sequence_length,
1034 operand_max=operand_max,
1035 result_max=result_max,
1038 test_sequences = expr.generate_sequences(
1040 nb_variables=nb_variables,
1041 length=sequence_length,
1042 operand_max=operand_max,
1043 result_max=result_max,
1046 symbols = list(set("#" + "".join(train_sequences + test_sequences)))
1049 self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
1050 self.id2char = dict([(n, c) for c, n in self.char2id.items()])
1052 self.filler, self.space = self.char2id["#"], self.char2id[" "]
1054 self.train_input = self.tensorize(train_sequences)
1055 self.test_input = self.tensorize(test_sequences)
1057 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
1059 def batches(self, split="train", nb_to_use=-1, desc=None):
1060 assert split in {"train", "test"}
1061 input = self.train_input if split == "train" else self.test_input
1063 input = input[:nb_to_use]
1065 desc = f"epoch-{split}"
1066 for batch in tqdm.tqdm(
1067 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1069 last = (batch != self.filler).max(0).values.nonzero().max() + 3
1070 batch = batch[:, :last]
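# Shorter sequences are right-padded with '#', so each batch is cut just
# after its last informative column (plus a small margin) to avoid spending
# compute on pure padding.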
1073 def vocabulary_size(self):
1074 return self.nb_codes
1076 def seq2str(self, s):
1077 return "".join([self.id2char[k.item()] for k in s])
1079 def produce_results(
1085 deterministic_synthesis,
1088 def compute_nb_correct(input):
1089 result = input.clone()
1090 s = (result == self.space).long()
1091 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1092 result = (1 - ar_mask) * result + ar_mask * self.filler
1093 masked_inplace_autoregression(
1098 deterministic_synthesis,
1102 nb_total = input.size(0)
1103 nb_correct = (input == result).long().min(1).values.sum()
1105 #######################################################################
1106 # Compute predicted vs. true variable values
1108 nb_delta = torch.zeros(5, dtype=torch.int64)
1111 values_input = expr.extract_results([self.seq2str(s) for s in input])
1112 values_result = expr.extract_results([self.seq2str(s) for s in result])
1114 filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")
1116 with open(filename, "w") as f:
1117 for i, r in zip(values_input, values_result):
1118 for n, vi in i.items():
1120 f.write(f"{vi} {-1 if vr is None else vr}\n")
1122 if vr is None or vr < 0:
1126 if d >= nb_delta.size(0):
1131 ######################################################################
1133 return nb_total, nb_correct, nb_delta, nb_missed
1140 ) = compute_nb_correct(self.test_input[:10000])
1143 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
1146 nb_total = test_nb_delta.sum() + test_nb_missed
1147 for d in range(test_nb_delta.size(0)):
1149 f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
1152 f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
1155 ##############################################################
1156 # Log a few generated sequences
1157 if input_file is None:
1158 input = self.test_input[:10]
1160 with open(input_file, "r") as f:
1161 sequences = [e.strip() for e in f.readlines()]
1162 sequences = [s + " " + "#" * 50 for s in sequences]
1163 input = self.tensorize(sequences)
1165 result = input.clone()
1166 s = (result == self.space).long()
1167 ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
1168 result = (1 - ar_mask) * result + ar_mask * self.filler
1170 for n in range(result.size(0)):
1171 logger(f"test_before {self.seq2str(result[n])}")
1173 masked_inplace_autoregression(
1178 deterministic_synthesis,
1182 correct = (1 - ar_mask) * self.space + ar_mask * input
1183 for n in range(result.size(0)):
1184 comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
1185 logger(f"test_after {self.seq2str(result[n])} {comment}")
1186 logger(f"truth {self.seq2str(correct[n])}")
1187 ##############################################################
1190 ######################################################################
1203 device=torch.device("cpu"),
1204 device_storage=torch.device("cpu"),
1208 self.batch_size = batch_size
1209 self.device = device
1218 ) = world.create_data_and_processors(
1223 nb_epochs=vqae_nb_epochs,
1226 device_storage=device_storage,
1229 train_frame_seq = self.frame2seq(train_frames).to(device_storage)
1230 test_frame_seq = self.frame2seq(test_frames).to(device_storage)
1232 nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
1233 nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
1235 self.len_frame_seq = train_frame_seq.size(1)
1236 self.len_action_seq = train_action_seq.size(1)
1237 self.nb_codes = nb_frame_codes + nb_action_codes
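# Token layout: frame tokens keep their codes in [0, nb_frame_codes) and
# action tokens are shifted up by nb_frame_codes, so each sequence built
# below reads
#
#   [ frame t | actions | frame t+1 ]
#
# for a total length of 2 * len_frame_seq + len_action_seq.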
1239 train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
1241 train_action_seq += nb_frame_codes
1242 self.train_input = torch.cat(
1243 (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
1246 test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
1247 test_action_seq += nb_frame_codes
1248 self.test_input = torch.cat(
1249 (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
1252 def batches(self, split="train", nb_to_use=-1, desc=None):
1253 assert split in {"train", "test"}
1254 input = self.train_input if split == "train" else self.test_input
1256 input = input[:nb_to_use]
1258 desc = f"epoch-{split}"
1259 for batch in tqdm.tqdm(
1260 input.split(self.batch_size), dynamic_ncols=True, desc=desc
1262 yield batch.to(self.device)
1264 def vocabulary_size(self):
1265 return self.nb_codes
1267 def produce_results(
1268 self, n_epoch, model, result_dir, logger, deterministic_synthesis
1271 2 * self.len_frame_seq + self.len_action_seq, device=self.device
1274 input = self.test_input[:64].to(self.device)
1275 result = input.clone()
1278 (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
1280 result *= 1 - ar_mask
1282 masked_inplace_autoregression(
1287 deterministic_synthesis,
1291 seq_start = input[:, : self.len_frame_seq]
1292 seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
1293 seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
1296 (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
1298 result = result.reshape(-1, result.size(-1))
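# For the saved image, each test sample contributes three consecutive rows
# of frame tokens: the observed start frame, the true end frame, and the
# predicted end frame, which seq2frame below decodes back to pixel frames.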
1300 frames = self.seq2frame(result)
1301 image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
1302 torchvision.utils.save_image(
1303 frames.float() / (world.Box.nb_rgb_levels - 1),
1309 logger(f"wrote {image_name}")
1312 ######################################################################