import os, tqdm

import torch, torchvision

from torch.nn import functional as F

# Project-local modules (same repository) providing the data generators used below.
import picoclvr, maze, snake, stack, expr, world

######################################################################
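
# Batched wrapper around the model's own masked_inplace_autoregression():
# split the input and its mask into mini-batches, run the model without
# gradients, and regenerate, in place, the positions where ar_mask is 1.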

def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            # total=input.size(0) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )
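
# Minimal usage sketch (hypothetical tensors; `model` is assumed to expose a
# masked_inplace_autoregression(input, ar_mask, forbidden_tokens,
# deterministic_synthesis) method, as used above):
#
#   input = torch.zeros(4, 16, dtype=torch.int64)
#   ar_mask = torch.zeros(4, 16, dtype=torch.int64)
#   ar_mask[:, 8:] = 1  # regenerate the second half of every sequence
#   masked_inplace_autoregression(
#       model, 2, input, ar_mask, deterministic_synthesis=True
#   )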


######################################################################
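

# Abstract interface shared by all tasks: batches() yields token mini-batches
# for a split, vocabulary_size() returns the number of token codes, and
# produce_results() runs the per-epoch evaluation and sample generation.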
class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################
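

# A Problem generates (sequences, ar_mask) pairs, where ar_mask flags the
# tokens the model has to produce. ProblemByheart builds a fixed set of random
# prompt/result pairs separated by the token 10, so it can essentially only be
# solved by memorization.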
class Problem:
    def generate_sequences(self, nb):
        pass

    def log_performance(self, sequences, logger):
        pass

class ProblemByheart(Problem):
    def __init__(self):
        nb_seq, len_prompt, len_result = 100, 5, 5
        self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result))
        self.seq[:, len_prompt] = 10

    def generate_sequences(self, nb):
        sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
        ar_mask = (sequences == 10).long()
        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
        return sequences, ar_mask

# problems = [ProblemByheart()]
# nb_common_codes = 100

# def generate_sequences(nb_samples):
#     problem_indexes = torch.randint(len(problems), (nb_samples,))
#     nb_samples_per_problem = F.one_hot(problem_indexes).sum(0)
#     print(f"{nb_samples_per_problem}")
#     for nb, p in zip(nb_samples_per_problem, problems):
#         all_seq.append(p.generate_sequences(nb))

# for strain, stest in zip(train_seq, test_seq):
#     s = torch.cat((strain, stest), 0)
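

# The task below wraps a Problem: it pre-generates train/test (sequences,
# ar_mask) pairs and evaluates the model by the token accuracy it reaches on
# the masked positions.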

        device=torch.device("cpu"),

        self.batch_size = batch_size

        self.train_input, self.train_ar_mask = problem.generate_sequences(nb_train_samples)
        self.test_input, self.test_ar_mask = problem.generate_sequences(nb_test_samples)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
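
    # Iterate over a split, optionally truncated to nb_to_use sequences, with a
    # tqdm progress bar.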
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_accuracy(input, ar_mask):
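            # Erase the tokens flagged by ar_mask, let the model regenerate
            # them, and count correct predictions on those positions only.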
            result = input.clone() * (1 - ar_mask)
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            nb_total = ar_mask.sum().item()
            nb_correct = ((result == input).long() * ar_mask).sum().item()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_input, self.train_ar_mask)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_input, self.test_ar_mask)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################
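

# PicoCLVR task: every sample is a textual scene description followed by the
# <img> token and the token encoding of the corresponding image; <nul> is the
# padding token.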
class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]

    # Trim all the tensors in the tuple z to remove as many tokens as possible
    # from the left and right of the first tensor. If z is a tuple, all its
    # elements are trimmed according to the trimming computed on the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
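        # For each column, check whether it contains only the <nul> token; the
        # cumulative sum i then gives the bounds a, b of the non-padding core,
        # which is kept by the slicing below.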
        if type(z) is tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]

    ######################

        device=torch.device("cpu"),

        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(

        self.batch_size = batch_size

        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)

        # Make this set a sorted list to get the same tensors given the same
        # descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
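
    # Erase the image part (everything from the <img> token on) of each test
    # sequence, regenerate it with the model, and count how many requested
    # properties of the description are missing from the generated image.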
    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            desc=f"test-properties",
        ):
            result = input.clone()
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(

            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################
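
    # Per-epoch evaluation: log the missing-property statistics on the test
    # set, then generate images from a few fixed prompts and save them as a
    # PNG grid in result_dir.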
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            deterministic_synthesis,

        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################
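

# MNIST as a sequence task: each image is flattened into 28*28 = 784 pixel
# intensities and the model is evaluated by generating images from scratch.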
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = (nb_train_samples,)
        self.nb_test_samples = (nb_test_samples,)
        self.batch_size = batch_size
        self.device = device

        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            deterministic_synthesis,

        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,

        logger(f"wrote {image_name}")


######################################################################
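

# Maze task: each sequence is the flattened maze map followed by the flattened
# path, and map2seq / seq2map convert between the two representations.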
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))

        device=torch.device("cpu"),

        self.batch_size = batch_size

        train_mazes, train_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),

        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),

        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                deterministic_synthesis,
                progress_bar_desc=None,

            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)
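
            # Histogram of optimal vs. predicted path lengths, restricted to
            # the samples whose predicted path is correct.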
            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

                : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1

        return nb_total, nb_correct, count

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            deterministic_synthesis=deterministic_synthesis,

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            deterministic_synthesis,

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),

        logger(f"wrote {filename}")


######################################################################
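

# Snake task (data from the snake module): the evaluation regenerates every
# second token after the prompt and scores only the positions whose grid cell
# has already been visited (prior_visits > 0), i.e. values the model can
# recall rather than guess.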
        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(

        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                deterministic_synthesis,

            nb_total = ((prior_visits > 0) * ar_mask).sum()
            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct

        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################
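

# Stack task (data from the stack module): sequences encode push/pop operations
# on nb_stacks stacks of values with nb_digits digits; the evaluation hides the
# popped values and checks that the model predicts them.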
        fraction_values_for_train=None,
        device=torch.device("cpu"),

        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all = torch.randperm(10**nb_digits)
            nb_for_train = int(all.size(0) * fraction_values_for_train)
            values_for_train = all[:nb_for_train]
            values_for_test = all[nb_for_train:]

        self.train_input, self.train_stack_counts = stack.generate_sequences(

        self.test_input, self.test_stack_counts = stack.generate_sequences(

        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc

    def vocabulary_size(self):

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
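            # Hide the popped values, let the model regenerate them, and count
            # a value as correct only if all of its 1 + nb_digits tokens match.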
            result = input.clone()
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                deterministic_synthesis,

            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct

        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            deterministic_synthesis,

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################


######################################################################
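

# Expr task (data from the expr module): character-level sequences of variable
# assignments and arithmetic expressions, padded with '#'; the evaluation
# regenerates everything after the first space and compares the variable
# values read back from the output with the true ones.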
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
            [self.char2id[c] for c in s + "#" * (len_max - len(s))]

        device=torch.device("cpu"),

        self.batch_size = batch_size

        train_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,

        test_sequences = expr.generate_sequences(
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,

        symbols = list(set("#" + "".join(train_sequences + test_sequences)))

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]

    def vocabulary_size(self):

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, input_file=None
    ):
        def compute_nb_correct(input):
            result = input.clone()
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                deterministic_synthesis,

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0
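            # nb_delta histograms the absolute error between predicted and true
            # variable values; nb_missed counts variables whose value cannot be
            # read back from the generated sequence.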
            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed

        (
            test_nb_total,
            test_nb_correct,
            test_nb_delta,
            test_nb_missed,
        ) = compute_nb_correct(self.test_input[:10000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            deterministic_synthesis,

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################


######################################################################
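

# World task (data from the world module): frames are converted into sequences
# of discrete codes by frame2seq/seq2frame, and every training sample is a
# first frame, an action sequence, and the resulting second frame.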
        device=torch.device("cpu"),
        device_storage=torch.device("cpu"),

        self.batch_size = batch_size
        self.device = device

        ) = world.create_data_and_processors(
            nb_epochs=vqae_nb_epochs,
            device_storage=device_storage,

        print(f"{train_action_seq.size()=}")

        train_frame_seq = self.frame2seq(train_frames).to(device_storage)
        test_frame_seq = self.frame2seq(test_frames).to(device_storage)

        nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
        nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1

        self.len_frame_seq = train_frame_seq.size(1)
        self.len_action_seq = train_action_seq.size(1)
        self.nb_codes = nb_frame_codes + nb_action_codes

        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
        print(f"{train_action_seq.device=} {nb_frame_codes.device=}")
        train_action_seq += nb_frame_codes
        self.train_input = torch.cat(
            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
        )

        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
        test_action_seq += nb_frame_codes
        self.test_input = torch.cat(
            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
        )

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch.to(self.device)

    def vocabulary_size(self):
        return self.nb_codes
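
    # Regenerate the final frame from the initial frame and the action tokens,
    # then save an image grid showing initial / ground-truth / predicted frames.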
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        k = torch.arange(
            2 * self.len_frame_seq + self.len_action_seq, device=self.device
        )[None, :]

        input = self.test_input[:64].to(self.device)
        result = input.clone()
        ar_mask = (
            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
        )
        result *= 1 - ar_mask

        masked_inplace_autoregression(
            deterministic_synthesis,

        seq_start = input[:, : self.len_frame_seq]
        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]

        result = torch.cat(
            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
        )
        result = result.reshape(-1, result.size(-1))
        print(f"{result.size()=}")

        frames = self.seq2frame(result)
        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            frames.float() / (world.Box.nb_rgb_levels - 1),

        logger(f"wrote {image_name}")


######################################################################