import os, tqdm

import torch, torchvision

from torch.nn import functional as F

# Task-specific generators and helpers provided by this repository
import picoclvr, maze, snake, stack, expr, world

######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    # Re-generate, in place, the entries of input for which ar_mask is
    # 1, by autoregressive sampling from the model
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            # total=input.size(0) // batch_size,
        )

    with torch.autograd.no_grad():
        # Switch to eval mode during generation, restore the previous
        # training state afterward
        t = model.training
        model.eval()

        for input, ar_mask in batches:
            model.masked_inplace_autoregression(
                input, ar_mask, forbidden_tokens, deterministic_synthesis
            )

        model.train(t)
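
# Minimal usage sketch (hypothetical `model`, `batch` and `prompt_len`,
# not part of the tasks below): blank out everything after a prompt and
# let the model fill it in.
#
#   x = batch.clone()                  # (N, T) token tensor
#   ar_mask = torch.zeros_like(x)
#   ar_mask[:, prompt_len:] = 1        # 1 = positions to re-generate
#   x *= 1 - ar_mask                   # erase the masked positions
#   masked_inplace_autoregression(model, 25, x, ar_mask, True)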

######################################################################


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################


class PicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
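
    # Round-trip sketch (assumes the tokenizer built in __init__ below):
    #
    #   t = self.tensorize(["red above green <img>", "there is blue <img>"])
    #   # -> LongTensor of shape (2, max_len), short rows padded with <nul>
    #   self.detensorize(t)
    #   # -> the same strings, with the "<nul>" padding made explicit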

    # Trim the tensor z, or all the tensors in the tuple z, to remove
    # as many <nul> tokens as possible on the left and on the right. If
    # z is a tuple, all its elements are trimmed according to the
    # trimming computed for the first one.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
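
    # Worked example (hypothetical ids, with <nul> mapped to 0):
    #
    #   z = torch.tensor([[0, 0, 7, 8, 0],
    #                     [0, 5, 0, 0, 0]])
    #   self.trim(z)  # -> tensor([[0, 7, 8], [5, 0, 0]])
    #
    # Columns 1..3 are kept: they span the first and last columns that
    # contain at least one non-<nul> token.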

    ######################

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_colors=5,
        logger=None,
        pruner_train=None,
        pruner_eval=None,
        device=torch.device("cpu"),
    ):
        def generate_descr(nb, cache_suffix, pruner):
            # Argument list assumed from the constructor parameters;
            # cache_suffix is kept in the signature but unused here
            return picoclvr.generate(
                nb, height=height, width=width, nb_colors=nb_colors, pruner=pruner
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        if logger is not None:
            logger(
                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
            )

        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # Make this set a sorted list to get the same tensors given the
        # same descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
        self.t_img, self.t_nul = self.token2id["<img>"], self.token2id["<nul>"]

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)

    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)

    def compute_missing_properties(
        self, n_epoch, model, logger, deterministic_synthesis, pruner=None
    ):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc="test-properties",
        ):
            result = input.clone()
            # Mask everything from the first <img> token onward, and
            # blank those positions with <nul> before re-generating them
            ar_mask = (result == self.t_img).long().cumsum(dim=1).clamp(max=1)
            result = (1 - ar_mask) * result + ar_mask * self.t_nul
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            result_descr = self.detensorize(result)
            np = picoclvr.nb_properties(
                result_descr,
                height=self.height,
                width=self.width,
                pruner=pruner,
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )
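
    # Mask sketch: with a <sep>-separated description followed by <img>
    # and the image tokens,
    #
    #   tokens:  red above green <img>  i1  i2  ...
    #   ar_mask:  0    0     0     1     1   1  ...
    #
    # the cumsum/clamp turns the first occurrence of <img> into a step
    # function, so the model keeps the description and re-generates the
    # image part (including the <img> token itself).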

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        self.compute_missing_properties(n_epoch, model, logger, deterministic_synthesis)

        if self.pruner_eval is not None:
            # Pass the full argument list; the pruner is a keyword-style
            # trailing argument, not a replacement for logger
            self.compute_missing_properties(
                n_epoch, model, logger, deterministic_synthesis, self.pruner_eval
            )

        nb_tokens_to_generate = self.height * self.width + 3
        nb_per_primer = 8
        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr + " <img>"] * nb_per_primer

        result = self.tensorize(primer)
        fill = result.new_full(
            result.size()[:-1] + (self.height * self.width + 1,), self.t_nul
        )
        result = torch.cat((result, fill), 1)
        ar_mask = (result == self.t_nul).long()
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        result_descr = self.detensorize(result)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "demo_"
        logger(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        logger(
            f"property_{prefix}nb {n_epoch} requested {nb_requested_properties} missing {nb_missing_properties}"
        )
        logger(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        if img.dim() == 5:
            if img.size(1) == 1:
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [
                        torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
                        for x in img
                    ],
                    0,
                )

        image_name = os.path.join(result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        logger(f"wrote {image_name}")


######################################################################


class MNIST(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, device=torch.device("cpu")
    ):
        self.nb_train_samples = nb_train_samples
        self.nb_test_samples = nb_test_samples
        self.batch_size = batch_size
        self.device = device
        data_set = torchvision.datasets.MNIST(root="./data", train=True, download=True)
        self.train_input = data_set.data[:nb_train_samples].view(-1, 28 * 28).long()
        data_set = torchvision.datasets.MNIST(root="./data", train=False, download=True)
        self.test_input = data_set.data[:nb_test_samples].view(-1, 28 * 28).long()

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        # One token per pixel, with the gray level 0-255 as its value
        return 256

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        # Generate 64 digit images from scratch: every position is masked
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model,
            self.batch_size,
            results,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )
        image_name = os.path.join(result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
            nrow=16,
            pad_value=0.8,
        )
        logger(f"wrote {image_name}")
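
    # Conditional variant (sketch, not used by the training loop): keep
    # the top half of each image and let the model complete the bottom.
    #
    #   results = self.test_input[:64].clone()
    #   ar_mask = torch.zeros_like(results)
    #   ar_mask[:, 28 * 14 :] = 1        # mask the lower 14 pixel rows
    #   results *= 1 - ar_mask
    #   masked_inplace_autoregression(
    #       model, self.batch_size, results, ar_mask, deterministic_synthesis
    #   )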


######################################################################


class Maze(Task):
    # Concatenate the maze and path maps into one flat sequence
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    # Split a flat sequence back into (height x width) maps
    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
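
    # Shape sketch: with height=5 and width=7,
    #
    #   map2seq(mazes, paths)      # (N, 35) and (N, 35) -> (N, 70)
    #   mazes, paths = seq2map(s)  # (N, 70) -> two (N, 5, 7) maps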

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(
        self, model, split="train", nb_to_use=-1, deterministic_synthesis=False
    ):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )

        for input in self.batches(split, nb_to_use):
            result = input.clone()
            # The maze occupies the first height*width tokens; the path,
            # to be re-generated, occupies the second half
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        if count.max() == 0:
            count = None
        else:
            count = count[
                : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
            ]

        return nb_total, nb_correct, count
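
    # count is a 2D histogram over the correct predictions: count[i, j]
    # is the number of mazes whose optimal path has i path-cells and
    # whose (valid) predicted path has j. Mass on the diagonal means the
    # model finds shortest paths; mass at j > i means valid but
    # longer-than-optimal paths.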

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        train_nb_total, train_nb_correct, count = self.compute_error(
            model,
            "train",
            nb_to_use=1000,
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct, count = self.compute_error(
            model,
            "test",
            nb_to_use=1000,
            deterministic_synthesis=deterministic_synthesis,
        )
        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        if count is not None:
            proportion_optimal = count.diagonal().sum().float() / count.sum()
            logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
            with open(
                os.path.join(result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
            ) as f:
                for i in range(count.size(0)):
                    for j in range(count.size(1)):
                        eol = " " if j < count.size(1) - 1 else "\n"
                        f.write(f"{count[i,j]}{eol}")

        input = self.test_input[:48]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        result *= 1 - ar_mask
        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)

        filename = os.path.join(result_dir, f"maze_result_{n_epoch:04d}.png")
        maze.save_image(
            filename,
            mazes=mazes,
            target_paths=paths,
            predicted_paths=predicted_paths,
            path_correct=maze.path_correctness(mazes, predicted_paths),
            path_optimal=maze.path_optimality(paths, predicted_paths),
        )
        logger(f"wrote {filename}")


######################################################################


class Snake(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_colors,
        length,
        prompt_length,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device
        self.prompt_length = prompt_length

        # Argument lists assumed from the constructor parameters
        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
            nb_train_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
            nb_test_samples,
            height,
            width,
            nb_colors,
            length,
            prompt_length,
            self.device,
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input, prior_visits):
            result = input.clone()
            i = torch.arange(result.size(1), device=result.device)[None, :]
            # Past twice the prompt length, re-generate the tokens at
            # even indices only
            ar_mask = (
                torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                .long()
                .expand_as(result)
            )
            result *= 1 - ar_mask

            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = ((prior_visits > 0) * ar_mask).sum()

            nb_correct = ((result == input).long() * (prior_visits > 0) * ar_mask).sum()

            return nb_total, nb_correct
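
        # Scoring sketch: only masked positions over already-visited
        # cells (prior_visits > 0) count. With prompt_length=3 and a
        # sequence of length 10,
        #
        #   index:    0 1 2 3 4 5 6 7 8 9
        #   ar_mask:  0 0 0 0 0 0 1 0 1 0
        #
        # so indices 6 and 8 are re-generated and compared to the truth.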

        test_nb_total, test_nb_correct = compute_nb_correct(
            self.test_input[:1000], self.test_prior_visits[:1000]
        )

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )


######################################################################


class Stack(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        logger,
        nb_steps,
        nb_stacks,
        nb_digits,
        fraction_values_for_train=None,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_digits = nb_digits
        self.device = device

        if fraction_values_for_train is None:
            values_for_train = None
            values_for_test = None
        else:
            all_values = torch.randperm(10**nb_digits)
            nb_for_train = int(all_values.size(0) * fraction_values_for_train)
            values_for_train = all_values[:nb_for_train]
            values_for_test = all_values[nb_for_train:]

        # Argument lists assumed from the constructor parameters
        self.train_input, self.train_stack_counts = stack.generate_sequences(
            nb_train_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_train,
            self.device,
        )
        self.test_input, self.test_stack_counts = stack.generate_sequences(
            nb_test_samples,
            nb_steps,
            nb_stacks,
            nb_digits,
            values_for_test,
            self.device,
        )

        # Log the distribution of stack sizes at POP time in the test set
        i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
        counts = self.test_stack_counts.flatten()[i.flatten()]
        counts = F.one_hot(counts).sum(0)
        logger(f"test_pop_stack_counts {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        def compute_nb_correct(input):
            result = input.clone()
            # Erase the values that the POP operations should return;
            # those erased positions are exactly the ones to re-generate
            stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
            ar_mask = (result != input).long()
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            # Group the sequence into blocks of one operation token
            # followed by nb_digits value tokens, and count a block as
            # wrong if any of its re-generated tokens is wrong
            errors = ((result != input).long() * ar_mask).reshape(
                -1, 1 + self.nb_digits
            )
            ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)

            nb_total = ar_mask.max(1).values.sum()
            nb_correct = nb_total - errors.max(1).values.sum()

            return nb_total, nb_correct
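
        # Counting sketch with nb_digits=2: the flattened sequence is
        # cut into blocks [op, d1, d2]. A block enters nb_total if any
        # of its tokens was masked (i.e. it is a POP result), and is
        # correct only if the model got every masked digit right.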

        test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
        result = input.clone()
        stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
        ar_mask = (result != input).long()

        # for n in range(result.size(0)):
        #     logger(
        #         f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
        #     )

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        for n in range(result.size(0)):
            logger(
                f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
            )
        ##############################################################


######################################################################


class Expr(Task):
    # Character-level tokenization; sequences are right-padded with "#"
    def tensorize(self, sequences):
        len_max = max([len(x) for x in sequences])
        return torch.tensor(
            [
                [self.char2id[c] for c in s + "#" * (len_max - len(s))]
                for s in sequences
            ],
            device=self.device,
        )

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        nb_variables,
        sequence_length,
        operand_max,
        result_max,
        batch_size,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        train_sequences = expr.generate_sequences(
            nb_train_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        test_sequences = expr.generate_sequences(
            nb_test_samples,
            nb_variables=nb_variables,
            length=sequence_length,
            operand_max=operand_max,
            result_max=result_max,
        )

        # Sort the symbols so that the same data always yields the same
        # tokenizer
        symbols = list(set("#" + "".join(train_sequences + test_sequences)))
        symbols.sort()

        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
        self.id2char = dict([(n, c) for c, n in self.char2id.items()])

        self.filler, self.space = self.char2id["#"], self.char2id[" "]

        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            # Trim the shared right-padding, keeping a small margin of
            # filler tokens after the last non-filler column
            last = (batch != self.filler).max(0).values.nonzero().max() + 3
            batch = batch[:, :last]
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def seq2str(self, s):
        return "".join([self.id2char[k.item()] for k in s])

    def produce_results(
        self,
        n_epoch,
        model,
        result_dir,
        logger,
        deterministic_synthesis,
        input_file=None,
    ):
        def compute_nb_correct(input):
            result = input.clone()
            # Mask everything strictly after the first space: the
            # expression is kept, the values are re-generated
            s = (result == self.space).long()
            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
            result = (1 - ar_mask) * result + ar_mask * self.filler
            masked_inplace_autoregression(
                model,
                self.batch_size,
                result,
                ar_mask,
                deterministic_synthesis,
                device=self.device,
            )

            nb_total = input.size(0)
            nb_correct = (input == result).long().min(1).values.sum()

            #######################################################################
            # Compute predicted vs. true variable values

            nb_delta = torch.zeros(5, dtype=torch.int64)
            nb_missed = 0

            values_input = expr.extract_results([self.seq2str(s) for s in input])
            values_result = expr.extract_results([self.seq2str(s) for s in result])

            filename = os.path.join(result_dir, f"expr_result_{n_epoch:04d}.txt")

            with open(filename, "w") as f:
                for i, r in zip(values_input, values_result):
                    for n, vi in i.items():
                        vr = r.get(n)
                        f.write(f"{vi} {-1 if vr is None else vr}\n")

                        if vr is None or vr < 0:
                            nb_missed += 1
                        else:
                            d = abs(vr - vi)
                            if d >= nb_delta.size(0):
                                nb_missed += 1
                            else:
                                nb_delta[d] += 1

            ######################################################################

            return nb_total, nb_correct, nb_delta, nb_missed
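
        # Error histogram sketch: for each variable with true value vi
        # and predicted value vr, d = |vr - vi| increments nb_delta[d]
        # when d < 5; a missing or negative prediction, or d >= 5,
        # increments nb_missed instead.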

        (
            test_nb_total,
            test_nb_correct,
            test_nb_delta,
            test_nb_missed,
        ) = compute_nb_correct(self.test_input[:10000])

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        nb_total = test_nb_delta.sum() + test_nb_missed
        for d in range(test_nb_delta.size(0)):
            logger(
                f"error_value {n_epoch} delta {d} {test_nb_delta[d]} {test_nb_delta[d]*100/nb_total:.02f}%"
            )
        logger(
            f"error_value {n_epoch} missed {test_nb_missed} {test_nb_missed*100/nb_total:.02f}%"
        )

        ##############################################################
        # Log a few generated sequences
        if input_file is None:
            input = self.test_input[:10]
        else:
            with open(input_file, "r") as f:
                sequences = [e.strip() for e in f.readlines()]
                sequences = [s + " " + "#" * 50 for s in sequences]
                input = self.tensorize(sequences)

        result = input.clone()
        s = (result == self.space).long()
        ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
        result = (1 - ar_mask) * result + ar_mask * self.filler

        for n in range(result.size(0)):
            logger(f"test_before {self.seq2str(result[n])}")

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        correct = (1 - ar_mask) * self.space + ar_mask * input
        for n in range(result.size(0)):
            comment = "GOOD" if (result[n] - input[n]).abs().max() == 0 else ""
            logger(f"test_after {self.seq2str(result[n])} {comment}")
            logger(f"truth {self.seq2str(correct[n])}")
        ##############################################################


######################################################################


class World(Task):
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        vqae_nb_epochs,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.device = device

        # Unpacked names and argument list reconstructed from their uses
        # below; see world.create_data_and_processors for the full API
        (
            train_frames,
            train_action_seq,
            test_frames,
            test_action_seq,
            self.frame2seq,
            self.seq2frame,
        ) = world.create_data_and_processors(
            nb_train_samples,
            nb_test_samples,
            nb_epochs=vqae_nb_epochs,
            device=device,
        )

        print(f"{train_action_seq.size()=}")

        train_frame_seq = self.frame2seq(train_frames)
        test_frame_seq = self.frame2seq(test_frames)

        nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
        nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1

        self.len_frame_seq = train_frame_seq.size(1)
        self.len_action_seq = train_action_seq.size(1)
        self.nb_codes = nb_frame_codes + nb_action_codes

        # Each training sequence is [start frame, actions, end frame],
        # with the action codes offset to live above the frame codes
        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
        train_action_seq += nb_frame_codes
        self.train_input = torch.cat(
            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
        )

        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
        test_action_seq += nb_frame_codes
        self.test_input = torch.cat(
            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
        )
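
    # Token layout sketch (lengths given by the frame tokenizer):
    #
    #   [ frame_t (len_frame_seq) | actions (len_action_seq) | frame_t+1 (len_frame_seq) ]
    #
    # Action codes are shifted by nb_frame_codes so the two vocabularies
    # do not collide; vocabulary_size() is their sum.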

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        k = torch.arange(
            2 * self.len_frame_seq + self.len_action_seq, device=self.device
        )[None, :]

        input = self.test_input[:64]
        result = input.clone()

        # Keep the start frame and the actions, re-generate the end frame
        ar_mask = (
            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
        )
        result *= 1 - ar_mask

        masked_inplace_autoregression(
            model,
            self.batch_size,
            result,
            ar_mask,
            deterministic_synthesis,
            device=self.device,
        )

        seq_start = input[:, : self.len_frame_seq]
        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]

        # Interleave start / true end / predicted end for visualization
        result = torch.cat(
            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
        )
        result = result.reshape(-1, result.size(-1))
        print(f"{result.size()=}")

        frames = self.seq2frame(result)
        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            frames.float() / (world.Box.nb_rgb_levels - 1),
            image_name,
            nrow=12,
            padding=1,
            pad_value=0.0,
        )
        logger(f"wrote {image_name}")


######################################################################