+
+######################################################################
+
+# This hunk assumes Task and masked_inplace_autoregression are defined
+# earlier in this file; the imports below are restated so the class
+# reads self-contained.
+import os
+
+import torch
+import torchvision
+import tqdm
+
+import world
+
+
+class World(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        vqae_nb_epochs,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        self.batch_size = batch_size
+        self.device = device
+
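+        # Build the train/test frames and action sequences, plus the
+        # frame2seq/seq2frame processors that encode frames to and decode
+        # them from discrete token sequences (the "vqae" auto-encoder,
+        # trained here for vqae_nb_epochs epochs).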
+        (
+            train_frames,
+            train_action_seq,
+            test_frames,
+            test_action_seq,
+            self.frame2seq,
+            self.seq2frame,
+        ) = world.create_data_and_processors(
+            nb_train_samples,
+            nb_test_samples,
+            mode="first_last",
+            nb_steps=30,
+            nb_epochs=vqae_nb_epochs,
+            logger=logger,
+            device=device,
+        )
+
+ print(f"{train_action_seq.size()=}")
+
+ train_frame_seq = self.frame2seq(train_frames)
+ test_frame_seq = self.frame2seq(test_frames)
+
+ nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
+ nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
+
+ self.len_frame_seq = train_frame_seq.size(1)
+ self.len_action_seq = train_action_seq.size(1)
+ self.nb_codes = nb_frame_codes + nb_action_codes
+
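+        # In "first_last" mode the frames come in consecutive
+        # (first, last) pairs, hence the reshape into groups of two.
+        # Offsetting the action codes by nb_frame_codes keeps the two
+        # vocabularies disjoint; each sample is then laid out as
+        # <first frame tokens> <action tokens> <last frame tokens>.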
+        train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
+        train_action_seq += nb_frame_codes
+        self.train_input = torch.cat(
+            (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
+        )
+
+        test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
+        test_action_seq += nb_frame_codes
+        self.test_input = torch.cat(
+            (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
+        )
+
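+    # Yield mini-batches from the chosen split, optionally capped at
+    # nb_to_use sequences.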
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
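+    # Total number of token values, frame and action codes combined.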
+    def vocabulary_size(self):
+        return self.nb_codes
+
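+    # Regenerate the final frame of a few test sequences with the model
+    # and save an image grid comparing start frame, ground truth, and
+    # prediction.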
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+    ):
+        # Index of each position in a full <frame, actions, frame> sequence.
+        k = torch.arange(
+            2 * self.len_frame_seq + self.len_action_seq, device=self.device
+        )[None, :]
+
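+        # Take a few test sequences, blank out their final frame with
+        # ar_mask, and let the model regenerate it autoregressively.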
+        input = self.test_input[:64]
+        result = input.clone()
+
+        ar_mask = (
+            (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
+        )
+        result *= 1 - ar_mask
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
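+        # Split out the initial frame, the ground-truth final frame, and
+        # the model's predicted final frame.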
+        seq_start = input[:, : self.len_frame_seq]
+        seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
+        seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
+
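+        # Interleave them so each sample contributes three consecutive
+        # images (start, ground truth, prediction) in the saved grid.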
+        result = torch.cat(
+            (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
+        )
+        result = result.reshape(-1, result.size(-1))
+        print(f"{result.size()=}")
+
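+        # Decode the token sequences back to pixel frames and save them
+        # as a single image grid.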
+        frames = self.seq2frame(result)
+        image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
+        torchvision.utils.save_image(
+            frames.float() / (world.Box.nb_rgb_levels - 1),
+            image_name,
+            nrow=12,
+            padding=1,
+            pad_value=0.0,
+        )
+        logger(f"wrote {image_name}")