##############################
# World options
-parser.add_argument("--world_vqae_nb_epochs", type=int, default=10)
+parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
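
# The raised default still yields to an explicit command-line override; a
# minimal self-contained sketch of the parse (only the flag above is taken
# from this diff, the rest is illustrative):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
args = parser.parse_args(["--world_vqae_nb_epochs", "10"])
print(args.world_vqae_nb_epochs)  # 10
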
######################################################################
"nb_test_samples": 10000,
},
"world": {
- "nb_epochs": 5,
+ "nb_epochs": 10,
"batch_size": 25,
- "nb_train_samples": 10000,
+ "nb_train_samples": 125000,
"nb_test_samples": 1000,
},
}
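
# A sketch of how such per-task default dicts are typically folded into the
# parsed arguments when a flag was left unset; the merge helper below is
# hypothetical, only the values mirror the diff:
default_task_args = {
    "world": {
        "nb_epochs": 10,
        "batch_size": 25,
        "nb_train_samples": 125000,
        "nb_test_samples": 1000,
    },
}

def apply_task_defaults(args, task):
    for name, value in default_task_args.get(task, {}).items():
        if getattr(args, name, None) is None:
            setattr(args, name, value)
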
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
vqae_nb_epochs=args.world_vqae_nb_epochs,
+ logger=log_string,
device=device,
)
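
# The constructor call above now receives the run's logging callable
# (log_string). A minimal sketch of such a callable, assuming it timestamps
# to stdout and an optional log file (this signature is hypothetical):
import sys, time

def log_string(s, log_file=None):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()
    print(t + s)
    sys.stdout.flush()
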
batches,
dynamic_ncols=True,
desc=progress_bar_desc,
- #total=input.size(0) // batch_size,
+ # total=input.size(0) // batch_size,
)
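
# Since `batches` is a generator, tqdm cannot infer its length and shows no
# ETA; re-enabling the commented-out `total` argument restores it. Toy
# illustration with hypothetical sizes:
import torch, tqdm

input = torch.empty(1000, 8)
batch_size = 25
batches = (b for b in input.split(batch_size))  # generator: no len()
for batch in tqdm.tqdm(
    batches, dynamic_ncols=True, total=input.size(0) // batch_size
):
    pass
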
with torch.autograd.no_grad():
nb_test_samples,
batch_size,
vqae_nb_epochs,
+ logger=None,
device=torch.device("cpu"),
):
self.batch_size = batch_size
(
train_frames,
- self.train_actions,
+ train_action_seq,
test_frames,
- self.test_actions,
+ test_action_seq,
self.frame2seq,
self.seq2frame,
) = world.create_data_and_processors(
mode="first_last",
nb_steps=30,
nb_epochs=vqae_nb_epochs,
+ logger=logger,
device=device,
)
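
# frame2seq / seq2frame returned above are presumably the VQ-AE tokenizer
# pair: frames to discrete code sequences and back. Shape-level sketch
# (sizes hypothetical):
#   frame2seq: (N, 3, H, W) RGB-level frames -> (N, len_frame_seq) codes
#   seq2frame: (N, len_frame_seq) codes      -> (N, 3, H, W) frames
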
- self.train_input = self.frame2seq(train_frames)
- self.train_input = self.train_input.reshape(self.train_input.size(0) // 2, -1)
- self.test_input = self.frame2seq(test_frames)
- self.test_input = self.test_input.reshape(self.test_input.size(0) // 2, -1)
- self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+ train_frame_seq = self.frame2seq(train_frames)
+ test_frame_seq = self.frame2seq(test_frames)
+
+ nb_frame_codes = max(train_frame_seq.max(), test_frame_seq.max()) + 1
+ nb_action_codes = max(train_action_seq.max(), test_action_seq.max()) + 1
+
+ self.len_frame_seq = train_frame_seq.size(1)
+ self.len_action_seq = train_action_seq.size(1)
+ self.nb_codes = nb_frame_codes + nb_action_codes
+
+ train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
+ train_action_seq += nb_frame_codes
+ self.train_input = torch.cat(
+ (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
+ )
+
+ test_frame_seq = test_frame_seq.reshape(test_frame_seq.size(0) // 2, 2, -1)
+ test_action_seq += nb_frame_codes
+ self.test_input = torch.cat(
+ (test_frame_seq[:, 0, :], test_action_seq, test_frame_seq[:, 1, :]), 1
+ )
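
# Toy illustration of the packing above: one episode's two 4-token frames
# plus a 3-token action sequence become a single row
# [first frame | actions shifted past the frame codes | last frame]:
import torch

train_frame_seq = torch.tensor([[0, 1, 2, 3], [4, 5, 6, 7]])  # 2 frames = 1 episode
train_action_seq = torch.tensor([[0, 1, 2]])
nb_frame_codes = int(train_frame_seq.max()) + 1  # 8

train_frame_seq = train_frame_seq.reshape(train_frame_seq.size(0) // 2, 2, -1)
train_action_seq = train_action_seq + nb_frame_codes
train_input = torch.cat(
    (train_frame_seq[:, 0, :], train_action_seq, train_frame_seq[:, 1, :]), 1
)
# train_input -> tensor([[ 0,  1,  2,  3,  8,  9, 10,  4,  5,  6,  7]])
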
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
def produce_results(
self, n_epoch, model, result_dir, logger, deterministic_synthesis
):
- l = self.train_input.size(1)
- k = torch.arange(l, device=self.device)[None, :]
- result = self.test_input[:64].clone()
- ar_mask = (k >= l // 2).long().expand_as(result)
+ k = torch.arange(
+ 2 * self.len_frame_seq + self.len_action_seq, device=self.device
+ )[None, :]
+
+ input = self.test_input[:64]
+ result = input.clone()
+ ar_mask = (
+ (k >= self.len_frame_seq + self.len_action_seq).long().expand_as(result)
+ )
result *= 1 - ar_mask
masked_inplace_autoregression(
device=self.device,
)
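
# Toy version of the mask construction above: with len_frame_seq=4 and
# len_action_seq=3, only the final-frame positions (k >= 7) are freed for
# the autoregressive resampling:
import torch

len_frame_seq, len_action_seq = 4, 3
k = torch.arange(2 * len_frame_seq + len_action_seq)[None, :]
ar_mask = (k >= len_frame_seq + len_action_seq).long()
# ar_mask -> tensor([[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1]])
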
- result = result.reshape(result.size(0) * 2, -1)
+ seq_start = input[:, : self.len_frame_seq]
+ seq_end = input[:, self.len_frame_seq + self.len_action_seq :]
+ seq_predicted = result[:, self.len_frame_seq + self.len_action_seq :]
+
+ result = torch.cat(
+ (seq_start[:, None, :], seq_end[:, None, :], seq_predicted[:, None, :]), 1
+ )
+ result = result.reshape(-1, result.size(-1))
+ print(f"{result.size()=}")
frames = self.seq2frame(result)
image_name = os.path.join(result_dir, f"world_result_{n_epoch:04d}.png")
torchvision.utils.save_image(
frames.float() / (world.Box.nb_rgb_levels - 1),
image_name,
- nrow=8,
+ nrow=12,
padding=1,
pad_value=0.0,
)
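
# With 64 test sequences expanded into (start, true end, predicted) triplets,
# seq2frame yields 192 images, and nrow=12 lays them out four triplets per
# row. Shape-level sketch with an assumed 24x24 frame size:
import torch, torchvision

frames = torch.rand(192, 3, 24, 24)  # stands in for the decoded frames
torchvision.utils.save_image(frames, "/tmp/world_result_demo.png", nrow=12, padding=1)
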
lr_end=1e-4,
nb_epochs=10,
batch_size=25,
+ logger=None,
device=torch.device("cpu"),
):
+ if logger is None:
+ logger = lambda s: print(s)
+
mu, std = train_input.float().mean(), train_input.float().std()
def encoder_core(depth, dim):
nb_parameters = sum(p.numel() for p in model.parameters())
- print(f"nb_parameters {nb_parameters}")
+ logger(f"nb_parameters {nb_parameters}")
model.to(device)
train_loss = acc_train_loss / train_input.size(0)
test_loss = acc_test_loss / test_input.size(0)
- print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+ logger(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
sys.stdout.flush()
return encoder, quantizer, decoder
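
# The `logger` fall-back above keeps the call sites unconditional while
# letting the caller inject any str-consuming callable, e.g. a hypothetical
# file-backed logger:
def make_file_logger(path):
    f = open(path, "a")

    def log(s):
        f.write(s + "\n")
        f.flush()
        print(s)

    return log
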
for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
frames, actions = generate_episode(steps)
all_frames += frames
- all_actions += [actions]
+ all_actions += [actions[None, :]]
return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
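
# The added [None, :] gives each episode's action tensor a leading batch
# dimension, so the torch.cat along dim 0 yields a (nb, nb_steps) matrix
# rather than a flat vector:
import torch

actions = torch.randint(4, (30,))  # one episode, nb_steps=30
stacked = torch.cat([actions[None, :], actions[None, :]], 0)
print(stacked.size())  # torch.Size([2, 30])
# without [None, :], torch.cat would return a flat size-60 tensor
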
nb_steps,
nb_epochs=10,
device=torch.device("cpu"),
+ logger=None,
):
assert mode in ["first_last"]
test_input, test_actions = test_input.to(device), test_actions.to(device)
encoder, quantizer, decoder = train_encoder(
- train_input, test_input, nb_epochs=nb_epochs, device=device
+ train_input, test_input, nb_epochs=nb_epochs, logger=logger, device=device
)
encoder.train(False)
quantizer.train(False)