- if change:
- break
-
- return frames, actions
-
-
-######################################################################
-
-
def generate_episodes(nb, steps):
    """Generate ``nb`` episodes and stack their data.

    Returns a pair ``(frames, actions)``: the frames of every episode
    concatenated along dim 0 into one contiguous tensor, and the
    per-episode action tensors stacked along a new leading dimension.
    """
    frame_list, action_list = [], []
    for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
        frames, actions = generate_episode(steps)
        frame_list.extend(frames)
        # Prepend an episode dimension so the actions can be stacked.
        action_list.append(actions.unsqueeze(0))
    return torch.cat(frame_list, 0).contiguous(), torch.cat(action_list, 0)
-
-
def create_data_and_processors(
    nb_train_samples,
    nb_test_samples,
    mode,
    nb_steps,
    nb_epochs=10,
    device=torch.device("cpu"),
    device_storage=torch.device("cpu"),
    logger=None,
):
    """Generate world-model episodes, train a frame auto-encoder on them,
    and return the data together with frame<->token-sequence converters.

    Args:
        nb_train_samples: number of training episodes to generate.
        nb_test_samples: number of test episodes to generate.
        mode: episode sub-sampling mode; only "first_last" is supported.
        nb_steps: number of simulation steps per episode.
        nb_epochs: epochs for train_encoder.
        device: compute device for encoding/decoding.
        device_storage: device where the generated datasets are kept
            (may differ from `device` to save accelerator memory).
        logger: optional logger forwarded to train_encoder.

    Returns:
        (train_input, train_actions, test_input, test_actions,
         frame2seq, seq2frame) where the last two are closures converting
        between image frames and integer token sequences.
    """
    assert mode in ["first_last"]

    if mode == "first_last":
        # Presumably a per-frame keep-mask handed to generate_episodes:
        # keep the first and the last frame, drop the nb_steps + 1
        # intermediate ones — TODO confirm against generate_episode.
        steps = [True] + [False] * (nb_steps + 1) + [True]

    # Generate both splits and park them on device_storage.
    train_input, train_actions = generate_episodes(nb_train_samples, steps)
    train_input, train_actions = train_input.to(device_storage), train_actions.to(device_storage)
    test_input, test_actions = generate_episodes(nb_test_samples, steps)
    test_input, test_actions = test_input.to(device_storage), test_actions.to(device_storage)

    encoder, quantizer, decoder = train_encoder(
        train_input, test_input, nb_epochs=nb_epochs, logger=logger, device=device
    )
    # Switch to eval mode so the converters below behave deterministically
    # (modulo the explicit sampling in seq2frame).
    encoder.train(False)
    quantizer.train(False)
    decoder.train(False)

    # Probe the encoder with a single frame to learn the latent geometry.
    # Indexing below treats z as (batch, channels, height, width); each
    # latent position is later binarized channel-wise, so a position maps
    # to an integer in [0, 2**z.size(1)).
    z = encoder(train_input[:1].to(device))
    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
    z_h, z_w = z.size(2), z.size(3)

    def frame2seq(input, batch_size=25):
        """Encode frames into integer token sequences, batch by batch."""
        seq = []
        p = pow2.to(device)
        for x in input.split(batch_size):
            x=x.to(device)
            z = encoder(x)
            # Binarize the quantizer output channel-wise (sign test).
            ze_bool = (quantizer(z) >= 0).long()
            # Flatten the spatial grid and pack each position's channel
            # bits into one integer via the powers of two in p.
            output = (
                ze_bool.permute(0, 2, 3, 1).reshape(
                    ze_bool.size(0), -1, ze_bool.size(1)
                )
                * p
            ).sum(-1)

            seq.append(output)

        return torch.cat(seq, dim=0)

    def seq2frame(input, batch_size=25, T=1e-2):
        """Decode integer token sequences back into frames.

        T is a softmax temperature: the decoder logits are divided by T
        before sampling pixel values, so a small T is near-greedy.
        """
        frames = []
        p = pow2.to(device)
        for seq in input.split(batch_size):
            seq = seq.to(device)
            # Unpack each integer token into its channel bits (inverse of
            # the packing done in frame2seq), then restore the latent's
            # (batch, channels, z_h, z_w) layout.
            zd_bool = (seq[:, :, None] // p) % 2
            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
            # Map bits {0,1} to {-1,+1} before decoding.
            logits = decoder(zd_bool * 2.0 - 1.0)
            # Reshape so that dim 2 is 3 — presumably RGB channels with
            # per-pixel categorical logits in the last dim; TODO confirm
            # against the decoder's output layout.
            logits = logits.reshape(
                logits.size(0), -1, 3, logits.size(2), logits.size(3)
            ).permute(0, 2, 3, 4, 1)
            output = torch.distributions.categorical.Categorical(
                logits=logits / T
            ).sample()

            frames.append(output)

        return torch.cat(frames, dim=0)

    return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame