import torch
import tqdm

# random_scene(), scene2tensor() and train_encoder() are defined
# elsewhere in this module (see the interface sketch below).


def generate_episode(steps, size=64):
    delta = 0.1

    # Each effect is a triple (grasp, dx, dy): the first five only move
    # the hand, the last four also drag along the box under the hand
    effects = [
        (False, 0, 0),
        (False, delta, 0),
        (False, 0, delta),
        (False, -delta, 0),
        (False, 0, -delta),
        (True, delta, 0),
        (True, 0, delta),
        (True, -delta, 0),
        (True, 0, -delta),
    ]

    while True:
        frames = []

        scene = random_scene()
        xh, yh = tuple(x.item() for x in torch.rand(2))

        actions = torch.randint(len(effects), (len(steps),))
        nb_changes = 0

        for s, a in zip(steps, actions):
            # Render a frame only at the steps flagged True in `steps`
            if s:
                frames.append(scene2tensor(xh, yh, scene, size=size))

            grasp, dx, dy = effects[a]

            if grasp:
                # Move the box under the hand, if there is one, undoing
                # the move if it leaves the unit square or collides
                for b in scene:
                    if b.x <= xh <= b.x + b.w and b.y <= yh <= b.y + b.h:
                        x, y = b.x, b.y
                        b.x += dx
                        b.y += dy
                        if (
                            b.x < 0
                            or b.y < 0
                            or b.x + b.w > 1
                            or b.y + b.h > 1
                            or b.collision(scene)
                        ):
                            b.x, b.y = x, y
                        else:
                            xh += dx
                            yh += dy
                            nb_changes += 1
            else:
                # Move the hand alone, undoing moves that leave the unit square
                x, y = xh, yh
                xh += dx
                yh += dy
                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                    xh, yh = x, y

        # Reject episodes in which too few actions actually moved a box
        if nb_changes > len(steps) // 3:
            break

    return frames, actions
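

######################################################################

# The scene helpers are not defined in this excerpt. Schematically, the
# interface relied on above is the following (the names are real, but
# the field and return types are assumptions inferred from the calls,
# not the original definitions):
#
#     class Box:                    # axis-aligned box in the unit square
#         x, y, w, h: float
#         def collision(self, scene) -> bool   # overlaps another box?
#
#     def random_scene() -> list[Box]
#     def scene2tensor(xh, yh, scene, size) -> torch.Tensor
#         # one rendered frame with a leading batch dimension,
#         # presumably (1, 3, size, size)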


######################################################################


def generate_episodes(nb, steps):
    all_frames, all_actions = [], []
    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
        frames, actions = generate_episode(steps)
        all_frames += frames
        all_actions += [actions[None, :]]
    return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
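
# With the "first_last" steps used below, each episode records exactly
# two frames (the first and the last), so, assuming scene2tensor()
# renders (1, C, size, size) frames, the returned tensors have shapes
# (2 * nb, C, size, size) and (nb, len(steps)).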


def create_data_and_processors(
    nb_train_samples,
    nb_test_samples,
    mode,
    nb_steps,
    nb_epochs=10,
    device=torch.device("cpu"),
    device_storage=torch.device("cpu"),
    logger=None,
):
    assert mode in ["first_last"]

    if mode == "first_last":
        # Record only the first and the last frame of each episode
        steps = [True] + [False] * (nb_steps + 1) + [True]

    train_input, train_actions = generate_episodes(nb_train_samples, steps)
    train_input, train_actions = train_input.to(device_storage), train_actions.to(
        device_storage
    )
    test_input, test_actions = generate_episodes(nb_test_samples, steps)
    test_input, test_actions = test_input.to(device_storage), test_actions.to(
        device_storage
    )

    encoder, quantizer, decoder = train_encoder(
        train_input,
        test_input,
        lambda_entropy=1.0,
        nb_epochs=nb_epochs,
        logger=logger,
        device=device,
    )
    encoder.train(False)
    quantizer.train(False)
    decoder.train(False)

    # One forward pass to get the latent geometry: z.size(1) bits per
    # location on a z_h x z_w grid, and the powers of two to pack them
    z = encoder(train_input[:1].to(device))
    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
    z_h, z_w = z.size(2), z.size(3)

    def frame2seq(input, batch_size=25):
        seq = []
        p = pow2.to(device)
        for x in input.split(batch_size):
            x = x.to(device)
            z = encoder(x)
            # Binarize the quantizer's output and pack the bits at each
            # spatial location into one integer token
            ze_bool = (quantizer(z) >= 0).long()
            output = (
                ze_bool.permute(0, 2, 3, 1).reshape(
                    ze_bool.size(0), -1, ze_bool.size(1)
                )
                * p
            ).sum(-1)

            seq.append(output)

        return torch.cat(seq, dim=0)
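
    # Each frame thus becomes a sequence of z_h * z_w integer tokens,
    # each in [0, 2 ** z.size(1)). For instance, with 8 bits per
    # location, the bit vector (1, 0, 1, 0, 0, 0, 0, 0) packs to
    # 2 ** 0 + 2 ** 2 = 5.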

    def seq2frame(input, batch_size=25, T=1e-2):
        frames = []
        p = pow2.to(device)
        for seq in input.split(batch_size):
            seq = seq.to(device)
            # Unpack each token back into its bits, map {0, 1} to {-1, +1},
            # and decode into per-pixel categorical logits
            zd_bool = (seq[:, :, None] // p) % 2
            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
            logits = decoder(zd_bool * 2.0 - 1.0)
            logits = logits.reshape(
                logits.size(0), -1, 3, logits.size(2), logits.size(3)
            ).permute(0, 2, 3, 4, 1)
            output = torch.distributions.categorical.Categorical(
                logits=logits / T
            ).sample()

            frames.append(output)

        return torch.cat(frames, dim=0)
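
    # Dividing the logits by T = 1e-2 makes the categorical sampling all
    # but deterministic: the decoder's arg-max value is returned for
    # nearly every pixel, while keeping a stochastic code path.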

    return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame
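

######################################################################

# A minimal usage sketch; the sample counts, nb_steps and nb_epochs are
# illustrative values, not taken from the original code, and the call
# trains the autoencoder, so it is slow even at these sizes.

if __name__ == "__main__":
    train_input, train_actions, test_input, test_actions, frame2seq, seq2frame = (
        create_data_and_processors(
            nb_train_samples=250,
            nb_test_samples=25,
            mode="first_last",
            nb_steps=5,
            nb_epochs=2,
        )
    )

    # Round-trip a few frames through the discrete token space
    seq = frame2seq(train_input[:4])
    print(seq.size())    # (4, z_h * z_w) integer tokens
    recon = seq2frame(seq)
    print(recon.size())  # e.g. (4, 3, 64, 64) with the default 64x64 frames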