- train_loss = F.cross_entropy(output, input)
-
- acc_train_loss += train_loss.item() * input.size(0)
-
- optimizer.zero_grad()
- train_loss.backward()
- optimizer.step()
-
- acc_test_loss = 0.0
-
- for input in test_input.split(batch_size):
- z = encoder(input)
- zq = z if k < 1 else quantizer(z)
- output = decoder(zq)
-
- output = output.reshape(
- output.size(0), -1, 3, output.size(2), output.size(3)
- )
-
- test_loss = F.cross_entropy(output, input)
-
- acc_test_loss += test_loss.item() * input.size(0)
-
- train_loss = acc_train_loss / train_input.size(0)
- test_loss = acc_test_loss / test_input.size(0)
-
- print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
- sys.stdout.flush()
-
- return encoder, quantizer, decoder
-
-def generate_episodes(nb):
- all_frames = []
- for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
- frames, actions = generate_episode(nb_steps=31)
- all_frames += [ frames[0], frames[-1] ]
- return torch.cat(all_frames, 0).contiguous()
-
-def create_data_and_processors(nb_train_samples, nb_test_samples):
- train_input = generate_episodes(nb_train_samples)
- test_input = generate_episodes(nb_test_samples)
- encoder, quantizer, decoder = train_encoder(train_input, test_input, nb_epochs=2)
+ return torch.cat(result, dim=0)
+
+
+def sample2img(seq, height, width, upscale=15):
+    # Render a batch of token sequences as RGB images.
+    # Assumed sequence layout (inferred from the slicing below -- confirm
+    # against the sequence builder): [first frame: height*width tokens]
+    # [1 direction token] [second frame: height*width tokens].
+    # Relies on module globals: colors (palette lookup table),
+    # first_bird_token, nb_bird_tokens, token_forward, token_backward.
+    f_first = seq[:, : height * width].reshape(-1, height, width)
+    f_second = seq[:, height * width + 1 :].reshape(-1, height, width)
+    direction = seq[:, height * width]
+
+    def mosaic(x, upscale):
+        # Upscale one token grid into an RGB image with 1-pixel black grid
+        # lines between cells; cells whose token falls outside the valid
+        # range are additionally overdrawn with a black X.
+        x = x.reshape(-1, height, width)
+        # m is 1 for tokens in [0, first_bird_token + nb_bird_tokens),
+        # 0 otherwise; x * m remaps invalid tokens to palette index 0
+        # before the colors[] lookup.
+        m = torch.logical_and(x >= 0, x < first_bird_token + nb_bird_tokens).long()
+        # (N, H, W, 3) -> (N, 3, H, W)
+        x = colors[x * m].permute(0, 3, 1, 2)
+        s = x.shape
+        # Nearest-neighbor upscaling: insert two singleton axes, expand
+        # them to `upscale`, then fold into (N, 3, H*upscale, W*upscale).
+        x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale)
+        x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale)
+
+        # Black grid lines at every cell boundary; drop the outermost
+        # row/column so the image starts on cell content.
+        x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0
+        x[:, :, torch.arange(0, x.size(2), upscale), :] = 0
+        x = x[:, :, 1:, 1:]
+
+        # Draw an X (two diagonals, inset by 2 pixels) over every cell
+        # holding an invalid token. Pure Python loop over cells -- fine
+        # for visualization-sized batches.
+        for n in range(m.size(0)):
+            for i in range(m.size(1)):
+                for j in range(m.size(2)):
+                    if m[n, i, j] == 0:
+                        for k in range(2, upscale - 2):
+                            x[n, :, i * upscale + k, j * upscale + k] = 0
+                            x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0
+
+        return x
+
+    # Per-sample glyph strip (width `upscale`) showing the direction token:
+    # filled with palette index 0 (background), then drawn into in black.
+    direction_symbol = torch.full((direction.size(0), height * upscale - 1, upscale), 0)
+    direction_symbol = colors[direction_symbol].permute(0, 3, 1, 2)
+    # 1-pixel vertical separator column between image panels.
+    separator = torch.full((direction.size(0), 3, height * upscale - 1, 1), 0)
+
+    for n in range(direction_symbol.size(0)):
+        if direction[n] == token_forward:
+            # Right-pointing chevron '>' centered vertically.
+            # NOTE(review): the +3 horizontal offset assumes upscale is
+            # large enough (default 15) to keep indices in range.
+            for k in range(upscale):
+                direction_symbol[
+                    n,
+                    :,
+                    (height * upscale) // 2 - upscale // 2 + k,
+                    3 + upscale // 2 - abs(k - upscale // 2),
+                ] = 0
+        elif direction[n] == token_backward:
+            # Left-pointing chevron '<' centered vertically.
+            for k in range(upscale):
+                direction_symbol[
+                    n,
+                    :,
+                    (height * upscale) // 2 - upscale // 2 + k,
+                    3 + abs(k - upscale // 2),
+                ] = 0
+        else:
+            # Unknown direction token: draw an X as a visible error marker.
+            for k in range(2, upscale - 2):
+                direction_symbol[
+                    n, :, (height * upscale) // 2 - upscale // 2 + k, k
+                ] = 0
+                direction_symbol[
+                    n, :, (height * upscale) // 2 - upscale // 2 + k, upscale - 1 - k
+                ] = 0
+
+    # Final layout, concatenated along width:
+    # [first frame | sep | direction glyph | sep | second frame].
+    return torch.cat(
+        [
+            mosaic(f_first, upscale),
+            separator,
+            direction_symbol,
+            separator,
+            mosaic(f_second, upscale),
+        ],
+        dim=3,
+    )