colors = torch.tensor(
[
[255, 255, 255],
- [0, 0, 0],
[255, 0, 0],
[0, 128, 0],
[0, 0, 255],
]
)
-token2char = "_X" + "".join([str(n) for n in range(len(colors) - 2)]) + ">"
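+# token vocabulary: 0 is the background, 1..len(colors)-1 are the fish colors,
+# then the two direction tokens (with the 4 colors above: forward == 4, backward == 5)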
+token_background = 0
+first_fish_token = 1
+nb_fish_tokens = len(colors) - 1
+token_forward = first_fish_token + nb_fish_tokens
+token_backward = token_forward + 1
+
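+# one printable character per token: '_' for the background, one digit per fish
+# color, then '>' (forward) and '<' (backward); with the colors above this is "_012><"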
+token2char = "_" + "".join([str(n) for n in range(len(colors) - 1)]) + "><"
def generate(
max_nb_obj=2,
nb_iterations=2,
):
- f_start = torch.zeros(nb, height, width, dtype=torch.int64)
- f_end = torch.zeros(nb, height, width, dtype=torch.int64)
- n = torch.arange(f_start.size(0))
+ pairs = []
for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
+ f_start = torch.zeros(height, width, dtype=torch.int64)
+ f_end = torch.zeros(height, width, dtype=torch.int64)
+
nb_fish = torch.randint(max_nb_obj, (1,)).item() + 1
- for c in torch.randperm(colors.size(0) - 2)[:nb_fish].sort().values:
+ for c in (
+ (torch.randperm(nb_fish_tokens) + first_fish_token)[:nb_fish].sort().values
+ ):
i, j = (
torch.randint(height - 2, (1,))[0] + 1,
torch.randint(width - 2, (1,))[0] + 1,
)
vm = torch.randint(4, (1,))[0]
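+ # decode vm in {0,1,2,3} into one of the four axis-aligned unit steps (vi, vj)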
vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (2 * (vm % 2) - 1)
- f_start[n, i, j] = c + 2
- f_start[n, i - vi, j - vj] = c + 2
- f_start[n, i + vj, j - vi] = c + 2
- f_start[n, i - vj, j + vi] = c + 2
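+ # draw the fish as a 4-cell shape at (i, j), oriented by its velocity (vi, vj)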
+ f_start[i, j] = c
+ f_start[i - vi, j - vj] = c
+ f_start[i + vj, j - vi] = c
+ f_start[i - vj, j + vi] = c
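+ # advance the fish's head by (vi, vj) for nb_iterations steps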
for l in range(nb_iterations):
i += vi
j += vj
- f_end[n, i, j] = c + 2
- f_end[n, i - vi, j - vj] = c + 2
- f_end[n, i + vj, j - vi] = c + 2
- f_end[n, i - vj, j + vi] = c + 2
-
- return torch.cat(
- [
- f_end.flatten(1),
- torch.full((f_end.size(0), 1), len(colors)),
- f_start.flatten(1),
- ],
- dim=1,
- )
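+ # draw the same fish at its final position in the end frame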
+ f_end[i, j] = c
+ f_end[i - vi, j - vj] = c
+ f_end[i + vj, j - vi] = c
+ f_end[i - vj, j + vi] = c
+
+ pairs.append((f_start, f_end))
+
+ result = []
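+ # serialize each pair: with probability 1/2 as start | forward token | end,
+ # otherwise as end | backward token | start; each row has 2 * height * width + 1 tokens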
+ for p in pairs:
+ if torch.rand(1) < 0.5:
+ result.append(
+ torch.cat(
+ [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()],
+ dim=0,
+ )[None, :]
+ )
+ else:
+ result.append(
+ torch.cat(
+ [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()],
+ dim=0,
+ )[None, :]
+ )
+ return torch.cat(result, dim=0)
-def sample2img(seq, height, width):
+
+def sample2img(seq, height, width, upscale=15):
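+ # render each sequence as an image: the two frames are drawn side by side,
+ # with every cell upscaled to an upscale x upscale pixel block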
f_start = seq[:, : height * width].reshape(-1, height, width)
- f_start = (f_start >= len(colors)).long() + (f_start < len(colors)).long() * f_start
f_end = seq[:, height * width + 1 :].reshape(-1, height, width)
- f_end = (f_end >= len(colors)).long() + (f_end < len(colors)).long() * f_end
-
- img_f_start, img_f_end = colors[f_start], colors[f_end]
-
- img = torch.cat(
- [
- img_f_start,
- torch.full(
- (img_f_start.size(0), img_f_start.size(1), 1, img_f_start.size(3)), 1
- ),
- img_f_end,
- ],
- dim=2,
- )
- return img.permute(0, 3, 1, 2)
+ def mosaic(x, upscale):
+ x = x.reshape(-1, height, width)
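+ # mask of cells holding a drawable token (background or fish color); any other
+ # token (e.g. the direction tokens) is looked up as background and marked below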
+ m = torch.logical_and(x >= 0, x < first_fish_token + nb_fish_tokens).long()
+ x = colors[x * m].permute(0, 3, 1, 2)
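+ # nearest-neighbour upscaling: expand every cell into an upscale x upscale block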
+ s = x.shape
+ x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale)
+ x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale)
+
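+ # overlay a black 'X' on every cell whose token had no color (m == 0), so
+ # out-of-vocabulary or direction tokens stay visible in the rendering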
+ for n in range(m.size(0)):
+ for i in range(m.size(1)):
+ for j in range(m.size(2)):
+ if m[n, i, j] == 0:
+ for k in range(2, upscale - 2):
+ x[n, :, i * upscale + k, j * upscale + k] = 0
+ x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0
+
+ return x
+
+ return torch.cat([mosaic(f_start, upscale), mosaic(f_end, upscale)], dim=3)
def seq2str(seq):
print(seq2str(seq[:4]))
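+ # uncomment to corrupt ~5% of the tokens with an out-of-vocabulary value (23),
+ # e.g. to check how sample2img marks them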
+ # m = (torch.rand(seq.size()) < 0.05).long()
+ # seq = (1 - m) * seq + m * 23
+
img = sample2img(seq, height, width)
print(img.size())
- torchvision.utils.save_image(img.float() / 255.0, "world.png", nrow=8, padding=2)
+ torchvision.utils.save_image(
+ img.float() / 255.0, "/tmp/world.png", nrow=8, padding=2
+ )