Rewrite generate() with a fixed number of bouncing birds (old version kept as generate_), switch bird tokens to letters, extend the color palette, draw cell grid lines in sample2img, and swap the forward/backward arrow shapes.
diff --git a/world.py b/world.py
index ab02c82..68f46de 100755
--- a/world.py
+++ b/world.py
@@ -18,28 +18,147 @@ from torch.nn import functional as F
 colors = torch.tensor(
     [
         [255, 255, 255],
-        [255, 0, 0],
-        [0, 128, 0],
+        [255, 20, 147],
         [0, 0, 255],
-        [255, 200, 0],
+        [0, 192, 0],
+        [0, 255, 255],
         [192, 192, 192],
+        [106, 90, 205],
+        [255, 0, 0],
+        [220, 20, 60],
+        [65, 105, 225],
+        [255, 200, 0],
+        # [255, 182, 193],
+        # [75, 0, 130],
+        # [128, 0, 128],
+        # [30, 144, 255],
+        # [135, 206, 235],
+        # [0, 255, 0],
+        # [64, 224, 208],
+        # [250, 128, 114],
+        # [255, 165, 0],
+        # [0, 255, 255],
     ]
 )
 
 token_background = 0
 first_bird_token = 1
-nb_bird_tokens = len(colors) - 1
+nb_bird_tokens = colors.size(0) - 1
 token_forward = first_bird_token + nb_bird_tokens
 token_backward = token_forward + 1
 
-token2char = "_" + "".join([str(n) for n in range(len(colors) - 1)]) + "><"
+token2char = "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><"
 
 
 def generate(
     nb,
     height,
     width,
-    max_nb_obj=2,
+    nb_birds=3,
+    nb_iterations=1,
+):
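+    # Generate nb examples; each pairs a start frame with the end frame
+    # obtained by letting three-cell "birds" move around a small grid.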
+    pairs = []
+
+    for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
+        f_start = torch.zeros(height, width, dtype=torch.int64)
+
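+        # Per-bird position (i, j) and velocity (vi, vj); both velocity
+        # components are +1 or -1, so birds move diagonally.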
+        i, j, vi, vj = (
+            torch.empty(nb_birds, dtype=torch.int64),
+            torch.empty(nb_birds, dtype=torch.int64),
+            torch.empty(nb_birds, dtype=torch.int64),
+            torch.empty(nb_birds, dtype=torch.int64),
+        )
+
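+        # Pick nb_birds distinct non-background color tokens, in increasing order.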
+        col = torch.randperm(colors.size(0) - 1)[:nb_birds].sort().values + 1
+
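+        # Place each bird: a head cell plus one vertical and one horizontal
+        # tail cell, resampled until all three cells are free and in the grid.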
+        for n in range(nb_birds):
+            c = col[n]
+
+            while True:
+                i[n], j[n] = (
+                    torch.randint(height, (1,))[0],
+                    torch.randint(width, (1,))[0],
+                )
+                vm = torch.randint(4, (1,))[0]
+                vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
+                if (
+                    i[n] - vi[n] >= 0
+                    and i[n] - vi[n] < height
+                    and j[n] - vj[n] >= 0
+                    and j[n] - vj[n] < width
+                    and f_start[i[n], j[n]] == 0
+                    and f_start[i[n] - vi[n], j[n]] == 0
+                    and f_start[i[n], j[n] - vj[n]] == 0
+                ):
+                    break
+
+            f_start[i[n], j[n]] = c
+            f_start[i[n] - vi[n], j[n]] = c
+            f_start[i[n], j[n] - vj[n]] = c
+
+        f_end = f_start.clone()
+
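+        # Simulate nb_iterations time steps to obtain the end frame.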
+        for l in range(nb_iterations):
+            for n in range(nb_birds):
+                c = col[n]
+                f_end[i[n], j[n]] = 0
+                f_end[i[n] - vi[n], j[n]] = 0
+                f_end[i[n], j[n] - vj[n]] = 0
+
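+                # Save the current position and velocity so the move can be
+                # undone if it leads to a collision.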
+                pi, pj, pvi, pvj = i[n].item(), j[n].item(), vi[n].item(), vj[n].item()
+
+                assert (
+                    f_end[i[n], j[n]] == 0
+                    and f_end[i[n] - vi[n], j[n]] == 0
+                    and f_end[i[n], j[n] - vj[n]] == 0
+                )
+
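+                # Bounce: flip a velocity component when the head sits on a border.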
+                if (i[n] == 0 and vi[n] == -1) or (i[n] == height - 1 and vi[n] == 1):
+                    vi[n] = -vi[n]
+                if (j[n] == 0 and vj[n] == -1) or (j[n] == width - 1 and vj[n] == 1):
+                    vj[n] = -vj[n]
+
+                i[n] += vi[n]
+                j[n] += vj[n]
+
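+                # Cancel the move if the new cells overlap another bird.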
+                if not (
+                    f_end[i[n], j[n]] == 0
+                    and f_end[i[n] - vi[n], j[n]] == 0
+                    and f_end[i[n], j[n] - vj[n]] == 0
+                ):
+                    i[n], j[n], vi[n], vj[n] = pi, pj, pvi, pvj
+
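+                # Redraw the bird at its (possibly reverted) position.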
+                f_end[i[n], j[n]] = c
+                f_end[i[n] - vi[n], j[n]] = c
+                f_end[i[n], j[n] - vj[n]] = c
+
+        pairs.append((f_start, f_end))
+
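+    # Flatten each pair into a single token sequence, in a random order,
+    # with the matching direction token between the two frames.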
+    result = []
+    for p in pairs:
+        if torch.rand(1) < 0.5:
+            result.append(
+                torch.cat(
+                    [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()],
+                    dim=0,
+                )[None, :]
+            )
+        else:
+            result.append(
+                torch.cat(
+                    [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()],
+                    dim=0,
+                )[None, :]
+            )
+
+    return torch.cat(result, dim=0)
+
+
+def generate_(
+    nb,
+    height,
+    width,
+    nb_birds=3,
     nb_iterations=2,
 ):
     pairs = []
@@ -49,7 +168,6 @@ def generate(
         f_end = torch.zeros(height, width, dtype=torch.int64)
         n = torch.arange(f_start.size(0))
 
-        nb_birds = torch.randint(max_nb_obj, (1,)).item() + 1
         for c in (
             (torch.randperm(nb_bird_tokens) + first_bird_token)[:nb_birds].sort().values
         ):
@@ -115,6 +233,10 @@ def sample2img(seq, height, width, upscale=15):
         x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale)
         x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale)
 
+        x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0
+        x[:, :, torch.arange(0, x.size(2), upscale), :] = 0
+        x = x[:, :, 1:, 1:]
+
         for n in range(m.size(0)):
             for i in range(m.size(1)):
                 for j in range(m.size(2)):
@@ -125,9 +247,9 @@ def sample2img(seq, height, width, upscale=15):
 
         return x
 
-    direction_symbol = torch.full((direction.size(0), height * upscale, upscale), 0)
+    direction_symbol = torch.full((direction.size(0), height * upscale - 1, upscale), 0)
     direction_symbol = colors[direction_symbol].permute(0, 3, 1, 2)
-    separator = torch.full((direction.size(0), 3, height * upscale, 1), 0)
+    separator = torch.full((direction.size(0), 3, height * upscale - 1, 1), 0)
 
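+    # Render the direction token as a ">" (forward) or "<" (backward)
+    # arrowhead in the band separating the two frames.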
     for n in range(direction_symbol.size(0)):
         if direction[n] == token_forward:
@@ -136,7 +258,7 @@ def sample2img(seq, height, width, upscale=15):
                     n,
                     :,
                     (height * upscale) // 2 - upscale // 2 + k,
-                    3 + abs(k - upscale // 2),
+                    3 + upscale // 2 - abs(k - upscale // 2),
                 ] = 0
         elif direction[n] == token_backward:
             for k in range(upscale):
@@ -144,7 +266,7 @@ def sample2img(seq, height, width, upscale=15):
                     n,
                     :,
                     (height * upscale) // 2 - upscale // 2 + k,
-                    3 + upscale // 2 - abs(k - upscale // 2),
+                    3 + abs(k - upscale // 2),
                 ] = 0
         else:
             for k in range(2, upscale - 2):
@@ -181,7 +303,7 @@ if __name__ == "__main__":
 
     height, width = 6, 8
     start_time = time.perf_counter()
-    seq = generate(nb=90, height=height, width=width, max_nb_obj=3)
+    seq = generate(nb=90, height=height, width=width)
     delay = time.perf_counter() - start_time
     print(f"{seq.size(0)/delay:02f} samples/s")
 
@@ -194,5 +316,5 @@ if __name__ == "__main__":
     print(img.size())
 
     torchvision.utils.save_image(
-        img.float() / 255.0, "/tmp/world.png", nrow=6, padding=4
+        img.float() / 255.0, "/tmp/world.png", nrow=6, padding=6, pad_value=0
     )