Update.
[culture.git] / world.py
index 4055533..36aa1e9 100755
--- a/world.py
+++ b/world.py
@@ -18,31 +18,16 @@ from torch.nn import functional as F
 colors = torch.tensor(
     [
         [255, 255, 255],
-        [0, 0, 255],
-        [0, 0, 255],
+        [255, 0, 0],
         [0, 192, 0],
-        [0, 255, 0],
-        [0, 255, 127],
-        [0, 255, 255],
+        [0, 0, 255],
+        [255, 192, 0],
         [0, 255, 255],
-        [30, 144, 255],
-        [64, 224, 208],
-        [65, 105, 225],
-        [75, 0, 130],
-        [106, 90, 205],
-        [128, 0, 128],
-        [135, 206, 235],
-        [192, 192, 192],
-        [220, 20, 60],
-        [250, 128, 114],
-        [255, 0, 0],
         [255, 0, 255],
-        [255, 105, 180],
-        [255, 127, 80],
-        [255, 165, 0],
-        [255, 182, 193],
-        [255, 20, 147],
-        [255, 200, 0],
+        [192, 255, 192],
+        [255, 192, 192],
+        [192, 192, 255],
+        [192, 192, 192],
     ]
 )
 
@@ -55,11 +40,131 @@ token_backward = token_forward + 1
 token2char = "_" + "".join([chr(ord("A") + n) for n in range(len(colors) - 1)]) + "><"
 
 
-def generate(
+def generate_seq(
+    nb, height, width, nb_birds=3, nb_iterations=2, return_iterations=False
+):
+    pairs = []
+    kept_iterations = []
+
+    for _ in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world generation"):
+        while True:
+            iterations = []
+
+            f_start = torch.zeros(height, width, dtype=torch.int64)
+
+            i, j, vi, vj = (
+                torch.empty(nb_birds, dtype=torch.int64),
+                torch.empty(nb_birds, dtype=torch.int64),
+                torch.empty(nb_birds, dtype=torch.int64),
+                torch.empty(nb_birds, dtype=torch.int64),
+            )
+
+            col = torch.randperm(colors.size(0) - 1)[:nb_birds].sort().values + 1
+
+            for n in range(nb_birds):
+                c = col[n]
+
+                while True:
+                    i[n], j[n] = (
+                        torch.randint(height, (1,))[0],
+                        torch.randint(width, (1,))[0],
+                    )
+                    vm = torch.randint(4, (1,))[0]
+                    vi[n], vj[n] = (vm % 2) * 2 - 1, (vm // 2) * 2 - 1
+                    if (
+                        i[n] - vi[n] >= 0
+                        and i[n] - vi[n] < height
+                        and j[n] - vj[n] >= 0
+                        and j[n] - vj[n] < width
+                        and f_start[i[n], j[n]] == 0
+                        and f_start[i[n] - vi[n], j[n]] == 0
+                        and f_start[i[n], j[n] - vj[n]] == 0
+                    ):
+                        break
+
+                f_start[i[n], j[n]] = c
+                f_start[i[n] - vi[n], j[n]] = c
+                f_start[i[n], j[n] - vj[n]] = c
+
+            f_end = f_start.clone()
+
+            for l in range(nb_iterations):
+                iterations.append(f_end.clone())
+                f_end[...] = 0
+                nb_collisions = 0
+                for n in range(nb_birds):
+                    c = col[n]
+
+                    pi, pj, pvi, pvj = (
+                        i[n].item(),
+                        j[n].item(),
+                        vi[n].item(),
+                        vj[n].item(),
+                    )
+
+                    if (i[n] == 0 and vi[n] == -1) or (
+                        i[n] == height - 1 and vi[n] == 1
+                    ):
+                        vi[n] = -vi[n]
+                    if (j[n] == 0 and vj[n] == -1) or (
+                        j[n] == width - 1 and vj[n] == 1
+                    ):
+                        vj[n] = -vj[n]
+
+                    i[n] += vi[n]
+                    j[n] += vj[n]
+
+                    if not (
+                        f_end[i[n], j[n]] == 0
+                        and f_end[i[n] - vi[n], j[n]] == 0
+                        and f_end[i[n], j[n] - vj[n]] == 0
+                    ):
+                        nb_collisions += 1
+
+                    f_end[i[n], j[n]] = c
+                    f_end[i[n] - vi[n], j[n]] = c
+                    f_end[i[n], j[n] - vj[n]] = c
+
+            iterations.append(f_end.clone())
+
+            if nb_collisions == 0:
+                break
+
+        kept_iterations.append(iterations)
+        pairs.append((f_start, f_end))
+
+    result = []
+    for p in pairs:
+        if torch.rand(1) < 0.5:
+            result.append(
+                torch.cat(
+                    [p[0].flatten(), torch.tensor([token_forward]), p[1].flatten()],
+                    dim=0,
+                )[None, :]
+            )
+        else:
+            result.append(
+                torch.cat(
+                    [p[1].flatten(), torch.tensor([token_backward]), p[0].flatten()],
+                    dim=0,
+                )[None, :]
+            )
+
+    if return_iterations:
+        # iterations = torch.cat([ torch.cat([ x[None, None] for x in l], dim = 1) for l in kept_iterations ], dim=0)
+        return torch.cat(result, dim=0), kept_iterations
+    else:
+        return torch.cat(result, dim=0)
+
+
+######################################################################
+
+
+def generate_seq_old(
     nb,
     height,
     width,
-    nb_birds=2,
+    nb_birds=3,
     nb_iterations=2,
 ):
     pairs = []
@@ -121,32 +226,33 @@ def generate(
     return torch.cat(result, dim=0)
 
 
-def sample2img(seq, height, width, upscale=15):
-    f_first = seq[:, : height * width].reshape(-1, height, width)
-    f_second = seq[:, height * width + 1 :].reshape(-1, height, width)
-    direction = seq[:, height * width]
+def frame2img(x, height, width, upscale=15):
+    x = x.reshape(-1, height, width)
+    m = torch.logical_and(x >= 0, x < first_bird_token + nb_bird_tokens).long()
+    x = colors[x * m].permute(0, 3, 1, 2)
+    s = x.shape
+    x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale)
+    x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale)
+
+    x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0
+    x[:, :, torch.arange(0, x.size(2), upscale), :] = 0
+    x = x[:, :, 1:, 1:]
 
-    def mosaic(x, upscale):
-        x = x.reshape(-1, height, width)
-        m = torch.logical_and(x >= 0, x < first_bird_token + nb_bird_tokens).long()
-        x = colors[x * m].permute(0, 3, 1, 2)
-        s = x.shape
-        x = x[:, :, :, None, :, None].expand(-1, -1, -1, upscale, -1, upscale)
-        x = x.reshape(s[0], s[1], s[2] * upscale, s[3] * upscale)
+    for n in range(m.size(0)):
+        for i in range(m.size(1)):
+            for j in range(m.size(2)):
+                if m[n, i, j] == 0:
+                    for k in range(2, upscale - 2):
+                        x[n, :, i * upscale + k, j * upscale + k] = 0
+                        x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0
 
-        x[:, :, :, torch.arange(0, x.size(3), upscale)] = 0
-        x[:, :, torch.arange(0, x.size(2), upscale), :] = 0
-        x = x[:, :, 1:, 1:]
+    return x
 
-        for n in range(m.size(0)):
-            for i in range(m.size(1)):
-                for j in range(m.size(2)):
-                    if m[n, i, j] == 0:
-                        for k in range(2, upscale - 2):
-                            x[n, :, i * upscale + k, j * upscale + k] = 0
-                            x[n, :, i * upscale + upscale - 1 - k, j * upscale + k] = 0
 
-        return x
+def seq2img(seq, height, width, upscale=15):
+    f_first = seq[:, : height * width].reshape(-1, height, width)
+    f_second = seq[:, height * width + 1 :].reshape(-1, height, width)
+    direction = seq[:, height * width]
 
     direction_symbol = torch.full((direction.size(0), height * upscale - 1, upscale), 0)
     direction_symbol = colors[direction_symbol].permute(0, 3, 1, 2)
@@ -159,7 +265,7 @@ def sample2img(seq, height, width, upscale=15):
                     n,
                     :,
                     (height * upscale) // 2 - upscale // 2 + k,
-                    3 + abs(k - upscale // 2),
+                    3 + upscale // 2 - abs(k - upscale // 2),
                 ] = 0
         elif direction[n] == token_backward:
             for k in range(upscale):
@@ -167,7 +273,7 @@ def sample2img(seq, height, width, upscale=15):
                     n,
                     :,
                     (height * upscale) // 2 - upscale // 2 + k,
-                    3 + upscale // 2 - abs(k - upscale // 2),
+                    3 + abs(k - upscale // 2),
                 ] = 0
         else:
             for k in range(2, upscale - 2):
@@ -180,11 +286,11 @@ def sample2img(seq, height, width, upscale=15):
 
     return torch.cat(
         [
-            mosaic(f_first, upscale),
+            frame2img(f_first, height, width, upscale),
             separator,
             direction_symbol,
             separator,
-            mosaic(f_second, upscale),
+            frame2img(f_second, height, width, upscale),
         ],
         dim=3,
     )
@@ -204,16 +310,28 @@ if __name__ == "__main__":
 
     height, width = 6, 8
     start_time = time.perf_counter()
-    seq = generate(nb=90, height=height, width=width)
+    seq, it = generate_seq(
+        nb=64, height=height, width=width, nb_iterations=100, return_iterations=True
+    )
     delay = time.perf_counter() - start_time
     print(f"{seq.size(0)/delay:02f} samples/s")
 
     print(seq2str(seq[:4]))
 
+    for t in range(len(it[0])):
+        img = torch.cat([frame2img(f[t], height, width) for f in it], dim=0)
+        torchvision.utils.save_image(
+            img.float() / 255.0,
+            f"/tmp/frame_{t:03d}.png",
+            nrow=8,
+            padding=6,
+            pad_value=0,
+        )
+
     # m = (torch.rand(seq.size()) < 0.05).long()
     # seq = (1 - m) * seq + m * 23
 
-    img = sample2img(seq, height, width)
+    img = seq2img(seq, height, width)
     print(img.size())
 
     torchvision.utils.save_image(
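
A minimal usage sketch of the new entry points introduced by this commit (generate_seq and seq2img). It is illustrative only, not part of the diff, and assumes this file is importable as a module named world with torchvision installed; it mirrors what the __main__ block above does.

    # Illustrative only -- exercises the API added in this commit.
    import torchvision
    import world  # world.py from this revision, assumed importable

    # Generate 8 sequences: start frame, direction token, end frame.
    seq = world.generate_seq(nb=8, height=6, width=8, nb_birds=3, nb_iterations=2)

    # Render each sequence to an RGB mosaic (N, 3, H, W) with values in 0..255.
    img = world.seq2img(seq, height=6, width=8)

    # Scale to [0, 1] before saving, as the __main__ block does.
    torchvision.utils.save_image(img.float() / 255.0, "/tmp/world_seq.png", nrow=4)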