Update.
diff --git a/world.py b/world.py
index a93684b..61a07e9 100755
--- a/world.py
+++ b/world.py
@@ -8,6 +8,8 @@ from torch import nn
 from torch.nn import functional as F
 import cairo
 
+######################################################################
+
 
 class Box:
     nb_rgb_levels = 10
@@ -32,216 +34,9 @@ class Box:
         return False
 
 
-def scene2tensor(xh, yh, scene, size):
-    width, height = size, size
-    pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
-    data = pixel_map.numpy()
-    surface = cairo.ImageSurface.create_for_data(
-        data, cairo.FORMAT_ARGB32, width, height
-    )
-
-    ctx = cairo.Context(surface)
-    ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
-
-    for b in scene:
-        ctx.move_to(b.x * size, b.y * size)
-        ctx.rel_line_to(b.w * size, 0)
-        ctx.rel_line_to(0, b.h * size)
-        ctx.rel_line_to(-b.w * size, 0)
-        ctx.close_path()
-        ctx.set_source_rgba(
-            b.r / (Box.nb_rgb_levels - 1),
-            b.g / (Box.nb_rgb_levels - 1),
-            b.b / (Box.nb_rgb_levels - 1),
-            1.0,
-        )
-        ctx.fill()
-
-    hs = size * 0.1
-    ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
-    ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
-    ctx.rel_line_to(hs, 0)
-    ctx.rel_line_to(0, hs)
-    ctx.rel_line_to(-hs, 0)
-    ctx.close_path()
-    ctx.fill()
-
-    return (
-        pixel_map[None, :, :, :3]
-        .flip(-1)
-        .permute(0, 3, 1, 2)
-        .long()
-        .mul(Box.nb_rgb_levels)
-        .floor_divide(256)
-    )
-
-
-def random_scene():
-    scene = []
-    colors = [
-        ((Box.nb_rgb_levels - 1), 0, 0),
-        (0, (Box.nb_rgb_levels - 1), 0),
-        (0, 0, (Box.nb_rgb_levels - 1)),
-        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
-        (
-            (Box.nb_rgb_levels * 2) // 3,
-            (Box.nb_rgb_levels * 2) // 3,
-            (Box.nb_rgb_levels * 2) // 3,
-        ),
-    ]
-
-    for k in range(10):
-        wh = torch.rand(2) * 0.2 + 0.2
-        xy = torch.rand(2) * (1 - wh)
-        c = colors[torch.randint(len(colors), (1,))]
-        b = Box(
-            xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
-        )
-        if not b.collision(scene):
-            scene.append(b)
-
-    return scene
-
-
-def generate_episode(nb_steps=10, size=64):
-    delta = 0.1
-    effects = [
-        (False, 0, 0),
-        (False, delta, 0),
-        (False, 0, delta),
-        (False, -delta, 0),
-        (False, 0, -delta),
-        (True, delta, 0),
-        (True, 0, delta),
-        (True, -delta, 0),
-        (True, 0, -delta),
-    ]
-
-    while True:
-        frames = []
-
-        scene = random_scene()
-        xh, yh = tuple(x.item() for x in torch.rand(2))
-
-        frames.append(scene2tensor(xh, yh, scene, size=size))
-
-        actions = torch.randint(len(effects), (nb_steps,))
-        change = False
-
-        for a in actions:
-            g, dx, dy = effects[a]
-            if g:
-                for b in scene:
-                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
-                        x, y = b.x, b.y
-                        b.x += dx
-                        b.y += dy
-                        if (
-                            b.x < 0
-                            or b.y < 0
-                            or b.x + b.w > 1
-                            or b.y + b.h > 1
-                            or b.collision(scene)
-                        ):
-                            b.x, b.y = x, y
-                        else:
-                            xh += dx
-                            yh += dy
-                            change = True
-            else:
-                x, y = xh, yh
-                xh += dx
-                yh += dy
-                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
-                    xh, yh = x, y
-
-            frames.append(scene2tensor(xh, yh, scene, size=size))
-
-        if change:
-            break
-
-    return frames, actions
-
-
-######################################################################
-
-
-# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2<x_i, c_j>
-def sq2matrix(x, c):
-    nx = x.pow(2).sum(1)
-    nc = c.pow(2).sum(1)
-    return nx[:, None] + nc[None, :] - 2 * x @ c.t()
-
-
-def update_centroids(x, c, nb_min=1):
-    _, b = sq2matrix(x, c).min(1)
-    b.squeeze_()
-    nb_resets = 0
-
-    for k in range(0, c.size(0)):
-        i = b.eq(k).nonzero(as_tuple=False).squeeze()
-        if i.numel() >= nb_min:
-            c[k] = x.index_select(0, i).mean(0)
-        else:
-            n = torch.randint(x.size(0), (1,))
-            nb_resets += 1
-            c[k] = x[n]
-
-    return c, b, nb_resets
-
-
-def kmeans(x, nb_centroids, nb_min=1):
-    if x.size(0) < nb_centroids * nb_min:
-        print("Not enough points!")
-        exit(1)
-
-    c = x[torch.randperm(x.size(0))[:nb_centroids]]
-    t = torch.full((x.size(0),), -1)
-    n = 0
-
-    while True:
-        c, u, nb_resets = update_centroids(x, c, nb_min)
-        n = n + 1
-        nb_changes = (u - t).sign().abs().sum() + nb_resets
-        t = u
-        if nb_changes == 0:
-            break
-
-    return c, t
-
-
 ######################################################################
 
 
-def patchify(x, factor, invert_size=None):
-    if invert_size is None:
-        return (
-            x.reshape(
-                x.size(0),  # 0
-                x.size(1),  # 1
-                factor,  # 2
-                x.size(2) // factor,  # 3
-                factor,  # 4
-                x.size(3) // factor,  # 5
-            )
-            .permute(0, 2, 4, 1, 3, 5)
-            .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
-        )
-    else:
-        return (
-            x.reshape(
-                invert_size[0],  # 0
-                factor,  # 1
-                factor,  # 2
-                invert_size[1],  # 3
-                invert_size[2] // factor,  # 4
-                invert_size[3] // factor,  # 5
-            )
-            .permute(0, 3, 1, 4, 2, 5)
-            .reshape(invert_size)
-        )
-
-
 class Normalizer(nn.Module):
     def __init__(self, mu, std):
         super().__init__()
@@ -259,6 +54,7 @@ class SignSTE(nn.Module):
     def forward(self, x):
         # torch.sign() takes three values (-1, 0, +1); we want a strict two-valued sign
         s = (x >= 0).float() * 2 - 1
+
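+        # Straight-through estimator: the forward value is the hard sign s,
+        # while gradients flow through the smooth tanh surrogate u.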
         if self.training:
             u = torch.tanh(x)
             return s + u - u.detach()
@@ -269,9 +65,9 @@ class SignSTE(nn.Module):
 def train_encoder(
     train_input,
     test_input,
-    depth=2,
+    depth=3,
     dim_hidden=48,
-    nb_bits_per_token=10,
+    nb_bits_per_token=8,
     lr_start=1e-3,
     lr_end=1e-4,
     nb_epochs=10,
@@ -340,10 +136,6 @@ def train_encoder(
 
     model.to(device)
 
-    g5x5 = torch.exp(-torch.tensor([[-2.0, -1.0, 0.0, 1.0, 2.0]]) ** 2 / 2)
-    g5x5 = (g5x5.t() @ g5x5).view(1, 1, 5, 5)
-    g5x5 = g5x5 / g5x5.sum()
-
     for k in range(nb_epochs):
         lr = math.exp(
             math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
@@ -352,9 +144,9 @@ def train_encoder(
 
         acc_train_loss = 0.0
 
-        for input in train_input.split(batch_size):
+        for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
             z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = z if k < 2 else quantizer(z)
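+            # Warm-up: train the auto-encoder without quantization for the
+            # first two epochs, then switch the quantizer in.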
             output = decoder(zq)
 
             output = output.reshape(
@@ -371,7 +163,7 @@ def train_encoder(
 
         acc_test_loss = 0.0
 
-        for input in test_input.split(batch_size):
+        for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
             z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = z if k < 2 else quantizer(z)
             output = decoder(zq)
@@ -392,68 +184,227 @@ def train_encoder(
 
     return encoder, quantizer, decoder
 
-def generate_episodes(nb):
+
+######################################################################
+
+
+def scene2tensor(xh, yh, scene, size):
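+    # Rasterize the scene with cairo into a byte buffer shared with
+    # pixel_map, then convert it to a quantized [1, 3, size, size] tensor.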
+    width, height = size, size
+    pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
+    data = pixel_map.numpy()
+    surface = cairo.ImageSurface.create_for_data(
+        data, cairo.FORMAT_ARGB32, width, height
+    )
+
+    ctx = cairo.Context(surface)
+    ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
+
+    for b in scene:
+        ctx.move_to(b.x * size, b.y * size)
+        ctx.rel_line_to(b.w * size, 0)
+        ctx.rel_line_to(0, b.h * size)
+        ctx.rel_line_to(-b.w * size, 0)
+        ctx.close_path()
+        ctx.set_source_rgba(
+            b.r / (Box.nb_rgb_levels - 1),
+            b.g / (Box.nb_rgb_levels - 1),
+            b.b / (Box.nb_rgb_levels - 1),
+            1.0,
+        )
+        ctx.fill()
+
+    hs = size * 0.1
+    ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
+    ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
+    ctx.rel_line_to(hs, 0)
+    ctx.rel_line_to(0, hs)
+    ctx.rel_line_to(-hs, 0)
+    ctx.close_path()
+    ctx.fill()
+
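+    # Drop the alpha channel, flip the BGR byte order to RGB, move the
+    # channel dimension first, and requantize 0-255 down to nb_rgb_levels.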
+    return (
+        pixel_map[None, :, :, :3]
+        .flip(-1)
+        .permute(0, 3, 1, 2)
+        .long()
+        .mul(Box.nb_rgb_levels)
+        .floor_divide(256)
+    )
+
+
+def random_scene():
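+    # Sample up to 10 axis-aligned colored boxes with sides in [0.2, 0.4),
+    # keeping only those that do not collide with already placed ones.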
+    scene = []
+    colors = [
+        ((Box.nb_rgb_levels - 1), 0, 0),
+        (0, (Box.nb_rgb_levels - 1), 0),
+        (0, 0, (Box.nb_rgb_levels - 1)),
+        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
+        (
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+        ),
+    ]
+
+    for k in range(10):
+        wh = torch.rand(2) * 0.2 + 0.2
+        xy = torch.rand(2) * (1 - wh)
+        c = colors[torch.randint(len(colors), (1,))]
+        b = Box(
+            xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
+        )
+        if not b.collision(scene):
+            scene.append(b)
+
+    return scene
+
+
+def generate_episode(steps, size=64):
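+    # `steps` is a boolean mask selecting which time steps get a rendered
+    # frame; each selected frame is recorded before that step's action.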
+    delta = 0.1
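+    # Each effect is (grab, dx, dy): with grab set, the box under the hand
+    # is dragged along with it; otherwise only the hand moves.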
+    effects = [
+        (False, 0, 0),
+        (False, delta, 0),
+        (False, 0, delta),
+        (False, -delta, 0),
+        (False, 0, -delta),
+        (True, delta, 0),
+        (True, 0, delta),
+        (True, -delta, 0),
+        (True, 0, -delta),
+    ]
+
+    while True:
+        frames = []
+
+        scene = random_scene()
+        xh, yh = tuple(x.item() for x in torch.rand(2))
+
+        actions = torch.randint(len(effects), (len(steps),))
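+        # `change` tracks whether any box is actually dragged; episodes in
+        # which nothing moves are discarded and re-sampled.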
+        change = False
+
+        for s, a in zip(steps, actions):
+            if s:
+                frames.append(scene2tensor(xh, yh, scene, size=size))
+
+            g, dx, dy = effects[a]
+            if g:
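+                # Drag the box containing the hand, cancelling the move if
+                # it would leave the unit square or collide with a box.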
+                for b in scene:
+                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
+                        x, y = b.x, b.y
+                        b.x += dx
+                        b.y += dy
+                        if (
+                            b.x < 0
+                            or b.y < 0
+                            or b.x + b.w > 1
+                            or b.y + b.h > 1
+                            or b.collision(scene)
+                        ):
+                            b.x, b.y = x, y
+                        else:
+                            xh += dx
+                            yh += dy
+                            change = True
+            else:
+                x, y = xh, yh
+                xh += dx
+                yh += dy
+                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
+                    xh, yh = x, y
+
+        if change:
+            break
+
+    return frames, actions
+
+
+######################################################################
+
+
+def generate_episodes(nb, steps):
     all_frames = []
     for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
-        frames, actions = generate_episode(nb_steps=31)
-        all_frames += [ frames[0], frames[-1] ]
+        frames, actions = generate_episode(steps)
+        all_frames += frames
     return torch.cat(all_frames, 0).contiguous()
 
-def create_data_and_processors(nb_train_samples, nb_test_samples):
-    train_input = generate_episodes(nb_train_samples)
-    test_input = generate_episodes(nb_test_samples)
-    encoder, quantizer, decoder = train_encoder(train_input, test_input, nb_epochs=2)
 
-    input = test_input[:64]
+def create_data_and_processors(nb_train_samples, nb_test_samples, nb_epochs=10):
+    steps = [True] + [False] * 30 + [True]
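+    # Record only the initial frame and the frame reached 31 actions later.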
+    train_input = generate_episodes(nb_train_samples, steps)
+    test_input = generate_episodes(nb_test_samples, steps)
 
-    z = encoder(input.float())
-    height, width = z.size(2), z.size(3)
-    zq = quantizer(z).long()
-    pow2=(2**torch.arange(zq.size(1), device=zq.device))[None,None,:]
-    seq = (zq.permute(0,2,3,1).clamp(min=0).reshape(zq.size(0),-1,zq.size(1)) * pow2).sum(-1)
-    print(f"{seq.size()=}")
+    encoder, quantizer, decoder = train_encoder(
+        train_input, test_input, nb_epochs=nb_epochs
+    )
+    encoder.train(False)
+    quantizer.train(False)
+    decoder.train(False)
 
-    ZZ=zq
+    z = encoder(train_input[:1])
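+    # One dummy forward pass gives the code shape: z.size(1) bits per
+    # spatial position on a z_h x z_w grid; pow2 packs those bits into a
+    # single integer token per position.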
+    pow2 = (2 ** torch.arange(z.size(1), device=z.device))[None, None, :]
+    z_h, z_w = z.size(2), z.size(3)
 
-    zq = ((seq[:,:,None] // pow2)%2)*2-1
-    zq = zq.reshape(zq.size(0), height, width, -1).permute(0,3,1,2)
+    def frame2seq(input, batch_size=25):
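+        # Encode each batch of frames, binarize the quantized code, and
+        # pack the bits at every spatial position into one token.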
+        seq = []
 
-    print(ZZ[0])
-    print(zq[0])
+        for x in input.split(batch_size):
+            z = encoder(x)
+            ze_bool = (quantizer(z) >= 0).long()
+            output = (
+                ze_bool.permute(0, 2, 3, 1).reshape(
+                    ze_bool.size(0), -1, ze_bool.size(1)
+                )
+                * pow2
+            ).sum(-1)
 
-    print("CHECK", (ZZ-zq).abs().sum())
+            seq.append(output)
 
-    results = decoder(zq.float())
-    T = 0.1
-    results = results.reshape(
-        results.size(0), -1, 3, results.size(2), results.size(3)
-    ).permute(0, 2, 3, 4, 1)
-    results = torch.distributions.categorical.Categorical(logits=results / T).sample()
+        return torch.cat(seq, dim=0)
 
+    def seq2frame(input, batch_size=25, T=1e-2):
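+        # Unpack each token back into {-1, +1} bits, decode them into
+        # per-pixel logits, and sample pixel levels at low temperature T.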
+        frames = []
 
-    torchvision.utils.save_image(
-        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
-    )
+        for seq in input.split(batch_size):
+            zd_bool = (seq[:, :, None] // pow2) % 2
+            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
+            logits = decoder(zd_bool * 2.0 - 1.0)
+            logits = logits.reshape(
+                logits.size(0), -1, 3, logits.size(2), logits.size(3)
+            ).permute(0, 2, 3, 4, 1)
+            output = torch.distributions.categorical.Categorical(
+                logits=logits / T
+            ).sample()
 
-    torchvision.utils.save_image(
-        results.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
-    )
+            frames.append(output)
+
+        return torch.cat(frames, dim=0)
+
+    return train_input, test_input, frame2seq, seq2frame
 
 
 ######################################################################
 
 if __name__ == "__main__":
-    create_data_and_processors(250,100)
+    train_input, test_input, frame2seq, seq2frame = create_data_and_processors(
+        # 10000, 1000,
+        100,
+        100,
+        nb_epochs=2,
+    )
+
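+    # Round-trip a few test frames through frame2seq / seq2frame and save
+    # the originals and reconstructions for visual comparison.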
+    input = test_input[:64]
 
-    # train_input = generate_episodes(2500)
-    # test_input = generate_episodes(1000)
+    seq = frame2seq(input)
 
-    # encoder, quantizer, decoder = train_encoder(train_input, test_input)
+    print(f"{seq.size()=} {seq.dtype=} {seq.min()=} {seq.max()=}")
 
-    # input = test_input[torch.randperm(test_input.size(0))[:64]]
-    # z = encoder(input.float())
-    # zq = quantizer(z)
-    # results = decoder(zq)
+    output = seq2frame(seq)
 
-    # T = 0.1
-    # results = torch.distributions.categorical.Categorical(logits=results / T).sample()
+    torchvision.utils.save_image(
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+    )
+
+    torchvision.utils.save_image(
+        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+    )