Update.
diff --git a/world.py b/world.py
index 5ba0f36..a93684b 100755
--- a/world.py
+++ b/world.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-import math
+import math, sys, tqdm
 
 import torch, torchvision
 
@@ -10,6 +10,8 @@ import cairo
 
 
 class Box:
+    nb_rgb_levels = 10
+
     def __init__(self, x, y, w, h, r, g, b):
         self.x = x
         self.y = y
@@ -30,7 +32,7 @@ class Box:
         return False
 
 
-def scene2tensor(xh, yh, scene, size=64):
+def scene2tensor(xh, yh, scene, size):
     width, height = size, size
     pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
     data = pixel_map.numpy()
@@ -47,7 +49,12 @@ def scene2tensor(xh, yh, scene, size=64):
         ctx.rel_line_to(0, b.h * size)
         ctx.rel_line_to(-b.w * size, 0)
         ctx.close_path()
-        ctx.set_source_rgba(b.r, b.g, b.b, 1.0)
+        ctx.set_source_rgba(
+            b.r / (Box.nb_rgb_levels - 1),
+            b.g / (Box.nb_rgb_levels - 1),
+            b.b / (Box.nb_rgb_levels - 1),
+            1.0,
+        )
         ctx.fill()
 
     hs = size * 0.1
@@ -59,17 +66,28 @@ def scene2tensor(xh, yh, scene, size=64):
     ctx.close_path()
     ctx.fill()
 
-    return pixel_map[None, :, :, :3].flip(-1).permute(0, 3, 1, 2).float() / 255
+    return (
+        pixel_map[None, :, :, :3]
+        .flip(-1)
+        .permute(0, 3, 1, 2)
+        .long()
+        .mul(Box.nb_rgb_levels)
+        .floor_divide(256)  # quantizes 0..255 bytes to levels 0..nb_rgb_levels - 1
+    )
 
 
 def random_scene():
     scene = []
     colors = [
-        (1.00, 0.00, 0.00),
-        (0.00, 1.00, 0.00),
-        (0.00, 0.00, 1.00),
-        (1.00, 1.00, 0.00),
-        (0.75, 0.75, 0.75),
+        ((Box.nb_rgb_levels - 1), 0, 0),
+        (0, (Box.nb_rgb_levels - 1), 0),
+        (0, 0, (Box.nb_rgb_levels - 1)),
+        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
+        (
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+        ),
     ]
 
     for k in range(10):
@@ -85,7 +103,7 @@ def random_scene():
     return scene
 
 
-def sequence(nb_steps=10, all_frames=False):
+def generate_episode(nb_steps=10, size=64):
     delta = 0.1
     effects = [
         (False, 0, 0),
@@ -100,13 +118,12 @@ def sequence(nb_steps=10, all_frames=False):
     ]
 
     while True:
-
-        frames =[]
+        frames = []
 
         scene = random_scene()
         xh, yh = tuple(x.item() for x in torch.rand(2))
 
-        frames.append(scene2tensor(xh, yh, scene))
+        frames.append(scene2tensor(xh, yh, scene, size=size))
 
         actions = torch.randint(len(effects), (nb_steps,))
         change = False
@@ -138,11 +155,7 @@ def sequence(nb_steps=10, all_frames=False):
                 if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                     xh, yh = x, y
 
-            if all_frames:
-                frames.append(scene2tensor(xh, yh, scene))
-
-        if not all_frames:
-            frames.append(scene2tensor(xh, yh, scene))
+            frames.append(scene2tensor(xh, yh, scene, size=size))
 
         if change:
             break
@@ -150,8 +163,328 @@
     return frames, actions
 
 
+######################################################################
+
+
+# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2<x_i, c_j>
+def sq2matrix(x, c):
+    nx = x.pow(2).sum(1)
+    nc = c.pow(2).sum(1)
+    return nx[:, None] + nc[None, :] - 2 * x @ c.t()
+
+
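+# One Lloyd iteration: assigns every row of x to its nearest centroid, moves
+# each centroid to the mean of its assigned points, and resets any centroid
+# that captured fewer than nb_min points to a randomly picked sample.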
+def update_centroids(x, c, nb_min=1):
+    _, b = sq2matrix(x, c).min(1)
+    b.squeeze_()
+    nb_resets = 0
+
+    for k in range(0, c.size(0)):
+        i = b.eq(k).nonzero(as_tuple=False).squeeze(1)  # keep 1d even for a single match
+        if i.numel() >= nb_min:
+            c[k] = x.index_select(0, i).mean(0)
+        else:
+            n = torch.randint(x.size(0), (1,))
+            nb_resets += 1
+            c[k] = x[n]
+
+    return c, b, nb_resets
+
+
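+# Plain k-means on the rows of x, iterated until no assignment changes and no
+# centroid had to be reset, e.g. c, t = kmeans(x, 16) returns 16 centroids c
+# and the per-row cluster index t.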
+def kmeans(x, nb_centroids, nb_min=1):
+    if x.size(0) < nb_centroids * nb_min:
+        print("Not enough points!")
+        exit(1)
+
+    c = x[torch.randperm(x.size(0))[:nb_centroids]]
+    t = torch.full((x.size(0),), -1)
+    n = 0
+
+    while True:
+        c, u, nb_resets = update_centroids(x, c, nb_min)
+        n = n + 1
+        nb_changes = (u - t).sign().abs().sum() + nb_resets
+        t = u
+        if nb_changes == 0:
+            break
+
+    return c, t
+
+
+######################################################################
+
+
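+# Cuts every image of a (N, C, H, W) batch into factor x factor contiguous
+# tiles of size (H // factor, W // factor); calling it again with invert_size
+# set to the original size reassembles them, so that
+# patchify(patchify(x, f), f, invert_size=x.size()) == x.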
+def patchify(x, factor, invert_size=None):
+    if invert_size is None:
+        return (
+            x.reshape(
+                x.size(0),  # 0
+                x.size(1),  # 1
+                factor,  # 2
+                x.size(2) // factor,  # 3
+                factor,  # 4
+                x.size(3) // factor,  # 5
+            )
+            .permute(0, 2, 4, 1, 3, 5)
+            .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
+        )
+    else:
+        return (
+            x.reshape(
+                invert_size[0],  # 0
+                factor,  # 1
+                factor,  # 2
+                invert_size[1],  # 3
+                invert_size[2] // factor,  # 4
+                invert_size[3] // factor,  # 5
+            )
+            .permute(0, 3, 1, 4, 2, 5)
+            .reshape(invert_size)
+        )
+
+
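+# Shifts and scales its input to zero mean and unit variance; mu and log_var
+# are registered as buffers so they are saved along with the parameters.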
+class Normalizer(nn.Module):
+    def __init__(self, mu, std):
+        super().__init__()
+        self.register_buffer("mu", mu)
+        self.register_buffer("log_var", 2 * torch.log(std))
+
+    def forward(self, x):
+        return (x - self.mu) / torch.exp(self.log_var / 2.0)
+
+
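+# Binarization with a straight-through estimator: the forward value is
+# sign-like, in {-1, +1}, while the backward pass uses the gradient of
+# tanh(x), thanks to the s + u - u.detach() construction below.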
+class SignSTE(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        # unlike torch.sign(), which maps 0 to 0, this sends x == 0 to +1
+        s = (x >= 0).float() * 2 - 1
+        if self.training:
+            u = torch.tanh(x)
+            return s + u - u.detach()
+        else:
+            return s
+
+
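+# Trains a convolutional autoencoder whose bottleneck is nb_bits_per_token
+# binary feature maps; the first epoch runs without quantization to warm the
+# model up, the learning rate decays log-linearly from lr_start to lr_end,
+# and the loss is a per-pixel cross-entropy over the Box.nb_rgb_levels levels.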
+def train_encoder(
+    train_input,
+    test_input,
+    depth=2,
+    dim_hidden=48,
+    nb_bits_per_token=10,
+    lr_start=1e-3,
+    lr_end=1e-4,
+    nb_epochs=10,
+    batch_size=25,
+    device=torch.device("cpu"),
+):
+    mu, std = train_input.float().mean(), train_input.float().std()
+
+    def encoder_core(depth, dim):
+        l = [
+            [
+                nn.Conv2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+                nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2),
+                nn.ReLU(),
+            ]
+            for k in range(depth)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    def decoder_core(depth, dim):
+        l = [
+            [
+                nn.ConvTranspose2d(
+                    dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2
+                ),
+                nn.ReLU(),
+                nn.ConvTranspose2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+            ]
+            for k in range(depth - 1, -1, -1)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    encoder = nn.Sequential(
+        Normalizer(mu, std),
+        nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1),
+        nn.ReLU(),
+        # 64x64
+        encoder_core(depth=depth, dim=dim_hidden),
+        # 8x8
+        nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1),
+    )
+
+    quantizer = SignSTE()
+
+    decoder = nn.Sequential(
+        nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1),
+        # 8x8
+        decoder_core(depth=depth, dim=dim_hidden),
+        # 64x64
+        nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1),
+    )
+
+    model = nn.Sequential(encoder, decoder)
+
+    nb_parameters = sum(p.numel() for p in model.parameters())
+
+    print(f"nb_parameters {nb_parameters}")
+
+    model.to(device)
+
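+    # 5x5 Gaussian kernel normalized to sum to 1 (not used in the loop below)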
+    g5x5 = torch.exp(-torch.tensor([[-2.0, -1.0, 0.0, 1.0, 2.0]]) ** 2 / 2)
+    g5x5 = (g5x5.t() @ g5x5).view(1, 1, 5, 5)
+    g5x5 = g5x5 / g5x5.sum()
+
+    for k in range(nb_epochs):
+        lr = math.exp(
+            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
+        )
+        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+
+        acc_train_loss = 0.0
+
+        for input in train_input.split(batch_size):
+            input = input.to(device)
+            z = encoder(input)
+            zq = z if k < 1 else quantizer(z)
+            output = decoder(zq)
+
+            output = output.reshape(
+                output.size(0), -1, 3, output.size(2), output.size(3)
+            )
+
+            train_loss = F.cross_entropy(output, input)
+
+            acc_train_loss += train_loss.item() * input.size(0)
+
+            optimizer.zero_grad()
+            train_loss.backward()
+            optimizer.step()
+
+        acc_test_loss = 0.0
+
+        with torch.no_grad():
+            for input in test_input.split(batch_size):
+                input = input.to(device)
+                z = encoder(input)
+                zq = z if k < 1 else quantizer(z)
+                output = decoder(zq)
+
+                output = output.reshape(
+                    output.size(0), -1, 3, output.size(2), output.size(3)
+                )
+
+                test_loss = F.cross_entropy(output, input)
+
+                acc_test_loss += test_loss.item() * input.size(0)
+
+        train_loss = acc_train_loss / train_input.size(0)
+        test_loss = acc_test_loss / test_input.size(0)
+
+        print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        sys.stdout.flush()
+
+    return encoder, quantizer, decoder
+
+
+def generate_episodes(nb):
+    all_frames = []
+    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
+        frames, actions = generate_episode(nb_steps=31)
+        all_frames += [frames[0], frames[-1]]  # keep only the first and last frames
+    return torch.cat(all_frames, 0).contiguous()
+
+
+# Generates train/test frames, trains the autoencoder on them, then checks
+# that the binary code of each frame packs into integer tokens and back.
+def create_data_and_processors(nb_train_samples, nb_test_samples):
+    train_input = generate_episodes(nb_train_samples)
+    test_input = generate_episodes(nb_test_samples)
+    encoder, quantizer, decoder = train_encoder(train_input, test_input, nb_epochs=2)
+
+    input = test_input[:64]
+
+    z = encoder(input.float())
+    height, width = z.size(2), z.size(3)
+    zq = quantizer(z).long()
+
+    # pack the +/-1 bits of every spatial location into one integer token,
+    # e.g. with three bits (+1, -1, +1) -> (1, 0, 1) -> 1*1 + 0*2 + 1*4 = 5
+    pow2 = (2 ** torch.arange(zq.size(1), device=zq.device))[None, None, :]
+    seq = (zq.permute(0, 2, 3, 1).clamp(min=0).reshape(zq.size(0), -1, zq.size(1)) * pow2).sum(-1)
+    print(f"{seq.size()=}")
+
+    ZZ = zq
+
+    # unpack the tokens back into +/-1 bits and check the round trip is exact
+    zq = ((seq[:, :, None] // pow2) % 2) * 2 - 1
+    zq = zq.reshape(zq.size(0), height, width, -1).permute(0, 3, 1, 2)
+
+    print(ZZ[0])
+    print(zq[0])
+
+    print("CHECK", (ZZ - zq).abs().sum())
+
+    # sample pixel levels from the decoder logits at temperature T
+    results = decoder(zq.float())
+    T = 0.1
+    results = results.reshape(
+        results.size(0), -1, 3, results.size(2), results.size(3)
+    ).permute(0, 2, 3, 4, 1)
+    results = torch.distributions.categorical.Categorical(logits=results / T).sample()
+
+    torchvision.utils.save_image(
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+    )
+
+    torchvision.utils.save_image(
+        results.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+    )
+
+
+######################################################################
+
 if __name__ == "__main__":
-    frames, actions = sequence(nb_steps=31,all_frames=True)
-    frames = torch.cat(frames,0)
-    print(f"{frames.size()=}")
-    torchvision.utils.save_image(frames, "seq.png", nrow=8)
+    create_data_and_processors(250, 100)
+
+    # train_input = generate_episodes(2500)
+    # test_input = generate_episodes(1000)
+
+    # encoder, quantizer, decoder = train_encoder(train_input, test_input)
+
+    # input = test_input[torch.randperm(test_input.size(0))[:64]]
+    # z = encoder(input.float())
+    # zq = quantizer(z)
+    # results = decoder(zq)
+
+    # T = 0.1
+    # results = torch.distributions.categorical.Categorical(logits=results / T).sample()