Oops
diff --git a/world.py b/world.py
index c3eb101..d95bddb 100755
--- a/world.py
+++ b/world.py
@@ -1,5 +1,10 @@
 #!/usr/bin/env python
 
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
 import math, sys, tqdm
 
 import torch, torchvision
@@ -62,16 +67,39 @@ class SignSTE(nn.Module):
             return s
 
 
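+# Straight-through sampler over the channel dimension: the forward pass
+# emits a hard indicator of the per-location maximum along dim -3, while
+# in training the softmax gradient is passed through via s + u - u.detach().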
+class DiscreteSampler2d(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        s = (x >= x.max(-3, keepdim=True).values).float()
+
+        if self.training:
+            u = x.softmax(dim=-3)
+            return s + u - u.detach()
+        else:
+            return s
+
+
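+# Entropy regularizer: p is the per-bit activation frequency averaged over
+# the batch, h its binary entropy in bits. Bits whose entropy already
+# exceeds h_threshold contribute nothing; minimizing pushes the others up.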
+def loss_H(binary_logits, h_threshold=1):
+    p = binary_logits.sigmoid().mean(0)
+    h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
+    h.clamp_(max=h_threshold)
+    return h_threshold - h.mean()
+
+
 def train_encoder(
     train_input,
     test_input,
-    depth=3,
+    depth,
+    nb_bits_per_token,
     dim_hidden=48,
-    nb_bits_per_token=8,
+    lambda_entropy=0.0,
     lr_start=1e-3,
     lr_end=1e-4,
     nb_epochs=10,
     batch_size=25,
+    logger=None,
     device=torch.device("cpu"),
 ):
+    # guard against the default: logger is called unconditionally below
+    if logger is None:
+        logger = lambda s: print(s)
+
     mu, std = train_input.float().mean(), train_input.float().std()
@@ -132,7 +160,7 @@ def train_encoder(
 
     nb_parameters = sum(p.numel() for p in model.parameters())
 
-    print(f"nb_parameters {nb_parameters}")
+    logger(f"vqae nb_parameters {nb_parameters}")
 
     model.to(device)
 
@@ -145,8 +173,9 @@ def train_encoder(
         acc_train_loss = 0.0
 
         for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
+            input = input.to(device)
             z = encoder(input)
-            zq = z if k < 2 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -155,6 +184,9 @@ def train_encoder(
 
             train_loss = F.cross_entropy(output, input)
 
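+            # optional entropy term keeping each latent bit informative:
+            # at least 0.5 bit of entropy across the batch (cf. loss_H)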
+            if lambda_entropy > 0:
+                train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)
+
             acc_train_loss += train_loss.item() * input.size(0)
 
             optimizer.zero_grad()
@@ -164,8 +196,9 @@ def train_encoder(
         acc_test_loss = 0.0
 
         for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
+            input = input.to(device)
             z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -179,7 +212,7 @@ def train_encoder(
         train_loss = acc_train_loss / train_input.size(0)
         test_loss = acc_test_loss / test_input.size(0)
 
-        print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
         sys.stdout.flush()
 
     return encoder, quantizer, decoder
@@ -232,7 +265,7 @@ def scene2tensor(xh, yh, scene, size):
     )
 
 
-def random_scene():
+def random_scene(nb_insert_attempts=3):
     scene = []
     colors = [
         ((Box.nb_rgb_levels - 1), 0, 0),
@@ -246,7 +279,7 @@ def random_scene():
         ),
     ]
 
-    for k in range(10):
+    for k in range(nb_insert_attempts):
         wh = torch.rand(2) * 0.2 + 0.2
         xy = torch.rand(2) * (1 - wh)
         c = colors[torch.randint(len(colors), (1,))]
@@ -280,14 +313,15 @@ def generate_episode(steps, size=64):
         xh, yh = tuple(x.item() for x in torch.rand(2))
 
         actions = torch.randint(len(effects), (len(steps),))
-        change = False
+        nb_changes = 0
 
         for s, a in zip(steps, actions):
             if s:
                 frames.append(scene2tensor(xh, yh, scene, size=size))
 
-            g, dx, dy = effects[a]
-            if g:
+            grasp, dx, dy = effects[a]
+
+            if grasp:
                 for b in scene:
                     if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
                         x, y = b.x, b.y
@@ -304,7 +338,7 @@ def generate_episode(steps, size=64):
                         else:
                             xh += dx
                             yh += dy
-                            change = True
+                            nb_changes += 1
             else:
                 x, y = xh, yh
                 xh += dx
@@ -312,7 +346,7 @@ def generate_episode(steps, size=64):
                 if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                     xh, yh = x, y
 
-        if change:
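+        # keep the episode only if the hand successfully moved a grasped
+        # object in more than a third of the steps; otherwise resample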
+        if nb_changes > len(steps) // 3:
             break
 
     return frames, actions
@@ -326,43 +360,71 @@ def generate_episodes(nb, steps):
     for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
         frames, actions = generate_episode(steps)
         all_frames += frames
-        all_actions += [actions]
+        all_actions += [actions[None, :]]
     return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
 
 
 def create_data_and_processors(
-    nb_train_samples, nb_test_samples, mode, nb_steps, nb_epochs=10
+    nb_train_samples,
+    nb_test_samples,
+    mode,
+    nb_steps,
+    depth=3,
+    nb_bits_per_token=8,
+    nb_epochs=10,
+    device=torch.device("cpu"),
+    device_storage=torch.device("cpu"),
+    logger=None,
 ):
     assert mode in ["first_last"]
 
     if mode == "first_last":
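+        # render only the first and the last frame; the nb_steps + 1
+        # actions in between are applied without rendering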
         steps = [True] + [False] * (nb_steps + 1) + [True]
 
+    if logger is None:
+        logger = lambda s: print(s)
+
     train_input, train_actions = generate_episodes(nb_train_samples, steps)
+    train_input, train_actions = train_input.to(device_storage), train_actions.to(
+        device_storage
+    )
     test_input, test_actions = generate_episodes(nb_test_samples, steps)
+    test_input, test_actions = test_input.to(device_storage), test_actions.to(
+        device_storage
+    )
 
     encoder, quantizer, decoder = train_encoder(
-        train_input, test_input, nb_epochs=nb_epochs
+        train_input,
+        test_input,
+        depth=depth,
+        nb_bits_per_token=nb_bits_per_token,
+        lambda_entropy=1.0,
+        nb_epochs=nb_epochs,
+        logger=logger,
+        device=device,
     )
     encoder.train(False)
     quantizer.train(False)
     decoder.train(False)
 
-    z = encoder(train_input[:1])
-    pow2 = (2 ** torch.arange(z.size(1), device=z.device))[None, None, :]
+    z = encoder(train_input[:1].to(device))
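+    # powers of two used to pack the nb_bits_per_token sign bits of each
+    # spatial location into a single integer token (and back)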
+    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
     z_h, z_w = z.size(2), z.size(3)
 
+    logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
+
     def frame2seq(input, batch_size=25):
         seq = []
-
+        p = pow2.to(device)
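+        # frames to tokens: encode, binarize the quantizer signs, pack bits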
         for x in input.split(batch_size):
+            x = x.to(device)
             z = encoder(x)
             ze_bool = (quantizer(z) >= 0).long()
             output = (
                 ze_bool.permute(0, 2, 3, 1).reshape(
                     ze_bool.size(0), -1, ze_bool.size(1)
                 )
-                * pow2
+                * p
             ).sum(-1)
 
             seq.append(output)
@@ -371,9 +433,10 @@ def create_data_and_processors(
 
     def seq2frame(input, batch_size=25, T=1e-2):
         frames = []
-
+        p = pow2.to(device)
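+        # tokens to frames: unpack bits, map {0,1} to {-1,+1}, then decode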
         for seq in input.split(batch_size):
-            zd_bool = (seq[:, :, None] // pow2) % 2
+            seq = seq.to(device)
+            zd_bool = (seq[:, :, None] // p) % 2
             zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
             logits = decoder(zd_bool * 2.0 - 1.0)
             logits = logits.reshape(
@@ -401,26 +464,22 @@ if __name__ == "__main__":
         frame2seq,
         seq2frame,
     ) = create_data_and_processors(
-        # 10000, 1000,
-        100,
-        100,
-        nb_epochs=2,
+        250,
+        1000,
+        nb_epochs=5,
         mode="first_last",
         nb_steps=20,
     )
 
-    input = test_input[:64]
+    input = test_input[:256]
 
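+    # round-trip a batch of test frames through the tokenizer to compare
+    # orig.png with its quantized reconstruction qtiz.png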
     seq = frame2seq(input)
-
-    print(f"{seq.size()=} {seq.dtype=} {seq.min()=} {seq.max()=}")
-
     output = seq2frame(seq)
 
     torchvision.utils.save_image(
-        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
     )
 
     torchvision.utils.save_image(
-        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
     )