Update: add a "world" task. world.py is rewritten, replacing the cairo box scenes and VQ-AE tokenizer with a simple generator that draws colored segments on a small grid and masks a rectangular region; tasks.py adds a World task whose autoregressive mask makes the model predict the intact grid from its masked version; main.py registers the task.
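A minimal sketch of what the new generator produces, assuming the module layout in this commit (each sample is the flattened masked grid, a separator token, then the flattened intact grid):

    import world

    # 6x8 grids -> sequences of 6*8 + 1 + 6*8 = 97 tokens
    seq = world.generate(nb=2, height=6, width=8)
    print(seq.shape)              # expected: torch.Size([2, 97])
    print(world.seq2str(seq)[0])  # masked grid, '>' separator, intact grid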
author     François Fleuret <francois@fleuret.org>
Thu, 20 Jun 2024 21:05:45 +0000 (23:05 +0200)
committer  François Fleuret <francois@fleuret.org>
Thu, 20 Jun 2024 21:05:45 +0000 (23:05 +0200)
main.py
tasks.py
world.py

diff --git a/main.py b/main.py
index 3ff64b7..5234d6f 100755
--- a/main.py
+++ b/main.py
@@ -219,6 +219,12 @@ default_task_args = {
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
+    "world": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 50000,
+        "nb_test_samples": 10000,
+    },
     "byheart": {
         "model": "37M",
         "batch_size": 25,
     "byheart": {
         "model": "37M",
         "batch_size": 25,
@@ -463,6 +469,16 @@ elif args.task == "byheart":
     )
     args.max_percents_of_test_in_train = -1
 
+elif args.task == "world":
+    task = tasks.World(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.physical_batch_size,
+        logger=log_string,
+        device=device,
+    )
+    args.max_percents_of_test_in_train = -1
+
 elif args.task == "learnop":
     task = tasks.SandBox(
         problem=problems.ProblemLearnOperator(),
diff --git a/tasks.py b/tasks.py
index 443419e..7894fcd 100755
--- a/tasks.py
+++ b/tasks.py
@@ -395,6 +395,145 @@ class SandBox(Task):
                 # logger(f"wrote {filename}")
 
 
+######################################################################
+
+import world
+
+
+class World(Task):
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.batch_size = batch_size
+        self.device = device
+        self.height = 6
+        self.width = 8
+
+        self.train_input = world.generate(
+            nb_train_samples, height=self.height, width=self.width
+        )
+        self.train_ar_mask = (
+            (torch.arange(self.train_input.size(1)) > self.train_input.size(1) // 2)
+            .long()[None, :]
+            .expand_as(self.train_input)
+        )
+
+        self.test_input = world.generate(
+            nb_test_samples, height=self.height, width=self.width
+        )
+        self.test_ar_mask = (
+            (torch.arange(self.test_input.size(1)) > self.test_input.size(1) // 2)
+            .long()[None, :]
+            .expand_as(self.test_input)
+        )
+
+        self.train_input, self.train_ar_mask = self.train_input.to(
+            device
+        ), self.train_ar_mask.to(device)
+        self.test_input, self.test_ar_mask = self.test_input.to(
+            device
+        ), self.test_ar_mask.to(device)
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+    ):
+        def compute_accuracy(input, ar_mask, logger=None):
+            input, ar_mask = input[:nmax], ar_mask[:nmax]
+            result = input.clone() * (1 - ar_mask)
+
+            masked_inplace_autoregression(
+                model,
+                self.batch_size,
+                result,
+                ar_mask,
+                deterministic_synthesis,
+                progress_bar_desc=None,
+                device=self.device,
+            )
+
+            nb_total, nb_correct = (
+                input.size(0),
+                (input == result).long().min(dim=1).values.sum(),
+            )
+
+            return nb_total, nb_correct
+
+        train_nb_total, train_nb_correct = compute_accuracy(
+            self.train_input, self.train_ar_mask
+        )
+
+        logger(
+            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+        )
+
+        test_nb_total, test_nb_correct = compute_accuracy(
+            self.test_input, self.test_ar_mask, logger
+        )
+
+        logger(
+            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+        )
+
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
+        if save_attention_image is not None:
+            for k in range(10):
+                ns = torch.randint(self.test_input.size(0), (1,)).item()
+                input = self.test_input[ns : ns + 1].clone()
+
+                with torch.autograd.no_grad():
+                    t = model.training
+                    model.eval()
+                    # model.record_attention(True)
+                    model(BracketedSequence(input))
+                    model.train(t)
+                    # ram = model.retrieve_attention()
+                    # model.record_attention(False)
+
+                # tokens_output = [c for c in self.problem.seq2str(input[0])]
+                # tokens_input = ["n/a"] + tokens_output[:-1]
+                # for n_head in range(ram[0].size(1)):
+                # filename = os.path.join(
+                # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+                # )
+                # attention_matrices = [m[0, n_head] for m in ram]
+                # save_attention_image(
+                # filename,
+                # tokens_input,
+                # tokens_output,
+                # attention_matrices,
+                # k_top=10,
+                ##min_total_attention=0.9,
+                # token_gap=12,
+                # layer_gap=50,
+                # )
+                # logger(f"wrote {filename}")
+
+
 ######################################################################
 
 import picoclvr
diff --git a/world.py b/world.py
index d95bddb..0392940 100755
--- a/world.py
+++ b/world.py
@@ -11,475 +11,120 @@ import torch, torchvision
 
 from torch import nn
 from torch.nn import functional as F
-import cairo
 
 ######################################################################
 
 
-class Box:
-    nb_rgb_levels = 10
-
-    def __init__(self, x, y, w, h, r, g, b):
-        self.x = x
-        self.y = y
-        self.w = w
-        self.h = h
-        self.r = r
-        self.g = g
-        self.b = b
-
-    def collision(self, scene):
-        for c in scene:
-            if (
-                self is not c
-                and max(self.x, c.x) <= min(self.x + self.w, c.x + c.w)
-                and max(self.y, c.y) <= min(self.y + self.h, c.y + c.h)
-            ):
-                return True
-        return False
-
-
-######################################################################
-
-
-class Normalizer(nn.Module):
-    def __init__(self, mu, std):
-        super().__init__()
-        self.register_buffer("mu", mu)
-        self.register_buffer("log_var", 2 * torch.log(std))
-
-    def forward(self, x):
-        return (x - self.mu) / torch.exp(self.log_var / 2.0)
-
-
-class SignSTE(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x):
-        # torch.sign() takes three values
-        s = (x >= 0).float() * 2 - 1
-
-        if self.training:
-            u = torch.tanh(x)
-            return s + u - u.detach()
-        else:
-            return s
-
-
-class DiscreteSampler2d(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def forward(self, x):
-        s = (x >= x.max(-3, keepdim=True).values).float()
-
-        if self.training:
-            u = x.softmax(dim=-3)
-            return s + u - u.detach()
-        else:
-            return s
-
-
-def loss_H(binary_logits, h_threshold=1):
-    p = binary_logits.sigmoid().mean(0)
-    h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
-    h.clamp_(max=h_threshold)
-    return h_threshold - h.mean()
-
-
-def train_encoder(
-    train_input,
-    test_input,
-    depth,
-    nb_bits_per_token,
-    dim_hidden=48,
-    lambda_entropy=0.0,
-    lr_start=1e-3,
-    lr_end=1e-4,
-    nb_epochs=10,
-    batch_size=25,
-    logger=None,
-    device=torch.device("cpu"),
-):
-    mu, std = train_input.float().mean(), train_input.float().std()
-
-    def encoder_core(depth, dim):
-        l = [
-            [
-                nn.Conv2d(
-                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
-                ),
-                nn.ReLU(),
-                nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2),
-                nn.ReLU(),
-            ]
-            for k in range(depth)
-        ]
-
-        return nn.Sequential(*[x for m in l for x in m])
-
-    def decoder_core(depth, dim):
-        l = [
-            [
-                nn.ConvTranspose2d(
-                    dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2
-                ),
-                nn.ReLU(),
-                nn.ConvTranspose2d(
-                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
-                ),
-                nn.ReLU(),
-            ]
-            for k in range(depth - 1, -1, -1)
-        ]
-
-        return nn.Sequential(*[x for m in l for x in m])
-
-    encoder = nn.Sequential(
-        Normalizer(mu, std),
-        nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1),
-        nn.ReLU(),
-        # 64x64
-        encoder_core(depth=depth, dim=dim_hidden),
-        # 8x8
-        nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1),
-    )
-
-    quantizer = SignSTE()
-
-    decoder = nn.Sequential(
-        nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1),
-        # 8x8
-        decoder_core(depth=depth, dim=dim_hidden),
-        # 64x64
-        nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1),
-    )
-
-    model = nn.Sequential(encoder, decoder)
-
-    nb_parameters = sum(p.numel() for p in model.parameters())
-
-    logger(f"vqae nb_parameters {nb_parameters}")
-
-    model.to(device)
-
-    for k in range(nb_epochs):
-        lr = math.exp(
-            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
-        )
-        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
-
-        acc_train_loss = 0.0
-
-        for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
-            input = input.to(device)
-            z = encoder(input)
-            zq = quantizer(z)
-            output = decoder(zq)
-
-            output = output.reshape(
-                output.size(0), -1, 3, output.size(2), output.size(3)
-            )
-
-            train_loss = F.cross_entropy(output, input)
-
-            if lambda_entropy > 0:
-                train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)
-
-            acc_train_loss += train_loss.item() * input.size(0)
-
-            optimizer.zero_grad()
-            train_loss.backward()
-            optimizer.step()
-
-        acc_test_loss = 0.0
-
-        for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
-            input = input.to(device)
-            z = encoder(input)
-            zq = quantizer(z)
-            output = decoder(zq)
-
-            output = output.reshape(
-                output.size(0), -1, 3, output.size(2), output.size(3)
-            )
-
-            test_loss = F.cross_entropy(output, input)
-
-            acc_test_loss += test_loss.item() * input.size(0)
-
-        train_loss = acc_train_loss / train_input.size(0)
-        test_loss = acc_test_loss / test_input.size(0)
-
-        logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
-        sys.stdout.flush()
-
-    return encoder, quantizer, decoder
-
-
-######################################################################
-
-
-def scene2tensor(xh, yh, scene, size):
-    width, height = size, size
-    pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
-    data = pixel_map.numpy()
-    surface = cairo.ImageSurface.create_for_data(
-        data, cairo.FORMAT_ARGB32, width, height
-    )
-
-    ctx = cairo.Context(surface)
-    ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
-
-    for b in scene:
-        ctx.move_to(b.x * size, b.y * size)
-        ctx.rel_line_to(b.w * size, 0)
-        ctx.rel_line_to(0, b.h * size)
-        ctx.rel_line_to(-b.w * size, 0)
-        ctx.close_path()
-        ctx.set_source_rgba(
-            b.r / (Box.nb_rgb_levels - 1),
-            b.g / (Box.nb_rgb_levels - 1),
-            b.b / (Box.nb_rgb_levels - 1),
-            1.0,
-        )
-        ctx.fill()
-
-    hs = size * 0.1
-    ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
-    ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
-    ctx.rel_line_to(hs, 0)
-    ctx.rel_line_to(0, hs)
-    ctx.rel_line_to(-hs, 0)
-    ctx.close_path()
-    ctx.fill()
-
-    return (
-        pixel_map[None, :, :, :3]
-        .flip(-1)
-        .permute(0, 3, 1, 2)
-        .long()
-        .mul(Box.nb_rgb_levels)
-        .floor_divide(256)
-    )
-
-
-def random_scene(nb_insert_attempts=3):
-    scene = []
-    colors = [
-        ((Box.nb_rgb_levels - 1), 0, 0),
-        (0, (Box.nb_rgb_levels - 1), 0),
-        (0, 0, (Box.nb_rgb_levels - 1)),
-        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
-        (
-            (Box.nb_rgb_levels * 2) // 3,
-            (Box.nb_rgb_levels * 2) // 3,
-            (Box.nb_rgb_levels * 2) // 3,
-        ),
-    ]
-
-    for k in range(nb_insert_attempts):
-        wh = torch.rand(2) * 0.2 + 0.2
-        xy = torch.rand(2) * (1 - wh)
-        c = colors[torch.randint(len(colors), (1,))]
-        b = Box(
-            xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
-        )
-        if not b.collision(scene):
-            scene.append(b)
-
-    return scene
-
-
-def generate_episode(steps, size=64):
-    delta = 0.1
-    effects = [
-        (False, 0, 0),
-        (False, delta, 0),
-        (False, 0, delta),
-        (False, -delta, 0),
-        (False, 0, -delta),
-        (True, delta, 0),
-        (True, 0, delta),
-        (True, -delta, 0),
-        (True, 0, -delta),
+colors = torch.tensor(
+    [
+        [255, 255, 255],
+        [0, 0, 0],
+        [255, 0, 0],
+        [0, 128, 0],
+        [0, 0, 255],
+        [255, 255, 0],
+        [192, 192, 192],
     ]
+)
 
-    while True:
-        frames = []
-
-        scene = random_scene()
-        xh, yh = tuple(x.item() for x in torch.rand(2))
-
-        actions = torch.randint(len(effects), (len(steps),))
-        nb_changes = 0
-
-        for s, a in zip(steps, actions):
-            if s:
-                frames.append(scene2tensor(xh, yh, scene, size=size))
+token2char = "_X01234>"
 
-            grasp, dx, dy = effects[a]
 
-            if grasp:
-                for b in scene:
-                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
-                        x, y = b.x, b.y
-                        b.x += dx
-                        b.y += dy
-                        if (
-                            b.x < 0
-                            or b.y < 0
-                            or b.x + b.w > 1
-                            or b.y + b.h > 1
-                            or b.collision(scene)
-                        ):
-                            b.x, b.y = x, y
-                        else:
-                            xh += dx
-                            yh += dy
-                            nb_changes += 1
-            else:
-                x, y = xh, yh
-                xh += dx
-                yh += dy
-                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
-                    xh, yh = x, y
-
-        if nb_changes > len(steps) // 3:
-            break
-
-    return frames, actions
-
-
-######################################################################
-
-
-def generate_episodes(nb, steps):
-    all_frames, all_actions = [], []
-    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
-        frames, actions = generate_episode(steps)
-        all_frames += frames
-        all_actions += [actions[None, :]]
-    return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
-
-
-def create_data_and_processors(
-    nb_train_samples,
-    nb_test_samples,
-    mode,
-    nb_steps,
-    depth=3,
-    nb_bits_per_token=8,
-    nb_epochs=10,
-    device=torch.device("cpu"),
-    device_storage=torch.device("cpu"),
-    logger=None,
+def generate(
+    nb,
+    height,
+    width,
+    obj_length=6,
+    mask_height=3,
+    mask_width=3,
+    nb_obj=3,
 ):
-    assert mode in ["first_last"]
-
-    if mode == "first_last":
-        steps = [True] + [False] * (nb_steps + 1) + [True]
-
-    if logger is None:
-        logger = lambda s: print(s)
-
-    train_input, train_actions = generate_episodes(nb_train_samples, steps)
-    train_input, train_actions = train_input.to(device_storage), train_actions.to(
-        device_storage
+    intact = torch.zeros(nb, height, width, dtype=torch.int64)
+    n = torch.arange(intact.size(0))
+
+    for n in range(nb):
+        for c in torch.randperm(colors.size(0) - 2)[:nb_obj] + 2:
+            z = intact[n].flatten()
+            m = (torch.rand(z.size()) * (z == 0)).argmax(dim=0)
+            i, j = m // width, m % width
+            vm = torch.randint(4, (1,))[0]
+            vi, vj = (vm // 2) * (2 * (vm % 2) - 1), (1 - vm // 2) * (2 * (vm % 2) - 1)
+            for l in range(obj_length):
+                intact[n, i, j] = c
+                i += vi
+                j += vj
+                if i < 0 or i >= height or j < 0 or j >= width or intact[n, i, j] != 0:
+                    i -= vi
+                    j -= vj
+                    vi, vj = -vj, vi
+                    i += vi
+                    j += vj
+                    if (
+                        i < 0
+                        or i >= height
+                        or j < 0
+                        or j >= width
+                        or intact[n, i, j] != 0
+                    ):
+                        break
+
+    masked = intact.clone()
+
+    for n in range(nb):
+        i = torch.randint(height - mask_height + 1, (1,))[0]
+        j = torch.randint(width - mask_width + 1, (1,))[0]
+        masked[n, i : i + mask_height, j : j + mask_width] = 1
+
+    return torch.cat(
+        [
+            masked.flatten(1),
+            torch.full((masked.size(0), 1), len(colors)),
+            intact.flatten(1),
+        ],
+        dim=1,
     )
-    test_input, test_actions = generate_episodes(nb_test_samples, steps)
-    test_input, test_actions = test_input.to(device_storage), test_actions.to(
-        device_storage
-    )
-
-    encoder, quantizer, decoder = train_encoder(
-        train_input,
-        test_input,
-        depth=depth,
-        nb_bits_per_token=nb_bits_per_token,
-        lambda_entropy=1.0,
-        nb_epochs=nb_epochs,
-        logger=logger,
-        device=device,
-    )
-    encoder.train(False)
-    quantizer.train(False)
-    decoder.train(False)
-
-    z = encoder(train_input[:1].to(device))
-    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
-    z_h, z_w = z.size(2), z.size(3)
-
-    logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
-
-    def frame2seq(input, batch_size=25):
-        seq = []
-        p = pow2.to(device)
-        for x in input.split(batch_size):
-            x = x.to(device)
-            z = encoder(x)
-            ze_bool = (quantizer(z) >= 0).long()
-            output = (
-                ze_bool.permute(0, 2, 3, 1).reshape(
-                    ze_bool.size(0), -1, ze_bool.size(1)
-                )
-                * p
-            ).sum(-1)
 
-            seq.append(output)
 
-        return torch.cat(seq, dim=0)
-
-    def seq2frame(input, batch_size=25, T=1e-2):
-        frames = []
-        p = pow2.to(device)
-        for seq in input.split(batch_size):
-            seq = seq.to(device)
-            zd_bool = (seq[:, :, None] // p) % 2
-            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
-            logits = decoder(zd_bool * 2.0 - 1.0)
-            logits = logits.reshape(
-                logits.size(0), -1, 3, logits.size(2), logits.size(3)
-            ).permute(0, 2, 3, 4, 1)
-            output = torch.distributions.categorical.Categorical(
-                logits=logits / T
-            ).sample()
+def sample2img(seq, height, width):
+    intact = seq[:, : height * width].reshape(-1, height, width)
+    masked = seq[:, height * width + 1 :].reshape(-1, height, width)
+    img_intact, img_masked = colors[intact], colors[masked]
+
+    img = torch.cat(
+        [
+            img_intact,
+            torch.full(
+                (img_intact.size(0), img_intact.size(1), 1, img_intact.size(3)), 1
+            ),
+            img_masked,
+        ],
+        dim=2,
+    )
 
-            frames.append(output)
+    return img.permute(0, 3, 1, 2)
 
-        return torch.cat(frames, dim=0)
 
-    return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame
+def seq2str(seq):
+    result = []
+    for s in seq:
+        result.append("".join([token2char[v] for v in s]))
+    return result
 
 
 ######################################################################
 
 if __name__ == "__main__":
-    (
-        train_input,
-        train_actions,
-        test_input,
-        test_actions,
-        frame2seq,
-        seq2frame,
-    ) = create_data_and_processors(
-        250,
-        1000,
-        nb_epochs=5,
-        mode="first_last",
-        nb_steps=20,
-    )
+    import time
 
-    input = test_input[:256]
+    height, width = 6, 8
+    start_time = time.perf_counter()
+    seq = generate(nb=64, height=height, width=width)
+    delay = time.perf_counter() - start_time
+    print(f"{seq.size(0)/delay:02f} samples/s")
 
-    seq = frame2seq(input)
-    output = seq2frame(seq)
+    print(seq2str(seq[:4]))
 
-    torchvision.utils.save_image(
-        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
-    )
+    img = sample2img(seq, height, width)
+    print(img.size())
 
-    torchvision.utils.save_image(
-        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
-    )
+    torchvision.utils.save_image(img.float() / 255.0, "world.png", nrow=8, padding=2)