#!/usr/bin/env python
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
import math, sys, tqdm
import torch, torchvision
from torch import nn
from torch.nn import functional as F
import cairo
+######################################################################
+
class Box:
    nb_rgb_levels = 10

    def __init__(self, x, y, w, h, r, g, b):
        self.x, self.y, self.w, self.h = x, y, w, h
        self.r, self.g, self.b = r, g, b

    def collision(self, scene):
        # True if this box overlaps any other box already in the scene
        for c in scene:
            if (
                self is not c
                and max(self.x, c.x) <= min(self.x + self.w, c.x + c.w)
                and max(self.y, c.y) <= min(self.y + self.h, c.y + c.h)
            ):
                return True
        return False
-def scene2tensor(xh, yh, scene, size):
- width, height = size, size
- pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
- data = pixel_map.numpy()
- surface = cairo.ImageSurface.create_for_data(
- data, cairo.FORMAT_ARGB32, width, height
- )
-
- ctx = cairo.Context(surface)
- ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
-
- for b in scene:
- ctx.move_to(b.x * size, b.y * size)
- ctx.rel_line_to(b.w * size, 0)
- ctx.rel_line_to(0, b.h * size)
- ctx.rel_line_to(-b.w * size, 0)
- ctx.close_path()
- ctx.set_source_rgba(
- b.r / (Box.nb_rgb_levels - 1),
- b.g / (Box.nb_rgb_levels - 1),
- b.b / (Box.nb_rgb_levels - 1),
- 1.0,
- )
- ctx.fill()
-
- hs = size * 0.1
- ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
- ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
- ctx.rel_line_to(hs, 0)
- ctx.rel_line_to(0, hs)
- ctx.rel_line_to(-hs, 0)
- ctx.close_path()
- ctx.fill()
-
- return (
- pixel_map[None, :, :, :3]
- .flip(-1)
- .permute(0, 3, 1, 2)
- .long()
- .mul(Box.nb_rgb_levels)
- .floor_divide(256)
- )
-
-
-def random_scene():
- scene = []
- colors = [
- ((Box.nb_rgb_levels - 1), 0, 0),
- (0, (Box.nb_rgb_levels - 1), 0),
- (0, 0, (Box.nb_rgb_levels - 1)),
- ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
- (
- (Box.nb_rgb_levels * 2) // 3,
- (Box.nb_rgb_levels * 2) // 3,
- (Box.nb_rgb_levels * 2) // 3,
- ),
- ]
-
- for k in range(10):
- wh = torch.rand(2) * 0.2 + 0.2
- xy = torch.rand(2) * (1 - wh)
- c = colors[torch.randint(len(colors), (1,))]
- b = Box(
- xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
- )
- if not b.collision(scene):
- scene.append(b)
-
- return scene
-
-
-def generate_episode(nb_steps=10, size=64):
- delta = 0.1
- effects = [
- (False, 0, 0),
- (False, delta, 0),
- (False, 0, delta),
- (False, -delta, 0),
- (False, 0, -delta),
- (True, delta, 0),
- (True, 0, delta),
- (True, -delta, 0),
- (True, 0, -delta),
- ]
-
- while True:
- frames = []
-
- scene = random_scene()
- xh, yh = tuple(x.item() for x in torch.rand(2))
-
- frames.append(scene2tensor(xh, yh, scene, size=size))
-
- actions = torch.randint(len(effects), (nb_steps,))
- change = False
-
- for a in actions:
- g, dx, dy = effects[a]
- if g:
- for b in scene:
- if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
- x, y = b.x, b.y
- b.x += dx
- b.y += dy
- if (
- b.x < 0
- or b.y < 0
- or b.x + b.w > 1
- or b.y + b.h > 1
- or b.collision(scene)
- ):
- b.x, b.y = x, y
- else:
- xh += dx
- yh += dy
- change = True
- else:
- x, y = xh, yh
- xh += dx
- yh += dy
- if xh < 0 or xh > 1 or yh < 0 or yh > 1:
- xh, yh = x, y
-
- frames.append(scene2tensor(xh, yh, scene, size=size))
-
- if change:
- break
-
- return frames, actions
-
-
######################################################################
-# ||x_i - c_j||^2 = ||x_i||^2 + ||c_j||^2 - 2<x_i, c_j>
-def sq2matrix(x, c):
- nx = x.pow(2).sum(1)
- nc = c.pow(2).sum(1)
- return nx[:, None] + nc[None, :] - 2 * x @ c.t()
-
-
-def update_centroids(x, c, nb_min=1):
- _, b = sq2matrix(x, c).min(1)
- b.squeeze_()
- nb_resets = 0
-
- for k in range(0, c.size(0)):
- i = b.eq(k).nonzero(as_tuple=False).squeeze()
- if i.numel() >= nb_min:
- c[k] = x.index_select(0, i).mean(0)
- else:
- n = torch.randint(x.size(0), (1,))
- nb_resets += 1
- c[k] = x[n]
-
- return c, b, nb_resets
-
-
-def kmeans(x, nb_centroids, nb_min=1):
- if x.size(0) < nb_centroids * nb_min:
- print("Not enough points!")
- exit(1)
-
- c = x[torch.randperm(x.size(0))[:nb_centroids]]
- t = torch.full((x.size(0),), -1)
- n = 0
-
- while True:
- c, u, nb_resets = update_centroids(x, c, nb_min)
- n = n + 1
- nb_changes = (u - t).sign().abs().sum() + nb_resets
- t = u
- if nb_changes == 0:
- break
-
- return c, t
-
-
-######################################################################
-
-
-def patchify(x, factor, invert_size=None):
- if invert_size is None:
- return (
- x.reshape(
- x.size(0), # 0
- x.size(1), # 1
- factor, # 2
- x.size(2) // factor, # 3
- factor, # 4
- x.size(3) // factor, # 5
- )
- .permute(0, 2, 4, 1, 3, 5)
- .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
- )
- else:
- return (
- x.reshape(
- invert_size[0], # 0
- factor, # 1
- factor, # 2
- invert_size[1], # 3
- invert_size[2] // factor, # 4
- invert_size[3] // factor, # 5
- )
- .permute(0, 3, 1, 4, 2, 5)
- .reshape(invert_size)
- )
-
-
class Normalizer(nn.Module):
    def __init__(self, mu, std):
        super().__init__()
        self.register_buffer("mu", mu)
        self.register_buffer("log_var", 2 * torch.log(std))

    def forward(self, x):
        return (x - self.mu) / torch.exp(self.log_var / 2.0)


class SignSTE(nn.Module):
    def forward(self, x):
        # torch.sign() takes three values (-1, 0, +1); threshold at zero
        # to get a strict binary +/-1 sign instead
        s = (x >= 0).float() * 2 - 1

        if self.training:
            # straight-through estimator: hard sign forward, tanh gradient backward
            u = torch.tanh(x)
            return s + u - u.detach()
        return s
+class DiscreteSampler2d(nn.Module):
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, x):
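+        # Hard indicator of the channel-wise maximum (dim -3), with a
+        # softmax straight-through gradient during training.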
+ s = (x >= x.max(-3, keepdim=True).values).float()
+
+ if self.training:
+ u = x.softmax(dim=-3)
+ return s + u - u.detach()
+ else:
+ return s
+
+
+def loss_H(binary_logits, h_threshold=1):
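+    # p is each bit's firing rate over the batch and h its binary entropy
+    # in bits; the penalty vanishes once every bit reaches h_threshold.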
+ p = binary_logits.sigmoid().mean(0)
+ h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
+ h.clamp_(max=h_threshold)
+ return h_threshold - h.mean()
+
+
def train_encoder(
train_input,
test_input,
- depth=2,
+ depth,
+ nb_bits_per_token,
dim_hidden=48,
- nb_bits_per_token=10,
+ lambda_entropy=0.0,
lr_start=1e-3,
lr_end=1e-4,
nb_epochs=10,
batch_size=25,
+ logger=None,
device=torch.device("cpu"),
):
mu, std = train_input.float().mean(), train_input.float().std()
nb_parameters = sum(p.numel() for p in model.parameters())
- print(f"nb_parameters {nb_parameters}")
+ logger(f"vqae nb_parameters {nb_parameters}")
model.to(device)
- g5x5 = torch.exp(-torch.tensor([[-2.0, -1.0, 0.0, 1.0, 2.0]]) ** 2 / 2)
- g5x5 = (g5x5.t() @ g5x5).view(1, 1, 5, 5)
- g5x5 = g5x5 / g5x5.sum()
-
for k in range(nb_epochs):
lr = math.exp(
            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
        )
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        acc_train_loss = 0.0
- for input in train_input.split(batch_size):
+ for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
+ input = input.to(device)
z = encoder(input)
- zq = z if k < 1 else quantizer(z)
+ zq = quantizer(z)
output = decoder(zq)
            output = output.reshape(
                output.size(0), -1, 3, output.size(2), output.size(3)
            )

            train_loss = F.cross_entropy(output, input)
+ if lambda_entropy > 0:
+ train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)
+
acc_train_loss += train_loss.item() * input.size(0)
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

        acc_test_loss = 0.0
- for input in test_input.split(batch_size):
+ for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
+ input = input.to(device)
z = encoder(input)
- zq = z if k < 1 else quantizer(z)
+ zq = quantizer(z)
output = decoder(zq)
            output = output.reshape(
                output.size(0), -1, 3, output.size(2), output.size(3)
            )
            test_loss = F.cross_entropy(output, input)
            acc_test_loss += test_loss.item() * input.size(0)

        train_loss = acc_train_loss / train_input.size(0)
test_loss = acc_test_loss / test_input.size(0)
- print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+ logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
sys.stdout.flush()
return encoder, quantizer, decoder
-def generate_episodes(nb):
- all_frames = []
- for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
- frames, actions = generate_episode(nb_steps=31)
- all_frames += [ frames[0], frames[-1] ]
- return torch.cat(all_frames, 0).contiguous()
-def create_data_and_processors(nb_train_samples, nb_test_samples):
- train_input = generate_episodes(nb_train_samples)
- test_input = generate_episodes(nb_test_samples)
- encoder, quantizer, decoder = train_encoder(train_input, test_input, nb_epochs=2)
+######################################################################
- input = test_input[:64]
- z = encoder(input.float())
- height, width = z.size(2), z.size(3)
- zq = quantizer(z).long()
- pow2=(2**torch.arange(zq.size(1), device=zq.device))[None,None,:]
- seq = (zq.permute(0,2,3,1).clamp(min=0).reshape(zq.size(0),-1,zq.size(1)) * pow2).sum(-1)
- print(f"{seq.size()=}")
+def scene2tensor(xh, yh, scene, size):
+ width, height = size, size
+ pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
+ data = pixel_map.numpy()
+ surface = cairo.ImageSurface.create_for_data(
+ data, cairo.FORMAT_ARGB32, width, height
+ )
- ZZ=zq
+ ctx = cairo.Context(surface)
+ ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
- zq = ((seq[:,:,None] // pow2)%2)*2-1
- zq = zq.reshape(zq.size(0), height, width, -1).permute(0,3,1,2)
+ for b in scene:
+ ctx.move_to(b.x * size, b.y * size)
+ ctx.rel_line_to(b.w * size, 0)
+ ctx.rel_line_to(0, b.h * size)
+ ctx.rel_line_to(-b.w * size, 0)
+ ctx.close_path()
+ ctx.set_source_rgba(
+ b.r / (Box.nb_rgb_levels - 1),
+ b.g / (Box.nb_rgb_levels - 1),
+ b.b / (Box.nb_rgb_levels - 1),
+ 1.0,
+ )
+ ctx.fill()
- print(ZZ[0])
- print(zq[0])
+ hs = size * 0.1
+ ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
+ ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
+ ctx.rel_line_to(hs, 0)
+ ctx.rel_line_to(0, hs)
+ ctx.rel_line_to(-hs, 0)
+ ctx.close_path()
+ ctx.fill()
- print("CHECK", (ZZ-zq).abs().sum())
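+    # Keep the RGB bytes (cairo stores ARGB32 as little-endian BGRA), put
+    # channels first, and quantize 0-255 down to Box.nb_rgb_levels levels.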
+ return (
+ pixel_map[None, :, :, :3]
+ .flip(-1)
+ .permute(0, 3, 1, 2)
+ .long()
+ .mul(Box.nb_rgb_levels)
+ .floor_divide(256)
+ )
- results = decoder(zq.float())
- T = 0.1
- results = results.reshape(
- results.size(0), -1, 3, results.size(2), results.size(3)
- ).permute(0, 2, 3, 4, 1)
- results = torch.distributions.categorical.Categorical(logits=results / T).sample()
+def random_scene(nb_insert_attempts=3):
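+    # Try to drop a few random boxes, keeping only those that do not
+    # collide; the palette is red, green, blue, yellow, and gray.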
+ scene = []
+ colors = [
+ ((Box.nb_rgb_levels - 1), 0, 0),
+ (0, (Box.nb_rgb_levels - 1), 0),
+ (0, 0, (Box.nb_rgb_levels - 1)),
+ ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
+ (
+ (Box.nb_rgb_levels * 2) // 3,
+ (Box.nb_rgb_levels * 2) // 3,
+ (Box.nb_rgb_levels * 2) // 3,
+ ),
+ ]
- torchvision.utils.save_image(
- input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+ for k in range(nb_insert_attempts):
+ wh = torch.rand(2) * 0.2 + 0.2
+ xy = torch.rand(2) * (1 - wh)
+ c = colors[torch.randint(len(colors), (1,))]
+ b = Box(
+ xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
+ )
+ if not b.collision(scene):
+ scene.append(b)
+
+ return scene
+
+
+def generate_episode(steps, size=64):
+ delta = 0.1
+ effects = [
+ (False, 0, 0),
+ (False, delta, 0),
+ (False, 0, delta),
+ (False, -delta, 0),
+ (False, 0, -delta),
+ (True, delta, 0),
+ (True, 0, delta),
+ (True, -delta, 0),
+ (True, 0, -delta),
+ ]
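+    # Each effect is (grasp, dx, dy): with grasp set, a box under the hand
+    # is dragged by (dx, dy), otherwise the hand alone moves.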
+
+ while True:
+ frames = []
+
+ scene = random_scene()
+ xh, yh = tuple(x.item() for x in torch.rand(2))
+
+ actions = torch.randint(len(effects), (len(steps),))
+ nb_changes = 0
+
+ for s, a in zip(steps, actions):
+ if s:
+ frames.append(scene2tensor(xh, yh, scene, size=size))
+
+ grasp, dx, dy = effects[a]
+
+ if grasp:
+ for b in scene:
+ if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
+ x, y = b.x, b.y
+ b.x += dx
+ b.y += dy
+ if (
+ b.x < 0
+ or b.y < 0
+ or b.x + b.w > 1
+ or b.y + b.h > 1
+ or b.collision(scene)
+ ):
+ b.x, b.y = x, y
+ else:
+ xh += dx
+ yh += dy
+ nb_changes += 1
+ else:
+ x, y = xh, yh
+ xh += dx
+ yh += dy
+ if xh < 0 or xh > 1 or yh < 0 or yh > 1:
+ xh, yh = x, y
+
+ if nb_changes > len(steps) // 3:
+ break
+
+ return frames, actions
+
+
+######################################################################
+
+
+def generate_episodes(nb, steps):
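+    # Generate nb episodes, keeping only the frames flagged True in steps.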
+ all_frames, all_actions = [], []
+ for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
+ frames, actions = generate_episode(steps)
+ all_frames += frames
+ all_actions += [actions[None, :]]
+ return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
+
+
+def create_data_and_processors(
+ nb_train_samples,
+ nb_test_samples,
+ mode,
+ nb_steps,
+ depth=3,
+ nb_bits_per_token=8,
+ nb_epochs=10,
+ device=torch.device("cpu"),
+ device_storage=torch.device("cpu"),
+ logger=None,
+):
+ assert mode in ["first_last"]
+
+ if mode == "first_last":
+ steps = [True] + [False] * (nb_steps + 1) + [True]
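+        # keep only the first and the last frame of each episode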
+
+ if logger is None:
+ logger = lambda s: print(s)
+
+ train_input, train_actions = generate_episodes(nb_train_samples, steps)
+ train_input, train_actions = train_input.to(device_storage), train_actions.to(
+ device_storage
+ )
+ test_input, test_actions = generate_episodes(nb_test_samples, steps)
+ test_input, test_actions = test_input.to(device_storage), test_actions.to(
+ device_storage
)
- torchvision.utils.save_image(
- results.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+ encoder, quantizer, decoder = train_encoder(
+ train_input,
+ test_input,
+ depth=depth,
+ nb_bits_per_token=nb_bits_per_token,
+ lambda_entropy=1.0,
+ nb_epochs=nb_epochs,
+ logger=logger,
+ device=device,
)
+ encoder.train(False)
+ quantizer.train(False)
+ decoder.train(False)
+
+ z = encoder(train_input[:1].to(device))
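+    # Powers of two used to pack each latent position's nb_bits_per_token
+    # binary code into a single integer token.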
+ pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
+ z_h, z_w = z.size(2), z.size(3)
+
+ logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
+
+ def frame2seq(input, batch_size=25):
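+        # Encode frames, binarize the latent code with the quantizer, and
+        # pack the bits at each spatial position into one integer token.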
+ seq = []
+ p = pow2.to(device)
+ for x in input.split(batch_size):
+ x = x.to(device)
+ z = encoder(x)
+ ze_bool = (quantizer(z) >= 0).long()
+ output = (
+ ze_bool.permute(0, 2, 3, 1).reshape(
+ ze_bool.size(0), -1, ze_bool.size(1)
+ )
+ * p
+ ).sum(-1)
+
+ seq.append(output)
+
+ return torch.cat(seq, dim=0)
+
+ def seq2frame(input, batch_size=25, T=1e-2):
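+        # Unpack each token back into +/-1 bit planes, decode them, and
+        # sample pixel levels from the sharpened (low T) categorical.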
+ frames = []
+ p = pow2.to(device)
+ for seq in input.split(batch_size):
+ seq = seq.to(device)
+ zd_bool = (seq[:, :, None] // p) % 2
+ zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
+ logits = decoder(zd_bool * 2.0 - 1.0)
+ logits = logits.reshape(
+ logits.size(0), -1, 3, logits.size(2), logits.size(3)
+ ).permute(0, 2, 3, 4, 1)
+ output = torch.distributions.categorical.Categorical(
+ logits=logits / T
+ ).sample()
+
+ frames.append(output)
+
+ return torch.cat(frames, dim=0)
+
+ return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame
######################################################################
if __name__ == "__main__":
- create_data_and_processors(250,100)
+ (
+ train_input,
+ train_actions,
+ test_input,
+ test_actions,
+ frame2seq,
+ seq2frame,
+ ) = create_data_and_processors(
+ 250,
+ 1000,
+ nb_epochs=5,
+ mode="first_last",
+ nb_steps=20,
+ )
- # train_input = generate_episodes(2500)
- # test_input = generate_episodes(1000)
+ input = test_input[:256]
- # encoder, quantizer, decoder = train_encoder(train_input, test_input)
+ seq = frame2seq(input)
+ output = seq2frame(seq)
- # input = test_input[torch.randperm(test_input.size(0))[:64]]
- # z = encoder(input.float())
- # zq = quantizer(z)
- # results = decoder(zq)
+ torchvision.utils.save_image(
+ input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
+ )
- # T = 0.1
- # results = torch.distributions.categorical.Categorical(logits=results / T).sample()
+ torchvision.utils.save_image(
+ output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
+ )