#!/usr/bin/env python
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
import math, sys, tqdm
import torch, torchvision

from torch import nn
from torch.nn import functional as F
            return s
+class DiscreteSampler2d(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
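+        # hard sample: one-hot over the channel dimension (dim -3), with
+        # a 1 at the channel-wise argmax and 0 elsewhere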
+        s = (x >= x.max(-3, keepdim=True).values).float()
+
+        if self.training:
+            u = x.softmax(dim=-3)
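+            # straight-through estimator: the forward value is the hard
+            # sample s, the gradient is that of the soft relaxation u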
+            return s + u - u.detach()
+        else:
+            return s
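+
+# quick sanity check (hypothetical shapes): with x of shape (N, C, H, W),
+# the forward value is one-hot over C at every spatial location,
+#
+#   x = torch.randn(2, 8, 4, 4)
+#   s = DiscreteSampler2d()(x)
+#   assert (s.sum(-3) == 1).all()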
+
+
+def loss_H(binary_logits, h_threshold=1):
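+    # entropy regularizer: pushes the batch-marginal entropy of every
+    # binary latent up to at least h_threshold bits, so that no bit
+    # collapses to a constant; the loss is zero once all bits comply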
+    p = binary_logits.sigmoid().mean(0)
+    h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
+    h.clamp_(max=h_threshold)
+    return h_threshold - h.mean()
+
+
def train_encoder(
    train_input,
    test_input,
-    depth=2,
+    depth,
+    nb_bits_per_token,
    dim_hidden=48,
-    nb_bits_per_token=8,
+    lambda_entropy=0.0,
    lr_start=1e-3,
    lr_end=1e-4,
    nb_epochs=10,
    logger=None,
    device=torch.device("cpu"),
):
-    if logger is None:
-        logger = lambda s: print(s)
-
    mu, std = train_input.float().mean(), train_input.float().std()

    def encoder_core(depth, dim):
    nb_parameters = sum(p.numel() for p in model.parameters())
-    logger(f"nb_parameters {nb_parameters}")
+    logger(f"vqae nb_parameters {nb_parameters}")
    model.to(device)
        acc_train_loss = 0.0
        for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
+            input = input.to(device)
            z = encoder(input)
-            zq = z if k < 2 else quantizer(z)
+            zq = quantizer(z)
            output = decoder(zq)
            output = output.reshape(
            train_loss = F.cross_entropy(output, input)
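+            # optional entropy term on the pre-quantization activations z,
+            # treated as binary logits; encourages use of all latent bits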
+            if lambda_entropy > 0:
+                train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)
+
            acc_train_loss += train_loss.item() * input.size(0)
            optimizer.zero_grad()
        acc_test_loss = 0.0
        for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
+            input = input.to(device)
            z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = quantizer(z)
            output = decoder(zq)
            output = output.reshape(
        train_loss = acc_train_loss / train_input.size(0)
        test_loss = acc_test_loss / test_input.size(0)
-        logger(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
        sys.stdout.flush()
    return encoder, quantizer, decoder
)
-def random_scene():
+def random_scene(nb_insert_attempts=3):
    scene = []
    colors = [
        ((Box.nb_rgb_levels - 1), 0, 0),
        ),
    ]
-    for k in range(10):
+    for k in range(nb_insert_attempts):
        wh = torch.rand(2) * 0.2 + 0.2
        xy = torch.rand(2) * (1 - wh)
        c = colors[torch.randint(len(colors), (1,))]
        xh, yh = tuple(x.item() for x in torch.rand(2))
        actions = torch.randint(len(effects), (len(steps),))
-        change = False
+        nb_changes = 0
        for s, a in zip(steps, actions):
            if s:
                frames.append(scene2tensor(xh, yh, scene, size=size))
-            g, dx, dy = effects[a]
-            if g:
+            grasp, dx, dy = effects[a]
+
+            if grasp:
                for b in scene:
                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
                        x, y = b.x, b.y
                        else:
                            xh += dx
                            yh += dy
-                            change = True
+                            nb_changes += 1
            else:
                x, y = xh, yh
                xh += dx
                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                    xh, yh = x, y
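+        # accept the episode only if a grasped box was successfully moved
+        # in more than a third of the steps; otherwise the surrounding
+        # loop retries with a fresh scene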
-        if change:
+        if nb_changes > len(steps) // 3:
            break
    return frames, actions
    nb_test_samples,
    mode,
    nb_steps,
+    depth=3,
+    nb_bits_per_token=8,
    nb_epochs=10,
    device=torch.device("cpu"),
+    device_storage=torch.device("cpu"),
    logger=None,
):
    assert mode in ["first_last"]
    if mode == "first_last":
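+        # record only the first and the last frame of each episode; the
+        # intermediate steps are simulated but not stored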
        steps = [True] + [False] * (nb_steps + 1) + [True]
+    if logger is None:
+        logger = lambda s: print(s)
+
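+    # keep the full dataset on device_storage (typically the CPU) to save
+    # accelerator memory; batches are moved to device on the fly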
    train_input, train_actions = generate_episodes(nb_train_samples, steps)
-    train_input, train_actions = train_input.to(device), train_actions.to(device)
+    train_input, train_actions = train_input.to(device_storage), train_actions.to(
+        device_storage
+    )
    test_input, test_actions = generate_episodes(nb_test_samples, steps)
-    test_input, test_actions = test_input.to(device), test_actions.to(device)
+    test_input, test_actions = test_input.to(device_storage), test_actions.to(
+        device_storage
+    )
    encoder, quantizer, decoder = train_encoder(
-        train_input, test_input, nb_epochs=nb_epochs, logger=logger, device=device
+        train_input,
+        test_input,
+        depth=depth,
+        nb_bits_per_token=nb_bits_per_token,
+        lambda_entropy=1.0,
+        nb_epochs=nb_epochs,
+        logger=logger,
+        device=device,
    )
    encoder.train(False)
    quantizer.train(False)
    decoder.train(False)
-    z = encoder(train_input[:1])
-    pow2 = (2 ** torch.arange(z.size(1), device=z.device))[None, None, :]
+    z = encoder(train_input[:1].to(device))
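+    # pow2[0, 0, i] = 2**i: the weights used to pack the nb_bits_per_token
+    # binary channels of every latent location into one integer token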
+    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
    z_h, z_w = z.size(2), z.size(3)
+    logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
+
    def frame2seq(input, batch_size=25):
        seq = []
-
+        p = pow2.to(device)
        for x in input.split(batch_size):
+            x = x.to(device)
            z = encoder(x)
            ze_bool = (quantizer(z) >= 0).long()
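+            # pack the bit planes of each spatial location into a single
+            # integer token: move the bits to the last dim, weight them by
+            # powers of two, and sum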
            output = (
                ze_bool.permute(0, 2, 3, 1).reshape(
                    ze_bool.size(0), -1, ze_bool.size(1)
                )
-                * pow2
+                * p
            ).sum(-1)
            seq.append(output)
    def seq2frame(input, batch_size=25, T=1e-2):
        frames = []
-
+        p = pow2.to(device)
        for seq in input.split(batch_size):
-            zd_bool = (seq[:, :, None] // pow2) % 2
+            seq = seq.to(device)
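+            # unpack each integer token back into its binary code; the
+            # bits {0, 1} are mapped to {-1, +1} before entering the decoder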
+            zd_bool = (seq[:, :, None] // p) % 2
            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
            logits = decoder(zd_bool * 2.0 - 1.0)
            logits = logits.reshape(
        frame2seq,
        seq2frame,
    ) = create_data_and_processors(
-        # 10000, 1000,
-        100,
-        100,
-        nb_epochs=2,
+        25000,
+        1000,
+        nb_epochs=5,
        mode="first_last",
        nb_steps=20,
    )
-    input = test_input[:64]
+    input = test_input[:256]
    seq = frame2seq(input)
-
-    print(f"{seq.size()=} {seq.dtype=} {seq.min()=} {seq.max()=}")
-
    output = seq2frame(seq)
    torchvision.utils.save_image(
-        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
    )
    torchvision.utils.save_image(
-        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
    )