#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, tqdm

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import cairo


######################################################################


class Box:
    nb_rgb_levels = 10

    def __init__(self, x, y, w, h, r, g, b):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
        self.r = r
        self.g = g
        self.b = b

    # True iff self overlaps another box of the scene
    def collision(self, scene):
        for c in scene:
            if (
                self is not c
                and max(self.x, c.x) <= min(self.x + self.w, c.x + c.w)
                and max(self.y, c.y) <= min(self.y + self.h, c.y + c.h)
            ):
                return True
        return False


######################################################################


class Normalizer(nn.Module):
    def __init__(self, mu, std):
        super().__init__()
        self.register_buffer("mu", mu)
        self.register_buffer("log_var", 2 * torch.log(std))

    def forward(self, x):
        return (x - self.mu) / torch.exp(self.log_var / 2.0)


class SignSTE(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # torch.sign() returns three values (-1, 0, +1); we want a
        # strictly binary code, hence the explicit thresholding
        s = (x >= 0).float() * 2 - 1

        if self.training:
            u = torch.tanh(x)
            # straight-through estimator: the forward pass uses the hard
            # sign s, the backward pass the gradient of the smooth tanh
            return s + u - u.detach()
        else:
            return s
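

# A minimal gradient check for the straight-through estimator above
# (illustrative only; this helper is not part of the pipeline): the
# forward pass returns hard signs while the backward pass follows the
# tanh surrogate.
def _demo_sign_ste():
    ste = SignSTE()
    ste.train(True)
    x = torch.tensor([-0.5, 0.25], requires_grad=True)
    y = ste(x)
    assert y.tolist() == [-1.0, 1.0]  # hard values in the forward pass
    y.sum().backward()
    # the gradient is that of tanh, nonzero despite the hard forward
    assert torch.allclose(x.grad, 1 - torch.tanh(x.detach()) ** 2)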


class DiscreteSampler2d(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # one-hot of the channel-wise argmax
        s = (x >= x.max(-3, keepdim=True).values).float()

        if self.training:
            u = x.softmax(dim=-3)
            return s + u - u.detach()
        else:
            return s
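

# Quick check of the sampler above (illustrative only): the forward
# pass one-hots the channel-wise argmax, and gradients flow through the
# softmax surrogate.
def _demo_discrete_sampler():
    sampler = DiscreteSampler2d()
    sampler.train(True)
    x = torch.randn(1, 4, 2, 2, requires_grad=True)
    y = sampler(x)
    assert (y.sum(1) == 1).all()  # exactly one active channel per pixel
    y.sum().backward()  # gradients flow via the softmax term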


# Penalizes bits whose empirical entropy over the batch falls below
# h_threshold (in bits), pushing the code to actually use its capacity
def loss_H(binary_logits, h_threshold=1):
    p = binary_logits.sigmoid().mean(0)
    h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
    h.clamp_(max=h_threshold)
    return h_threshold - h.mean()
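

# Sanity check for the entropy loss above (illustrative only): all-zero
# logits give p=0.5 per bit, i.e. one bit of entropy each, so the loss
# vanishes, while strongly biased logits are penalized by up to
# h_threshold.
def _demo_loss_H():
    assert loss_H(torch.zeros(100, 8), h_threshold=1).item() < 1e-6
    assert loss_H(torch.full((100, 8), 10.0), h_threshold=1).item() > 0.9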


def train_encoder(
    train_input,
    test_input,
    depth,
    nb_bits_per_token,
    dim_hidden=48,
    lambda_entropy=0.0,
    lr_start=1e-3,
    lr_end=1e-4,
    nb_epochs=10,
    batch_size=25,
    logger=None,
    device=torch.device("cpu"),
):
    if logger is None:
        logger = lambda s: print(s)

    mu, std = train_input.float().mean(), train_input.float().std()

    def encoder_core(depth, dim):
        l = [
            [
                nn.Conv2d(
                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
                ),
                nn.ReLU(),
                nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2),
                nn.ReLU(),
            ]
            for k in range(depth)
        ]

        return nn.Sequential(*[x for m in l for x in m])

    def decoder_core(depth, dim):
        l = [
            [
                nn.ConvTranspose2d(
                    dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2
                ),
                nn.ReLU(),
                nn.ConvTranspose2d(
                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
                ),
                nn.ReLU(),
            ]
            for k in range(depth - 1, -1, -1)
        ]

        return nn.Sequential(*[x for m in l for x in m])

    encoder = nn.Sequential(
        Normalizer(mu, std),
        nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1),
        nn.ReLU(),
        # 64x64
        encoder_core(depth=depth, dim=dim_hidden),
        # 8x8
        nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1),
    )

    quantizer = SignSTE()

    decoder = nn.Sequential(
        nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1),
        # 8x8
        decoder_core(depth=depth, dim=dim_hidden),
        # 64x64
        nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1),
    )

    model = nn.Sequential(encoder, decoder)

    nb_parameters = sum(p.numel() for p in model.parameters())

    logger(f"vqae nb_parameters {nb_parameters}")

    model.to(device)

    for k in range(nb_epochs):
        # geometric interpolation of the learning rate from lr_start to lr_end
        lr = math.exp(
            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
        )
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        acc_train_loss = 0.0

        for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
            input = input.to(device)

            z = encoder(input)
            zq = quantizer(z)
            output = decoder(zq)

            output = output.reshape(
                output.size(0), -1, 3, output.size(2), output.size(3)
            )

            train_loss = F.cross_entropy(output, input)

            if lambda_entropy > 0:
                train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)

            acc_train_loss += train_loss.item() * input.size(0)

            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

        acc_test_loss = 0.0

        for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
            input = input.to(device)

            z = encoder(input)
            zq = quantizer(z)
            output = decoder(zq)

            output = output.reshape(
                output.size(0), -1, 3, output.size(2), output.size(3)
            )

            test_loss = F.cross_entropy(output, input)

            acc_test_loss += test_loss.item() * input.size(0)

        train_loss = acc_train_loss / train_input.size(0)
        test_loss = acc_test_loss / test_input.size(0)

        logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")

        sys.stdout.flush()

    return encoder, quantizer, decoder


######################################################################


def scene2tensor(xh, yh, scene, size):
    width, height = size, size
    pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
    data = pixel_map.numpy()
    surface = cairo.ImageSurface.create_for_data(
        data, cairo.FORMAT_ARGB32, width, height
    )

    ctx = cairo.Context(surface)
    ctx.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)

    for b in scene:
        ctx.move_to(b.x * size, b.y * size)
        ctx.rel_line_to(b.w * size, 0)
        ctx.rel_line_to(0, b.h * size)
        ctx.rel_line_to(-b.w * size, 0)
        ctx.close_path()
        ctx.set_source_rgba(
            b.r / (Box.nb_rgb_levels - 1),
            b.g / (Box.nb_rgb_levels - 1),
            b.b / (Box.nb_rgb_levels - 1),
            1.0,
        )
        ctx.fill()

    # draw the "hand" as a small black square centered at (xh, yh)
    hs = size * 0.1
    ctx.set_source_rgba(0.0, 0.0, 0.0, 1.0)
    ctx.move_to(xh * size - hs / 2, yh * size - hs / 2)
    ctx.rel_line_to(hs, 0)
    ctx.rel_line_to(0, hs)
    ctx.rel_line_to(-hs, 0)
    ctx.close_path()
    ctx.fill()

    # cairo's ARGB32 is BGRA in memory on little-endian machines, hence
    # the flip to get RGB, then quantization to Box.nb_rgb_levels levels
    return (
        pixel_map[None, :, :, :3]
        .flip(-1)
        .permute(0, 3, 1, 2)
        .long()
        .mul(Box.nb_rgb_levels)
        .floor_divide(256)
    )
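

# Smoke test for the renderer above (illustrative only; requires the
# pycairo package): an empty scene with the hand at the center should
# produce a (1, 3, size, size) map of integer levels in
# [0, Box.nb_rgb_levels - 1].
def _demo_scene2tensor():
    t = scene2tensor(0.5, 0.5, [], size=64)
    assert t.size() == (1, 3, 64, 64)
    assert t.min().item() >= 0 and t.max().item() < Box.nb_rgb_levels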


def random_scene(nb_insert_attempts=3):
    scene = []
    # red, green, blue, yellow, gray
    colors = [
        ((Box.nb_rgb_levels - 1), 0, 0),
        (0, (Box.nb_rgb_levels - 1), 0),
        (0, 0, (Box.nb_rgb_levels - 1)),
        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
        (
            (Box.nb_rgb_levels * 2) // 3,
            (Box.nb_rgb_levels * 2) // 3,
            (Box.nb_rgb_levels * 2) // 3,
        ),
    ]

    for k in range(nb_insert_attempts):
        wh = torch.rand(2) * 0.2 + 0.2
        xy = torch.rand(2) * (1 - wh)
        c = colors[torch.randint(len(colors), (1,))]
        b = Box(
            xy[0].item(), xy[1].item(), wh[0].item(), wh[1].item(), c[0], c[1], c[2]
        )
        if not b.collision(scene):
            scene.append(b)

    return scene


def generate_episode(steps, size=64):
    delta = 0.1
    # (grasp?, dx, dy): standing still, then the four moves, first with
    # the hand free, then with it grasping a box
    effects = [
        (False, 0, 0),
        (False, delta, 0),
        (False, 0, delta),
        (False, -delta, 0),
        (False, 0, -delta),
        (True, delta, 0),
        (True, 0, delta),
        (True, -delta, 0),
        (True, 0, -delta),
    ]

    while True:
        frames = []

        scene = random_scene()
        xh, yh = tuple(x.item() for x in torch.rand(2))

        actions = torch.randint(len(effects), (len(steps),))
        nb_changes = 0

        for s, a in zip(steps, actions):
            if s:
                frames.append(scene2tensor(xh, yh, scene, size=size))

            grasp, dx, dy = effects[a]

            if grasp:
                for b in scene:
                    if b.x <= xh and b.x + b.w >= xh and b.y <= yh and b.y + b.h >= yh:
                        x, y = b.x, b.y
                        b.x += dx
                        b.y += dy
                        if (
                            b.x < 0
                            or b.y < 0
                            or b.x + b.w > 1
                            or b.y + b.h > 1
                            or b.collision(scene)
                        ):
                            b.x, b.y = x, y
                        else:
                            xh += dx
                            yh += dy
                            nb_changes += 1
            else:
                x, y = xh, yh
                xh += dx
                yh += dy
                if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                    xh, yh = x, y

        # reject episodes where the hand barely interacted with the boxes
        if nb_changes > len(steps) // 3:
            break

    return frames, actions
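

# Example of use (illustrative only): keep the first and last frames of
# an episode, as the "first_last" mode of create_data_and_processors
# does below.
def _demo_generate_episode():
    steps = [True] + [False] * 6 + [True]
    frames, actions = generate_episode(steps, size=64)
    assert len(frames) == 2  # one frame per True entry in steps
    assert actions.size() == (len(steps),)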


######################################################################


def generate_episodes(nb, steps):
    all_frames, all_actions = [], []
    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
        frames, actions = generate_episode(steps)
        all_frames += frames
        all_actions += [actions[None, :]]
    return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)


def create_data_and_processors(
    nb_train_samples,
    nb_test_samples,
    mode,
    nb_steps,
    depth=3,
    nb_bits_per_token=8,
    nb_epochs=10,
    device=torch.device("cpu"),
    device_storage=torch.device("cpu"),
    logger=None,
):
    assert mode in ["first_last"]

    if mode == "first_last":
        steps = [True] + [False] * (nb_steps + 1) + [True]

    if logger is None:
        logger = lambda s: print(s)

    train_input, train_actions = generate_episodes(nb_train_samples, steps)
    train_input, train_actions = train_input.to(device_storage), train_actions.to(
        device_storage
    )
    test_input, test_actions = generate_episodes(nb_test_samples, steps)
    test_input, test_actions = test_input.to(device_storage), test_actions.to(
        device_storage
    )

    encoder, quantizer, decoder = train_encoder(
        train_input,
        test_input,
        depth=depth,
        nb_bits_per_token=nb_bits_per_token,
        lambda_entropy=1.0,
        nb_epochs=nb_epochs,
        logger=logger,
        device=device,
    )
    encoder.train(False)
    quantizer.train(False)
    decoder.train(False)

    z = encoder(train_input[:1].to(device))
    pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
    z_h, z_w = z.size(2), z.size(3)

    logger(f"vqae input {train_input[0].size()} output {z[0].size()}")

    def frame2seq(input, batch_size=25):
        seq = []
        p = pow2.to(device)
        for x in input.split(batch_size):
            x = x.to(device)
            z = encoder(x)
            ze_bool = (quantizer(z) >= 0).long()
            # pack the nb_bits_per_token binary channels of each code-map
            # cell into a single integer token
            output = (
                ze_bool.permute(0, 2, 3, 1).reshape(
                    ze_bool.size(0), -1, ze_bool.size(1)
                )
                * p
            ).sum(-1)

            seq.append(output)

        return torch.cat(seq, dim=0)

    def seq2frame(input, batch_size=25, T=1e-2):
        frames = []
        p = pow2.to(device)
        for seq in input.split(batch_size):
            seq = seq.to(device)
            # unpack each token back into its binary channels
            zd_bool = (seq[:, :, None] // p) % 2
            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
            logits = decoder(zd_bool * 2.0 - 1.0)
            logits = logits.reshape(
                logits.size(0), -1, 3, logits.size(2), logits.size(3)
            ).permute(0, 2, 3, 4, 1)
            output = torch.distributions.categorical.Categorical(
                logits=logits / T
            ).sample()

            frames.append(output)

        return torch.cat(frames, dim=0)

    return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame
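

# Round-trip sketch for the processors returned above (illustrative,
# with assumed shapes: with depth=3 on 64x64 frames the code map is
# 8x8, so each frame becomes 64 integer tokens in
# [0, 2**nb_bits_per_token)):
#
#   seq = frame2seq(test_input[:16])  # -> LongTensor of shape (16, 64)
#   imgs = seq2frame(seq)             # -> (16, 3, 64, 64) quantized frames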


######################################################################

if __name__ == "__main__":
    (
        train_input,
        train_actions,
        test_input,
        test_actions,
        frame2seq,
        seq2frame,
    ) = create_data_and_processors(
        # sample counts, step count, and epoch count assumed for a
        # standalone test; the original values were elided
        25000,
        1000,
        nb_steps=5,
        mode="first_last",
        nb_epochs=2,
    )

    input = test_input[:256]

    seq = frame2seq(input)
    output = seq2frame(seq)

    torchvision.utils.save_image(
        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
    )

    torchvision.utils.save_image(
        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
    )