X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=world.py;h=c4527f8835ad9a864cb07252c75ce6639d72b04f;hb=6935899c1050d4f6a956fc8d2b50d2ba1544b6cc;hp=a43eff9f3787d216daada4b20da5904c19c1dffa;hpb=c03e968adc7bf73df07a0fad89a835b98f4e76df;p=picoclvr.git

diff --git a/world.py b/world.py
index a43eff9..c4527f8 100755
--- a/world.py
+++ b/world.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 
-import math, sys
+import math, sys, tqdm
 
 import torch, torchvision
 
@@ -8,8 +8,12 @@ from torch import nn
 from torch.nn import functional as F
 import cairo
 
+######################################################################
+
 
 class Box:
+    nb_rgb_levels = 10
+
     def __init__(self, x, y, w, h, r, g, b):
         self.x = x
         self.y = y
@@ -30,6 +34,160 @@ class Box:
 
         return False
 
+######################################################################
+
+
+class Normalizer(nn.Module):
+    def __init__(self, mu, std):
+        super().__init__()
+        self.register_buffer("mu", mu)
+        self.register_buffer("log_var", 2 * torch.log(std))
+
+    def forward(self, x):
+        return (x - self.mu) / torch.exp(self.log_var / 2.0)
+
+
+class SignSTE(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        # torch.sign() maps to {-1, 0, 1}; we need a strict {-1, 1}
+        s = (x >= 0).float() * 2 - 1
+
+        if self.training:
+            u = torch.tanh(x)
+            return s + u - u.detach()
+        else:
+            return s
+
+
+def train_encoder(
+    train_input,
+    test_input,
+    depth=3,
+    dim_hidden=48,
+    nb_bits_per_token=8,
+    lr_start=1e-3,
+    lr_end=1e-4,
+    nb_epochs=10,
+    batch_size=25,
+    device=torch.device("cpu"),
+):
+    mu, std = train_input.float().mean(), train_input.float().std()
+
+    def encoder_core(depth, dim):
+        l = [
+            [
+                nn.Conv2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+                nn.Conv2d(dim * 2**k, dim * 2 ** (k + 1), kernel_size=2, stride=2),
+                nn.ReLU(),
+            ]
+            for k in range(depth)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    def decoder_core(depth, dim):
+        l = [
+            [
+                nn.ConvTranspose2d(
+                    dim * 2 ** (k + 1), dim * 2**k, kernel_size=2, stride=2
+                ),
+                nn.ReLU(),
+                nn.ConvTranspose2d(
+                    dim * 2**k, dim * 2**k, kernel_size=5, stride=1, padding=2
+                ),
+                nn.ReLU(),
+            ]
+            for k in range(depth - 1, -1, -1)
+        ]
+
+        return nn.Sequential(*[x for m in l for x in m])
+
+    encoder = nn.Sequential(
+        Normalizer(mu, std),
+        nn.Conv2d(3, dim_hidden, kernel_size=1, stride=1),
+        nn.ReLU(),
+        # 64x64
+        encoder_core(depth=depth, dim=dim_hidden),
+        # 8x8
+        nn.Conv2d(dim_hidden * 2**depth, nb_bits_per_token, kernel_size=1, stride=1),
+    )
+
+    quantizer = SignSTE()
+
+    decoder = nn.Sequential(
+        nn.Conv2d(nb_bits_per_token, dim_hidden * 2**depth, kernel_size=1, stride=1),
+        # 8x8
+        decoder_core(depth=depth, dim=dim_hidden),
+        # 64x64
+        nn.ConvTranspose2d(dim_hidden, 3 * Box.nb_rgb_levels, kernel_size=1, stride=1),
+    )
+
+    model = nn.Sequential(encoder, decoder)
+
+    nb_parameters = sum(p.numel() for p in model.parameters())
+
+    print(f"nb_parameters {nb_parameters}")
+
+    model.to(device)
+
+    for k in range(nb_epochs):
+        lr = math.exp(
+            math.log(lr_start) + math.log(lr_end / lr_start) / (nb_epochs - 1) * k
+        )
+        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+
+        acc_train_loss = 0.0
+
+        for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
+            z = encoder(input)
+            zq = z if k < 1 else quantizer(z)
+            output = decoder(zq)
+
+            output = output.reshape(
+                output.size(0), -1, 3, output.size(2), output.size(3)
+            )
+
+            train_loss = F.cross_entropy(output, input)
+
+            acc_train_loss += train_loss.item() * input.size(0)
+
+            optimizer.zero_grad()
+            train_loss.backward()
+            optimizer.step()
+
+        acc_test_loss = 0.0
+
+        for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
+            z = encoder(input)
+            zq = z if k < 1 else quantizer(z)
+            output = decoder(zq)
+
+            output = output.reshape(
+                output.size(0), -1, 3, output.size(2), output.size(3)
+            )
+
+            test_loss = F.cross_entropy(output, input)
+
+            acc_test_loss += test_loss.item() * input.size(0)
+
+        train_loss = acc_train_loss / train_input.size(0)
+        test_loss = acc_test_loss / test_input.size(0)
+
+        print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        sys.stdout.flush()
+
+    return encoder, quantizer, decoder
+
+
+######################################################################
+
+
 def scene2tensor(xh, yh, scene, size):
     width, height = size, size
     pixel_map = torch.ByteTensor(width, height, 4).fill_(255)
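The SignSTE module added above is a straight-through estimator: in training
mode its forward value is the hard sign of its input, while its gradient is
that of tanh. A minimal sketch of that behavior, assuming the new world.py is
on the import path; the toy tensor below is illustrative, not part of the
commit:

    import torch
    from world import SignSTE

    ste = SignSTE()
    ste.train(True)
    x = torch.tensor([-1.5, 0.2], requires_grad=True)
    y = ste(x)         # tensor([-1., 1.]): hard sign values in the forward pass
    y.sum().backward()
    print(x.grad)      # 1 - tanh(x)^2: the backward pass sees tanh's gradient
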
@@ -47,7 +205,12 @@ def scene2tensor(xh, yh, scene, size):
         ctx.rel_line_to(0, b.h * size)
         ctx.rel_line_to(-b.w * size, 0)
         ctx.close_path()
-        ctx.set_source_rgba(b.r, b.g, b.b, 1.0)
+        ctx.set_source_rgba(
+            b.r / (Box.nb_rgb_levels - 1),
+            b.g / (Box.nb_rgb_levels - 1),
+            b.b / (Box.nb_rgb_levels - 1),
+            1.0,
+        )
         ctx.fill()
 
         hs = size * 0.1
@@ -59,17 +222,28 @@ def scene2tensor(xh, yh, scene, size):
     ctx.close_path()
     ctx.fill()
 
-    return pixel_map[None, :, :, :3].flip(-1).permute(0, 3, 1, 2).float() / 255
+    return (
+        pixel_map[None, :, :, :3]
+        .flip(-1)
+        .permute(0, 3, 1, 2)
+        .long()
+        .mul(Box.nb_rgb_levels)
+        .floor_divide(256)
+    )
 
 
 def random_scene():
     scene = []
     colors = [
-        (1.00, 0.00, 0.00),
-        (0.00, 1.00, 0.00),
-        (0.60, 0.60, 1.00),
-        (1.00, 1.00, 0.00),
-        (0.75, 0.75, 0.75),
+        ((Box.nb_rgb_levels - 1), 0, 0),
+        (0, (Box.nb_rgb_levels - 1), 0),
+        (0, 0, (Box.nb_rgb_levels - 1)),
+        ((Box.nb_rgb_levels - 1), (Box.nb_rgb_levels - 1), 0),
+        (
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+            (Box.nb_rgb_levels * 2) // 3,
+        ),
     ]
 
     for k in range(10):
@@ -85,7 +259,7 @@ def random_scene():
     return scene
 
 
-def generate_sequence(nb_steps=10, all_frames=False, size=64):
+def generate_episode(steps, size=64):
     delta = 0.1
     effects = [
         (False, 0, 0),
@@ -105,12 +279,13 @@ def generate_sequence(nb_steps=10, all_frames=False, size=64):
         scene = random_scene()
         xh, yh = tuple(x.item() for x in torch.rand(2))
 
-        frames.append(scene2tensor(xh, yh, scene, size=size))
-
-        actions = torch.randint(len(effects), (nb_steps,))
+        actions = torch.randint(len(effects), (len(steps),))
         change = False
 
-        for a in actions:
+        for s, a in zip(steps, actions):
+            if s:
+                frames.append(scene2tensor(xh, yh, scene, size=size))
+
             g, dx, dy = effects[a]
             if g:
                 for b in scene:
@@ -137,12 +312,6 @@ def generate_sequence(nb_steps=10, all_frames=False, size=64):
             if xh < 0 or xh > 1 or yh < 0 or yh > 1:
                 xh, yh = x, y
 
-        if all_frames:
-            frames.append(scene2tensor(xh, yh, scene, size=size))
-
-        if not all_frames:
-            frames.append(scene2tensor(xh, yh, scene, size=size))
-
         if change:
             break
 
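With the new signature, the caller controls which frames are captured: entry
i of steps is tested before action i is applied, so a True records the scene
as it stands at that step. A hedged sketch of a call (the mask shown is the
one create_data_and_processors builds further down; the call itself is only
an illustration):

    from world import generate_episode

    # capture the initial frame and the frame preceding the last of 32 actions
    steps = [True] + [False] * 30 + [True]
    frames, actions = generate_episode(steps, size=64)
    # frames: list of 1x3x64x64 long tensors, values in [0, Box.nb_rgb_levels)
    # actions: tensor of len(steps) indices into the effects table
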
@@ -199,174 +368,75 @@ def kmeans(x, nb_centroids, nb_min=1):
 ######################################################################
 
 
-def patchify(x, factor, invert_size=None):
-    if invert_size is None:
-        return (
-            x.reshape(
-                x.size(0),  # 0
-                x.size(1),  # 1
-                factor,  # 2
-                x.size(2) // factor,  # 3
-                factor,  # 4
-                x.size(3) // factor,  # 5
-            )
-            .permute(0, 2, 4, 1, 3, 5)
-            .reshape(-1, x.size(1), x.size(2) // factor, x.size(3) // factor)
-        )
-    else:
-        return (
-            x.reshape(
-                invert_size[0],  # 0
-                factor,  # 1
-                factor,  # 2
-                invert_size[1],  # 3
-                invert_size[2] // factor,  # 4
-                invert_size[3] // factor,  # 5
-            )
-            .permute(0, 3, 1, 4, 2, 5)
-            .reshape(invert_size)
-        )
-
-
-class Normalizer(nn.Module):
-    def __init__(self, mu, std):
-        super().__init__()
-        self.mu = nn.Parameter(mu)
-        self.log_var = nn.Parameter(2*torch.log(std))
-
-    def forward(self, x):
-        return (x-self.mu)/torch.exp(self.log_var/2.0)
-
-class SignSTE(nn.Module):
-    def __init__(self):
-        super().__init__()
+def generate_episodes(nb, steps):
+    all_frames = []
+    for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
+        frames, actions = generate_episode(steps)
+        all_frames += frames
+    return torch.cat(all_frames, 0).contiguous()
 
-    def forward(self, x):
-        # torch.sign() takes three values
-        s = (x >= 0).float() * 2 - 1
 
-        if self.training:
-            u = torch.tanh(x)
-            return s + u - u.detach()
-        else:
-            return s
+def create_data_and_processors(nb_train_samples, nb_test_samples, nb_epochs=10):
+    steps = [True] + [False] * 30 + [True]
 
+    train_input = generate_episodes(nb_train_samples, steps)
+    test_input = generate_episodes(nb_test_samples, steps)
 
-def train_encoder(
-    train_input,
-    dim_hidden=64,
-    block_size=16,
-    nb_bits_per_block=10,
-    lr_start=1e-3, lr_end=1e-5,
-    nb_epochs=50,
-    batch_size=25,
-    device=torch.device("cpu"),
-):
-    mu, std = train_input.mean(), train_input.std()
+    print(f"{train_input.size()=} {test_input.size()=}")
 
-    encoder = nn.Sequential(
-        Normalizer(mu, std),
-        nn.Conv2d(3, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(
-            dim_hidden,
-            nb_bits_per_block,
-            kernel_size=block_size,
-            stride=block_size,
-            padding=0,
-        ),
-        SignSTE(),
-    )
-
-    decoder = nn.Sequential(
-        nn.ConvTranspose2d(
-            nb_bits_per_block,
-            dim_hidden,
-            kernel_size=block_size,
-            stride=block_size,
-            padding=0,
-        ),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, dim_hidden, kernel_size=5, stride=1, padding=2),
-        nn.ReLU(),
-        nn.Conv2d(dim_hidden, 3, kernel_size=5, stride=1, padding=2),
+    encoder, quantizer, decoder = train_encoder(
+        train_input, test_input, nb_epochs=nb_epochs
     )
-
-    model = nn.Sequential(encoder, decoder)
-
-    nb_parameters = sum(p.numel() for p in model.parameters())
-
-    print(f"nb_parameters {nb_parameters}")
-
-    model.to(device)
-
-    for k in range(nb_epochs):
-        lr=math.exp(math.log(lr_start) + math.log(lr_end/lr_start)/(nb_epochs-1)*k)
-        print(f"lr {lr}")
-        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
-        acc_loss, nb_samples = 0.0, 0
-
-        for input in train_input.split(batch_size):
-            output = model(input)
-            loss = F.mse_loss(output, input)
-            acc_loss += loss.item() * input.size(0)
-            nb_samples += input.size(0)
-
-            optimizer.zero_grad()
-            loss.backward()
-            optimizer.step()
-
-        print(f"loss {k} {acc_loss/nb_samples}")
-        sys.stdout.flush()
-
-    return encoder, decoder
+    encoder.train(False)
+    quantizer.train(False)
+    decoder.train(False)
+
+    z = encoder(train_input[:1])
+    pow2 = (2 ** torch.arange(z.size(1), device=z.device))[None, None, :]
+    z_h, z_w = z.size(2), z.size(3)
+
+    def frame2seq(x):
+        z = encoder(x)
+        ze_bool = (quantizer(z) >= 0).long()
+        seq = (
+            ze_bool.permute(0, 2, 3, 1).reshape(ze_bool.size(0), -1, ze_bool.size(1))
+            * pow2
+        ).sum(-1)
+        return seq
+
+    def seq2frame(seq, T=1e-2):
+        zd_bool = (seq[:, :, None] // pow2) % 2
+        zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
+        logits = decoder(zd_bool * 2.0 - 1.0)
+        logits = logits.reshape(
+            logits.size(0), -1, 3, logits.size(2), logits.size(3)
+        ).permute(0, 2, 3, 4, 1)
+        results = torch.distributions.categorical.Categorical(
+            logits=logits / T
+        ).sample()
+        return results
+
+    return train_input, test_input, frame2seq, seq2frame
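frame2seq and seq2frame above pack and unpack the nb_bits_per_token sign bits
of each latent location into a single integer token with the pow2 vector. A
minimal standalone sketch of that packing trick, with toy shapes that are not
from the commit:

    import torch

    bits = torch.tensor([[1, 0, 1]])       # one location, 3 bits
    pow2 = 2 ** torch.arange(3)            # tensor([1, 2, 4])
    token = (bits * pow2).sum(-1)          # tensor([5]): packed token id
    back = (token[:, None] // pow2) % 2    # tensor([[1, 0, 1]]): bits recovered
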
 
 
 ######################################################################
 
 
 if __name__ == "__main__":
-    import time
-
-    all_frames = []
-    nb = 25000
-    start_time = time.perf_counter()
-    for n in range(nb):
-        frames, actions = generate_sequence(nb_steps=31)
-        all_frames += frames
-    end_time = time.perf_counter()
-    print(f"{nb / (end_time - start_time):.02f} samples per second")
-
-    input = torch.cat(all_frames, 0)
-    encoder, decoder = train_encoder(input)
+    train_input, test_input, frame2seq, seq2frame = create_data_and_processors(
+        10000, 1000
+    )
 
-    # x = patchify(input, 8)
-    # y = x.reshape(x.size(0), -1)
-    # print(f"{x.size()=} {y.size()=}")
-    # centroids, t = kmeans(y, 4096)
-    # results = centroids[t]
-    # results = results.reshape(x.size())
-    # results = patchify(results, 8, input.size())
+    input = test_input[:64]
 
-    z = encoder(input)
-    results = decoder(z)
+    seq = frame2seq(input)
 
-    print(f"{input.size()=} {z.size()=} {results.size()=}")
+    print(f"{seq.size()=} {seq.dtype=} {seq.min()=} {seq.max()=}")
 
-    torchvision.utils.save_image(input[:64], "orig.png", nrow=8)
+    output = seq2frame(seq)
 
-    torchvision.utils.save_image(results[:64], "qtiz.png", nrow=8)
+    torchvision.utils.save_image(
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+    )
 
-    # frames, actions = generate_sequence(nb_steps=31, all_frames=True)
-    # frames = torch.cat(frames, 0)
-    # torchvision.utils.save_image(frames, "seq.png", nrow=8)
+    torchvision.utils.save_image(
+        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+    )
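Since seq2frame samples the pixel levels with temperature T=1e-2, the
frame -> seq -> frame round trip is close to deterministic and its fidelity
can be estimated directly. A possible check, sketched as an assumption rather
than part of the commit, continuing the __main__ block above:

    # hedged sketch: fraction of quantized RGB values changed by the
    # round trip through the token codec
    with torch.no_grad():
        recon = seq2frame(frame2seq(input))
    mismatch = (recon != input).float().mean().item()
    print(f"round-trip mismatch rate {mismatch:.4f}")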