X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=world.py;h=aad0bfb9727a3757dd90a0bfcb56e74040c6e011;hb=732349f7c16e43ff84380d28e021d671f2c56492;hp=da7de75bd143e95244812b6666179ff915bd5d1e;hpb=a92a5ca00f4277f7a133fa6cfaada2bc1981f524;p=picoclvr.git

diff --git a/world.py b/world.py
index da7de75..aad0bfb 100755
--- a/world.py
+++ b/world.py
@@ -1,5 +1,10 @@
 #!/usr/bin/env python
 
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret
+
 import math, sys, tqdm
 
 import torch, torchvision
@@ -62,6 +67,20 @@ class SignSTE(nn.Module):
             return s
 
 
+class DiscreteSampler2d(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        s = (x >= x.max(-3, keepdim=True).values).float()
+
+        if self.training:
+            u = x.softmax(dim=-3)
+            return s + u - u.detach()
+        else:
+            return s
+
+
 def loss_H(binary_logits, h_threshold=1):
     p = binary_logits.sigmoid().mean(0)
     h = (-p.xlogy(p) - (1 - p).xlogy(1 - p)) / math.log(2)
@@ -72,9 +91,9 @@ def loss_H(binary_logits, h_threshold=1):
 def train_encoder(
     train_input,
     test_input,
-    depth=2,
+    depth,
+    nb_bits_per_token,
     dim_hidden=48,
-    nb_bits_per_token=8,
     lambda_entropy=0.0,
     lr_start=1e-3,
     lr_end=1e-4,
@@ -83,9 +102,6 @@ def train_encoder(
     logger=None,
     device=torch.device("cpu"),
 ):
-    if logger is None:
-        logger = lambda s: print(s)
-
     mu, std = train_input.float().mean(), train_input.float().std()
 
     def encoder_core(depth, dim):
@@ -144,7 +160,7 @@ def train_encoder(
 
     nb_parameters = sum(p.numel() for p in model.parameters())
 
-    logger(f"nb_parameters {nb_parameters}")
+    logger(f"vqae nb_parameters {nb_parameters}")
 
     model.to(device)
 
@@ -159,7 +175,7 @@ def train_encoder(
         for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
             input = input.to(device)
             z = encoder(input)
-            zq = z if k < 2 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -169,7 +185,7 @@ def train_encoder(
             train_loss = F.cross_entropy(output, input)
 
             if lambda_entropy > 0:
-                loss = loss + lambda_entropy * loss_H(z, h_threshold=0.5)
+                train_loss = train_loss + lambda_entropy * loss_H(z, h_threshold=0.5)
 
             acc_train_loss += train_loss.item() * input.size(0)
 
@@ -182,7 +198,7 @@ def train_encoder(
         for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
             input = input.to(device)
             z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -196,7 +212,7 @@ def train_encoder(
         train_loss = acc_train_loss / train_input.size(0)
         test_loss = acc_test_loss / test_input.size(0)
 
-        logger(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
         sys.stdout.flush()
 
     return encoder, quantizer, decoder
@@ -353,6 +369,8 @@ def create_data_and_processors(
     nb_test_samples,
     mode,
     nb_steps,
+    depth=3,
+    nb_bits_per_token=8,
     nb_epochs=10,
     device=torch.device("cpu"),
     device_storage=torch.device("cpu"),
@@ -363,6 +381,9 @@ def create_data_and_processors(
     if mode == "first_last":
         steps = [True] + [False] * (nb_steps + 1) + [True]
 
+    if logger is None:
+        logger = lambda s: print(s)
+
     train_input, train_actions = generate_episodes(nb_train_samples, steps)
     train_input, train_actions = train_input.to(device_storage), train_actions.to(
         device_storage
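Note on the DiscreteSampler2d added above: like SignSTE, it is a straight-through estimator. The forward pass emits a hard one-hot map over the channel dimension, while gradients flow through a softmax surrogate. A minimal standalone sketch of the pattern, with illustrative shapes (not part of world.py):

import torch

# Straight-through estimator: hard decision in the forward pass, soft
# gradient in the backward pass. As in DiscreteSampler2d, x has shape
# (batch, channels, height, width) and the winner is taken over the
# channel dimension (dim=-3).
x = torch.randn(2, 4, 3, 3, requires_grad=True)

s = (x >= x.max(-3, keepdim=True).values).float()  # hard one-hot, no gradient
u = x.softmax(dim=-3)                              # soft surrogate, differentiable
y = s + u - u.detach()                             # value of s, gradient of u

y.sum().backward()
assert x.grad is not None  # gradients reach x despite the hard threshold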
@@ -375,6 +396,8 @@ def create_data_and_processors(
     encoder, quantizer, decoder = train_encoder(
         train_input,
         test_input,
+        depth=depth,
+        nb_bits_per_token=nb_bits_per_token,
         lambda_entropy=1.0,
         nb_epochs=nb_epochs,
         logger=logger,
@@ -388,6 +411,8 @@ def create_data_and_processors(
     pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
     z_h, z_w = z.size(2), z.size(3)
 
+    logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
+
     def frame2seq(input, batch_size=25):
         seq = []
         p = pow2.to(device)
@@ -439,26 +464,22 @@ if __name__ == "__main__":
         frame2seq,
         seq2frame,
     ) = create_data_and_processors(
-        # 10000, 1000,
-        100,
-        100,
-        nb_epochs=2,
+        25000,
+        1000,
+        nb_epochs=5,
         mode="first_last",
         nb_steps=20,
     )
 
-    input = test_input[:64]
+    input = test_input[:256]
 
     seq = frame2seq(input)
-
-    print(f"{seq.size()=} {seq.dtype=} {seq.min()=} {seq.max()=}")
-
     output = seq2frame(seq)
 
     torchvision.utils.save_image(
-        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=8
+        input.float() / (Box.nb_rgb_levels - 1), "orig.png", nrow=16
     )
 
     torchvision.utils.save_image(
-        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=8
+        output.float() / (Box.nb_rgb_levels - 1), "qtiz.png", nrow=16
     )
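Note on the pow2 tensor and frame2seq above: they pack the encoder's nb_bits_per_token binary planes into one integer token per spatial position, which is what turns a frame into a token sequence. A minimal standalone sketch of that packing and its inverse, assuming z is a (batch, nb_bits, h, w) tensor with values in {0, 1} (pack and unpack are illustrative names, not the ones used in world.py):

import torch

nb_bits = 8  # matches the nb_bits_per_token default introduced in the diff
pow2 = 2 ** torch.arange(nb_bits)  # one weight per bit plane, as in the diff

def pack(z):
    # z: (batch, nb_bits, h, w) with values in {0, 1}
    # returns: (batch, h * w) integer tokens in [0, 2 ** nb_bits)
    b, n, h, w = z.size()
    bits = z.permute(0, 2, 3, 1).long()  # (batch, h, w, nb_bits)
    return (bits * pow2).sum(-1).reshape(b, h * w)

def unpack(seq, h, w):
    # inverse of pack: (batch, h * w) tokens back to (batch, nb_bits, h, w)
    bits = (seq[:, :, None] // pow2) % 2  # (batch, h * w, nb_bits)
    return bits.reshape(-1, h, w, nb_bits).permute(0, 3, 1, 2).float()

z = torch.randint(0, 2, (4, nb_bits, 6, 8)).float()
assert torch.equal(unpack(pack(z), 6, 8), z)  # exact round trip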