X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=world.py;h=b35a08ef9a2a149b7820288273bde973aa68fe55;hb=0f580d4facb4b4b485d0a38d62d06c0639715b77;hp=64c7434129c15eb1bd630e67c33b330c7bb26b9b;hpb=e3a8032a070175ece08fc79c77312d5f2f59150e;p=picoclvr.git

diff --git a/world.py b/world.py
index 64c7434..b35a08e 100755
--- a/world.py
+++ b/world.py
@@ -61,6 +61,19 @@ class SignSTE(nn.Module):
         else:
             return s
 
+class DiscreteSampler2d(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def forward(self, x):
+        s = (x >= x.max(-3,keepdim=True).values).float()
+
+        if self.training:
+            u = x.softmax(dim=-3)
+            return s + u - u.detach()
+        else:
+            return s
+
 
 def loss_H(binary_logits, h_threshold=1):
     p = binary_logits.sigmoid().mean(0)
@@ -72,9 +85,9 @@ def loss_H(binary_logits, h_threshold=1):
 def train_encoder(
     train_input,
     test_input,
-    depth=2,
+    depth,
+    nb_bits_per_token,
     dim_hidden=48,
-    nb_bits_per_token=8,
     lambda_entropy=0.0,
     lr_start=1e-3,
     lr_end=1e-4,
@@ -159,7 +172,7 @@ def train_encoder(
         for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
             input = input.to(device)
             z = encoder(input)
-            zq = z if k < 2 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -182,7 +195,7 @@ def train_encoder(
         for input in tqdm.tqdm(test_input.split(batch_size), desc="vqae-test"):
             input = input.to(device)
             z = encoder(input)
-            zq = z if k < 1 else quantizer(z)
+            zq = quantizer(z)
             output = decoder(zq)
 
             output = output.reshape(
@@ -353,6 +366,8 @@ def create_data_and_processors(
     nb_test_samples,
     mode,
     nb_steps,
+    depth=3,
+    nb_bits_per_token=8,
     nb_epochs=10,
     device=torch.device("cpu"),
     device_storage=torch.device("cpu"),
@@ -375,6 +390,8 @@ def create_data_and_processors(
     encoder, quantizer, decoder = train_encoder(
         train_input,
         test_input,
+        depth=depth,
+        nb_bits_per_token=nb_bits_per_token,
         lambda_entropy=1.0,
         nb_epochs=nb_epochs,
         logger=logger,
@@ -440,7 +457,7 @@ if __name__ == "__main__":
         seq2frame,
     ) = create_data_and_processors(
         25000, 1000,
-        nb_epochs=10,
+        nb_epochs=5,
         mode="first_last",
         nb_steps=20,
     )
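
Note on the diff above: DiscreteSampler2d follows the same straight-through pattern as the existing SignSTE module. In the forward pass it returns a hard per-pixel one-hot over the channel dimension (dim -3, the per-pixel argmax), while the s + u - u.detach() trick makes the backward pass use the gradient of a channel-wise softmax, so the hard selection stays usable during training. Below is a minimal sketch of that behaviour; the module body is copied from the diff, while the tensor shapes and the check_ste() harness are illustrative assumptions, not code from the repository.

import torch
from torch import nn


class DiscreteSampler2d(nn.Module):
    # Copied from the diff above so the sketch is self-contained.
    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Hard selection: 1.0 where a channel is the per-pixel maximum, 0.0 elsewhere.
        s = (x >= x.max(-3, keepdim=True).values).float()

        if self.training:
            # Straight-through: the value is s, the gradient is that of the softmax.
            u = x.softmax(dim=-3)
            return s + u - u.detach()
        else:
            return s


def check_ste():
    # Illustrative shapes: batch 2, 5 channels ("tokens"), 4x4 spatial grid.
    x = torch.randn(2, 5, 4, 4, requires_grad=True)
    sampler = DiscreteSampler2d()

    sampler.train()
    y = sampler(x)
    # Exactly one channel selected per pixel (ties are essentially impossible with random floats).
    assert torch.all(y.sum(dim=-3) == 1.0)

    # A toy loss that prefers higher channel indices; its gradient reaches x
    # through the softmax term even though the forward value of y is hard.
    weights = torch.arange(5.0).view(1, 5, 1, 1)
    (y * weights).sum().backward()
    assert x.grad is not None and x.grad.abs().sum() > 0

    sampler.eval()
    with torch.no_grad():
        # In eval mode only the hard one-hot output is returned.
        assert set(sampler(x).unique().tolist()) <= {0.0, 1.0}


if __name__ == "__main__":
    check_ste()
    print("straight-through check passed")

The remaining hunks are plumbing for the same feature: depth and nb_bits_per_token become required arguments of train_encoder, are exposed (with defaults 3 and 8) on create_data_and_processors, and the temporary "skip the quantizer for the first epochs" logic (zq = z if k < ... else quantizer(z)) is removed so the quantizer is always applied.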