#!/usr/bin/env python
+# Any copyright is dedicated to the Public Domain.
+# https://creativecommons.org/publicdomain/zero/1.0/
+
+# Written by Francois Fleuret <francois@fleuret.org>
+
import math, sys, tqdm
import torch, torchvision
from torch import nn
else:
return s
+
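+# DiscreteSampler2d: straight-through sampler over the channel dimension.
+# The forward pass emits a hard 0/1 map (1 where a channel attains the
+# per-pixel maximum); during training a channel-wise softmax is computed,
+# presumably to provide a straight-through gradient path (the rest of the
+# method is elided in this excerpt).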
class DiscreteSampler2d(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
- s = (x >= x.max(-3,keepdim=True).values).float()
+ s = (x >= x.max(-3, keepdim=True).values).float()
if self.training:
u = x.softmax(dim=-3)
logger=None,
device=torch.device("cpu"),
):
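+ # Body of what is presumably the VQ auto-encoder training helper: it
+ # normalizes the input frames, builds an encoder / quantizer / decoder,
+ # trains them, and returns the three modules (see the return below).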
- if logger is None:
- logger = lambda s: print(s)
-
mu, std = train_input.float().mean(), train_input.float().std()
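+ # encoder_core presumably builds the convolutional trunk of the encoder
+ # (its body is elided in this excerpt).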
def encoder_core(depth, dim):
nb_parameters = sum(p.numel() for p in model.parameters())
- logger(f"nb_parameters {nb_parameters}")
+ logger(f"vqae nb_parameters {nb_parameters}")
model.to(device)
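+ # Per-sample averages of the losses accumulated over the epoch; the
+ # training / evaluation loop itself is elided in this excerpt.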
train_loss = acc_train_loss / train_input.size(0)
test_loss = acc_test_loss / test_input.size(0)
- logger(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+ logger(f"vqae train {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
sys.stdout.flush()
return encoder, quantizer, decoder
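+ # In "first_last" mode, steps is a per-time-step boolean mask, presumably
+ # keeping only the first and the last frame of each episode.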
if mode == "first_last":
steps = [True] + [False] * (nb_steps + 1) + [True]
+ if logger is None:
+ logger = lambda s: print(s)
+
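+ # Generate the training episodes (frames and the corresponding action
+ # sequences) and move them to device_storage.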
train_input, train_actions = generate_episodes(nb_train_samples, steps)
train_input, train_actions = train_input.to(device_storage), train_actions.to(
device_storage
)
pow2 = (2 ** torch.arange(z.size(1), device=device))[None, None, :]
z_h, z_w = z.size(2), z.size(3)
+ logger(f"vqae input {train_input[0].size()} output {z[0].size()}")
+
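+ # frame2seq presumably encodes batches of frames into sequences of discrete
+ # token ids: each cell of the z_h x z_w latent grid becomes one integer,
+ # obtained by packing the quantizer's bit planes with the pow2 weights above.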
def frame2seq(input, batch_size=25):
seq = []
p = pow2.to(device)
frame2seq,
seq2frame,
) = create_data_and_processors(
- 25000, 1000,
+ 250,
+ 1000,
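+ # First two positional arguments: presumably nb_train_samples (reduced from
+ # 25000 to 250, likely for a quick test) and nb_test_samples.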
nb_epochs=5,
mode="first_last",
nb_steps=20,