diff --git a/world.py b/world.py
index b077987..c3eb101 100755
--- a/world.py
+++ b/world.py
@@ -146,7 +146,7 @@ def train_encoder(
     for input in tqdm.tqdm(train_input.split(batch_size), desc="vqae-train"):
         z = encoder(input)
-        zq = z if k < 1 else quantizer(z)
+        zq = z if k < 2 else quantizer(z)
         output = decoder(zq)
 
         output = output.reshape(
@@ -322,19 +322,24 @@ def generate_episode(steps, size=64):
 
 def generate_episodes(nb, steps):
-    all_frames = []
+    all_frames, all_actions = [], []
 
     for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
         frames, actions = generate_episode(steps)
         all_frames += frames
-    return torch.cat(all_frames, 0).contiguous()
+        all_actions += [actions]
+    return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
 
+
+def create_data_and_processors(
+    nb_train_samples, nb_test_samples, mode, nb_steps, nb_epochs=10
+):
+    assert mode in ["first_last"]
 
-def create_data_and_processors(nb_train_samples, nb_test_samples, nb_epochs=10):
-    steps = [True] + [False] * 30 + [True]
-    train_input = generate_episodes(nb_train_samples, steps)
-    test_input = generate_episodes(nb_test_samples, steps)
+    if mode == "first_last":
+        steps = [True] + [False] * (nb_steps + 1) + [True]
 
-    print(f"{train_input.size()=} {test_input.size()=}")
+    train_input, train_actions = generate_episodes(nb_train_samples, steps)
+    test_input, test_actions = generate_episodes(nb_test_samples, steps)
 
     encoder, quantizer, decoder = train_encoder(
         train_input, test_input, nb_epochs=nb_epochs
@@ -347,35 +352,61 @@ def create_data_and_processors(nb_train_samples, nb_test_samples, nb_epochs=10):
     pow2 = (2 ** torch.arange(z.size(1), device=z.device))[None, None, :]
     z_h, z_w = z.size(2), z.size(3)
 
-    def frame2seq(x):
-        z = encoder(x)
-        ze_bool = (quantizer(z) >= 0).long()
-        seq = (
-            ze_bool.permute(0, 2, 3, 1).reshape(ze_bool.size(0), -1, ze_bool.size(1))
-            * pow2
-        ).sum(-1)
-        return seq
-
-    def seq2frame(seq, T=1e-2):
-        zd_bool = (seq[:, :, None] // pow2) % 2
-        zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
-        logits = decoder(zd_bool * 2.0 - 1.0)
-        logits = logits.reshape(
-            logits.size(0), -1, 3, logits.size(2), logits.size(3)
-        ).permute(0, 2, 3, 4, 1)
-        results = torch.distributions.categorical.Categorical(
-            logits=logits / T
-        ).sample()
-        return results
-
-    return train_input, test_input, frame2seq, seq2frame
+    def frame2seq(input, batch_size=25):
+        seq = []
+
+        for x in input.split(batch_size):
+            z = encoder(x)
+            ze_bool = (quantizer(z) >= 0).long()
+            output = (
+                ze_bool.permute(0, 2, 3, 1).reshape(
+                    ze_bool.size(0), -1, ze_bool.size(1)
+                )
+                * pow2
+            ).sum(-1)
+
+            seq.append(output)
+
+        return torch.cat(seq, dim=0)
+
+    def seq2frame(input, batch_size=25, T=1e-2):
+        frames = []
+
+        for seq in input.split(batch_size):
+            zd_bool = (seq[:, :, None] // pow2) % 2
+            zd_bool = zd_bool.reshape(zd_bool.size(0), z_h, z_w, -1).permute(0, 3, 1, 2)
+            logits = decoder(zd_bool * 2.0 - 1.0)
+            logits = logits.reshape(
+                logits.size(0), -1, 3, logits.size(2), logits.size(3)
+            ).permute(0, 2, 3, 4, 1)
+            output = torch.distributions.categorical.Categorical(
+                logits=logits / T
+            ).sample()
+
+            frames.append(output)
+
+        return torch.cat(frames, dim=0)
+
+    return train_input, train_actions, test_input, test_actions, frame2seq, seq2frame
 
 ######################################################################
 
 if __name__ == "__main__":
-    train_input, test_input, frame2seq, seq2frame = create_data_and_processors(
-        10000, 1000
+    (
+        train_input,
+        train_actions,
+        test_input,
+        test_actions,
+        frame2seq,
+        seq2frame,
+    ) = create_data_and_processors(
+        # 10000, 1000,
+        100,
+        100,
+        nb_epochs=2,
+        mode="first_last",
+        nb_steps=20,
    )
 
     input = test_input[:64]
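
Below is a minimal usage sketch of the interface after this patch, not part of the diff itself. It assumes world.py is importable as a module; the sample counts and the names seq and recon are illustrative only, and the shapes follow from the code above.

import world

(
    train_input,
    train_actions,
    test_input,
    test_actions,
    frame2seq,
    seq2frame,
) = world.create_data_and_processors(
    100, 100, mode="first_last", nb_steps=20, nb_epochs=2
)

# frame2seq packs the sign pattern of the quantized latent into one
# integer token per spatial position; seq2frame unpacks the bits and
# samples pixel values from the decoder logits at temperature T.
seq = frame2seq(test_input[:64])  # (64, z_h * z_w) integer tokens
recon = seq2frame(seq)            # (64, 3, height, width) sampled frames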