Update.
index c3eb101..fb8609d 100755
--- a/world.py
+++ b/world.py
@@ -65,15 +65,19 @@ class SignSTE(nn.Module):
 def train_encoder(
     train_input,
     test_input,
-    depth=3,
+    depth=2,
     dim_hidden=48,
     nb_bits_per_token=8,
     lr_start=1e-3,
     lr_end=1e-4,
     nb_epochs=10,
     batch_size=25,
+    logger=None,
     device=torch.device("cpu"),
 ):
+    if logger is None:
+        logger = lambda s: print(s)
+
     mu, std = train_input.float().mean(), train_input.float().std()
 
     def encoder_core(depth, dim):
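
The new logger parameter defaults to None and falls back to print, so existing callers keep working while calling code can inject its own log function. A minimal sketch of a compatible callable; the helper name make_file_logger, the file path, and the time stamp are illustrative assumptions, not part of this commit:

import time

def make_file_logger(path="train.log"):
    # Any callable taking a single string works as a logger for train_encoder.
    log_file = open(path, "a")

    def log(s):
        log_file.write(time.strftime("[%Y-%m-%d %H:%M:%S] ") + s + "\n")
        log_file.flush()
        print(s)

    return log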
@@ -132,7 +136,7 @@ def train_encoder(
 
     nb_parameters = sum(p.numel() for p in model.parameters())
 
-    print(f"nb_parameters {nb_parameters}")
+    logger(f"nb_parameters {nb_parameters}")
 
     model.to(device)
 
@@ -179,7 +183,7 @@ def train_encoder(
         train_loss = acc_train_loss / train_input.size(0)
         test_loss = acc_test_loss / test_input.size(0)
 
-        print(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
+        logger(f"train_ae {k} lr {lr} train_loss {train_loss} test_loss {test_loss}")
         sys.stdout.flush()
 
     return encoder, quantizer, decoder
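
With logger and device exposed, a caller can redirect the per-epoch messages and run the autoencoder training on GPU. A hedged usage sketch; train_frames / test_frames are placeholder tensors and the CUDA fallback is an assumption:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

encoder, quantizer, decoder = train_encoder(
    train_frames,            # placeholder: frames from generate_episodes
    test_frames,             # placeholder: held-out frames
    nb_epochs=10,
    logger=make_file_logger("world_ae.log"),  # sketch from above
    device=device,
)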
@@ -326,12 +330,18 @@ def generate_episodes(nb, steps):
     for n in tqdm.tqdm(range(nb), dynamic_ncols=True, desc="world-data"):
         frames, actions = generate_episode(steps)
         all_frames += frames
-        all_actions += [actions]
+        all_actions += [actions[None, :]]
     return torch.cat(all_frames, 0).contiguous(), torch.cat(all_actions, 0)
 
 
 def create_data_and_processors(
-    nb_train_samples, nb_test_samples, mode, nb_steps, nb_epochs=10
+    nb_train_samples,
+    nb_test_samples,
+    mode,
+    nb_steps,
+    nb_epochs=10,
+    device=torch.device("cpu"),
+    logger=None,
 ):
     assert mode in ["first_last"]
 
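
The all_actions change above adds a leading dimension before concatenation: assuming generate_episode returns a 1-D tensor of per-step actions, actions[None, :] lets torch.cat stack one row per episode instead of flattening every episode into a single long vector. A small shape check under that assumption:

import torch

a1 = torch.tensor([0, 2, 1])   # episode 1: 3 actions
a2 = torch.tensor([3, 1, 0])   # episode 2: 3 actions

flat = torch.cat([a1, a2], 0)                        # old behaviour, shape (6,)
stacked = torch.cat([a1[None, :], a2[None, :]], 0)   # new behaviour, shape (2, 3)

assert flat.shape == (6,)
assert stacked.shape == (2, 3)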
@@ -339,10 +349,12 @@ def create_data_and_processors(
         steps = [True] + [False] * (nb_steps + 1) + [True]
 
     train_input, train_actions = generate_episodes(nb_train_samples, steps)
+    train_input, train_actions = train_input.to(device), train_actions.to(device)
     test_input, test_actions = generate_episodes(nb_test_samples, steps)
+    test_input, test_actions = test_input.to(device), test_actions.to(device)
 
     encoder, quantizer, decoder = train_encoder(
-        train_input, test_input, nb_epochs=nb_epochs
+        train_input, test_input, nb_epochs=nb_epochs, logger=logger, device=device
     )
     encoder.train(False)
     quantizer.train(False)
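
End to end, the expanded signature lets a caller pick the device once and reuse the same logger for data generation and encoder training. A usage sketch; the sample counts, nb_steps value, and logger are illustrative assumptions, and the result binding is hypothetical since this hunk does not show the return values:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

result = create_data_and_processors(   # hypothetical binding of the result
    nb_train_samples=25000,
    nb_test_samples=1000,
    mode="first_last",
    nb_steps=2,
    nb_epochs=10,
    device=device,
    logger=make_file_logger("world.log"),
)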