Update one_shot(): restrict the policy loss to empty maze cells; drop debug prints
diff --git a/beaver.py b/beaver.py
index 2cc2140..c3b7e09 100755
--- a/beaver.py
+++ b/beaver.py
@@ -173,15 +173,14 @@ def one_shot(gpt, task):
     t = gpt.training
     gpt.eval()
     model = nn.Sequential(
+        nn.Linear(args.dim_model, args.dim_model),
+        nn.ReLU(),
         nn.Linear(args.dim_model, args.dim_model),
         nn.ReLU(),
         nn.Linear(args.dim_model, 4),
     ).to(device)
 
-    print(f"{args.nb_epochs=}")
-
     for n_epoch in range(args.nb_epochs):
-        print(f"{n_epoch=}")
         learning_rate = learning_rate_schedule[n_epoch]
         optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
 
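The first hunk deepens the readout head from one hidden layer to two and removes the per-epoch debug prints. Note that a fresh Adam instance is created at every epoch to apply learning_rate_schedule, which also resets Adam's running moment estimates each time. A minimal sketch of an alternative that keeps a single optimizer and drives the same schedule through LambdaLR (dim_model and schedule are stand-ins for args.dim_model and learning_rate_schedule, not names from the patch):

    import torch
    from torch import nn

    dim_model = 512                  # stand-in for args.dim_model
    schedule = [1e-3, 1e-3, 1e-4]    # stand-in for learning_rate_schedule

    # Same two-hidden-layer readout head as in the hunk above, mapping the
    # GPT trunk's features to 4 logits (presumably one per move direction).
    head = nn.Sequential(
        nn.Linear(dim_model, dim_model),
        nn.ReLU(),
        nn.Linear(dim_model, dim_model),
        nn.ReLU(),
        nn.Linear(dim_model, 4),
    )

    # Base lr of 1.0, so LambdaLR's multiplicative factor *is* the schedule.
    optimizer = torch.optim.Adam(head.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda e: schedule[min(e, len(schedule) - 1)]
    )
    # ... run one epoch of updates, then:
    # scheduler.step()

Re-creating Adam per epoch, as the committed code does, is simpler but discards the first- and second-moment statistics at every schedule step; the sketch above keeps them.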
@@ -189,9 +188,12 @@ def one_shot(gpt, task):
         for input, targets in task.policy_batches(split="train"):
             output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
             output = model(output_gpt)
+            targets = targets * (input.unsqueeze(-1) == maze.v_empty)
+            output = output * (input.unsqueeze(-1) == maze.v_empty)
             loss = (
-                -(output.log_softmax(-1) * targets).sum(-1).mean()
-                + targets.xlogy(targets).sum(-1).mean()
+                -(output.log_softmax(-1) * targets).sum()
+                / (input == maze.v_empty).sum()
+                + targets.xlogy(targets).sum() / (input == maze.v_empty).sum()
             )
             acc_train_loss += loss.item() * input.size(0)
             nb_train_samples += input.size(0)
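The loss change in this hunk (mirrored for the test split below) replaces the per-position mean with a sum over all positions, masked to the empty maze cells and normalized by their count. Together with the targets.xlogy(targets) term, which contributes sum p log p, the objective reads as the KL divergence KL(targets || softmax(output)) averaged over empty cells. A self-contained sketch of that reading; the helper name and shapes are assumptions, not from the patch:

    import torch

    def masked_policy_kl(output, targets, mask):
        # output:  (N, L, 4) logits; targets: (N, L, 4) target policies;
        # mask:    (N, L) bool, True on empty cells (input == maze.v_empty).
        m = mask.unsqueeze(-1)
        targets = targets * m
        output = output * m  # as in the patch: logits on masked cells are
                             # zeroed rather than excluded from the softmax
        ce = -(output.log_softmax(-1) * targets).sum()    # -sum p log q
        neg_ent = targets.xlogy(targets).sum()            #  sum p log p
        return (ce + neg_ent) / mask.sum()                # mean KL per empty cell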
@@ -204,9 +206,12 @@ def one_shot(gpt, task):
         for input, targets in task.policy_batches(split="test"):
             output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
             output = model(output_gpt)
+            targets = targets * (input.unsqueeze(-1) == maze.v_empty)
+            output = output * (input.unsqueeze(-1) == maze.v_empty)
             loss = (
-                -(output.log_softmax(-1) * targets).sum(-1).mean()
-                + targets.xlogy(targets).sum(-1).mean()
+                -(output.log_softmax(-1) * targets).sum()
+                / (input == maze.v_empty).sum()
+                + targets.xlogy(targets).sum() / (input == maze.v_empty).sum()
             )
             acc_test_loss += loss.item() * input.size(0)
             nb_test_samples += input.size(0)
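The test-split hunk applies the same masking and normalization as the training one, minus the gradient step; with a helper like the sketch above, both loop bodies would reduce to something like:

    mask = input == maze.v_empty
    loss = masked_policy_kl(model(output_gpt), targets, mask)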
@@ -216,13 +221,13 @@ def one_shot(gpt, task):
         )
 
         # -------------------
-        input, targets = next(task.policy_batches(split="test"))
+        input = task.test_input[:32, : task.height * task.width]
+        targets = task.test_policies[:32]
         output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
         output = model(output_gpt)
         losses = (-output.log_softmax(-1) * targets + targets.xlogy(targets)).sum(-1)
+        losses = losses * (input == maze.v_empty)
         losses = losses / losses.max()
-        print(f"{input.size()=} {losses.size()=} {losses.min()=} {losses.max()=}")
-        losses = losses * (input == 0)
         losses = losses.reshape(-1, args.maze_height, args.maze_width)
         input = input.reshape(-1, args.maze_height, args.maze_width)
         maze.save_image(
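The last hunk swaps the sampled policy_batches batch for a fixed slice of the first 32 test mazes, masks the per-cell losses to empty cells before normalizing to [0, 1] (previously the mask, against a hard-coded 0 rather than maze.v_empty, was applied after normalization), and reshapes losses and input to the maze grid for rendering. The maze.save_image(...) call is truncated here; a generic matplotlib sketch of the same idea follows (function name, path, and rendering choices are all assumptions, not the repo's API):

    import matplotlib.pyplot as plt

    def save_loss_heatmap(losses, path="loss_heatmap.png"):
        # losses: (B, H, W) tensor of per-cell losses in [0, 1],
        # already masked to empty cells; one panel per maze.
        fig, axes = plt.subplots(1, losses.size(0), squeeze=False)
        for ax, grid in zip(axes[0], losses):
            ax.imshow(grid.detach().cpu().numpy(), vmin=0.0, vmax=1.0)
            ax.axis("off")
        fig.savefig(path, bbox_inches="tight")
        plt.close(fig)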