Add a mode argument ('standard', 'head', 'deep') to MyGPT.forward, replacing the with_readout flag, and use it in beaver.py's one_shot probe.
author     François Fleuret <francois@fleuret.org>
           Sun, 19 Mar 2023 18:17:50 +0000 (19:17 +0100)
committer  François Fleuret <francois@fleuret.org>
           Sun, 19 Mar 2023 18:17:50 +0000 (19:17 +0100)
beaver.py
mygpt.py

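In short, MyGPT.forward previously took a boolean with_readout; it now takes mode, one of 'standard' (trunk then readout), 'head' (trunk only), or 'deep' (per-module trunk activations, concatenated). A hedged usage sketch follows; the MyGPT constructor arguments are assumptions based on other revisions of mygpt.py, only the mode= keyword comes from this commit:

    # Sketch only: the constructor arguments are assumed, mode= is what this commit adds.
    import torch
    import mygpt

    gpt = mygpt.MyGPT(
        vocabulary_size=10, dim_model=64, dim_keys=16, dim_hidden=128,
        nb_heads=4, nb_blocks=3, causal=True,
    )
    gpt.eval()

    input = torch.randint(10, (2, 20))

    logits = gpt(mygpt.BracketedSequence(input), mode='standard').x  # (2, 20, 10)
    head = gpt(mygpt.BracketedSequence(input), mode='head').x        # (2, 20, 64)
    deep = gpt(mygpt.BracketedSequence(input), mode='deep').x        # (2, 20, 64 * 3 * 2)
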
index afec61d..de38ff4 100755 (executable)
--- a/beaver.py
+++ b/beaver.py
@@ -172,9 +172,10 @@ def compute_perplexity(model, split="train"):
 def one_shot(gpt, task):
     t = gpt.training
     gpt.eval()
-
+    mode = 'head'  # 'head': probe the last trunk activation; 'deep': probe all of them
+    dim_in = args.dim_model * (args.nb_blocks * 2 if mode == 'deep' else 1)  # two trunk modules per block
     model = nn.Sequential(
-        nn.Linear(args.dim_model, args.dim_model),
+        nn.Linear(dim_in, args.dim_model),
         nn.ReLU(),
         nn.Linear(args.dim_model, args.dim_model),
         nn.ReLU(),
@@ -187,7 +188,7 @@ def one_shot(gpt, task):
 
         acc_train_loss, nb_train_samples = 0, 0
         for input, targets in task.policy_batches(split="train"):
-            output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+            output_gpt = gpt(mygpt.BracketedSequence(input), mode=mode).x
             output = model(output_gpt)
             targets = targets * (input.unsqueeze(-1) == maze.v_empty)
             output = output * (input.unsqueeze(-1) == maze.v_empty)
@@ -205,7 +206,7 @@ def one_shot(gpt, task):
 
         acc_test_loss, nb_test_samples = 0, 0
         for input, targets in task.policy_batches(split="test"):
-            output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+            output_gpt = gpt(mygpt.BracketedSequence(input), mode=mode).x
             output = model(output_gpt)
             targets = targets * (input.unsqueeze(-1) == maze.v_empty)
             output = output * (input.unsqueeze(-1) == maze.v_empty)
@@ -224,7 +225,7 @@ def one_shot(gpt, task):
         # -------------------
         input = task.test_input[:32, : task.height * task.width]
         targets = task.test_policies[:32]
-        output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+        output_gpt = gpt(mygpt.BracketedSequence(input), mode=mode).x
         output = model(output_gpt)
         # losses = (-output.log_softmax(-1) * targets + targets.xlogy(targets)).sum(-1)
         # losses = losses * (input == maze.v_empty)
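Not in the patch: a minimal, self-contained sketch of why dim_in depends on the mode, with made-up sizes. In 'deep' mode the probe sees the concatenation of the activations after every trunk module, and the trunk holds two modules per block, hence the nb_blocks * 2 factor:

    # Probe-head input width per mode; dim_model and nb_blocks are hypothetical.
    import torch
    from torch import nn

    dim_model, nb_blocks = 512, 12

    for mode in ('head', 'deep'):
        dim_in = dim_model * (nb_blocks * 2 if mode == 'deep' else 1)
        head = nn.Sequential(
            nn.Linear(dim_in, dim_model),
            nn.ReLU(),
            nn.Linear(dim_model, dim_model),
        )
        x = torch.randn(2, 49, dim_in)  # stand-in for output_gpt
        print(mode, head(x).shape)      # both print torch.Size([2, 49, 512])
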
index bd79676..a1db2e3 100755 (executable)
--- a/mygpt.py
+++ b/mygpt.py
@@ -246,12 +246,22 @@ class MyGPT(nn.Module):
                     m.bias.zero_()
                     m.weight.fill_(1.0)
 
-    def forward(self, bs, with_readout=True):
+    def forward(self, bs, mode='standard'):
         bs.x = F.pad(bs.x, (1, -1))
         bs = self.embedding(bs)
-        bs = self.trunk(bs)
-        if with_readout:
+        if mode == 'standard':  # full pass: trunk, then readout
+            bs = self.trunk(bs)
             bs = self.readout(bs)
+        elif mode == 'head':  # last trunk activation, no readout
+            bs = self.trunk(bs)
+        elif mode == 'deep':  # activations after every trunk module, concatenated
+            r = []
+            for layer in self.trunk:
+                bs = layer(bs)
+                r += [bs.slice()]
+            bs = BracketedSequence(torch.cat(r, -1))
+        else:
+            raise ValueError(f"Unknown mode {mode}")
         return bs
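To see what the new 'deep' branch computes, a toy stand-in (not the actual MyGPT trunk, sizes are made up): keep the activation after every module and concatenate along the feature dimension, so the output width is len(trunk) times the model width:

    # Toy version of the 'deep' branch, with plain tensors instead of BracketedSequence.
    import torch
    from torch import nn

    trunk = nn.ModuleList(nn.Linear(8, 8) for _ in range(6))  # stands in for nb_blocks * 2 modules
    x = torch.randn(2, 5, 8)

    r = []
    for layer in trunk:
        x = layer(x)
        r.append(x)  # one slice per module, as bs.slice() does in the patch

    deep = torch.cat(r, -1)
    print(deep.shape)  # torch.Size([2, 5, 48]) == 8 * 6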