Update: compute the loss only on the second half of each sequence when args.noncausal_prompt is set
author     François Fleuret <francois@fleuret.org>
           Fri, 24 Mar 2023 21:27:51 +0000 (22:27 +0100)
committer  François Fleuret <francois@fleuret.org>
           Fri, 24 Mar 2023 21:27:51 +0000 (22:27 +0100)
diff --git a/beaver.py b/beaver.py
index 5ee468e..f850f69 100755
--- a/beaver.py
+++ b/beaver.py
@@ -205,7 +205,11 @@ def compute_perplexity(model, task, fixed_len, split="train"):
         for input in task.batches(split=split):
             input = input.to(device)
             output = eval_mygpt(model, input, fixed_len=fixed_len)
-            loss = F.cross_entropy(output.transpose(1, 2), input)
+            if args.noncausal_prompt:
+                t = input.size(1) // 2
+                loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
+            else:
+                loss = F.cross_entropy(output.transpose(1, 2), input)
             acc_loss += loss.item() * input.size(0)
             nb_samples += input.size(0)
 
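A minimal sketch of what the new branch computes, shown outside the diff with hypothetical shapes (N, T, V are illustrative values, not taken from beaver.py): judging by the flag name, with args.noncausal_prompt the first half of each sequence is presumably a prompt the model attends bidirectionally, so those positions are not genuine next-token predictions and are excluded from the cross-entropy.

    import torch
    import torch.nn.functional as F

    N, T, V = 2, 8, 16                # hypothetical batch size, length, vocabulary
    output = torch.randn(N, T, V)     # logits, one row of V scores per position
    input = torch.randint(V, (N, T))  # target token ids
    t = T // 2                        # prompt / completion boundary

    # F.cross_entropy expects class scores on dim 1, hence the transpose of
    # the (N, T - t, V) slice into (N, V, T - t); only the second half of
    # each sequence contributes to the loss.
    loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
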
@@ -648,7 +652,11 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
         output = eval_mygpt(
             model, input, mode=args.oneshot_input, fixed_len=task.height * task.width
         )
-        loss = F.cross_entropy(output.transpose(1, 2), input)
+        if args.noncausal_prompt:
+            t = input.size(1) // 2
+            loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
+        else:
+            loss = F.cross_entropy(output.transpose(1, 2), input)
         acc_train_loss += loss.item() * input.size(0)
         nb_train_samples += input.size(0)
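
Both hunks add the same four-line conditional. A possible follow-up, given only as a sketch (the helper name masked_cross_entropy is hypothetical, not part of beaver.py), would be to factor it out so the perplexity and training paths cannot drift apart:

    import torch.nn.functional as F

    def masked_cross_entropy(output, input, noncausal_prompt):
        # Score only the completion half when the prompt half is read
        # non-causally; otherwise score every position as before.
        if noncausal_prompt:
            t = input.size(1) // 2
            return F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
        return F.cross_entropy(output.transpose(1, 2), input)

Both call sites would then reduce to loss = masked_cross_entropy(output, input, args.noncausal_prompt).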