Make the random regression order optional via --random_regression_order

random_order() becomes generation_order(): it returns the previous
fixed-prefix random permutation only when --random_regression_order is
set, and a plain left-to-right order otherwise. All call sites are
updated accordingly, and the early exit(0) after producing results for
an already-finished run is removed.
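As a standalone illustration (not part of the patch), here are the two
orderings the new flag selects between, mirroring the logic of
generation_order(); the batch x, fixed_len value, and variable names
below are made up for the example:

    import torch

    x = torch.zeros(2, 6, dtype=torch.long)  # hypothetical batch: 2 sequences of length 6
    fixed_len = 3                             # prefix (the maze cells) that must stay in place

    # Flag unset: plain left-to-right order, 0..N-1 for every sequence.
    left_to_right = torch.arange(x.size(1)).unsqueeze(0).expand(x.size(0), -1)

    # Flag set: random sort keys, except that the first fixed_len positions
    # get keys in [-2, -1] so they sort first and keep their relative order;
    # the remaining positions are visited in a random permutation per row.
    keys = torch.rand(x.size())
    keys[:, :fixed_len] = torch.linspace(-2, -1, fixed_len)
    random_regression = keys.sort(1).indices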
diff --git a/beaver.py b/beaver.py
index 5a15aee..8fe9a9b 100755
--- a/beaver.py
+++ b/beaver.py
@@ -64,6 +64,8 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
+parser.add_argument("--random_regression_order", action="store_true", default=False)
+
 parser.add_argument("--no_checkpoint", action="store_true", default=False)
 
 parser.add_argument("--overwrite_results", action="store_true", default=False)
@@ -129,10 +131,15 @@ for n in vars(args):
 ######################################################################
 
 
-def random_order(result, fixed_len):
-    order = torch.rand(result.size(), device=result.device)
-    order[:, :fixed_len] = torch.linspace(-2, -1, fixed_len, device=order.device)
-    return order.sort(1).indices
+def generation_order(x, fixed_len):
+    if args.random_regression_order:
+        order = torch.rand(x.size(), device=x.device)
+        order[:, :fixed_len] = torch.linspace(-2, -1, fixed_len, device=order.device)
+        return order.sort(1).indices
+    else:
+        return (
+            torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
+        )
 
 
 def shuffle(x, order, reorder=False):
@@ -179,7 +186,7 @@ def compute_perplexity(model, split="train"):
 
         for input in task.batches(split=split):
             input = input.to(device)
-            order = random_order(input, task.height * task.width)
+            order = generation_order(input, task.height * task.width)
             input = shuffle(input, order)
             output = model(mygpt.BracketedSequence(input), order=order).x
             loss = F.cross_entropy(output.transpose(1, 2), input)
@@ -245,7 +252,7 @@ def oneshot(gpt, task):
 
         acc_train_loss, nb_train_samples = 0, 0
         for mazes, policies in task.policy_batches(split="train"):
-            order = random_order(mazes, task.height * task.width)
+            order = generation_order(mazes, task.height * task.width)
             x = shuffle(mazes, order)
             x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
             output_gpt = shuffle(x, order, reorder=True)
@@ -261,7 +268,7 @@ def oneshot(gpt, task):
 
         acc_test_loss, nb_test_samples = 0, 0
         for mazes, policies in task.policy_batches(split="test"):
-            order = random_order(mazes, task.height * task.width)
+            order = generation_order(mazes, task.height * task.width)
             x = shuffle(mazes, order)
             x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
             output_gpt = shuffle(x, order, reorder=True)
@@ -277,7 +284,7 @@ def oneshot(gpt, task):
         # -------------------
         mazes = task.test_input[:32, : task.height * task.width]
         policies = task.test_policies[:32]
-        order = random_order(mazes, task.height * task.width)
+        order = generation_order(mazes, task.height * task.width)
         x = shuffle(mazes, order)
         x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
         output_gpt = shuffle(x, order, reorder=True)
@@ -419,7 +426,7 @@ class TaskMaze(Task):
             ar_mask = result.new_zeros(result.size())
             ar_mask[:, self.height * self.width :] = 1
             result *= 1 - ar_mask
-            order = random_order(result, self.height * self.width)
+            order = generation_order(result, self.height * self.width)
             masked_inplace_autoregression(
                 model, self.batch_size, result, ar_mask, order=order
             )
@@ -579,8 +586,6 @@ if nb_epochs_finished >= args.nb_epochs:
 
     task.produce_results(n_epoch, model)
 
-    exit(0)
-
 ##############################
 
 for n_epoch in range(nb_epochs_finished, args.nb_epochs):
@@ -603,7 +608,7 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
 
     for input in task.batches(split="train"):
         input = input.to(device)
-        order = random_order(input, task.height * task.width)
+        order = generation_order(input, task.height * task.width)
         input = shuffle(input, order)
         output = model(mygpt.BracketedSequence(input), order=order).x
         loss = F.cross_entropy(output.transpose(1, 2), input)