Oops
diff --git a/main.py b/main.py
index 1d52b6d..9437136 100755
--- a/main.py
+++ b/main.py
@@ -5,7 +5,7 @@
 
 # Written by Francois Fleuret <francois@fleuret.org>
 
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime
 
 import torch, torchvision
 from torch import nn
@@ -33,7 +33,7 @@ parser.add_argument(
     "--task",
     type=str,
     default="twotargets",
-    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
+    help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
 )
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
@@ -86,6 +86,13 @@ parser.add_argument("--overwrite_results", action="store_true", default=False)
 
 parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
 
+##############################
+# filetask
+
+parser.add_argument("--filetask_train_file", type=str, default=None)
+
+parser.add_argument("--filetask_test_file", type=str, default=None)
+
 ##############################
 # rpl options
 
@@ -104,6 +111,8 @@ parser.add_argument("--rpl_no_prog", action="store_true", default=False)
 
 parser.add_argument("--grid_size", type=int, default=6)
 
+parser.add_argument("--grid_fraction_play", type=float, default=0)
+
 ##############################
 # picoclvr options
 
@@ -166,6 +175,19 @@ parser.add_argument("--mixing_hard", action="store_true", default=False)
 
 parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
 
+##############################
+# greed options
+
+parser.add_argument("--greed_height", type=int, default=5)
+
+parser.add_argument("--greed_width", type=int, default=7)
+
+parser.add_argument("--greed_T", type=int, default=25)
+
+parser.add_argument("--greed_nb_walls", type=int, default=5)
+
+parser.add_argument("--greed_nb_coins", type=int, default=2)
+
 ######################################################################
 
 args = parser.parse_args()
@@ -178,6 +200,12 @@ if args.result_dir is None:
 ######################################################################
 
 default_task_args = {
+    "file": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 250000,
+        "nb_test_samples": 10000,
+    },
     "addition": {
         "model": "352M",
         "batch_size": 25,
@@ -257,9 +285,9 @@ default_task_args = {
         "nb_test_samples": 10000,
     },
     "memory": {
-        "model": "4M",
+        "model": "37M",
         "batch_size": 100,
-        "nb_train_samples": 5000,
+        "nb_train_samples": 25000,
         "nb_test_samples": 1000,
     },
     "mixing": {
@@ -274,6 +302,12 @@ default_task_args = {
         "nb_train_samples": 60000,
         "nb_test_samples": 10000,
     },
+    "greed": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 25000,
+        "nb_test_samples": 10000,
+    },
 }
 
 if args.task in default_task_args:
@@ -388,7 +422,22 @@ picoclvr_pruner_eval = (
 
 ######################################################################
 
-if args.task == "byheart":
+if args.task == "file":
+    assert (
+        args.filetask_train_file is not None and args.filetask_test_file is not None
+    ), "You have to specify the task train and test files"
+    task = tasks.TaskFromFile(
+        args.filetask_train_file,
+        args.filetask_test_file,
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        shuffle=True,
+        device=device,
+    )
+    args.max_percents_of_test_in_train = 0
+
+elif args.task == "byheart":
     task = tasks.SandBox(
         problem=problems.ProblemByHeart(),
         nb_train_samples=args.nb_train_samples,
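[Note: a hypothetical invocation of the new "file" task added above; the file names are placeholders, not from this commit:

    ./main.py --task file --filetask_train_file train.txt --filetask_test_file test.txt
]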
@@ -554,6 +603,7 @@ elif args.task == "grid":
         nb_test_samples=args.nb_test_samples,
         batch_size=args.batch_size,
         size=args.grid_size,
+        fraction_play=args.grid_fraction_play,
         logger=log_string,
         device=device,
     )
@@ -568,6 +618,20 @@ elif args.task == "qmlp":
         device=device,
     )
 
+elif args.task == "greed":
+    task = tasks.Greed(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        height=args.greed_height,
+        width=args.greed_width,
+        T=args.greed_T,
+        nb_walls=args.greed_nb_walls,
+        nb_coins=args.greed_nb_coins,
+        logger=log_string,
+        device=device,
+    )
+
 else:
     raise ValueError(f"Unknown task {args.task}")
 
@@ -639,12 +703,10 @@ if args.task == "expr" and args.expr_input_file is not None:
 
 ######################################################################
 
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
 # Compute the entropy of the training tokens
 
 token_count = 0
-for input in task.batches(split="train"):
+for input in task.batches(split="train", desc="train-entropy"):
     token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
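[Note: the loop above computes the empirical entropy of the training tokens. A minimal, self-contained sketch of the same computation, with a made-up toy batch:

    import torch
    import torch.nn.functional as F

    # toy batch of token sequences; vocabulary of 3 tokens
    tokens = torch.tensor([[0, 1, 1, 2], [2, 2, 0, 1]])
    counts = F.one_hot(tokens, num_classes=3).sum((0, 1)).float()
    probas = counts / counts.sum()
    # H = -sum_t p_t * log(p_t); torch.xlogy handles 0 * log(0) = 0
    entropy = -torch.xlogy(probas, probas).sum()
    print(entropy)  # entropy in nats, close to log(3) here
]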
@@ -666,9 +728,13 @@ if args.max_percents_of_test_in_train >= 0:
         yield s
 
     nb_test, nb_in_train = 0, 0
-    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
+    for test_subset in subsets_as_tuples(
+        task.batches(split="test", desc="test-check"), 25000
+    ):
         in_train = set()
-        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
+        for train_subset in subsets_as_tuples(
+            task.batches(split="train", desc="train-check"), 25000
+        ):
             in_train.update(test_subset.intersection(train_subset))
         nb_in_train += len(in_train)
         nb_test += len(test_subset)
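[Note: the nested loops above count how many test sequences also occur verbatim in the train set. A minimal sketch of the counting pattern, assuming sequences have already been converted to hashable tuples; the data below is made up:

    # two chunked "subsets" of sequences, as the generator would yield them
    test_subsets = [{(1, 2), (3, 4)}]
    train_subsets = [{(3, 4), (5, 6)}]

    nb_test, nb_in_train = 0, 0
    for test_subset in test_subsets:
        in_train = set()
        for train_subset in train_subsets:
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)
    print(nb_in_train, nb_test)  # 1 2 -> 50% of the test set appears in train
]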
@@ -709,7 +775,7 @@ log_string(f"learning_rate_schedule {learning_rate_schedule}")
 
 nb_samples_seen = 0
 
-if nb_epochs_finished >= nb_epochs:
+if nb_epochs_finished >= args.nb_epochs:
     task.produce_results(
         n_epoch=nb_epochs_finished,
         model=model,
@@ -718,7 +784,9 @@ if nb_epochs_finished >= nb_epochs:
         deterministic_synthesis=args.deterministic_synthesis,
     )
 
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+time_pred_result = None
+
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
     learning_rate = learning_rate_schedule[n_epoch]
 
     log_string(f"learning_rate {learning_rate}")
@@ -776,6 +844,13 @@ for n_epoch in range(nb_epochs_finished, nb_epochs):
             deterministic_synthesis=args.deterministic_synthesis,
         )
 
+        time_current_result = datetime.datetime.now()
+        if time_pred_result is not None:
+            log_string(
+                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+            )
+        time_pred_result = time_current_result
+
     checkpoint = {
         "nb_epochs_finished": n_epoch + 1,
         "model_state": model.state_dict(),