Update: add the wireworld problem and make c-quiz generation and validation configurable.
diff --git a/main.py b/main.py
index c5acea7..11eb8fd 100755
--- a/main.py
+++ b/main.py
@@ -13,13 +13,12 @@ from torch.nn import functional as F
 
 import ffutils
 import mygpt
-import sky, quizz_machine
+import sky, wireworld, quizz_machine
 
 # world quizzes vs. culture quizzes
 
 ######################################################################
 
-accuracy_to_make_c_quizzes = 0.975
 nb_new_c_quizzes_for_train = 1000
 nb_new_c_quizzes_for_test = 100
 
@@ -38,7 +37,7 @@ parser = argparse.ArgumentParser(
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default=None)
 
@@ -80,10 +79,32 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
+parser.add_argument("--problem", type=str, default="sky")
+
 parser.add_argument("--nb_gpts", type=int, default=5)
 
+parser.add_argument("--nb_models_for_generation", type=int, default=1)
+
+parser.add_argument("--generation_mode", type=str, default="groupthink")
+
+parser.add_argument("--min_to_validate", type=int, default=4)
+
+parser.add_argument("--max_to_validate", type=int, default=4)
+
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
+parser.add_argument("--sky_height", type=int, default=6)
+
+parser.add_argument("--sky_width", type=int, default=8)
+
+parser.add_argument("--sky_nb_birds", type=int, default=3)
+
+parser.add_argument("--sky_nb_iterations", type=int, default=2)
+
+parser.add_argument("--sky_speed", type=int, default=3)
+
 ######################################################################
 
 args = parser.parse_args()
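
For reference, a hypothetical invocation exercising the new flags (all flag names come from the additions above; the values are illustrative):

    ./main.py --problem=wireworld --nb_gpts=5 --generation_mode=groupthink \
        --nb_models_for_generation=1 --min_to_validate=4 --max_to_validate=4 \
        --accuracy_to_make_c_quizzes=0.975
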
@@ -94,7 +115,7 @@ if args.result_dir is None:
 ######################################################################
 
 if args.dirty_debug:
-    accuracy_to_make_c_quizzes = 0.0
+    args.accuracy_to_make_c_quizzes = 0.0
     nb_new_c_quizzes_for_train = 100
     nb_new_c_quizzes_for_test = 10
 
@@ -210,8 +231,21 @@ else:
 assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
+if args.problem == "sky":
+    problem = sky.Sky(
+        height=args.sky_height,
+        width=args.sky_width,
+        nb_birds=args.sky_nb_birds,
+        nb_iterations=args.sky_nb_iterations,
+        speed=args.sky_speed,
+    )
+elif args.problem == "wireworld":
+    problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
+else:
+    raise ValueError(f"unknown problem {args.problem}")
+
 quizz_machine = quizz_machine.QuizzMachine(
-    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
+    problem=problem,
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
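
Note that the wireworld parameters are hard-coded above, while the sky ones are exposed as flags. A minimal sketch of making wireworld configurable the same way; the wireworld_* flags are hypothetical, not part of this commit, and their defaults simply reproduce the hard-coded values:

    # Hypothetical flags mirroring the sky_* arguments; not in this commit.
    parser.add_argument("--wireworld_height", type=int, default=8)
    parser.add_argument("--wireworld_width", type=int, default=10)
    parser.add_argument("--wireworld_nb_iterations", type=int, default=2)
    parser.add_argument("--wireworld_speed", type=int, default=5)

    # ...and the corresponding branch of the dispatch above:
    problem = wireworld.Wireworld(
        height=args.wireworld_height,
        width=args.wireworld_width,
        nb_iterations=args.wireworld_nb_iterations,
        speed=args.wireworld_speed,
    )
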
@@ -334,7 +368,6 @@ def run_tests(model, quizz_machine, deterministic_synthesis):
             n_epoch=n_epoch,
             model=model,
             result_dir=args.result_dir,
-            logger=log_string,
             deterministic_synthesis=deterministic_synthesis,
         )
 
@@ -355,58 +388,90 @@ def create_c_quizzes(
     nb_for_test=100,
     min_ave_seq_logproba=None,
 ):
-    kept = []
+    # We store the generated quizzes bucketed by the number of
+    # models that predicted them correctly
+    recorded = {n: [] for n in range(len(models) + 1)}
+
     model_indexes = []
     sum_logits, sum_nb_c_quizzes = 0, 0
 
-    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        nb_to_generate = nb_for_train + nb_for_test
-
-        if len(model_indexes) == 0:
-            model_indexes = [i.item() for i in torch.randperm(len(models))]
+    def nb_generated():
+        return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])
 
-        model = models[model_indexes.pop()]
+    def nb_validated():
+        return sum(
+            [
+                sum([x.size(0) for x in recorded[n]])
+                for n in range(args.min_to_validate, args.max_to_validate + 1)
+            ]
+        )
 
-        new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
-            nb=nb_to_generate,
-            model_for_generation=model,
-            models_for_validation=models,
+    nb_to_create = nb_for_train + nb_for_test
+
+    while nb_validated() < nb_to_create:
+        (
+            new_c_quizzes,
+            nb_correct,
+            ave_seq_logproba,
+        ) = quizz_machine.gang_create_c_quizzes(
+            nb=nb_to_create,
+            nb_models_for_generation=args.nb_models_for_generation,
+            models=models,
+            mode=args.generation_mode,
             min_ave_seq_logproba=min_ave_seq_logproba,
             n_epoch=n_epoch,
             result_dir=args.result_dir,
-            logger=log_string,
         )
 
         sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
         sum_nb_c_quizzes += new_c_quizzes.size(0)
 
-        to_keep = new_c_quizzes[nb_correct == len(models) - 1]
-
         if args.dirty_debug:
-            to_keep = new_c_quizzes[
-                torch.randint(3, (new_c_quizzes.size(0),), device=new_c_quizzes.device)
-                == 0
-            ]
+            nb_correct = torch.randint(
+                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
+            )
 
-        kept.append(to_keep)
+        for n in range(nb_correct.max() + 1):
+            recorded[n].append(new_c_quizzes[nb_correct == n].clone())
 
-        log_string(
-            f"keep c_quizzes {to_keep.size(0)}/{new_c_quizzes.size(0)} ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%) total {sum([ x.size(0) for x in kept])}/{nb_to_generate}"
-        )
+        nv = [recorded[n][-1].size(0) if recorded[n] else 0 for n in recorded.keys()]
+
+        log_string(f"kept c_quizzes {nv} total {nb_validated()} / {nb_to_create}")
+
+    # concatenate and shuffle; snapshot the keys since empty buckets get deleted
+    for n in list(recorded.keys()):
+        if len(recorded[n]) > 0:
+            q = torch.cat(recorded[n], dim=0)
+            q = q[torch.randperm(q.size(0), device=q.device)]
+            recorded[n] = q
+        else:
+            del recorded[n]
+
+    new_c_quizzes = torch.cat(
+        [recorded[n] for n in range(args.min_to_validate, args.max_to_validate + 1) if n in recorded],
+        dim=0,
+    )
 
-    new_c_quizzes = torch.cat(kept, dim=0)
     new_c_quizzes = new_c_quizzes[
-        torch.randperm(new_c_quizzes.size(0))[: nb_for_train + nb_for_test]
+        torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
+            : nb_for_train + nb_for_test
+        ]
     ]
 
     quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
     quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
 
-    quizz_machine.problem.save_quizzes(
-        new_c_quizzes[:72],
-        args.result_dir,
-        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
-    )
+    for n in recorded.keys():
+        s = (
+            "_validated"
+            if args.min_to_validate <= n <= args.max_to_validate
+            else ""
+        )
+        quizz_machine.problem.save_quizzes(
+            recorded[n][:72],
+            args.result_dir,
+            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
+        )
 
     return sum_logits / sum_nb_c_quizzes
 
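The new selection logic buckets each batch of generated quizzes by how many models solved it, and only the buckets within [min_to_validate, max_to_validate] count toward the quota. A self-contained toy run of that bucketing (sizes and values are illustrative only):

    import torch

    nb_models, min_v, max_v = 5, 4, 4
    quizzes = torch.arange(10).unsqueeze(1)  # ten fake one-token quizzes
    # number of models that answered each quiz correctly
    nb_correct = torch.tensor([0, 5, 4, 4, 2, 3, 4, 5, 1, 4])

    recorded = {n: [] for n in range(nb_models + 1)}
    for n in range(int(nb_correct.max()) + 1):
        recorded[n].append(quizzes[nb_correct == n].clone())

    nb_validated = sum(
        x.size(0) for n in range(min_v, max_v + 1) for x in recorded[n]
    )
    print(nb_validated)  # 4: four quizzes were solved by exactly four models
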
@@ -445,7 +510,8 @@ for n_epoch in range(args.nb_epochs):
 
     a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
-    log_string(f"current accuracies {a}")
+    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
+    log_string(f"current accuracies {s}")
 
     # select the model with lowest accuracy
     models.sort(key=lambda model: model.main_test_accuracy)
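
With illustrative accuracies, the reworked log line prints bare percentages rather than raw (id, accuracy) tuples:

    a = [(0, 0.751), (1, 0.802)]
    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
    print(f"current accuracies {s}")  # -> current accuracies 75.10% 80.20%
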
@@ -471,7 +537,7 @@ for n_epoch in range(args.nb_epochs):
         f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
-    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
+    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
         ave_seq_logproba = create_c_quizzes(
             models,
             quizz_machine,