Update: replace tasks.World with a QuizzMachine built on pluggable problems (sky, reasoning), make c-quiz validation configurable (--accuracy_to_make_c_quizzes, --min_to_validate, --max_to_validate), and have a randomly picked model generate quizzes that the whole population validates.
diff --git a/main.py b/main.py
index ebecad8..43241dd 100755
--- a/main.py
+++ b/main.py
@@ -12,13 +12,13 @@ from torch import nn
 from torch.nn import functional as F
 
 import ffutils
-import mygpt, tasks
+import mygpt
+import sky, reasoning, quizz_machine
 
 # world quizzes vs. culture quizzes
 
 ######################################################################
 
-accuracy_to_make_c_quizzes = 0.975
 nb_new_c_quizzes_for_train = 1000
 nb_new_c_quizzes_for_test = 100
 
@@ -37,7 +37,7 @@ parser = argparse.ArgumentParser(
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default=None)
 
@@ -57,7 +57,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=1e-3)
 
 ########################################
 
@@ -79,21 +79,53 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
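+# The problem the models are trained on: "sky" or "reasoning"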
+parser.add_argument("--problem", type=str, default="sky")
+
 parser.add_argument("--nb_gpts", type=int, default=5)
 
+parser.add_argument("--min_to_validate", type=int, default=None)
+
+parser.add_argument("--max_to_validate", type=int, default=None)
+
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+
+parser.add_argument("--generation_temperature", type=float, default=2.0)
+
+parser.add_argument("--deterministic_validation", action="store_true", default=False)
+
+parser.add_argument("--bidirectional_validation", action="store_true", default=False)
+
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
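+# Parameters of the sky problem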
+parser.add_argument("--sky_height", type=int, default=6)
+
+parser.add_argument("--sky_width", type=int, default=8)
+
+parser.add_argument("--sky_nb_birds", type=int, default=3)
+
+parser.add_argument("--sky_nb_iterations", type=int, default=2)
+
+parser.add_argument("--sky_speed", type=int, default=3)
+
+######################################################################
+
 args = parser.parse_args()
 
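+# By default a c_quiz is validated when exactly nb_gpts - 1 of the
+# models predict it correctly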
+if args.min_to_validate is None:
+    args.min_to_validate = args.nb_gpts - 1
+
+if args.max_to_validate is None:
+    args.max_to_validate = args.nb_gpts - 1
+
 if args.result_dir is None:
     args.result_dir = f"results_culture"
 
 ######################################################################
 
 if args.dirty_debug:
-    accuracy_to_make_c_quizzes = 0.0
+    args.accuracy_to_make_c_quizzes = 0.0
     nb_new_c_quizzes_for_train = 100
     nb_new_c_quizzes_for_test = 10
 
@@ -102,7 +134,7 @@ if args.dirty_debug:
 default_args = {
     "model": "37M",
     "batch_size": 100,
-    "nb_train_samples": 250000,
+    "nb_train_samples": 100000,
     "nb_test_samples": 10000,
 }
 
@@ -209,9 +241,26 @@ else:
 assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
-task = tasks.World(
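+# Instantiate the problem that generates the world quizzes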
+if args.problem == "sky":
+    problem = sky.Sky(
+        height=args.sky_height,
+        width=args.sky_width,
+        nb_birds=args.sky_nb_birds,
+        nb_iterations=args.sky_nb_iterations,
+        speed=args.sky_speed,
+    )
+    back_accuracy = False
+elif args.problem == "reasoning":
+    problem = reasoning.Reasoning(device=device)
+    back_accuracy = True
+else:
+    raise ValueError(f"unknown problem {args.problem}")
+
+quizz_machine = quizz_machine.QuizzMachine(
+    problem=problem,
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
+    back_accuracy=back_accuracy,
     batch_size=args.physical_batch_size,
     result_dir=args.result_dir,
     logger=log_string,
@@ -222,7 +271,7 @@ task = tasks.World(
 
 log_string(f"device {device}")
 
-vocabulary_size = task.vocabulary_size()
+vocabulary_size = quizz_machine.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
 
@@ -231,8 +280,10 @@ log_string(f"vocabulary_size {vocabulary_size}")
 # Compute the entropy of the training tokens
 
 token_count = 0
-for input in task.batches(split="train", desc="train-entropy"):
-    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+        (0, 1)
+    )
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
 train_set_perplexity = math.exp(entropy)
@@ -254,11 +305,11 @@ if args.max_percents_of_test_in_train >= 0:
 
     nb_test, nb_in_train = 0, 0
     for test_subset in subsets_as_tuples(
-        task.batches(split="test", desc="test-check"), 25000
+        quizz_machine.batches(split="test", desc="test-check"), 25000
     ):
         in_train = set()
         for train_subset in subsets_as_tuples(
-            task.batches(split="train", desc="train-check"), 25000
+            quizz_machine.batches(split="train", desc="train-check"), 25000
         ):
             in_train.update(test_subset.intersection(train_subset))
         nb_in_train += len(in_train)
@@ -275,14 +326,14 @@ if args.max_percents_of_test_in_train >= 0:
 ##############################
 
 
-def one_epoch(model, task):
+def one_epoch(model, quizz_machine):
     optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
     model.train()
 
     nb_train_samples, acc_train_loss = 0, 0.0
 
-    for input in task.batches(split="train"):
+    for input in quizz_machine.batches(split="train"):
         input = input.to(device)
 
         if nb_train_samples % args.batch_size == 0:
@@ -307,14 +358,14 @@ def one_epoch(model, task):
 ######################################################################
 
 
-def run_tests(model, task, deterministic_synthesis):
+def run_tests(model, quizz_machine, deterministic_synthesis):
     with torch.autograd.no_grad():
         model.eval()
 
         nb_test_samples, acc_test_loss = 0, 0.0
         nb_samples_accumulated = 0
 
-        for input in task.batches(split="test"):
+        for input in quizz_machine.batches(split="test"):
             input = input.to(device)
 
             bs = model(mygpt.BracketedSequence(input))
@@ -326,76 +377,108 @@ def run_tests(model, task, deterministic_synthesis):
 
             nb_test_samples += input.size(0)
 
-        main_test_accuracy = task.produce_results(
+        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+
+        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
+
+        model.main_test_accuracy = quizz_machine.produce_results(
             n_epoch=n_epoch,
             model=model,
             result_dir=args.result_dir,
-            logger=log_string,
             deterministic_synthesis=deterministic_synthesis,
         )
 
-        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
 
-        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
+######################################################################
+
 
-    model.main_test_accuracy = main_test_accuracy
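+# Extract from the recorded (quizzes, nb_correct) pairs the quizzes
+# whose number of correct predictions satisfies the criterion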
+def valid_c_quizzes(recorded, criteria):
+    result = [q[criteria(c)] for q, c in recorded]
+    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])
 
 
 ######################################################################
 
 
 def create_c_quizzes(
-    model,
-    other_models,
-    task,
+    models,
+    quizz_machine,
     nb_for_train=1000,
     nb_for_test=100,
-    desired_average_logits=None,
 ):
-    kept = []
+    recorded = []
 
-    sum_logits, sum_nb_c_quizzes = 0, 0
+    nb_to_create = nb_for_train + nb_for_test
 
-    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+    # ------------------------------------------------------------
 
-        new_c_quizzes, nb_correct, average_logits = task.create_c_quizzes(
-            n_epoch=n_epoch,
-            result_dir=args.result_dir,
-            logger=log_string,
-            nb=nb_to_generate,
-            model=model,
-            other_models=other_models,
-            desired_average_logits=desired_average_logits,
-        )
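+    # A c_quiz is kept when the number of models that solve it lies in
+    # [min_to_validate, max_to_validate]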
+    standard_validity = lambda nb_correct: torch.logical_and(
+        nb_correct >= args.min_to_validate, nb_correct <= args.max_to_validate
+    )
 
-        sum_logits += new_c_quizzes.size(0) * average_logits
-        sum_nb_c_quizzes += new_c_quizzes.size(0)
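+    # Dump, for every generated quiz, the number of models that solved
+    # it and the per-model sequence log-probabilities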
+    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")
+    with open(file_name, "w") as logp_file:
+        while valid_c_quizzes(recorded, standard_validity).size(0) < nb_to_create:
+            # Select a model at random to generate the new quizzes
 
-        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
+            model_for_generation = models[torch.randint(len(models), (1,)).item()]
 
-        if args.dirty_debug:
-            to_keep = new_c_quizzes
+            c_quizzes = quizz_machine.generate_quizzes(
+                nb_to_create,
+                model_for_generation=model_for_generation,
+                temperature=args.generation_temperature,
+            )
 
-        log_string(
-            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
-        )
+            nb_correct, seq_logproba = quizz_machine.compute_correctness(
+                c_quizzes,
+                models,
+                bidirectional_validation=args.bidirectional_validation,
+                deterministic_validation=args.deterministic_validation,
+            )
 
-        kept.append(to_keep)
+            for n, l in zip(nb_correct, seq_logproba):
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(f"{n} {s}\n")
 
-    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
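+            # In dirty_debug mode, fake the correctness counts so the
+            # validation machinery can be exercised quickly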
+            if args.dirty_debug:
+                nb_correct = torch.randint(
+                    len(models) + 1, nb_correct.size(), device=c_quizzes.device
+                )
 
-    task.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
-    task.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+            recorded.append((c_quizzes, nb_correct))
 
-    task.save_quizzes(
-        new_c_quizzes[:72],
-        args.result_dir,
-        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
-        log_string,
-    )
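+            # nv[k] = number of generated quizzes solved by exactly k models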
+            nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
+            nv = " ".join([str(x.item()) for x in nv])
+
+            nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)
+
+            log_string(
+                f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_validated {nb_validated} / {nb_to_create}"
+            )
+
+    # store the new c_quizzes which have been validated
+
+    new_c_quizzes = valid_c_quizzes(recorded, standard_validity)
 
-    return sum_logits / sum_nb_c_quizzes
+    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+
+    # Save a sample of quizzes grouped by their number of correct
+    # predictions, to investigate what each validation level looks like
+
+    for n in range(len(models) + 1):
+        s = (
+            "_validated"
+            if n >= args.min_to_validate and n <= args.max_to_validate
+            else ""
+        )
+
+        q = valid_c_quizzes(recorded, criteria=lambda nb_correct: nb_correct == n)[:72]
+
+        if q.size(0) > 0:
+            quizz_machine.save_quizzes(
+                args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q
+            )
 
 
 ######################################################################
@@ -425,63 +508,49 @@ log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-desired_average_logits = None
-
 for n_epoch in range(args.nb_epochs):
     log_string(f"--- epoch {n_epoch} ----------------------------------------")
 
-    a = [(model.id, float(model.main_test_accuracy)) for model in models]
-    a.sort(key=lambda p: p[0])
-    log_string(f"current accuracies {a}")
+    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
+    log_string(f"current_test_accuracies {cta}")
 
-    # select the model with lowest accuracy
-    models.sort(key=lambda model: model.main_test_accuracy)
-    model = models[0]
+    # Select, train, and evaluate the weakest model
+
+    weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))
 
     log_string(
-        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+        f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}"
     )
 
-    # improve it
-    one_epoch(model, task)
-
-    task.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+    one_epoch(weakest_model, quizz_machine)
 
     log_string(
-        f"train_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
-    # test it
-    run_tests(model, task, deterministic_synthesis=False)
+    run_tests(weakest_model, quizz_machine, deterministic_synthesis=False)
 
     log_string(
-        f"test_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
-    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
-        other_models = models.copy()
-        other_models.remove(model)
+    # Replace a fraction of the w_quizzes with fresh ones
+
+    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+
+    # If all the models are good enough, generate new quizzes and
+    # re-compute the test errors
 
-        average_logits = create_c_quizzes(
-            model,
-            other_models,
-            task,
+    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
+        create_c_quizzes(
+            models,
+            quizz_machine,
             nb_for_train=nb_new_c_quizzes_for_train,
             nb_for_test=nb_new_c_quizzes_for_test,
-            desired_average_logits=desired_average_logits,
         )
 
-        # We keep the first average logits as a reference
-        if desired_average_logits is None:
-            desired_average_logits = average_logits
-        else:
-            log_string(
-                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
-            )
-
-        # We update everyone
         for model in models:
-            run_tests(model, task, deterministic_synthesis=False)
+            run_tests(model, quizz_machine, deterministic_synthesis=False)
 
 
 ######################################################################