Update.
diff --git a/main.py b/main.py
index ebecad8..6c4099f 100755
--- a/main.py
+++ b/main.py
@@ -12,38 +12,29 @@ from torch import nn
 from torch.nn import functional as F
 
 import ffutils
-import mygpt, tasks
 
-# world quizzes vs. culture quizzes
+import mygpt
+import sky, grids, quiz_machine
 
-######################################################################
+import threading
 
-accuracy_to_make_c_quizzes = 0.975
-nb_new_c_quizzes_for_train = 1000
-nb_new_c_quizzes_for_test = 100
-
-######################################################################
-
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    torch.backends.cuda.matmul.allow_tf32 = True
-else:
-    device = torch.device("cpu")
+import torch.multiprocessing as mp
 
 ######################################################################
 
 parser = argparse.ArgumentParser(
-    description="An implementation of GPT with cache.",
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default=None)
 
 parser.add_argument("--seed", type=int, default=0)
 
 
 parser.add_argument("--result_dir", type=str, default=None)
 
 parser.add_argument("--seed", type=int, default=0)
 
-parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
+parser.add_argument("--resume", action="store_true", default=False)
+
+parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1)
 
 ########################################
 
@@ -57,7 +48,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=5e-4)
 
 ########################################
 
@@ -79,30 +70,62 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
+parser.add_argument("--problem", type=str, default="grids")
+
+parser.add_argument("--nb_threads", type=int, default=1)
+
+parser.add_argument("--gpus", type=str, default="all")
+
 parser.add_argument("--nb_gpts", type=int, default=5)
 
 parser.add_argument("--nb_gpts", type=int, default=5)
 
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+
+parser.add_argument("--proba_understands", type=float, default=0.99)
+
+parser.add_argument("--proba_not_understands", type=float, default=0.5)
+
+parser.add_argument("--generation_temperature", type=float, default=2.0)
+
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
-args = parser.parse_args()
+grids_tasks = ", ".join(
+    [x.__name__.removeprefix("task_") for x in grids.Grids().all_tasks]
+)
 
-if args.result_dir is None:
-    args.result_dir = f"results_culture"
+parser.add_argument(
+    "--grids_tasks",
+    type=str,
+    default=None,
+    help="A comma-separated subset of: " + grids_tasks + ", or None for all.",
+)
 
 ######################################################################
 
-if args.dirty_debug:
-    accuracy_to_make_c_quizzes = 0.0
-    nb_new_c_quizzes_for_train = 100
-    nb_new_c_quizzes_for_test = 10
+parser.add_argument("--sky_height", type=int, default=6)
+
+parser.add_argument("--sky_width", type=int, default=8)
+
+parser.add_argument("--sky_nb_birds", type=int, default=3)
+
+parser.add_argument("--sky_nb_iterations", type=int, default=2)
+
+parser.add_argument("--sky_speed", type=int, default=3)
+
+######################################################################
+
+args = parser.parse_args()
+
+if args.result_dir is None:
+    args.result_dir = f"results_culture"
 
 ######################################################################
 
 default_args = {
     "model": "37M",
-    "batch_size": 100,
-    "nb_train_samples": 250000,
+    "batch_size": 25,
+    "nb_train_samples": 100000,
     "nb_test_samples": 10000,
 }
 
     "nb_test_samples": 10000,
 }
 
@@ -159,11 +182,15 @@ else:
 
 ######################################################################
 
-try:
-    os.mkdir(args.result_dir)
-except FileExistsError:
-    print(f"result directory {args.result_dir} already exists")
-    exit(1)
+if args.resume:
+    assert os.path.isdir(args.result_dir)
+
+else:
+    try:
+        os.mkdir(args.result_dir)
+    except FileExistsError:
+        print(f"result directory {args.result_dir} already exists")
+        exit(1)
 
 log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
 
@@ -189,6 +216,10 @@ def log_string(s):
     sys.stdout.flush()
 
 
+now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
+
+os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py")
+
 log_string(f"argv {' '.join(sys.argv)}")
 
 for n in vars(args):
 log_string(f"argv {' '.join(sys.argv)}")
 
 for n in vars(args):
@@ -197,6 +228,19 @@ for n in vars(args):
 
 ######################################################################
 
+if args.gpus == "all":
+    gpus_idx = range(torch.cuda.device_count())
+else:
+    gpus_idx = [int(k) for k in args.gpus.split(",")]
+
+gpus = [torch.device(f"cuda:{n}") for n in gpus_idx]
+
+if torch.cuda.is_available():
+    main_device = gpus[0]
+else:
+    assert len(gpus) == 0
+    main_device = torch.device("cpu")
+
 if args.dirty_debug:
     args.nb_train_samples = 2500
     args.nb_test_samples = 100
@@ -209,81 +253,93 @@ else:
 assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
-task = tasks.World(
+if args.problem == "sky":
+    problem = sky.Sky(
+        height=args.sky_height,
+        width=args.sky_width,
+        nb_birds=args.sky_nb_birds,
+        nb_iterations=args.sky_nb_iterations,
+        speed=args.sky_speed,
+        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+        chunk_size=100,
+        nb_threads=args.nb_threads,
+    )
+    back_accuracy = False
+elif args.problem == "grids":
+    problem = grids.Grids(
+        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+        chunk_size=100,
+        nb_threads=args.nb_threads,
+        tasks=args.grids_tasks,
+    )
+    back_accuracy = True
+else:
+    raise ValueError
+
+problem.save_some_examples(args.result_dir)
+
+quiz_machine = quiz_machine.QuizMachine(
+    problem=problem,
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
+    back_accuracy=back_accuracy,
     batch_size=args.physical_batch_size,
     result_dir=args.result_dir,
     logger=log_string,
-    device=device,
+    device=main_device,
 )
 
 ######################################################################
 
-log_string(f"device {device}")
+log_string(f"main_device {main_device} gpus {[ str(g) for g in gpus]}")
 
 
-vocabulary_size = task.vocabulary_size()
+vocabulary_size = quiz_machine.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
 
 ######################################################################
 
-# Compute the entropy of the training tokens
 
-token_count = 0
-for input in task.batches(split="train", desc="train-entropy"):
-    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
-token_probas = token_count / token_count.sum()
-entropy = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(entropy)
+def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device):
+    with torch.autograd.no_grad():
+        model.eval().to(local_device)
 
-######################################################################
-# A bit of paranoia never hurts
+        nb_test_samples, acc_test_loss = 0, 0.0
+        nb_samples_accumulated = 0
 
-if args.max_percents_of_test_in_train >= 0:
+        for input in quiz_machine.batches(model, split="test"):
+            input = input.to(local_device)
 
-    def subsets_as_tuples(batches, cs):
-        s = set()
-        for batch in batches:
-            for x in batch:
-                s.add(tuple([v.item() for v in x]))
-                if len(s) == cs:
-                    yield s
-                    s = set()
-        yield s
+            bs = model(mygpt.BracketedSequence(input))
+            output = bs.x
 
 
-    nb_test, nb_in_train = 0, 0
-    for test_subset in subsets_as_tuples(
-        task.batches(split="test", desc="test-check"), 25000
-    ):
-        in_train = set()
-        for train_subset in subsets_as_tuples(
-            task.batches(split="train", desc="train-check"), 25000
-        ):
-            in_train.update(test_subset.intersection(train_subset))
-        nb_in_train += len(in_train)
-        nb_test += len(test_subset)
+            loss = F.cross_entropy(output.transpose(1, 2), input)
 
-    log_string(
-        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
-    )
+            acc_test_loss += loss.item() * input.size(0)
 
-    assert (
-        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
-    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
+            nb_test_samples += input.size(0)
 
-##############################
+        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
 
+        log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}")
+
+        model.main_test_accuracy = quiz_machine.produce_results(
+            n_epoch=n_epoch,
+            model=model,
+            result_dir=args.result_dir,
+            deterministic_synthesis=deterministic_synthesis,
+        )
 
 
-def one_epoch(model, task):
-    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
-    model.train()
+def one_epoch(model, quiz_machine, local_device=main_device):
+    model.to(local_device).train()
+
+    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
     nb_train_samples, acc_train_loss = 0, 0.0
 
-    for input in task.batches(split="train"):
-        input = input.to(device)
+    for input in quiz_machine.batches(model, split="train"):
+        input = input.to(local_device)
 
         if nb_train_samples % args.batch_size == 0:
             optimizer.zero_grad()
 
@@ -301,101 +357,111 @@ def one_epoch(model, task):
 
     train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
 
-    log_string(f"train_perplexity {n_epoch} {train_perplexity}")
+    log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}")
 
 
+    run_tests(model, quiz_machine, deterministic_synthesis=False)
 
-######################################################################
+    model.to(main_device)
 
 
-def run_tests(model, task, deterministic_synthesis):
-    with torch.autograd.no_grad():
-        model.eval()
-
-        nb_test_samples, acc_test_loss = 0, 0.0
-        nb_samples_accumulated = 0
-
-        for input in task.batches(split="test"):
-            input = input.to(device)
+######################################################################
 
-            bs = model(mygpt.BracketedSequence(input))
-            output = bs.x
 
-            loss = F.cross_entropy(output.transpose(1, 2), input)
+def standard_validity(logproba):
+    l = logproba.sort(dim=-1).values
+    return (l[:, 0] < math.log(args.proba_not_understands)) & (
+        l[:, 1] > math.log(args.proba_understands)
+    )
 
-            acc_test_loss += loss.item() * input.size(0)
 
-            nb_test_samples += input.size(0)
+def valid_quizzes_and_logprobas(recorded, criteria):
+    validated_quizzes, validated_logprobas = [], []
+    for q, lp in recorded:
+        validated_indices = criteria(lp)
+        validated_quizzes.append(q[validated_indices])
+        validated_logprobas.append(lp[validated_indices])
 
-        main_test_accuracy = task.produce_results(
-            n_epoch=n_epoch,
-            model=model,
-            result_dir=args.result_dir,
-            logger=log_string,
-            deterministic_synthesis=deterministic_synthesis,
+    if len(validated_quizzes) > 0:
+        return torch.cat(validated_quizzes, dim=0), torch.cat(
+            validated_logprobas, dim=0
         )
-
-        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
-
-        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
-
-    model.main_test_accuracy = main_test_accuracy
+    else:
+        return None, None
 
 
 ######################################################################
 
 
-def create_c_quizzes(
-    model,
-    other_models,
-    task,
-    nb_for_train=1000,
-    nb_for_test=100,
-    desired_average_logits=None,
-):
-    kept = []
+def create_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=100):
+    nb_to_create = nb_for_train + nb_for_test
 
-    sum_logits, sum_nb_c_quizzes = 0, 0
+    recorded_quizzes_logprobas = []
 
-    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+    nb_validated = 0
 
-        new_c_quizzes, nb_correct, average_logits = task.create_c_quizzes(
-            n_epoch=n_epoch,
-            result_dir=args.result_dir,
-            logger=log_string,
-            nb=nb_to_generate,
-            model=model,
-            other_models=other_models,
-            desired_average_logits=desired_average_logits,
+    while nb_validated < nb_to_create:
+        model_for_generation = models[torch.randint(len(models), (1,))]
+
+        c_quizzes = quiz_machine.generate_quizzes(
+            nb_to_create,
+            model_for_generation=model_for_generation,
+            temperature=args.generation_temperature,
         )
 
-        sum_logits += new_c_quizzes.size(0) * average_logits
-        sum_nb_c_quizzes += new_c_quizzes.size(0)
+        c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]
 
-        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
+        if c_quizzes.size(0) > 0:
+            logproba = quiz_machine.logproba_of_solutions(models, c_quizzes)
+            recorded_quizzes_logprobas.append((c_quizzes, logproba))
 
-        if args.dirty_debug:
-            to_keep = new_c_quizzes
+            validated_quizzes, validated_logprobas = valid_quizzes_and_logprobas(
+                recorded_quizzes_logprobas, standard_validity
+            )
+
+            if validated_quizzes is not None:
+                nb_validated = validated_quizzes.size(0)
 
         log_string(
-            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+            f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
         )
 
-        kept.append(to_keep)
+    # store the new c_quizzes which have been validated
 
-    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+    quiz_machine.reverse_random_half_in_place(validated_quizzes)
+    quiz_machine.store_c_quizzes(validated_quizzes[:nb_for_train], for_train=True)
+    quiz_machine.store_c_quizzes(
+        validated_quizzes[nb_for_train:nb_to_create], for_train=False
+    )
 
 
-    task.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
-    task.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+    ######################################################################
+    # save the log probas
 
 
-    task.save_quizzes(
-        new_c_quizzes[:72],
-        args.result_dir,
-        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
-        log_string,
+    file_name = os.path.join(
+        args.result_dir, f"culture_c_quiz_all_{n_epoch:04d}_logp.dat"
     )
 
     )
 
-    return sum_logits / sum_nb_c_quizzes
+    with open(file_name, "w") as logp_file:
+        for _, ll in recorded_quizzes_logprobas:
+            for l in ll:
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(s + "\n")
+
+    ######################################################################
+    # save images with their logprobas
+
+    vq = validated_quizzes[:72]
+    vl = validated_logprobas[:72]
+
+    if vq.size(0) > 0:
+        prefix = f"culture_c_quiz_{n_epoch:04d}"
+
+        file_name = os.path.join(args.result_dir, prefix + "_logp.dat")
+        with open(file_name, "w") as logp_file:
+            for l in vl:
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(s + "\n")
+
+        quiz_machine.save_quiz_illustrations(args.result_dir, prefix, vq)
 
 
 ######################################################################
 
 
 ######################################################################
@@ -403,6 +469,7 @@ def create_c_quizzes(
 models = []
 
 for k in range(args.nb_gpts):
+    log_string(f"creating model {k} and its w_quizzes")
     model = mygpt.MyGPT(
         vocabulary_size=vocabulary_size,
         dim_model=args.dim_model,
@@ -412,76 +479,181 @@ for k in range(args.nb_gpts):
         nb_blocks=args.nb_blocks,
         causal=True,
         dropout=args.dropout,
-    ).to(device)
+    ).to(main_device)
 
     model.main_test_accuracy = 0.0
     model.id = k
 
+    model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples)
+    quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)
+    model.test_w_quizzes = quiz_machine.generate_token_sequences(args.nb_test_samples)
+    quiz_machine.reverse_random_half_in_place(model.test_w_quizzes)
+
     models.append(model)
 
+######################################################################
+
+if args.resume:
+    try:
+        for model in models:
+            filename = f"gpt_{model.id:03d}.pth"
+
+            try:
+                d = torch.load(os.path.join(args.result_dir, filename))
+                model.load_state_dict(d[0])
+                model.main_test_accuracy = d[1]
+                log_string(f"successfully loaded {filename}")
+            except FileNotFoundError:
+                log_string(f"cannot find {filename}")
+                pass
+
+        try:
+            filename = "c_quizzes.pth"
+            quiz_machine.load_c_quizzes(os.path.join(args.result_dir, filename))
+            log_string(f"successfully loaded {filename}")
+        except FileNotFoundError:
+            log_string(f"cannot find {filename}")
+            pass
+
+    except:
+        log_string(f"error when loading {filename}.")
+        exit(1)
+
+######################################################################
 
 nb_parameters = sum(p.numel() for p in models[0].parameters())
 log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-desired_average_logits = None
+# Compute the entropy of the training tokens
 
-for n_epoch in range(args.nb_epochs):
-    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+token_count = 0
+for input in quiz_machine.batches(models[0], split="train", desc="train-entropy"):
+    token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
+        (0, 1)
+    )
+token_probas = token_count / token_count.sum()
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
 
-    a = [(model.id, float(model.main_test_accuracy)) for model in models]
-    a.sort(key=lambda p: p[0])
-    log_string(f"current accuracies {a}")
+######################################################################
+# A bit of paranoia never hurts
+
+if args.max_percents_of_test_in_train >= 0:
 
-    # select the model with lowest accuracy
-    models.sort(key=lambda model: model.main_test_accuracy)
-    model = models[0]
+    def subsets_as_tuples(batches, cs):
+        s = set()
+        for batch in batches:
+            for x in batch:
+                s.add(tuple([v.item() for v in x]))
+                if len(s) == cs:
+                    yield s
+                    s = set()
+        yield s
+
+    nb_test, nb_in_train = 0, 0
+    for test_subset in subsets_as_tuples(
+        quiz_machine.batches(models[0], split="test", desc="test-check"), 25000
+    ):
+        in_train = set()
+        for train_subset in subsets_as_tuples(
+            quiz_machine.batches(models[0], split="train", desc="train-check"), 25000
+        ):
+            in_train.update(test_subset.intersection(train_subset))
+        nb_in_train += len(in_train)
+        nb_test += len(test_subset)
 
     log_string(
-        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
     )
 
-    # improve it
-    one_epoch(model, task)
+    assert (
+        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
 
 
-    task.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+######################################################################
 
-    log_string(
-        f"train_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
-    )
+nb_new_c_quizzes_for_train = args.nb_train_samples // 50
+nb_new_c_quizzes_for_test = args.nb_test_samples // 50
 
-    # test it
-    run_tests(model, task, deterministic_synthesis=False)
+log_string(
+    f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}"
+)
 
-    log_string(
-        f"test_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
-    )
+######################################################################
 
-    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
-        other_models = models.copy()
-        other_models.remove(model)
+if args.dirty_debug:
+    args.accuracy_to_make_c_quizzes = 0.0
+    args.nb_gpts = 2
+    nb_new_c_quizzes_for_train = 100
+    nb_new_c_quizzes_for_test = 10
 
-        average_logits = create_c_quizzes(
-            model,
-            other_models,
-            task,
-            nb_for_train=nb_new_c_quizzes_for_train,
-            nb_for_test=nb_new_c_quizzes_for_test,
-            desired_average_logits=desired_average_logits,
+    def standard_validity(logproba):
+        l = logproba.sort(dim=-1).values
+        return l[:, 0] < math.log(0.5)
+
+
+######################################################################
+
+for n_epoch in range(args.nb_epochs):
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
+    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
+    log_string(f"current_test_accuracies {cta}")
+
+    ##################################################
+    # Select, improve, and eval the worst model
+
+    ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))
+
+    weakest_models = ranked_models[: len(gpus)]
+
+    threads = []
+
+    for gpu, model in zip(gpus, weakest_models):
+        log_string(f"training model {model.id}")
+
+        t = threading.Thread(
+            target=one_epoch, daemon=True, args=(model, quiz_machine, gpu)
         )
 
-        # We keep the first average logits as a reference
-        if desired_average_logits is None:
-            desired_average_logits = average_logits
-        else:
-            log_string(
-                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
-            )
+        threads.append(t)
 
-        # We update everyone
-        for model in models:
-            run_tests(model, task, deterministic_synthesis=False)
+        t.start()
+
+    for t in threads:
+        t.join()
+
+    # Save the models to disk
+
+    for model in weakest_models:
+        filename = f"gpt_{model.id:03d}.pth"
+        torch.save(
+            (model.state_dict(), model.main_test_accuracy),
+            os.path.join(args.result_dir, filename),
+        )
+        log_string(f"wrote {filename}")
+
+    # Renew the training samples
+
+    for model in weakest_models:
+        quiz_machine.renew_w_quizzes(model, args.nb_train_samples)
+
+    ##################################################
+    # If all the models are good enough, generate new quizzes and
+    # re-compute the test errors
+
+    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
+        create_c_quizzes(
+            models,
+            quiz_machine,
+            nb_for_train=nb_new_c_quizzes_for_train,
+            nb_for_test=nb_new_c_quizzes_for_test,
+        )
 
+        filename = "c_quizzes.pth"
+        quiz_machine.save_c_quizzes(os.path.join(args.result_dir, filename))
+        log_string(f"wrote {filename}")
 
 ######################################################################
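
######################################################################

The acceptance rule introduced by standard_validity can be read in
isolation. Below is a minimal sketch, illustration only and not part of
the commit, assuming logproba holds one row per candidate c_quiz and one
column per model, each entry being the log-probability that this model
produces the correct solution, as returned by
quiz_machine.logproba_of_solutions:

    import math
    import torch

    proba_understands = 0.99       # default of --proba_understands
    proba_not_understands = 0.5    # default of --proba_not_understands

    def standard_validity(logproba):
        # Sort each row: l[:, 0] is the weakest model's log-probability,
        # l[:, 1] the second weakest. A c_quiz is kept when the single
        # weakest model fails it (proba below 0.5) while every other
        # model, being at least as strong as the second weakest, solves
        # it confidently (proba above 0.99).
        l = logproba.sort(dim=-1).values
        return (l[:, 0] < math.log(proba_not_understands)) & (
            l[:, 1] > math.log(proba_understands)
        )

    # Two models, three quizzes: solved by both, failed by exactly one,
    # failed by both. Only the middle one counts as a culture quiz.
    logproba = torch.tensor([[0.999, 0.995], [0.3, 0.999], [0.2, 0.4]]).log()
    print(standard_validity(logproba))  # tensor([False,  True, False])

Note that under --dirty_debug the commit swaps in a weaker criterion
that only requires l[:, 0] < math.log(0.5), so quizzes accumulate
quickly during debugging runs.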