Update: replace the multi-task GPT training setup with the culture loop (sky/grids quiz problems, one model per GPU trained in threads, validated c-quiz generation).
diff --git a/main.py b/main.py
index d961301..6c4099f 100755
--- a/main.py
+++ b/main.py
@@ -5,60 +5,50 @@
 
 # Written by Francois Fleuret <francois@fleuret.org>
 
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime, warnings
 
 import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
 import ffutils
-import mygpt, tasks, problems
 
-######################################################################
+import mygpt
+import sky, grids, quiz_machine
 
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    torch.backends.cuda.matmul.allow_tf32 = True
-else:
-    device = torch.device("cpu")
+import threading
+
+import torch.multiprocessing as mp
 
 ######################################################################
 
 parser = argparse.ArgumentParser(
-    description="An implementation of GPT with cache.",
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument(
-    "--task",
-    type=str,
-    default="twotargets",
-    help="byheart, learnop, guessop, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
-)
-
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default=None)
 
 parser.add_argument("--seed", type=int, default=0)
 
-parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
+parser.add_argument("--resume", action="store_true", default=False)
+
+parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1)
 
 ########################################
 
-parser.add_argument("--nb_epochs", type=int, default=25)
+parser.add_argument("--nb_epochs", type=int, default=10000)
 
 parser.add_argument("--batch_size", type=int, default=None)
 
+parser.add_argument("--physical_batch_size", type=int, default=None)
+
 parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--optim", type=str, default="adam")
-
-parser.add_argument("--learning_rate", type=float, default=1e-4)
-
-parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
+parser.add_argument("--learning_rate", type=float, default=5e-4)
 
 ########################################
 
@@ -80,187 +70,68 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
-
-parser.add_argument("--overwrite_results", action="store_true", default=False)
-
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
-
-##############################
-# rpl options
-
-parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
-
-parser.add_argument("--rpl_max_input", type=int, default=9)
-
-parser.add_argument("--rpl_prog_len", type=int, default=8)
-
-parser.add_argument("--rpl_nb_runs", type=int, default=5)
-
-parser.add_argument("--rpl_no_prog", action="store_true", default=False)
-
-##############################
-# grid options
-
-parser.add_argument("--grid_size", type=int, default=6)
-
-##############################
-# picoclvr options
-
-parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
+parser.add_argument("--problem", type=str, default="grids")
 
-parser.add_argument("--picoclvr_height", type=int, default=12)
+parser.add_argument("--nb_threads", type=int, default=1)
 
-parser.add_argument("--picoclvr_width", type=int, default=16)
+parser.add_argument("--gpus", type=str, default="all")
 
-parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
+parser.add_argument("--nb_gpts", type=int, default=5)
 
-##############################
-# Maze options
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
 
-parser.add_argument("--maze_height", type=int, default=13)
+parser.add_argument("--proba_understands", type=float, default=0.99)
 
-parser.add_argument("--maze_width", type=int, default=21)
+parser.add_argument("--proba_not_understands", type=float, default=0.5)
 
-parser.add_argument("--maze_nb_walls", type=int, default=15)
+parser.add_argument("--generation_temperature", type=float, default=2.0)
 
-##############################
-# Snake options
+parser.add_argument("--dirty_debug", action="store_true", default=False)
 
-parser.add_argument("--snake_height", type=int, default=9)
-
-parser.add_argument("--snake_width", type=int, default=12)
-
-parser.add_argument("--snake_nb_colors", type=int, default=5)
-
-parser.add_argument("--snake_length", type=int, default=200)
-
-##############################
-# Stack options
-
-parser.add_argument("--stack_nb_steps", type=int, default=100)
-
-parser.add_argument("--stack_nb_stacks", type=int, default=3)
+######################################################################
 
-parser.add_argument("--stack_nb_digits", type=int, default=3)
+grids_tasks = ", ".join(
+    [x.__name__.removeprefix("task_") for x in grids.Grids().all_tasks]
+)
 
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
+parser.add_argument(
+    "--grids_tasks",
+    type=str,
+    default=None,
+    help="A comma-separated subset of: " + grids_tasks + ", or None for all.",
+)
 
-##############################
-# Expr options
+######################################################################
 
-parser.add_argument("--expr_nb_variables", type=int, default=5)
+parser.add_argument("--sky_height", type=int, default=6)
 
-parser.add_argument("--expr_sequence_length", type=int, default=40)
+parser.add_argument("--sky_width", type=int, default=8)
 
-parser.add_argument("--expr_operand_max", type=int, default=9)
+parser.add_argument("--sky_nb_birds", type=int, default=3)
 
-parser.add_argument("--expr_result_max", type=int, default=99)
+parser.add_argument("--sky_nb_iterations", type=int, default=2)
 
-parser.add_argument("--expr_input_file", type=str, default=None)
+parser.add_argument("--sky_speed", type=int, default=3)
 
 ######################################################################
 
 args = parser.parse_args()
 
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
-
 if args.result_dir is None:
-    args.result_dir = f"results_{args.task}"
+    args.result_dir = f"results_culture"
 
 ######################################################################
 
-default_task_args = {
-    "addition": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "byheart": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "expr": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 2500000,
-        "nb_test_samples": 10000,
-    },
-    "grid": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "qmlp": {
-        "model": "37M",
-        "batch_size": 10,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
-    },
-    "guessop": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 1000000,
-        "nb_test_samples": 10000,
-    },
-    "learnop": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "maze": {
-        "model": "37M",
-        "batch_size": 5,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 10000,
-    },
-    "picoclvr": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "rpl": {
-        "model": "352M",
-        "batch_size": 5,
-        "nb_train_samples": 2500000,
-        "nb_test_samples": 10000,
-    },
-    "snake": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "stack": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
-    },
-    "twotargets": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "mnist": {
-        "model": "37M",
-        "batch_size": 10,
-        "nb_train_samples": 60000,
-        "nb_test_samples": 10000,
-    },
+default_args = {
+    "model": "37M",
+    "batch_size": 25,
+    "nb_train_samples": 100000,
+    "nb_test_samples": 10000,
 }
 
-if args.task in default_task_args:
-    for k, v in default_task_args[args.task].items():
-        if getattr(args, k) is None:
-            setattr(args, k, v)
+for k, v in default_args.items():
+    if getattr(args, k) is None:
+        setattr(args, k, v)
 
 ######################################################################
 
@@ -272,6 +143,13 @@ default_model_args = {
         "nb_heads": 2,
         "nb_blocks": 2,
     },
         "nb_heads": 2,
         "nb_blocks": 2,
     },
+    "4M": {
+        "dim_model": 256,
+        "dim_keys": 32,
+        "dim_hidden": 1024,
+        "nb_heads": 4,
+        "nb_blocks": 6,
+    },
     "37M": {
         "dim_model": 512,
         "dim_keys": 64,
     "37M": {
         "dim_model": 512,
         "dim_keys": 64,
@@ -304,10 +182,13 @@ else:
 
 ######################################################################
 
-try:
-    os.mkdir(args.result_dir)
-except FileExistsError:
-    if not args.overwrite_results:
+if args.resume:
+    assert os.path.isdir(args.result_dir)
+
+else:
+    try:
+        os.mkdir(args.result_dir)
+    except FileExistsError:
         print(f"result directory {args.result_dir} already exists")
         exit(1)
 
@@ -335,267 +216,323 @@ def log_string(s):
     sys.stdout.flush()
 
 
+now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
+
+os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py")
+
+log_string(f"argv {' '.join(sys.argv)}")
+
 for n in vars(args):
     log_string(f"args.{n} {getattr(args, n)}")
 
 
 ######################################################################
 
+if args.gpus == "all":
+    gpus_idx = range(torch.cuda.device_count())
+else:
+    gpus_idx = [int(k) for k in args.gpus.split(",")]
 
-def picoclvr_pruner_horizontal_green(p):
-    return not ("green" in p and ("left" in p or "right" in p))
+gpus = [torch.device(f"cuda:{n}") for n in gpus_idx]
 
+if torch.cuda.is_available():
+    main_device = gpus[0]
+else:
+    assert len(gpus) == 0
+    main_device = torch.device("cpu")
 
-picoclvr_pruner_train = (
-    picoclvr_pruner_horizontal_green
-    if args.picocvlr_prune_properties in {"train+eval"}
-    else None
-)
+if args.dirty_debug:
+    args.nb_train_samples = 2500
+    args.nb_test_samples = 100
 
-picoclvr_pruner_eval = (
-    (lambda p: not picoclvr_pruner_horizontal_green(p))
-    if args.picocvlr_prune_properties in {"train+eval", "eval"}
-    else None
+if args.physical_batch_size is None:
+    args.physical_batch_size = args.batch_size
+else:
+    assert args.batch_size % args.physical_batch_size == 0
+
+assert args.nb_train_samples % args.batch_size == 0
+assert args.nb_test_samples % args.batch_size == 0
+
+if args.problem == "sky":
+    problem = sky.Sky(
+        height=args.sky_height,
+        width=args.sky_width,
+        nb_birds=args.sky_nb_birds,
+        nb_iterations=args.sky_nb_iterations,
+        speed=args.sky_speed,
+        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+        chunk_size=100,
+        nb_threads=args.nb_threads,
+    )
+    back_accuracy = False
+elif args.problem == "grids":
+    problem = grids.Grids(
+        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+        chunk_size=100,
+        nb_threads=args.nb_threads,
+        tasks=args.grids_tasks,
+    )
+    back_accuracy = True
+else:
+    raise ValueError
+
+problem.save_some_examples(args.result_dir)
+
+quiz_machine = quiz_machine.QuizMachine(
+    problem=problem,
+    nb_train_samples=args.nb_train_samples,
+    nb_test_samples=args.nb_test_samples,
+    back_accuracy=back_accuracy,
+    batch_size=args.physical_batch_size,
+    result_dir=args.result_dir,
+    logger=log_string,
+    device=main_device,
 )
 
 ######################################################################
 
-if args.task == "byheart":
-    task = tasks.SandBox(
-        problem=problems.ProblemByHeart(),
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        device=device,
-    )
-    args.max_percents_of_test_in_train = -1
-
-elif args.task == "learnop":
-    task = tasks.SandBox(
-        problem=problems.ProblemLearnOperator(),
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        device=device,
-    )
+log_string(f"main_device {main_device} gpus {[ str(g) for g in gpus]}")
 
+vocabulary_size = quiz_machine.vocabulary_size()
 
-elif args.task == "guessop":
-    task = tasks.SandBox(
-        problem=problems.ProblemGuessOperator(),
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        device=device,
-    )
+log_string(f"vocabulary_size {vocabulary_size}")
 
+######################################################################
 
-elif args.task == "twotargets":
-    task = tasks.SandBox(
-        problem=problems.ProblemTwoTargets(),
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        device=device,
-    )
 
-elif args.task == "addition":
-    task = tasks.SandBox(
-        problem=problems.ProblemAddition(),
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        device=device,
-    )
+def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device):
+    with torch.autograd.no_grad():
+        model.eval().to(local_device)
 
-elif args.task == "picoclvr":
-    task = tasks.PicoCLVR(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        height=args.picoclvr_height,
-        width=args.picoclvr_width,
-        nb_colors=args.picoclvr_nb_colors,
-        logger=log_string,
-        device=device,
-        pruner_train=picoclvr_pruner_train,
-        pruner_eval=picoclvr_pruner_eval,
-    )
+        nb_test_samples, acc_test_loss = 0, 0.0
+        nb_samples_accumulated = 0
 
-elif args.task == "mnist":
-    task = tasks.MNIST(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        device=device,
-    )
+        for input in quiz_machine.batches(model, split="test"):
+            input = input.to(local_device)
 
-elif args.task == "maze":
-    task = tasks.Maze(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        height=args.maze_height,
-        width=args.maze_width,
-        nb_walls=args.maze_nb_walls,
-        device=device,
-    )
+            bs = model(mygpt.BracketedSequence(input))
+            output = bs.x
 
-elif args.task == "snake":
-    task = tasks.Snake(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        height=args.snake_height,
-        width=args.snake_width,
-        nb_colors=args.snake_nb_colors,
-        length=args.snake_length,
-        prompt_length=args.snake_length // 2,
-        device=device,
-    )
+            loss = F.cross_entropy(output.transpose(1, 2), input)
 
-elif args.task == "stack":
-    task = tasks.Stack(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        logger=log_string,
-        nb_steps=args.stack_nb_steps,
-        nb_stacks=args.stack_nb_stacks,
-        nb_digits=args.stack_nb_digits,
-        fraction_values_for_train=args.stack_fraction_values_for_train,
-        device=device,
-    )
+            acc_test_loss += loss.item() * input.size(0)
 
-elif args.task == "expr":
-    task = tasks.Expr(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        nb_variables=args.expr_nb_variables,
-        sequence_length=args.expr_sequence_length,
-        operand_max=args.expr_operand_max,
-        result_max=args.expr_result_max,
-        batch_size=args.batch_size,
-        device=device,
-    )
+            nb_test_samples += input.size(0)
 
-elif args.task == "rpl":
-    task = tasks.RPL(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        nb_starting_values=args.rpl_nb_starting_values,
-        max_input=args.rpl_max_input,
-        prog_len=args.rpl_prog_len,
-        nb_runs=args.rpl_nb_runs,
-        no_prog=args.rpl_no_prog,
-        logger=log_string,
-        device=device,
-    )
+        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+
+        log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}")
+
+        model.main_test_accuracy = quiz_machine.produce_results(
+            n_epoch=n_epoch,
+            model=model,
+            result_dir=args.result_dir,
+            deterministic_synthesis=deterministic_synthesis,
+        )
 
-elif args.task == "grid":
-    task = tasks.Grid(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        size=args.grid_size,
-        logger=log_string,
-        device=device,
-    )
 
 
-elif args.task == "qmlp":
-    task = tasks.QMLP(
-        nb_train_samples=args.nb_train_samples,
-        nb_test_samples=args.nb_test_samples,
-        batch_size=args.batch_size,
-        result_dir=args.result_dir,
-        logger=log_string,
-        device=device,
+def one_epoch(model, quiz_machine, local_device=main_device):
+    model.to(local_device).train()
+
+    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
+
+    nb_train_samples, acc_train_loss = 0, 0.0
+
+    for input in quiz_machine.batches(model, split="train"):
+        input = input.to(local_device)
+
+        if nb_train_samples % args.batch_size == 0:
+            optimizer.zero_grad()
+
+        output = model(mygpt.BracketedSequence(input)).x
+        loss = F.cross_entropy(output.transpose(1, 2), input)
+        acc_train_loss += loss.item() * input.size(0)
+
+        nb_train_samples += input.size(0)
+
+        loss.backward()
+
+        if nb_train_samples % args.batch_size == 0:
+            optimizer.step()
+
+    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+    log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}")
+
+    run_tests(model, quiz_machine, deterministic_synthesis=False)
+
+    model.to(main_device)
+
+
+######################################################################
+
+
+def standard_validity(logproba):
+    l = logproba.sort(dim=-1).values
+    return (l[:, 0] < math.log(args.proba_not_understands)) & (
+        l[:, 1] > math.log(args.proba_understands)
     )
 
-else:
-    raise ValueError(f"Unknown task {args.task}")
+
+def valid_quizzes_and_logprobas(recorded, criteria):
+    validated_quizzes, validated_logprobas = [], []
+    for q, lp in recorded:
+        validated_indices = criteria(lp)
+        validated_quizzes.append(q[validated_indices])
+        validated_logprobas.append(lp[validated_indices])
+
+    if len(validated_quizzes) > 0:
+        return torch.cat(validated_quizzes, dim=0), torch.cat(
+            validated_logprobas, dim=0
+        )
+    else:
+        return None, None
+
 
 ######################################################################
 
-log_string(f"device {device}")
 
-vocabulary_size = task.vocabulary_size()
+def create_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=100):
+    nb_to_create = nb_for_train + nb_for_test
 
-log_string(f"vocabulary_size {vocabulary_size}")
+    recorded_quizzes_logprobas = []
 
-##############################
-
-model = mygpt.MyGPT(
-    vocabulary_size=vocabulary_size,
-    dim_model=args.dim_model,
-    dim_keys=args.dim_keys,
-    dim_hidden=args.dim_hidden,
-    nb_heads=args.nb_heads,
-    nb_blocks=args.nb_blocks,
-    causal=True,
-    dropout=args.dropout,
-)
+    nb_validated = 0
 
-model.to(device)
+    while nb_validated < nb_to_create:
+        model_for_generation = models[torch.randint(len(models), (1,))]
+
+        c_quizzes = quiz_machine.generate_quizzes(
+            nb_to_create,
+            model_for_generation=model_for_generation,
+            temperature=args.generation_temperature,
+        )
+
+        c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]
+
+        if c_quizzes.size(0) > 0:
+            logproba = quiz_machine.logproba_of_solutions(models, c_quizzes)
+            recorded_quizzes_logprobas.append((c_quizzes, logproba))
+
+            validated_quizzes, validated_logprobas = valid_quizzes_and_logprobas(
+                recorded_quizzes_logprobas, standard_validity
+            )
+
+            if validated_quizzes is not None:
+                nb_validated = validated_quizzes.size(0)
+
+        log_string(
+            f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
+        )
+
+    # store the new c_quizzes which have been validated
+
+    quiz_machine.reverse_random_half_in_place(validated_quizzes)
+    quiz_machine.store_c_quizzes(validated_quizzes[:nb_for_train], for_train=True)
+    quiz_machine.store_c_quizzes(
+        validated_quizzes[nb_for_train:nb_to_create], for_train=False
+    )
+
+    ######################################################################
+    # save the log probas
+
+    file_name = os.path.join(
+        args.result_dir, f"culture_c_quiz_all_{n_epoch:04d}_logp.dat"
+    )
+
+    with open(file_name, "w") as logp_file:
+        for _, ll in recorded_quizzes_logprobas:
+            for l in ll:
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(s + "\n")
+
+    ######################################################################
+    # save images with their logprobas
+
+    vq = validated_quizzes[:72]
+    vl = validated_logprobas[:72]
+
+    if vq.size(0) > 0:
+        prefix = f"culture_c_quiz_{n_epoch:04d}"
+
+        file_name = os.path.join(args.result_dir, prefix + "_logp.dat")
+        with open(file_name, "w") as logp_file:
+            for l in vl:
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(s + "\n")
+
+        quiz_machine.save_quiz_illustrations(args.result_dir, prefix, vq)
 
-nb_parameters = sum(p.numel() for p in model.parameters())
-log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-nb_epochs_finished = 0
+models = []
 
-if args.no_checkpoint:
-    log_string(f"not trying to load checkpoint.")
+for k in range(args.nb_gpts):
+    log_string(f"creating model {k} and its w_quizzes")
+    model = mygpt.MyGPT(
+        vocabulary_size=vocabulary_size,
+        dim_model=args.dim_model,
+        dim_keys=args.dim_keys,
+        dim_hidden=args.dim_hidden,
+        nb_heads=args.nb_heads,
+        nb_blocks=args.nb_blocks,
+        causal=True,
+        dropout=args.dropout,
+    ).to(main_device)
 
-else:
-    try:
-        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-        checkpoint = torch.load(checkpoint_name)
-        nb_epochs_finished = checkpoint["nb_epochs_finished"]
-        model.load_state_dict(checkpoint["model_state"])
-        torch.set_rng_state(checkpoint["rng_state"])
-        if torch.cuda.is_available():
-            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
+    model.main_test_accuracy = 0.0
+    model.id = k
+
+    model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples)
+    quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)
+    model.test_w_quizzes = quiz_machine.generate_token_sequences(args.nb_test_samples)
+    quiz_machine.reverse_random_half_in_place(model.test_w_quizzes)
+
+    models.append(model)
 
-        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
+######################################################################
 
-    except FileNotFoundError:
-        log_string("starting from scratch.")
+if args.resume:
+    try:
+        for model in models:
+            filename = f"gpt_{model.id:03d}.pth"
+
+            try:
+                d = torch.load(os.path.join(args.result_dir, filename))
+                model.load_state_dict(d[0])
+                model.main_test_accuracy = d[1]
+                log_string(f"successfully loaded {filename}")
+            except FileNotFoundError:
+                log_string(f"cannot find {filename}")
+                pass
+
+        try:
+            filename = "c_quizzes.pth"
+            quiz_machine.load_c_quizzes(os.path.join(args.result_dir, filename))
+            log_string(f"successfully loaded {filename}")
+        except FileNotFoundError:
+            log_string(f"cannot find {filename}")
+            pass
 
     except:
-        log_string("error when loading the checkpoint.")
+        log_string(f"error when loading {filename}.")
         exit(1)
 
 ######################################################################
 
-if args.task == "expr" and args.expr_input_file is not None:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-        input_file=args.expr_input_file,
-    )
-
-    exit(0)
+nb_parameters = sum(p.numel() for p in models[0].parameters())
+log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
 # Compute the entropy of the training tokens
 
 token_count = 0
-for input in task.batches(split="train"):
-    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
+for input in quiz_machine.batches(models[0], split="train", desc="train-entropy"):
+    token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
+        (0, 1)
+    )
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
 train_set_perplexity = math.exp(entropy)
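
A note on the training loop introduced above: one_epoch implements plain gradient accumulation, where forward/backward passes run on physical batches of --physical_batch_size samples and the optimizer steps once every --batch_size samples (the asserts require the former to divide the latter). A minimal self-contained sketch of the same pattern, with a toy linear model standing in for the GPT:

    import torch
    from torch import nn

    batch_size, physical_batch_size = 100, 25
    assert batch_size % physical_batch_size == 0

    model = nn.Linear(10, 10)  # toy stand-in for mygpt.MyGPT
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)

    nb_samples = 0
    for _ in range(8):  # stand-in for quiz_machine.batches(...)
        input = torch.randn(physical_batch_size, 10)
        target = torch.randn(physical_batch_size, 10)

        if nb_samples % batch_size == 0:
            optimizer.zero_grad()

        loss = nn.functional.mse_loss(model(input), target)
        loss.backward()  # gradients accumulate over the physical batches

        nb_samples += input.size(0)

        if nb_samples % batch_size == 0:
            optimizer.step()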
@@ -616,9 +553,13 @@ if args.max_percents_of_test_in_train >= 0:
         yield s
 
     nb_test, nb_in_train = 0, 0
-    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
+    for test_subset in subsets_as_tuples(
+        quiz_machine.batches(models[0], split="test", desc="test-check"), 25000
+    ):
         in_train = set()
-        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
+        for train_subset in subsets_as_tuples(
+            quiz_machine.batches(models[0], split="train", desc="train-check"), 25000
+        ):
             in_train.update(test_subset.intersection(train_subset))
         nb_in_train += len(in_train)
         nb_test += len(test_subset)
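
The check above treats each quiz as a tuple of token ids and counts how many test sequences also occur in the train split; the assertion that follows aborts when the overlap exceeds --max_percents_of_test_in_train percent. A self-contained sketch of the same set-intersection idea, with random tensors as toy stand-ins for the quiz batches:

    import torch

    max_percents_of_test_in_train = 1

    train = torch.randint(10, (1000, 8))  # toy train quizzes
    test = torch.randint(10, (100, 8))  # toy test quizzes

    train_set = {tuple(q.tolist()) for q in train}
    test_set = {tuple(q.tolist()) for q in test}

    nb_in_train = len(test_set & train_set)
    nb_test = len(test_set)

    assert (
        nb_in_train <= max_percents_of_test_in_train * nb_test / 100
    ), f"More than {max_percents_of_test_in_train}% of test samples are in the train set"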
@@ -631,112 +572,88 @@ if args.max_percents_of_test_in_train >= 0:
         nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
     ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
 
-##############################
+######################################################################
 
-if args.learning_rate_schedule == "cos":
-    learning_rate_schedule = {}
-    for n_epoch in range(args.nb_epochs):
-        u = n_epoch / args.nb_epochs * math.pi
-        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
-    u = {
-        int(k): float(v)
-        for k, v in [
-            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
-        ]
-    }
-
-    learning_rate_schedule = {}
-    learning_rate = args.learning_rate
-    for n_epoch in range(args.nb_epochs):
-        if n_epoch in u:
-            learning_rate = u[n_epoch]
-        learning_rate_schedule[n_epoch] = learning_rate
-
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
-
-##############################
-
-nb_samples_seen = 0
-
-if nb_epochs_finished >= nb_epochs:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-    )
+nb_new_c_quizzes_for_train = args.nb_train_samples // 50
+nb_new_c_quizzes_for_test = args.nb_test_samples // 50
 
-for n_epoch in range(nb_epochs_finished, nb_epochs):
-    learning_rate = learning_rate_schedule[n_epoch]
+log_string(
+    f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}"
+)
 
-    log_string(f"learning_rate {learning_rate}")
+######################################################################
 
-    if args.optim == "sgd":
-        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
-    elif args.optim == "adam":
-        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
-    elif args.optim == "adamw":
-        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
-    else:
-        raise ValueError(f"Unknown optimizer {args.optim}.")
+if args.dirty_debug:
+    args.accuracy_to_make_c_quizzes = 0.0
+    args.nb_gpts = 2
+    nb_new_c_quizzes_for_train = 100
+    nb_new_c_quizzes_for_test = 10
 
-    model.train()
+    def standard_validity(logproba):
+        l = logproba.sort(dim=-1).values
+        return l[:, 0] < math.log(0.5)
 
-    nb_train_samples, acc_train_loss = 0, 0.0
 
-    for input in task.batches(split="train"):
-        input = input.to(device)
-        output = model(mygpt.BracketedSequence(input)).x
-        loss = F.cross_entropy(output.transpose(1, 2), input)
-        acc_train_loss += loss.item() * input.size(0)
-        nb_train_samples += input.size(0)
-        nb_samples_seen += input.size(0)
+######################################################################
 
-        optimizer.zero_grad()
-        loss.backward()
-        optimizer.step()
+for n_epoch in range(args.nb_epochs):
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
 
-    with torch.autograd.no_grad():
-        model.eval()
+    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
+    log_string(f"current_test_accuracies {cta}")
 
-        nb_test_samples, acc_test_loss = 0, 0.0
+    ##################################################
+    # Select, improve, and eval the worst model
 
-        for input in task.batches(split="test"):
-            input = input.to(device)
+    ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))
 
-            output = model(mygpt.BracketedSequence(input)).x
-            loss = F.cross_entropy(output.transpose(1, 2), input)
-            acc_test_loss += loss.item() * input.size(0)
-            nb_test_samples += input.size(0)
+    weakest_models = ranked_models[: len(gpus)]
 
-        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
-        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+    threads = []
 
-        log_string(
-            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+    for gpu, model in zip(gpus, weakest_models):
+        log_string(f"training model {model.id}")
+
+        t = threading.Thread(
+            target=one_epoch, daemon=True, args=(model, quiz_machine, gpu)
         )
 
-        task.produce_results(
-            n_epoch=n_epoch,
-            model=model,
-            result_dir=args.result_dir,
-            logger=log_string,
-            deterministic_synthesis=args.deterministic_synthesis,
+        threads.append(t)
+
+        t.start()
+
+    for t in threads:
+        t.join()
+
+    # Save the models to disk
+
+    for model in weakest_models:
+        filename = f"gpt_{model.id:03d}.pth"
+        torch.save(
+            (model.state_dict(), model.main_test_accuracy),
+            os.path.join(args.result_dir, filename),
         )
+        log_string(f"wrote {filename}")
 
-    checkpoint = {
-        "nb_epochs_finished": n_epoch + 1,
-        "model_state": model.state_dict(),
-        "rng_state": torch.get_rng_state(),
-    }
+    # Renew the training samples
 
-    if torch.cuda.is_available():
-        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+    for model in weakest_models:
+        quiz_machine.renew_w_quizzes(model, args.nb_train_samples)
+
+    ##################################################
+    # If all the models are good enough, generate new quizzes and
+    # re-compute the test errors
+
+    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
+        create_c_quizzes(
+            models,
+            quiz_machine,
+            nb_for_train=nb_new_c_quizzes_for_train,
+            nb_for_test=nb_new_c_quizzes_for_test,
+        )
 
-    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-    torch.save(checkpoint, checkpoint_name)
-    log_string(f"saved checkpoint {checkpoint_name}")
+        filename = "c_quizzes.pth"
+        quiz_machine.save_c_quizzes(os.path.join(args.result_dir, filename))
+        log_string(f"wrote {filename}")
 
 ######################################################################
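
For reference, the rule that decides whether a generated c-quiz is kept is standard_validity above: for each candidate quiz, the log-probabilities that the models assign to the correct answer are sorted in increasing order, and the quiz is accepted when the weakest model fails it (below log of --proba_not_understands) while the second weakest, hence every other model, solves it confidently (above log of --proba_understands). A standalone sketch with toy values:

    import math

    import torch

    proba_understands, proba_not_understands = 0.99, 0.5

    # one row per candidate quiz, one column per model (toy probabilities)
    logproba = torch.tensor(
        [
            [0.1, 0.995, 0.999],  # one model fails, the others understand
            [0.9, 0.99, 0.999],  # every model understands: too easy
            [0.1, 0.2, 0.999],  # two models fail: too hard or degenerate
        ]
    ).log()

    l = logproba.sort(dim=-1).values
    valid = (l[:, 0] < math.log(proba_not_understands)) & (
        l[:, 1] > math.log(proba_understands)
    )

    print(valid)  # tensor([ True, False, False])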