Update.
diff --git a/main.py b/main.py
index dace5f2..97c7130 100755
--- a/main.py
+++ b/main.py
@@ -29,12 +29,7 @@ parser = argparse.ArgumentParser(
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument(
-    "--task",
-    type=str,
-    default="twotargets",
-    help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
-)
+parser.add_argument("--task", type=str, default="world", help="world")
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
 
@@ -46,7 +41,7 @@ parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
 
 ########################################
 
-parser.add_argument("--nb_epochs", type=int, default=50)
+parser.add_argument("--nb_epochs", type=int, default=10000)
 
 parser.add_argument("--batch_size", type=int, default=None)
 
@@ -56,12 +51,8 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--optim", type=str, default="adam")
-
 parser.add_argument("--learning_rate", type=float, default=1e-4)
 
 parser.add_argument("--learning_rate", type=float, default=1e-4)
 
-parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
-
 ########################################
 
 parser.add_argument("--model", type=str, default=None)
@@ -82,239 +73,22 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
-
-parser.add_argument("--resume", action="store_true", default=False)
-
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
-
-##############################
-# filetask
-
-parser.add_argument("--filetask_train_file", type=str, default=None)
-
-parser.add_argument("--filetask_test_file", type=str, default=None)
-
-##############################
-# rpl options
-
-parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
-
-parser.add_argument("--rpl_max_input", type=int, default=9)
-
-parser.add_argument("--rpl_prog_len", type=int, default=8)
-
-parser.add_argument("--rpl_nb_runs", type=int, default=5)
-
-parser.add_argument("--rpl_no_prog", action="store_true", default=False)
-
-##############################
-# grid options
-
-parser.add_argument("--grid_size", type=int, default=6)
-
-parser.add_argument("--grid_fraction_play", type=float, default=0)
-
-##############################
-# picoclvr options
-
-parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
-
-parser.add_argument("--picoclvr_height", type=int, default=12)
-
-parser.add_argument("--picoclvr_width", type=int, default=16)
-
-parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
-
-##############################
-# Maze options
-
-parser.add_argument("--maze_height", type=int, default=13)
-
-parser.add_argument("--maze_width", type=int, default=21)
-
-parser.add_argument("--maze_nb_walls", type=int, default=15)
-
-##############################
-# Snake options
-
-parser.add_argument("--snake_height", type=int, default=9)
-
-parser.add_argument("--snake_width", type=int, default=12)
-
-parser.add_argument("--snake_nb_colors", type=int, default=5)
-
-parser.add_argument("--snake_length", type=int, default=200)
-
-##############################
-# ByHeart options
-
-parser.add_argument("--byheart_separation", type=int, default=1)
-
-##############################
-# Stack options
-
-parser.add_argument("--stack_nb_steps", type=int, default=100)
-
-parser.add_argument("--stack_nb_stacks", type=int, default=3)
-
-parser.add_argument("--stack_nb_digits", type=int, default=3)
-
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
-
-##############################
-# Expr options
-
-parser.add_argument("--expr_nb_variables", type=int, default=5)
-
-parser.add_argument("--expr_sequence_length", type=int, default=40)
-
-parser.add_argument("--expr_operand_max", type=int, default=9)
-
-parser.add_argument("--expr_result_max", type=int, default=99)
-
-parser.add_argument("--expr_input_file", type=str, default=None)
-
-##############################
-# Mixing
-
-parser.add_argument("--mixing_hard", action="store_true", default=False)
-
-parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
-
-##############################
-# greed options
-
-parser.add_argument("--greed_height", type=int, default=5)
-
-parser.add_argument("--greed_width", type=int, default=7)
-
-parser.add_argument("--greed_T", type=int, default=25)
-
-parser.add_argument("--greed_nb_walls", type=int, default=5)
-
-parser.add_argument("--greed_nb_coins", type=int, default=2)
-
 ######################################################################
 
 args = parser.parse_args()
 
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
-
 if args.result_dir is None:
     args.result_dir = f"results_{args.task}"
 
 ######################################################################
 
 default_task_args = {
-    "file": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "addition": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "byheart": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "expr": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 2500000,
-        "nb_test_samples": 10000,
-    },
-    "grid": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "qmlp": {
-        "model": "37M",
-        "batch_size": 10,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
-    },
-    "guessop": {
-        "model": "352M",
-        "batch_size": 25,
-        "nb_train_samples": 1000000,
-        "nb_test_samples": 10000,
-    },
-    "learnop": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "maze": {
-        "model": "37M",
-        "batch_size": 5,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 10000,
-    },
-    "picoclvr": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "rpl": {
-        "model": "352M",
-        "batch_size": 5,
-        "nb_train_samples": 2500000,
-        "nb_test_samples": 10000,
-    },
-    "snake": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 250000,
-        "nb_test_samples": 10000,
-    },
-    "stack": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 100000,
-        "nb_test_samples": 1000,
-    },
-    "twotargets": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 50000,
-        "nb_test_samples": 10000,
-    },
-    "memory": {
+    "world": {
         "model": "37M",
         "batch_size": 100,
         "model": "37M",
         "batch_size": 100,
-        "nb_train_samples": 25000,
-        "nb_test_samples": 1000,
-    },
-    "mixing": {
-        "model": "37M",
-        "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
-    "mnist": {
-        "model": "37M",
-        "batch_size": 10,
-        "nb_train_samples": 60000,
-        "nb_test_samples": 10000,
-    },
-    "greed": {
-        "model": "37M",
-        "batch_size": 25,
-        "nb_train_samples": 25000,
-        "nb_test_samples": 10000,
-    },
 }
 
 if args.task in default_task_args:
@@ -374,9 +148,8 @@ else:
 try:
     os.mkdir(args.result_dir)
 except FileExistsError:
-    if not args.resume:
-        print(f"result directory {args.result_dir} already exists")
-        exit(1)
+    print(f"result directory {args.result_dir} already exists")
+    exit(1)
 
 log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
 
@@ -411,24 +184,6 @@ for n in vars(args):
 ######################################################################
 
 
-def picoclvr_pruner_horizontal_green(p):
-    return not ("green" in p and ("left" in p or "right" in p))
-
-
-picoclvr_pruner_train = (
-    picoclvr_pruner_horizontal_green
-    if args.picocvlr_prune_properties in {"train+eval"}
-    else None
-)
-
-picoclvr_pruner_eval = (
-    (lambda p: not picoclvr_pruner_horizontal_green(p))
-    if args.picocvlr_prune_properties in {"train+eval", "eval"}
-    else None
-)
-
-######################################################################
-
 if args.physical_batch_size is None:
     args.physical_batch_size = args.batch_size
 else:
@@ -463,6 +218,17 @@ elif args.task == "byheart":
     )
     args.max_percents_of_test_in_train = -1
 
+elif args.task == "world":
+    task = tasks.World(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.physical_batch_size,
+        result_dir=args.result_dir,
+        logger=log_string,
+        device=device,
+    )
+    args.max_percents_of_test_in_train = -1
+
 elif args.task == "learnop":
     task = tasks.SandBox(
         problem=problems.ProblemLearnOperator(),
@@ -658,64 +424,6 @@ vocabulary_size = task.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
 
-##############################
-
-model = mygpt.MyGPT(
-    vocabulary_size=vocabulary_size,
-    dim_model=args.dim_model,
-    dim_keys=args.dim_keys,
-    dim_hidden=args.dim_hidden,
-    nb_heads=args.nb_heads,
-    nb_blocks=args.nb_blocks,
-    causal=True,
-    dropout=args.dropout,
-)
-
-model.to(device)
-
-nb_parameters = sum(p.numel() for p in model.parameters())
-log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
-
-######################################################################
-
-nb_epochs_finished = 0
-
-if args.no_checkpoint:
-    log_string(f"not trying to load checkpoint.")
-
-else:
-    try:
-        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-        checkpoint = torch.load(checkpoint_name)
-        nb_epochs_finished = checkpoint["nb_epochs_finished"]
-        model.load_state_dict(checkpoint["model_state"])
-        torch.set_rng_state(checkpoint["rng_state"])
-        if torch.cuda.is_available():
-            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
-
-        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
-
-    except FileNotFoundError:
-        log_string("starting from scratch.")
-
-    except:
-        log_string("error when loading the checkpoint.")
-        exit(1)
-
-######################################################################
-
-if args.task == "expr" and args.expr_input_file is not None:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-        input_file=args.expr_input_file,
-    )
-
-    exit(0)
-
 ######################################################################
 
 # Compute the entropy of the training tokens
@@ -764,58 +472,13 @@ if args.max_percents_of_test_in_train >= 0:
 
 ##############################
 
-if args.learning_rate_schedule == "cos":
-    learning_rate_schedule = {}
-    for n_epoch in range(args.nb_epochs):
-        u = n_epoch / args.nb_epochs * math.pi
-        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
-    u = {
-        int(k): float(v)
-        for k, v in [
-            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
-        ]
-    }
-
-    learning_rate_schedule = {}
-    learning_rate = args.learning_rate
-    for n_epoch in range(args.nb_epochs):
-        if n_epoch in u:
-            learning_rate = u[n_epoch]
-        learning_rate_schedule[n_epoch] = learning_rate
-
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
-
-##############################
-
-if nb_epochs_finished >= args.nb_epochs:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-    )
-
-time_pred_result = None
-
-for n_epoch in range(nb_epochs_finished, args.nb_epochs):
-    learning_rate = learning_rate_schedule[n_epoch]
 
-    log_string(f"learning_rate {learning_rate}")
-
-    if args.optim == "sgd":
-        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
-    elif args.optim == "adam":
-        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
-    elif args.optim == "adamw":
-        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
-    else:
-        raise ValueError(f"Unknown optimizer {args.optim}.")
+def one_epoch(model, task):
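+    # One pass over the training set: a fresh Adam optimizer is created
+    # at every call, gradients are accumulated over physical batches, and
+    # a step is taken once a full batch_size of samples has been seen.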
+    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
     model.train()
 
-    nb_train_samples, acc_train_loss_ar, acc_train_loss_ae = 0, 0.0, 0.0
+    nb_train_samples, acc_train_loss = 0, 0.0
 
     for input in task.batches(split="train"):
         input = input.to(device)
 
@@ -823,95 +486,157 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
         if nb_train_samples % args.batch_size == 0:
             optimizer.zero_grad()
 
-        if args.autoencoder_weight > 0:
-            bs_ar, bs_ae = model(mygpt.BracketedSequence(input), autoencoder=True)
-            output_ar, output_ae = bs_ar.x, bs_ae.x
-            loss_ar = F.cross_entropy(output_ar.transpose(1, 2), input)
-            loss_ae = F.cross_entropy(output_ae[:, 1:].transpose(1, 2), input[:, :-1])
-        else:
-            output = model(mygpt.BracketedSequence(input)).x
-            loss_ar = F.cross_entropy(output.transpose(1, 2), input)
-            loss_ae = loss_ar.new_full((1,), 0.0)
-
-        acc_train_loss_ar += loss_ar.item() * input.size(0)
-        acc_train_loss_ae += loss_ae.item() * input.size(0)
+        output = model(mygpt.BracketedSequence(input)).x
+        loss = F.cross_entropy(output.transpose(1, 2), input)
+        acc_train_loss += loss.item() * input.size(0)
 
         nb_train_samples += input.size(0)
 
-        (loss_ar + args.autoencoder_weight * loss_ae).backward()
+        loss.backward()
 
         if nb_train_samples % args.batch_size == 0:
             optimizer.step()
 
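+    # The average loss is clamped at 100 before exponentiating so the
+    # perplexity cannot overflow.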
+    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+    log_string(f"train_perplexity {n_epoch} {train_perplexity}")
+
+
+######################################################################
+
+
+def run_tests(model, task, deterministic_synthesis):
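+    # Compute the test perplexity and let the task produce its results;
+    # the resulting accuracy is stored on the model itself so the main
+    # loop can always pick the weakest model to train next.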
     with torch.autograd.no_grad():
         model.eval()
 
-        nb_test_samples, acc_test_loss_ar, acc_test_loss_ae = 0, 0.0, 0.0
+        nb_test_samples, acc_test_loss = 0, 0.0
         nb_samples_accumulated = 0
 
         for input in task.batches(split="test"):
             input = input.to(device)
 
-            if args.autoencoder_weight > 0:
-                bs_ar, bs_ae = model(mygpt.BracketedSequence(input), autoencoder=True)
-                output_ar, output_ae = bs_ar.x, bs_ae.x
-                loss_ae = F.cross_entropy(
-                    output_ae[:, 1:].transpose(1, 2), input[:, :-1]
-                )
-                acc_test_loss_ae += loss_ae.item() * input.size(0)
-            else:
-                bs_ar = model(mygpt.BracketedSequence(input))
-                output_ar = bs_ar.x
+            bs = model(mygpt.BracketedSequence(input))
+            output = bs.x
 
-            loss_ar = F.cross_entropy(output_ar.transpose(1, 2), input)
+            loss = F.cross_entropy(output.transpose(1, 2), input)
 
-            acc_test_loss_ar += loss_ar.item() * input.size(0)
+            acc_test_loss += loss.item() * input.size(0)
 
             nb_test_samples += input.size(0)
 
-        train_ar_perplexity = math.exp(min(100, acc_train_loss_ar / nb_train_samples))
-        test_ar_perplexity = math.exp(min(100, acc_test_loss_ar / nb_test_samples))
-
-        log_string(
-            f"perplexity_ar {n_epoch} train_set {train_set_perplexity} train_prediction {train_ar_perplexity} test_prediction {test_ar_perplexity}"
+        main_test_accuracy = task.produce_results(
+            n_epoch=n_epoch,
+            model=model,
+            result_dir=args.result_dir,
+            logger=log_string,
+            deterministic_synthesis=deterministic_synthesis,
         )
 
-        if args.autoencoder_weight > 0:
-            train_ae_perplexity = math.exp(
-                min(100, acc_train_loss_ae / nb_train_samples)
-            )
-            test_ae_perplexity = math.exp(min(100, acc_test_loss_ae / nb_test_samples))
+        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+
+        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
 
-            log_string(
-                f"perplexity_ae {n_epoch} train_set {train_set_perplexity} train_prediction {train_ae_perplexity} test_prediction {test_ae_perplexity}"
-            )
+    model.main_test_accuracy = main_test_accuracy
+
+
+######################################################################
 
-        task.produce_results(
+
+def create_quizzes(
+    model,
+    other_models,
+    task,
+    nb_for_train=1000,
+    nb_for_test=100,
+):
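+    # Generate batches of 4x the required number of quizzes and keep the
+    # ones that exactly len(other_models) - 1 of the other models answer
+    # correctly, until enough have been collected; the kept quizzes are
+    # then split between the train and test sets.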
+    kept = []
+
+    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
+        new_quizzes, nb_correct = task.create_new_quizzes(
             n_epoch=n_epoch,
-            model=model,
             result_dir=args.result_dir,
             logger=log_string,
-            deterministic_synthesis=args.deterministic_synthesis,
+            nb=4 * (nb_for_train + nb_for_test),
+            model=model,
+            other_models=other_models,
         )
 
-        time_current_result = datetime.datetime.now()
-        if time_pred_result is not None:
-            log_string(
-                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
-            )
-        time_pred_result = time_current_result
+        to_keep = new_quizzes[nb_correct == len(other_models) - 1]
+        log_string(f"keep {to_keep.size(0)} quizzes")
+        kept.append(to_keep)
 
-    checkpoint = {
-        "nb_epochs_finished": n_epoch + 1,
-        "model_state": model.state_dict(),
-        "rng_state": torch.get_rng_state(),
-    }
+    new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
 
-    if torch.cuda.is_available():
-        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+    task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
+    task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)
+
+    task.save_image(
+        new_quizzes[:96],
+        args.result_dir,
+        f"world_new_{n_epoch:04d}_{model.id:02d}.png",
+        log_string,
+    )
+
+
+######################################################################
+
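+# Build a population of five identical GPTs; each model carries its own
+# main_test_accuracy and an id used to name the images it generates.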
+models = []
+
+for k in range(5):
+    model = mygpt.MyGPT(
+        vocabulary_size=vocabulary_size,
+        dim_model=args.dim_model,
+        dim_keys=args.dim_keys,
+        dim_hidden=args.dim_hidden,
+        nb_heads=args.nb_heads,
+        nb_blocks=args.nb_blocks,
+        causal=True,
+        dropout=args.dropout,
+    ).to(device)
+
+    model.main_test_accuracy = 0.0
+    model.id = k
+
+    models.append(model)
+
+
+nb_parameters = sum(p.numel() for p in models[0].parameters())
+log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
+
+######################################################################
+
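+# A model starts generating new quizzes for the others once its test
+# accuracy reaches this threshold.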
+accuracy_to_make_quizzes = 0.975
+
+for n_epoch in range(args.nb_epochs):
+    # select the model with lowest accuracy
+    models.sort(key=lambda model: model.main_test_accuracy)
+    model = models[0]
+
+    log_string(
+        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+    )
+
+    # improve it
+    one_epoch(model, task)
+
+    log_string(
+        f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+    )
+
+    # test it
+    run_tests(model, task, deterministic_synthesis=False)
+
+    if model.main_test_accuracy >= accuracy_to_make_quizzes:
+        other_models = models.copy()
+        other_models.remove(model)
+
+        create_quizzes(
+            model,
+            other_models,
+            task,
+            nb_for_train=1000,
+            nb_for_test=100,
+        )
 
-    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-    torch.save(checkpoint, checkpoint_name)
-    log_string(f"saved checkpoint {checkpoint_name}")
 
 ######################################################################