X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=main.py;h=97c71301fb8062cff3e66afb0bb00e4a815cce49;hb=61c98647a2d708c8f2c5f0d25bcf05df92e1233f;hp=71026c59640be3752f3a311dc6f58cbcf49b11d1;hpb=5d46a9bd7d032d90ef4c4b38ac3c9b5b66526527;p=culture.git diff --git a/main.py b/main.py index 71026c5..97c7130 100755 --- a/main.py +++ b/main.py @@ -5,14 +5,14 @@ # Written by Francois Fleuret -import math, sys, argparse, time, tqdm, os +import math, sys, argparse, time, tqdm, os, datetime, warnings import torch, torchvision from torch import nn from torch.nn import functional as F import ffutils -import mygpt, tasks +import mygpt, tasks, problems ###################################################################### @@ -29,12 +29,7 @@ parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) -parser.add_argument( - "--task", - type=str, - default="sandbox", - help="sandbox, picoclvr, mnist, maze, snake, stack, expr, rpl, world", -) +parser.add_argument("--task", type=str, default="world", help="world") parser.add_argument("--log_filename", type=str, default="train.log", help=" ") @@ -42,21 +37,25 @@ parser.add_argument("--result_dir", type=str, default=None) parser.add_argument("--seed", type=int, default=0) -parser.add_argument("--nb_epochs", type=int, default=None) +parser.add_argument("--max_percents_of_test_in_train", type=int, default=1) + +######################################## + +parser.add_argument("--nb_epochs", type=int, default=10000) parser.add_argument("--batch_size", type=int, default=None) +parser.add_argument("--physical_batch_size", type=int, default=None) + parser.add_argument("--nb_train_samples", type=int, default=None) parser.add_argument("--nb_test_samples", type=int, default=None) -parser.add_argument("--optim", type=str, default="adam") - parser.add_argument("--learning_rate", type=float, default=1e-4) -parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6") +######################################## -parser.add_argument("--model", type=str, default="37M") +parser.add_argument("--model", type=str, default=None) parser.add_argument("--dim_model", type=int, default=None) @@ -70,151 +69,26 @@ parser.add_argument("--nb_blocks", type=int, default=None) parser.add_argument("--dropout", type=float, default=0.1) -parser.add_argument("--deterministic_synthesis", action="store_true", default=False) - -parser.add_argument("--no_checkpoint", action="store_true", default=False) - -parser.add_argument("--overwrite_results", action="store_true", default=False) - -parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth") - -############################## -# picoclvr options - -parser.add_argument("--sandbox_level", type=int, default=0) - -parser.add_argument("--sandbox_levels_nb_items", type=int, default=25) - -parser.add_argument("--sandbox_levels_len_source", type=int, default=6) - -parser.add_argument("--sandbox_levels_len_result", type=int, default=8) - -############################## -# picoclvr options - -parser.add_argument("--picoclvr_nb_colors", type=int, default=5) - -parser.add_argument("--picoclvr_height", type=int, default=12) - -parser.add_argument("--picoclvr_width", type=int, default=16) - -parser.add_argument("--picocvlr_prune_properties", type=str, default="none") - -############################## -# Maze options - -parser.add_argument("--maze_height", type=int, default=23) - -parser.add_argument("--maze_width", type=int, default=39) - 
-parser.add_argument("--maze_nb_walls", type=int, default=45) - -############################## -# Snake options - -parser.add_argument("--snake_height", type=int, default=6) - -parser.add_argument("--snake_width", type=int, default=8) +######################################## -parser.add_argument("--snake_nb_colors", type=int, default=5) - -parser.add_argument("--snake_length", type=int, default=200) - -############################## -# Stack options - -parser.add_argument("--stack_nb_steps", type=int, default=100) - -parser.add_argument("--stack_nb_stacks", type=int, default=3) - -parser.add_argument("--stack_nb_digits", type=int, default=3) - -parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75) - -############################## -# Expr options - -parser.add_argument("--expr_nb_variables", type=int, default=5) - -parser.add_argument("--expr_sequence_length", type=int, default=40) - -parser.add_argument("--expr_operand_max", type=int, default=9) - -parser.add_argument("--expr_result_max", type=int, default=99) - -parser.add_argument("--expr_input_file", type=str, default=None) - -############################## -# World options - -parser.add_argument("--world_vqae_nb_epochs", type=int, default=25) +parser.add_argument("--deterministic_synthesis", action="store_true", default=False) ###################################################################### args = parser.parse_args() -assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"} - if args.result_dir is None: args.result_dir = f"results_{args.task}" ###################################################################### default_task_args = { - "sandbox": { - "nb_epochs": 50, - "batch_size": 25, - "nb_train_samples": 100000, - "nb_test_samples": 10000, - }, - "picoclvr": { - "nb_epochs": 25, - "batch_size": 25, - "nb_train_samples": 250000, - "nb_test_samples": 10000, - }, - "mnist": { - "nb_epochs": 25, - "batch_size": 10, - "nb_train_samples": 250000, - "nb_test_samples": 10000, - }, - "maze": { - "nb_epochs": 25, - "batch_size": 5, - "nb_train_samples": 250000, - "nb_test_samples": 10000, - }, - "snake": { - "nb_epochs": 5, - "batch_size": 25, + "world": { + "model": "37M", + "batch_size": 100, "nb_train_samples": 250000, "nb_test_samples": 10000, }, - "stack": { - "nb_epochs": 5, - "batch_size": 25, - "nb_train_samples": 100000, - "nb_test_samples": 1000, - }, - "expr": { - "nb_epochs": 40, - "batch_size": 25, - "nb_train_samples": 1000000, - "nb_test_samples": 10000, - }, - "rpl": { - "nb_epochs": 40, - "batch_size": 25, - "nb_train_samples": 100000, - "nb_test_samples": 10000, - }, - "world": { - "nb_epochs": 10, - "batch_size": 25, - "nb_train_samples": 25000, - "nb_test_samples": 1000, - }, } if args.task in default_task_args: @@ -232,6 +106,13 @@ default_model_args = { "nb_heads": 2, "nb_blocks": 2, }, + "4M": { + "dim_model": 256, + "dim_keys": 32, + "dim_hidden": 1024, + "nb_heads": 4, + "nb_blocks": 6, + }, "37M": { "dim_model": 512, "dim_keys": 64, @@ -267,9 +148,8 @@ else: try: os.mkdir(args.result_dir) except FileExistsError: - if not args.overwrite_results: - print(f"result directory {args.result_dir} already exists") - exit(1) + print(f"result directory {args.result_dir} already exists") + exit(1) log_file = open(os.path.join(args.result_dir, args.log_filename), "a") @@ -295,6 +175,8 @@ def log_string(s): sys.stdout.flush() +log_string(f"argv {' '.join(sys.argv)}") + for n in vars(args): log_string(f"args.{n} {getattr(args, n)}") @@ -302,51 +184,111 @@ for n in vars(args): 
###################################################################### -def picoclvr_pruner_horizontal_green(p): - return not ("green" in p and ("left" in p or "right" in p)) +if args.physical_batch_size is None: + args.physical_batch_size = args.batch_size +else: + assert args.batch_size % args.physical_batch_size == 0 + +assert args.nb_train_samples % args.batch_size == 0 +assert args.nb_test_samples % args.batch_size == 0 + +if args.task == "file": + assert ( + args.filetask_train_file is not None and args.filetask_test_file is not None + ), "You have to specify the task train and test files" + task = tasks.TaskFromFile( + args.filetask_train_file, + args.filetask_test_file, + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + shuffle=True, + device=device, + ) + args.max_percents_of_test_in_train = 0 +elif args.task == "byheart": + task = tasks.SandBox( + problem=problems.ProblemByHeart(separation=args.byheart_separation), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + args.max_percents_of_test_in_train = -1 -picoclvr_pruner_train = ( - picoclvr_pruner_horizontal_green - if args.picocvlr_prune_properties in {"train+eval"} - else None -) +elif args.task == "world": + task = tasks.World( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + result_dir=args.result_dir, + logger=log_string, + device=device, + ) + args.max_percents_of_test_in_train = -1 -picoclvr_pruner_eval = ( - (lambda p: not picoclvr_pruner_horizontal_green(p)) - if args.picocvlr_prune_properties in {"train+eval", "eval"} - else None -) +elif args.task == "learnop": + task = tasks.SandBox( + problem=problems.ProblemLearnOperator(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) -###################################################################### -if args.task == "sandbox": - if args.sandbox_level == 0: - problem = tasks.ProblemLevel0( - nb_sentences=args.sandbox_levels_nb_items, - len_prompt=args.sandbox_levels_len_source, - len_result=args.sandbox_levels_len_result, - ) - elif args.sandbox_level == 1: - problem = tasks.ProblemLevel1( - nb_operators=args.sandbox_levels_nb_items, - len_source=args.sandbox_levels_len_source, - len_result=args.sandbox_levels_len_result, - ) - elif args.sandbox_level == 2: - problem = tasks.ProblemLevel2( - len_source=args.sandbox_levels_len_source, - len_result=args.sandbox_levels_len_result, - ) - else: - raise ValueError(f"Unknown sandbox level {args.sandbox_level}") +elif args.task == "guessop": + task = tasks.SandBox( + problem=problems.ProblemGuessOperator(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + + +elif args.task == "twotargets": + task = tasks.SandBox( + problem=problems.ProblemTwoTargets(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) +elif args.task == "memory": task = tasks.SandBox( - problem, - # tasks.ProblemAddition(zero_padded=False, inverted_result=False), + problem=problems.ProblemMemory(), nb_train_samples=args.nb_train_samples, 
nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "mixing": + task = tasks.SandBox( + problem=problems.ProblemMixing( + hard=args.mixing_hard, random_start=not args.mixing_deterministic_start + ), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + logger=log_string, + device=device, + ) + +elif args.task == "addition": + task = tasks.SandBox( + problem=problems.ProblemAddition(), + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, logger=log_string, device=device, ) @@ -355,7 +297,7 @@ elif args.task == "picoclvr": task = tasks.PicoCLVR( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, height=args.picoclvr_height, width=args.picoclvr_width, nb_colors=args.picoclvr_nb_colors, @@ -369,7 +311,7 @@ elif args.task == "mnist": task = tasks.MNIST( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, device=device, ) @@ -377,18 +319,18 @@ elif args.task == "maze": task = tasks.Maze( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, height=args.maze_height, width=args.maze_width, nb_walls=args.maze_nb_walls, - device=device, + device="cpu", ) elif args.task == "snake": task = tasks.Snake( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, height=args.snake_height, width=args.snake_width, nb_colors=args.snake_nb_colors, @@ -401,7 +343,7 @@ elif args.task == "stack": task = tasks.Stack( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, logger=log_string, nb_steps=args.stack_nb_steps, nb_stacks=args.stack_nb_stacks, @@ -418,7 +360,7 @@ elif args.task == "expr": sequence_length=args.expr_sequence_length, operand_max=args.expr_operand_max, result_max=args.expr_result_max, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, device=device, ) @@ -426,17 +368,47 @@ elif args.task == "rpl": task = tasks.RPL( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, + batch_size=args.physical_batch_size, + nb_starting_values=args.rpl_nb_starting_values, + max_input=args.rpl_max_input, + prog_len=args.rpl_prog_len, + nb_runs=args.rpl_nb_runs, + no_prog=args.rpl_no_prog, logger=log_string, device=device, ) -elif args.task == "world": - task = tasks.World( +elif args.task == "grid": + task = tasks.Grid( nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, - batch_size=args.batch_size, - vqae_nb_epochs=args.world_vqae_nb_epochs, + batch_size=args.physical_batch_size, + size=args.grid_size, + fraction_play=args.grid_fraction_play, + logger=log_string, + device=device, + ) + +elif args.task == "qmlp": + task = tasks.QMLP( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + result_dir=args.result_dir, + logger=log_string, + device=device, + ) + +elif args.task == "greed": + task = tasks.Greed( + 
nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + height=args.greed_height, + width=args.greed_width, + T=args.greed_T, + nb_walls=args.greed_nb_walls, + nb_coins=args.greed_nb_coins, logger=log_string, device=device, ) @@ -452,207 +424,219 @@ vocabulary_size = task.vocabulary_size() log_string(f"vocabulary_size {vocabulary_size}") +###################################################################### + +# Compute the entropy of the training tokens + +token_count = 0 +for input in task.batches(split="train", desc="train-entropy"): + token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1)) +token_probas = token_count / token_count.sum() +entropy = -torch.xlogy(token_probas, token_probas).sum() +train_set_perplexity = math.exp(entropy) + +###################################################################### +# A bit of paranoia never hurts + +if args.max_percents_of_test_in_train >= 0: + + def subsets_as_tuples(batches, cs): + s = set() + for batch in batches: + for x in batch: + s.add(tuple([v.item() for v in x])) + if len(s) == cs: + yield s + s = set() + yield s + + nb_test, nb_in_train = 0, 0 + for test_subset in subsets_as_tuples( + task.batches(split="test", desc="test-check"), 25000 + ): + in_train = set() + for train_subset in subsets_as_tuples( + task.batches(split="train", desc="train-check"), 25000 + ): + in_train.update(test_subset.intersection(train_subset)) + nb_in_train += len(in_train) + nb_test += len(test_subset) + + log_string( + f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set" + ) + + assert ( + nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100 + ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set" + ############################## -model = mygpt.MyGPT( - vocabulary_size=vocabulary_size, - dim_model=args.dim_model, - dim_keys=args.dim_keys, - dim_hidden=args.dim_hidden, - nb_heads=args.nb_heads, - nb_blocks=args.nb_blocks, - causal=True, - dropout=args.dropout, -) -model.to(device) +def one_epoch(model, task): + optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate) -nb_parameters = sum(p.numel() for p in model.parameters()) -log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)") + model.train() -###################################################################### + nb_train_samples, acc_train_loss = 0, 0.0 -nb_epochs_finished = 0 + for input in task.batches(split="train"): + input = input.to(device) -if args.no_checkpoint: - log_string(f"not trying to load checkpoint.") + if nb_train_samples % args.batch_size == 0: + optimizer.zero_grad() -else: - try: - checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name) - checkpoint = torch.load(checkpoint_name) - nb_epochs_finished = checkpoint["nb_epochs_finished"] - model.load_state_dict(checkpoint["model_state"]) - torch.set_rng_state(checkpoint["rng_state"]) - if torch.cuda.is_available(): - torch.cuda.set_rng_state(checkpoint["cuda_rng_state"]) + output = model(mygpt.BracketedSequence(input)).x + loss = F.cross_entropy(output.transpose(1, 2), input) + acc_train_loss += loss.item() * input.size(0) - log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.") + nb_train_samples += input.size(0) - except FileNotFoundError: - log_string("starting from scratch.") + loss.backward() - except: - log_string("error when loading the checkpoint.") - exit(1) + if 
nb_train_samples % args.batch_size == 0: + optimizer.step() -###################################################################### + train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples)) -if args.task == "expr" and args.expr_input_file is not None: - task.produce_results( - nb_epochs_finished, - model, - args.result_dir, - log_string, - args.deterministic_synthesis, - args.expr_input_file, - ) + log_string(f"train_perplexity {n_epoch} {train_perplexity}") - exit(0) ###################################################################### -nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default -# Compute the entropy of the training tokens +def run_tests(model, task, deterministic_synthesis): + with torch.autograd.no_grad(): + model.eval() -token_count = 0 -for input in task.batches(split="train"): - token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1)) -token_probas = token_count / token_count.sum() -entropy = -torch.xlogy(token_probas, token_probas).sum() -train_set_perplexity = math.exp(entropy) + nb_test_samples, acc_test_loss = 0, 0.0 + nb_samples_accumulated = 0 -############################## + for input in task.batches(split="test"): + input = input.to(device) -# A bit of paranoia never hurts + bs = model(mygpt.BracketedSequence(input)) + output = bs.x -train_examples = {} + loss = F.cross_entropy(output.transpose(1, 2), input) + acc_test_loss += loss.item() * input.size(0) -for input in task.batches(split="train"): - assert input.dim() == 2 and input.dtype == torch.int64 - for x in input: - train_examples[x.sum().item()] = x + nb_test_samples += input.size(0) -nb_total, nb_collisions = 0, 0 -for input in task.batches(split="test"): - assert input.dim() == 2 and input.dtype == torch.int64 - for x in input: - nb_total += 1 - y = train_examples.get(x.sum().item()) - if y is not None: - if x.size() == y.size() and (x - y).abs().sum() == 0: - nb_collisions += 1 + main_test_accuracy = task.produce_results( + n_epoch=n_epoch, + model=model, + result_dir=args.result_dir, + logger=log_string, + deterministic_synthesis=deterministic_synthesis, + ) -del train_examples + test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples)) -log_string( - f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set" -) + log_string(f"test_perplexity {n_epoch} {test_perplexity}") -############################## + model.main_test_accuracy = main_test_accuracy -if args.learning_rate_schedule == "cos": - learning_rate_schedule = {} - for n_epoch in range(args.nb_epochs): - u = n_epoch / args.nb_epochs * math.pi - learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u)) -else: - u = { - int(k): float(v) - for k, v in [ - tuple(x.split(":")) for x in args.learning_rate_schedule.split(",") - ] - } - - learning_rate_schedule = {} - learning_rate = args.learning_rate - for n_epoch in range(args.nb_epochs): - if n_epoch in u: - learning_rate = u[n_epoch] - learning_rate_schedule[n_epoch] = learning_rate - -log_string(f"learning_rate_schedule {learning_rate_schedule}") -############################## +###################################################################### + + +def create_quizzes( + model, + other_models, + task, + nb_for_train=1000, + nb_for_test=100, +): + kept = [] + + while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test: + new_quizzes, nb_correct = task.create_new_quizzes( + n_epoch=n_epoch, + result_dir=args.result_dir, + logger=log_string, 
+ nb=4 * (nb_for_train + nb_for_test), + model=model, + other_models=other_models, + ) + + to_keep = new_quizzes[nb_correct == len(other_models) - 1] + log_string(f"keep {to_keep.size(0)} quizzes") + kept.append(to_keep) -nb_samples_seen = 0 + new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test] -if nb_epochs_finished >= nb_epochs: - task.produce_results( - nb_epochs_finished, - model, + task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True) + task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False) + + task.save_image( + new_quizzes[:96], args.result_dir, + f"world_new_{n_epoch:04d}_{model.id:02d}.png", log_string, - args.deterministic_synthesis, ) -for n_epoch in range(nb_epochs_finished, nb_epochs): - learning_rate = learning_rate_schedule[n_epoch] - log_string(f"learning_rate {learning_rate}") +###################################################################### - if args.optim == "sgd": - optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate) - elif args.optim == "adam": - optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) - elif args.optim == "adamw": - optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate) - else: - raise ValueError(f"Unknown optimizer {args.optim}.") +models = [] - model.train() +for k in range(5): + model = mygpt.MyGPT( + vocabulary_size=vocabulary_size, + dim_model=args.dim_model, + dim_keys=args.dim_keys, + dim_hidden=args.dim_hidden, + nb_heads=args.nb_heads, + nb_blocks=args.nb_blocks, + causal=True, + dropout=args.dropout, + ).to(device) - nb_train_samples, acc_train_loss = 0, 0.0 + model.main_test_accuracy = 0.0 + model.id = k - for input in task.batches(split="train"): - input = input.to(device) - output = model(mygpt.BracketedSequence(input)).x - loss = F.cross_entropy(output.transpose(1, 2), input) - acc_train_loss += loss.item() * input.size(0) - nb_train_samples += input.size(0) - nb_samples_seen += input.size(0) + models.append(model) - optimizer.zero_grad() - loss.backward() - optimizer.step() - with torch.autograd.no_grad(): - model.eval() +nb_parameters = sum(p.numel() for p in models[0].parameters()) +log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)") - nb_test_samples, acc_test_loss = 0, 0.0 +###################################################################### - for input in task.batches(split="test"): - input = input.to(device) +accuracy_to_make_quizzes = 0.975 - output = model(mygpt.BracketedSequence(input)).x - loss = F.cross_entropy(output.transpose(1, 2), input) - acc_test_loss += loss.item() * input.size(0) - nb_test_samples += input.size(0) +for n_epoch in range(args.nb_epochs): + # select the model with lowest accuracy + models.sort(key=lambda model: model.main_test_accuracy) + model = models[0] - train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples)) - test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples)) + log_string( + f"training model {model.id} main_test_accuracy {model.main_test_accuracy}" + ) - log_string( - f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}" - ) + # improve it + one_epoch(model, task) - task.produce_results( - n_epoch, model, args.result_dir, log_string, args.deterministic_synthesis - ) + log_string( + f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}" + ) - checkpoint = { - "nb_epochs_finished": n_epoch + 1, - "model_state": model.state_dict(), - 
"rng_state": torch.get_rng_state(), - } + # test it + run_tests(model, task, deterministic_synthesis=False) - if torch.cuda.is_available(): - checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state() + if model.main_test_accuracy >= accuracy_to_make_quizzes: + other_models = models.copy() + other_models.remove(model) + + create_quizzes( + model, + other_models, + task, + nb_for_train=1000, + nb_for_test=100, + ) - checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name) - torch.save(checkpoint, checkpoint_name) - log_string(f"saved checkpoint {checkpoint_name}") ######################################################################