X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=main.py;h=a6c482f746e2b15dbe750dd964d2edddf0266b44;hb=db8c21397d370ae16fd6078858c649e2ab14fe4e;hp=ebecad8a471353400fc7e6e472bdb95f594b48ab;hpb=15192743a5dee8d88650319d64610f1603d21472;p=culture.git

diff --git a/main.py b/main.py
index ebecad8..a6c482f 100755
--- a/main.py
+++ b/main.py
@@ -12,13 +12,13 @@ from torch import nn
 from torch.nn import functional as F
 
 import ffutils
-import mygpt, tasks
+import mygpt
+import sky, wireworld, quizz_machine
+
+# world quizzes vs. culture quizzes
 
 ######################################################################
 
-accuracy_to_make_c_quizzes = 0.975
-
 nb_new_c_quizzes_for_train = 1000
 nb_new_c_quizzes_for_test = 100
 
@@ -37,7 +37,7 @@ parser = argparse.ArgumentParser(
     formatter_class=argparse.ArgumentDefaultsHelpFormatter,
 )
 
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default=None)
 
@@ -57,7 +57,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=1e-3)
 
 ########################################
 
@@ -79,8 +79,20 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
+parser.add_argument("--problem", type=str, default="sky")
+
 parser.add_argument("--nb_gpts", type=int, default=5)
 
+parser.add_argument("--nb_models_for_generation", type=int, default=1)
+
+parser.add_argument("--generation_mode", type=str, default="groupthink")
+
+parser.add_argument("--min_to_validate", type=int, default=4)
+
+parser.add_argument("--max_to_validate", type=int, default=4)
+
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
@@ -93,7 +105,7 @@ if args.result_dir is None:
 ######################################################################
 
 if args.dirty_debug:
-    accuracy_to_make_c_quizzes = 0.0
+    args.accuracy_to_make_c_quizzes = 0.0
     nb_new_c_quizzes_for_train = 100
     nb_new_c_quizzes_for_test = 10
 
@@ -102,7 +114,7 @@ if args.dirty_debug:
 default_args = {
     "model": "37M",
     "batch_size": 100,
-    "nb_train_samples": 250000,
+    "nb_train_samples": 100000,
     "nb_test_samples": 10000,
 }
 
@@ -209,7 +221,15 @@ else:
 assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
-task = tasks.World(
+if args.problem == "sky":
+    problem = sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=3)
+elif args.problem == "wireworld":
+    problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
+else:
+    raise ValueError
+
+quizz_machine = quizz_machine.QuizzMachine(
+    problem=problem,
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
@@ -222,7 +242,7 @@ task = tasks.World(
 
 log_string(f"device {device}")
 
-vocabulary_size = task.vocabulary_size()
+vocabulary_size = quizz_machine.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
 
@@ -231,8 +251,10 @@ log_string(f"vocabulary_size {vocabulary_size}")
 
 # Compute the entropy of the training tokens
 token_count = 0
-for input in task.batches(split="train", desc="train-entropy"):
-    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+        (0, 1)
+    )
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
 train_set_perplexity = math.exp(entropy)
@@ -254,11 +276,11 @@ if args.max_percents_of_test_in_train >= 0:
     nb_test, nb_in_train = 0, 0
     for test_subset in subsets_as_tuples(
-        task.batches(split="test", desc="test-check"), 25000
+        quizz_machine.batches(split="test", desc="test-check"), 25000
     ):
         in_train = set()
         for train_subset in subsets_as_tuples(
-            task.batches(split="train", desc="train-check"), 25000
+            quizz_machine.batches(split="train", desc="train-check"), 25000
         ):
             in_train.update(test_subset.intersection(train_subset))
         nb_in_train += len(in_train)
@@ -275,14 +297,14 @@ if args.max_percents_of_test_in_train >= 0:
 ##############################
 
 
-def one_epoch(model, task):
+def one_epoch(model, quizz_machine):
     optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
     model.train()
 
     nb_train_samples, acc_train_loss = 0, 0.0
 
-    for input in task.batches(split="train"):
+    for input in quizz_machine.batches(split="train"):
         input = input.to(device)
 
         if nb_train_samples % args.batch_size == 0:
@@ -307,14 +329,14 @@ def one_epoch(model, task):
 ######################################################################
 
 
-def run_tests(model, task, deterministic_synthesis):
+def run_tests(model, quizz_machine, deterministic_synthesis):
     with torch.autograd.no_grad():
         model.eval()
 
         nb_test_samples, acc_test_loss = 0, 0.0
         nb_samples_accumulated = 0
 
-        for input in task.batches(split="test"):
+        for input in quizz_machine.batches(split="test"):
             input = input.to(device)
 
             bs = model(mygpt.BracketedSequence(input))
@@ -326,11 +348,10 @@ def run_tests(model, task, deterministic_synthesis):
 
             nb_test_samples += input.size(0)
 
-        main_test_accuracy = task.produce_results(
+        main_test_accuracy = quizz_machine.produce_results(
             n_epoch=n_epoch,
             model=model,
             result_dir=args.result_dir,
-            logger=log_string,
             deterministic_synthesis=deterministic_synthesis,
         )
 
@@ -345,55 +366,96 @@ def run_tests(model, task, deterministic_synthesis):
 
 
 def create_c_quizzes(
-    model,
-    other_models,
-    task,
+    models,
+    quizz_machine,
     nb_for_train=1000,
     nb_for_test=100,
-    desired_average_logits=None,
+    min_ave_seq_logproba=None,
 ):
-    kept = []
+    # We will store the generated quizzes for each number of
+    # correct predictions
+    recorded = dict([(n, []) for n in range(len(models) + 1)])
+
+    model_indexes = []
 
     sum_logits, sum_nb_c_quizzes = 0, 0
 
-    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+    def nb_generated():
+        return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])
 
-        new_c_quizzes, nb_correct, average_logits = task.create_c_quizzes(
+    def nb_validated():
+        return sum(
+            [
+                sum([x.size(0) for x in recorded[n]])
+                for n in range(args.min_to_validate, args.max_to_validate + 1)
+            ]
+        )
+
+    nb_to_create = nb_for_train + nb_for_test
+
+    while nb_validated() < nb_to_create:
+        (
+            new_c_quizzes,
+            nb_correct,
+            ave_seq_logproba,
+        ) = quizz_machine.gang_create_c_quizzes(
+            nb=nb_to_create,
+            nb_models_for_generation=args.nb_models_for_generation,
+            models=models,
+            mode=args.generation_mode,
+            min_ave_seq_logproba=min_ave_seq_logproba,
             n_epoch=n_epoch,
             result_dir=args.result_dir,
-            logger=log_string,
-            nb=nb_to_generate,
-            model=model,
-            other_models=other_models,
-            desired_average_logits=desired_average_logits,
         )
 
-        sum_logits += new_c_quizzes.size(0) * average_logits
+        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)
 
-        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
-
         if args.dirty_debug:
-            to_keep = new_c_quizzes
+            nb_correct = torch.randint(
+                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
+            )
+
+        for n in range(nb_correct.max() + 1):
+            recorded[n].append(new_c_quizzes[nb_correct == n].clone())
 
         log_string(
-            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+            f"keep c_quizzes {nb_validated()*100/nb_generated():.02f}% kept total {nb_validated()} / {nb_to_create}"
         )
 
-        kept.append(to_keep)
+    # concatenate and shuffle
+    for n in list(recorded.keys()):  # copy the keys: entries may be deleted below
+        if len(recorded[n]) > 0:
+            q = torch.cat(recorded[n], dim=0)
+            q = q[torch.randperm(q.size(0), device=q.device)]
+            recorded[n] = q
+        else:
+            del recorded[n]
 
-    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+    new_c_quizzes = torch.cat(
+        [recorded[n] for n in range(args.min_to_validate, args.max_to_validate + 1)],
+        dim=0,
+    )
 
-    task.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
-    task.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+    new_c_quizzes = new_c_quizzes[
+        torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
+            : nb_for_train + nb_for_test
+        ]
+    ]
 
-    task.save_quizzes(
-        new_c_quizzes[:72],
-        args.result_dir,
-        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
-        log_string,
-    )
+    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+
+    for n in recorded.keys():
+        s = (
+            "_validated"
+            if n >= args.min_to_validate and n <= args.max_to_validate
+            else ""
+        )
+        quizz_machine.problem.save_quizzes(
+            recorded[n][:72],
+            args.result_dir,
+            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
+        )
 
     return sum_logits / sum_nb_c_quizzes
 
@@ -425,14 +487,15 @@ log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-desired_average_logits = None
+min_ave_seq_logproba = None
 
 for n_epoch in range(args.nb_epochs):
     log_string(f"--- epoch {n_epoch} ----------------------------------------")
 
     a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
-    log_string(f"current accuracies {a}")
+    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
+    log_string(f"current accuracies {s}")
 
     # select the model with lowest accuracy
     models.sort(key=lambda model: model.main_test_accuracy)
@@ -443,45 +506,41 @@ for n_epoch in range(args.nb_epochs):
     )
 
     # improve it
-    one_epoch(model, task)
+    one_epoch(model, quizz_machine)
 
-    task.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
 
     log_string(
-        f"train_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
     # test it
-    run_tests(model, task, deterministic_synthesis=False)
+    run_tests(model, quizz_machine, deterministic_synthesis=False)
 
     log_string(
-        f"test_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
-    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
-        other_models = models.copy()
-        other_models.remove(model)
-
-        average_logits = create_c_quizzes(
-            model,
-            other_models,
-            task,
+    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
+        ave_seq_logproba = create_c_quizzes(
+            models,
+            quizz_machine,
             nb_for_train=nb_new_c_quizzes_for_train,
             nb_for_test=nb_new_c_quizzes_for_test,
-            desired_average_logits=desired_average_logits,
+            min_ave_seq_logproba=min_ave_seq_logproba,
         )
 
         # We keep the first average logits as a reference
-        if desired_average_logits is None:
-            desired_average_logits = average_logits
-        else:
-            log_string(
-                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
-            )
+        # if min_ave_seq_logproba is None:
+        #     min_ave_seq_logproba = ave_seq_logproba
+        # else:
+        #     log_string(
+        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+        #     )
 
         # We update everyone
        for model in models:
-            run_tests(model, task, deterministic_synthesis=False)
+            run_tests(model, quizz_machine, deterministic_synthesis=False)
 
######################################################################
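
The options added above can be exercised from the command line. A hypothetical invocation (flag names are taken from the parser definitions in this diff; the values shown are the parser defaults except for --problem and --result_dir):

python main.py --problem=wireworld --result_dir=results_wireworld \
    --nb_gpts=5 --nb_models_for_generation=1 --generation_mode=groupthink \
    --min_to_validate=4 --max_to_validate=4 --accuracy_to_make_c_quizzes=0.975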
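
For reference, the train-set perplexity logged near the top of main.py is the exponential of the empirical token entropy. A self-contained sketch of the same computation, with a toy list of batches standing in for quizz_machine.batches(...):

import math

import torch
from torch.nn import functional as F


def train_set_perplexity(batches, vocabulary_size):
    # Count how often each token id occurs over all training batches.
    token_count = 0
    for input in batches:  # input: (batch, length) LongTensor of token ids
        token_count += F.one_hot(input, num_classes=vocabulary_size).sum((0, 1))
    token_probas = token_count / token_count.sum()
    # xlogy returns 0 where the probability is 0, so unused tokens are safe.
    entropy = -torch.xlogy(token_probas, token_probas).sum()
    return math.exp(entropy)


# Toy usage: two batches of uniform random tokens from a 10-symbol
# vocabulary; the result should be close to 10.
batches = [torch.randint(10, (100, 50)) for _ in range(2)]
print(train_set_perplexity(batches, vocabulary_size=10))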
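
The validation scheme this diff introduces replaces the old rule "keep a c_quiz if the other models all answer it correctly" with bucketing by the number of models that answer correctly, keeping only the buckets between --min_to_validate and --max_to_validate. A minimal, self-contained sketch of that logic (not the repository code; bucket_and_validate and its arguments are illustrative names):

import torch


def bucket_and_validate(new_c_quizzes, nb_correct, nb_models,
                        min_to_validate=4, max_to_validate=4):
    # Bucket each candidate quiz by how many of the nb_models GPTs
    # predicted it correctly.
    recorded = {n: [] for n in range(nb_models + 1)}
    for n in range(nb_models + 1):
        recorded[n].append(new_c_quizzes[nb_correct == n])

    # Keep quizzes that an intermediate number of models solve: hard
    # enough to be informative, easy enough to be well-posed.
    validated = torch.cat(
        [torch.cat(recorded[n], dim=0)
         for n in range(min_to_validate, max_to_validate + 1)],
        dim=0,
    )

    # Shuffle before splitting into train/test stores.
    return validated[torch.randperm(validated.size(0))]


# Toy usage: 10 quizzes of 8 tokens, 5 models, random correctness counts.
quizzes = torch.randint(100, (10, 8))
nb_correct = torch.randint(6, (10,))
print(bucket_and_validate(quizzes, nb_correct, nb_models=5,
                          min_to_validate=3, max_to_validate=5).size())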