X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=d0de5afd5b687a463ed9945604297ec712f97240;hb=2186d96fccfc525884f1b3fb722c40642891ab0a;hp=05c3557fe0e8158126506aad654a094645044b0a;hpb=336130cc923761658029a0af9d5862d59405d47a;p=culture.git

diff --git a/main.py b/main.py
index 05c3557..d0de5af 100755
--- a/main.py
+++ b/main.py
@@ -12,7 +12,8 @@ from torch import nn
 from torch.nn import functional as F

 import ffutils
-import mygpt, quizz_machine
+import mygpt
+import sky, quizz_machine

 # world quizzes vs. culture quizzes

@@ -57,7 +58,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)

 parser.add_argument("--nb_test_samples", type=int, default=None)

-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=1e-3)

 ########################################

@@ -102,7 +103,7 @@ if args.dirty_debug:
     default_args = {
         "model": "37M",
         "batch_size": 100,
-        "nb_train_samples": 250000,
+        "nb_train_samples": 100000,
         "nb_test_samples": 10000,
     }

@@ -210,6 +211,7 @@ assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0

 quizz_machine = quizz_machine.QuizzMachine(
+    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
@@ -347,55 +349,82 @@ def run_tests(model, quizz_machine, deterministic_synthesis):


 def create_c_quizzes(
-    model,
-    other_models,
+    models,
     quizz_machine,
     nb_for_train=1000,
     nb_for_test=100,
     min_ave_seq_logproba=None,
 ):
-    kept = []
+    # We store the generated quizzes, bucketed by their number of
+    # correct predictions
+    recorded = dict([(n, []) for n in range(len(models) + 1)])
+
+    model_indexes = []
     sum_logits, sum_nb_c_quizzes = 0, 0
+    nb_correct_to_validate = len(models) - 1

-    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+    while (
+        sum([x.size(0) for x in recorded[nb_correct_to_validate]])
+        < nb_for_train + nb_for_test
+    ):
+        nb_to_validate = nb_for_train + nb_for_test
+
+        if len(model_indexes) == 0:
+            model_indexes = [i.item() for i in torch.randperm(len(models))]
+
+        model = models[model_indexes.pop()]

         new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
+            nb=nb_to_validate,
+            model_for_generation=model,
+            models_for_validation=models,
+            min_ave_seq_logproba=min_ave_seq_logproba,
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
-            nb=nb_to_generate,
-            model=model,
-            other_models=other_models,
-            min_ave_seq_logproba=min_ave_seq_logproba,
         )

         sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
         sum_nb_c_quizzes += new_c_quizzes.size(0)

-        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
-
         if args.dirty_debug:
-            to_keep = new_c_quizzes
+            nb_correct = torch.randint(
+                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
+            )
+
+        for n in range(nb_correct.max() + 1):
+            recorded[n].append(new_c_quizzes[nb_correct == n].clone())
+
+        nb_validated = sum([x.size(0) for x in recorded[nb_correct_to_validate]])
+        nb_generated = sum(
+            [sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()]
+        )

         log_string(
-            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+            f"kept {nb_validated}/{nb_to_validate} c_quizzes ({nb_validated*100/nb_generated:.02f}% of those generated)"
         )

-        kept.append(to_keep)
+    # concatenate and shuffle; iterate over a copy of the keys since
+    # empty entries are deleted along the way
+    for n in list(recorded.keys()):
+        if len(recorded[n]) > 0:
+            q = torch.cat(recorded[n], dim=0)
+            q = q[torch.randperm(q.size(0), device=q.device)]
+            recorded[n] = q
+        else:
+            del recorded[n]

-    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+    new_c_quizzes = recorded[nb_correct_to_validate][: nb_for_train + nb_for_test]

     quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
     quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

-    quizz_machine.save_quizzes(
-        new_c_quizzes[:72],
-        args.result_dir,
-        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
-        log_string,
-    )
+    for n in recorded.keys():
+        s = "_validated" if n == nb_correct_to_validate else ""
+        quizz_machine.problem.save_quizzes(
+            recorded[n][:72],
+            args.result_dir,
+            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
+        )

     return sum_logits / sum_nb_c_quizzes

@@ -461,12 +490,8 @@ for n_epoch in range(args.nb_epochs):
         )

     if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
-        other_models = models.copy()
-        other_models.remove(model)
-
         ave_seq_logproba = create_c_quizzes(
-            model,
-            other_models,
+            models,
             quizz_machine,
             nb_for_train=nb_new_c_quizzes_for_train,
             nb_for_test=nb_new_c_quizzes_for_test,
@@ -474,12 +499,12 @@ for n_epoch in range(args.nb_epochs):
         )

         # We keep the first average logits as a reference
-        if min_ave_seq_logproba is None:
-            min_ave_seq_logproba = ave_seq_logproba
-        else:
-            log_string(
-                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
-            )
+        # if min_ave_seq_logproba is None:
+        #     min_ave_seq_logproba = ave_seq_logproba
+        # else:
+        #     log_string(
+        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+        #     )

     # We update everyone
     for model in models:
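
A minimal standalone sketch of the selection logic this patch introduces, for
reference: generated quizzes are bucketed by how many models predict them
correctly, and only the bucket in which all models but one succeed counts as
validated. Everything below is a toy reconstruction, not code from the
repository; `nb_models`, `quizzes`, and the random `nb_correct` are made-up
stand-ins for `len(models)`, `new_c_quizzes`, and the per-quiz correctness
counts returned by `quizz_machine.create_c_quizzes`.

    import torch

    nb_models = 5
    nb_quizzes = 12

    # stand-ins for new_c_quizzes (one row per quiz) and for the number of
    # models that answered each quiz correctly
    quizzes = torch.arange(nb_quizzes).view(nb_quizzes, 1)
    nb_correct = torch.randint(nb_models + 1, (nb_quizzes,))

    # bucket the quizzes by their number of correct predictions
    recorded = dict([(n, []) for n in range(nb_models + 1)])
    for n in range(nb_correct.max() + 1):
        recorded[n].append(quizzes[nb_correct == n].clone())

    # a quiz is validated when all models but one solve it
    nb_correct_to_validate = nb_models - 1

    # concatenate and shuffle each bucket; iterate over a copy of the keys
    # since empty buckets are deleted along the way
    for n in list(recorded.keys()):
        if len(recorded[n]) > 0:
            q = torch.cat(recorded[n], dim=0)
            recorded[n] = q[torch.randperm(q.size(0))]
        else:
            del recorded[n]

    validated = recorded.get(nb_correct_to_validate)
    print(0 if validated is None else validated.size(0), "validated quizzes")

The one-model-short threshold is the interesting design choice: a quiz that
every model solves teaches nothing new, while one that too few models solve is
likely noise, so the patch keeps quizzes hard enough to stump exactly one
model.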