X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=585cbdfc80116fe8210d3eaaf9ef71a1cb87aa15;hb=11ef362542f061cf5a0feb717877d60b4cd6b24a;hp=cb28a7d5d9addc2f85eb46930a74bd88763178c5;hpb=f78cdbad69a877df92df41094a9f3f1036a1582a;p=culture.git diff --git a/main.py b/main.py index cb28a7d..585cbdf 100755 --- a/main.py +++ b/main.py @@ -13,13 +13,12 @@ from torch.nn import functional as F import ffutils import mygpt -import sky, quizz_machine +import sky, reasoning, quiz_machine # world quizzes vs. culture quizzes ###################################################################### -accuracy_to_make_c_quizzes = 0.975 nb_new_c_quizzes_for_train = 1000 nb_new_c_quizzes_for_test = 100 @@ -38,7 +37,7 @@ parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) -parser.add_argument("--log_filename", type=str, default="train.log", help=" ") +parser.add_argument("--log_filename", type=str, default="train.log") parser.add_argument("--result_dir", type=str, default=None) @@ -80,21 +79,53 @@ parser.add_argument("--dropout", type=float, default=0.1) parser.add_argument("--deterministic_synthesis", action="store_true", default=False) +parser.add_argument("--problem", type=str, default="sky") + parser.add_argument("--nb_gpts", type=int, default=5) +parser.add_argument("--min_to_validate", type=int, default=None) + +parser.add_argument("--max_to_validate", type=int, default=None) + +parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975) + +parser.add_argument("--generation_temperature", type=float, default=2.0) + +parser.add_argument("--deterministic_validation", action="store_true", default=False) + +parser.add_argument("--bidirectional_validation", action="store_true", default=False) + parser.add_argument("--dirty_debug", action="store_true", default=False) ###################################################################### +parser.add_argument("--sky_height", type=int, default=6) + +parser.add_argument("--sky_width", type=int, default=8) + +parser.add_argument("--sky_nb_birds", type=int, default=3) + +parser.add_argument("--sky_nb_iterations", type=int, default=2) + +parser.add_argument("--sky_speed", type=int, default=3) + +###################################################################### + args = parser.parse_args() +if args.min_to_validate is None: + args.min_to_validate = args.nb_gpts - 1 + +if args.max_to_validate is None: + args.max_to_validate = args.nb_gpts - 1 + if args.result_dir is None: args.result_dir = f"results_culture" ###################################################################### if args.dirty_debug: - accuracy_to_make_c_quizzes = 0.0 + args.accuracy_to_make_c_quizzes = 0.0 nb_new_c_quizzes_for_train = 100 nb_new_c_quizzes_for_test = 10 @@ -210,10 +241,26 @@ else: assert args.nb_train_samples % args.batch_size == 0 assert args.nb_test_samples % args.batch_size == 0 -quizz_machine = quizz_machine.QuizzMachine( - problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2), +if args.problem == "sky": + problem = sky.Sky( + height=args.sky_height, + width=args.sky_width, + nb_birds=args.sky_nb_birds, + nb_iterations=args.sky_nb_iterations, + speed=args.sky_speed, + ) + back_accuracy = False +elif args.problem == "reasoning": + problem = reasoning.Reasoning(device=device) + back_accuracy = True +else: + raise ValueError + +quiz_machine = quiz_machine.QuizMachine( + problem=problem, nb_train_samples=args.nb_train_samples, nb_test_samples=args.nb_test_samples, + 
back_accuracy=back_accuracy, batch_size=args.physical_batch_size, result_dir=args.result_dir, logger=log_string, @@ -224,7 +271,7 @@ quizz_machine = quizz_machine.QuizzMachine( log_string(f"device {device}") -vocabulary_size = quizz_machine.vocabulary_size() +vocabulary_size = quiz_machine.vocabulary_size() log_string(f"vocabulary_size {vocabulary_size}") @@ -233,8 +280,8 @@ log_string(f"vocabulary_size {vocabulary_size}") # Compute the entropy of the training tokens token_count = 0 -for input in quizz_machine.batches(split="train", desc="train-entropy"): - token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum( +for input in quiz_machine.batches(split="train", desc="train-entropy"): + token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum( (0, 1) ) token_probas = token_count / token_count.sum() @@ -258,11 +305,11 @@ if args.max_percents_of_test_in_train >= 0: nb_test, nb_in_train = 0, 0 for test_subset in subsets_as_tuples( - quizz_machine.batches(split="test", desc="test-check"), 25000 + quiz_machine.batches(split="test", desc="test-check"), 25000 ): in_train = set() for train_subset in subsets_as_tuples( - quizz_machine.batches(split="train", desc="train-check"), 25000 + quiz_machine.batches(split="train", desc="train-check"), 25000 ): in_train.update(test_subset.intersection(train_subset)) nb_in_train += len(in_train) @@ -279,14 +326,14 @@ if args.max_percents_of_test_in_train >= 0: ############################## -def one_epoch(model, quizz_machine): +def one_epoch(model, quiz_machine): optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate) model.train() nb_train_samples, acc_train_loss = 0, 0.0 - for input in quizz_machine.batches(split="train"): + for input in quiz_machine.batches(split="train"): input = input.to(device) if nb_train_samples % args.batch_size == 0: @@ -311,14 +358,14 @@ def one_epoch(model, quizz_machine): ###################################################################### -def run_tests(model, quizz_machine, deterministic_synthesis): +def run_tests(model, quiz_machine, deterministic_synthesis): with torch.autograd.no_grad(): model.eval() nb_test_samples, acc_test_loss = 0, 0.0 nb_samples_accumulated = 0 - for input in quizz_machine.batches(split="test"): + for input in quiz_machine.batches(split="test"): input = input.to(device) bs = model(mygpt.BracketedSequence(input)) @@ -330,19 +377,24 @@ def run_tests(model, quizz_machine, deterministic_synthesis): nb_test_samples += input.size(0) - main_test_accuracy = quizz_machine.produce_results( + test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples)) + + log_string(f"test_perplexity {n_epoch} {test_perplexity}") + + model.main_test_accuracy = quiz_machine.produce_results( n_epoch=n_epoch, model=model, result_dir=args.result_dir, - logger=log_string, deterministic_synthesis=deterministic_synthesis, ) - test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples)) - log_string(f"test_perplexity {n_epoch} {test_perplexity}") +###################################################################### + - model.main_test_accuracy = main_test_accuracy +def valid_c_quizzes(recorded, criteria): + result = [q[criteria(c)] for q, c in recorded] + return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([]) ###################################################################### @@ -350,62 +402,92 @@ def run_tests(model, quizz_machine, deterministic_synthesis): def create_c_quizzes( models, - quizz_machine, + quiz_machine, 
nb_for_train=1000, nb_for_test=100, - min_ave_seq_logproba=None, ): - kept = [] - model_indexes = [] - sum_logits, sum_nb_c_quizzes = 0, 0 + quizzes_and_nb_correct_records = [] - while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test: - nb_to_generate = nb_for_train + nb_for_test + nb_to_create = nb_for_train + nb_for_test - if len(model_indexes) == 0: - model_indexes = [i.item() for i in torch.randperm(len(models))] + # ------------------------------------------------------------ - model = models[model_indexes.pop()] + standard_validity = lambda nb_correct: torch.logical_and( + nb_correct >= args.min_to_validate, nb_correct <= args.max_to_validate + ) - new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes( - nb=nb_to_generate, - model_for_generation=model, - models_for_validation=models, - min_ave_seq_logproba=min_ave_seq_logproba, - n_epoch=n_epoch, - result_dir=args.result_dir, - logger=log_string, - ) + file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat") + with open(file_name, "w") as logp_file: + while ( + valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity).size(0) + < nb_to_create + ): + # Select a model at random to generate the new quizzes - sum_logits += new_c_quizzes.size(0) * ave_seq_logproba - sum_nb_c_quizzes += new_c_quizzes.size(0) + model_for_generation = models[torch.randint(len(models), (1,))] - to_keep = new_c_quizzes[nb_correct == len(models) - 1] + c_quizzes = quiz_machine.generate_quizzes( + nb_to_create, + model_for_generation=model_for_generation, + temperature=args.generation_temperature, + ) - if args.dirty_debug: - to_keep = new_c_quizzes[ - torch.randint(3, (new_c_quizzes.size(0),), device=new_c_quizzes.device) - == 0 - ] + nb_correct, seq_logproba = quiz_machine.compute_correctness( + c_quizzes, + models, + bidirectional_validation=args.bidirectional_validation, + deterministic_validation=args.deterministic_validation, + ) - kept.append(to_keep) + for n, l in zip(nb_correct, seq_logproba): + s = " ".join([str(x.item()) for x in l]) + logp_file.write(f"{n} {s}\n") - log_string( - f"keep c_quizzes {to_keep.size(0)}/{new_c_quizzes.size(0)} ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%) total {sum([ x.size(0) for x in kept])}/{nb_to_generate}" - ) + if args.dirty_debug: + nb_correct = torch.randint( + len(models) + 1, nb_correct.size(), device=c_quizzes.device + ) - new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test] + quizzes_and_nb_correct_records.append((c_quizzes, nb_correct)) - quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True) - quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False) + nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0) + nv = " ".join([str(x.item()) for x in nv]) - quizz_machine.problem.save_quizzes( - new_c_quizzes[:72], - args.result_dir, - f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}", - ) + nb_validated = valid_c_quizzes( + quizzes_and_nb_correct_records, standard_validity + ).size(0) + + log_string( + f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}" + ) + + # store the new c_quizzes which have been validated + + new_c_quizzes = valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity) + + quiz_machine.reverse_random_half_in_place(new_c_quizzes) - return sum_logits / sum_nb_c_quizzes + quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True) + 
quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False) + + # save a bunch of images to investigate what quizzes with a + # certain nb of correct predictions look like + + for n in range(len(models) + 1): + s = ( + "_validated" + if n >= args.min_to_validate and n <= args.max_to_validate + else "" + ) + + q = valid_c_quizzes( + quizzes_and_nb_correct_records, criteria=lambda nb_correct: nb_correct == n + )[:72] + + if q.size(0) > 0: + quiz_machine.save_quizzes( + args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q + ) ###################################################################### @@ -435,59 +517,49 @@ log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)") ###################################################################### -min_ave_seq_logproba = None - for n_epoch in range(args.nb_epochs): log_string(f"--- epoch {n_epoch} ----------------------------------------") - a = [(model.id, float(model.main_test_accuracy)) for model in models] - a.sort(key=lambda p: p[0]) - log_string(f"current accuracies {a}") + cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models]) + log_string(f"current_test_accuracies {cta}") - # select the model with lowest accuracy - models.sort(key=lambda model: model.main_test_accuracy) - model = models[0] + # Select, improve, and eval the worst model + + weakest_model = min(models, key=lambda m: float(m.main_test_accuracy)) log_string( - f"training model {model.id} main_test_accuracy {model.main_test_accuracy}" + f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}" ) - # improve it - one_epoch(model, quizz_machine) - - quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts) + one_epoch(weakest_model, quiz_machine) log_string( - f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}" + f"train_set_composition w_quizzes {quiz_machine.nb_batch_w_quizzes} c_quizzes {quiz_machine.nb_batch_c_quizzes}" ) - # test it - run_tests(model, quizz_machine, deterministic_synthesis=False) + run_tests(weakest_model, quiz_machine, deterministic_synthesis=False) log_string( - f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}" + f"test_set_composition w_quizzes {quiz_machine.nb_batch_w_quizzes} c_quizzes {quiz_machine.nb_batch_c_quizzes}" ) - if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes: - ave_seq_logproba = create_c_quizzes( + # Replace a fraction of the w_quizzes with fresh ones + + quiz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts) + + # If all the models are good enough, generate new quizzes and + # re-compute the test errors + + if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes: + create_c_quizzes( models, - quizz_machine, + quiz_machine, nb_for_train=nb_new_c_quizzes_for_train, nb_for_test=nb_new_c_quizzes_for_test, - min_ave_seq_logproba=min_ave_seq_logproba, ) - # We keep the first average logits as a reference - # if min_ave_seq_logproba is None: - # min_ave_seq_logproba = ave_seq_logproba - # else: - # log_string( - # f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}" - # ) - - # We update everyone for model in models: - run_tests(model, quizz_machine, deterministic_synthesis=False) + run_tests(model, quiz_machine, deterministic_synthesis=False) ######################################################################
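
Note on the main change above: the patch replaces the fixed module-level accuracy_to_make_c_quizzes constant and the old acceptance rule (nb_correct == len(models) - 1) with a configurable acceptance window, --min_to_validate .. --max_to_validate, both defaulting to nb_gpts - 1. Below is a minimal, self-contained sketch, not part of the patch, showing how that window behaves in isolation; the dummy tensor shapes and demo data are invented purely for illustration.

# Standalone sketch (illustration only, not from main.py) of the new
# c-quiz acceptance window: a quiz is kept when the number of models
# that answered it correctly lies in [min_to_validate, max_to_validate].

import torch

nb_gpts = 5
min_to_validate = nb_gpts - 1  # default set in the patch
max_to_validate = nb_gpts - 1  # default set in the patch


def standard_validity(nb_correct):
    # Boolean mask: True where the correctness count falls in the window.
    return torch.logical_and(
        nb_correct >= min_to_validate, nb_correct <= max_to_validate
    )


def valid_c_quizzes(recorded, criteria):
    # 'recorded' is a list of (quizzes, nb_correct) pairs accumulated over
    # successive generation rounds; keep the rows whose count passes 'criteria'.
    result = [q[criteria(c)] for q, c in recorded]
    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])


# Dummy data: 8 "quizzes" of 10 tokens each, judged by the 5 models.
quizzes = torch.randint(0, 100, (8, 10))
nb_correct = torch.randint(0, nb_gpts + 1, (8,))

kept = valid_c_quizzes([(quizzes, nb_correct)], standard_validity)
print(f"kept {kept.size(0)} / {quizzes.size(0)} quizzes")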
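
With the defaults above, a run exercising the new options might look like
python main.py --problem sky --nb_gpts 5 --min_to_validate 4 --max_to_validate 4 --accuracy_to_make_c_quizzes 0.975
where only the option names and default values come from the diff; the exact invocation is assumed.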