diff --git a/main.py b/main.py
index 683c07d..8033836 100755
--- a/main.py
+++ b/main.py
@@ -73,7 +73,7 @@ parser.add_argument("--deterministic_synthesis", action="store_true", default=Fa
 
 parser.add_argument("--nb_gpts", type=int, default=5)
 
-parser.add_argument("--check", action="store_true", default=False)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
@@ -182,7 +182,7 @@ for n in vars(args):
 
 ######################################################################
 
-if args.check:
+if args.dirty_debug:
     args.nb_train_samples = 2500
     args.nb_test_samples = 100
 
@@ -335,23 +335,37 @@ def create_quizzes(
     task,
     nb_for_train=1000,
     nb_for_test=100,
+    desired_average_logits=None,
 ):
     kept = []
 
+    sum_logits, sum_nb_quizzes = 0, 0
+
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        new_quizzes, nb_correct = task.create_new_quizzes(
+        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
+        new_quizzes, nb_correct, average_logits = task.create_new_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
-            nb=4 * (nb_for_train + nb_for_test),
+            nb=nb_to_generate,
             model=model,
             other_models=other_models,
+            desired_average_logits=desired_average_logits,
         )
 
-        print(nb_correct)
+        sum_logits += new_quizzes.size(0) * average_logits
+        sum_nb_quizzes += new_quizzes.size(0)
 
         to_keep = new_quizzes[nb_correct == len(other_models) - 1]
-        log_string(f"keep {to_keep.size(0)} quizzes")
+
+        if args.dirty_debug:
+            to_keep = new_quizzes
+
+        log_string(
+            f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
+        )
+
         kept.append(to_keep)
 
     new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
@@ -366,6 +380,8 @@ def create_quizzes(
         log_string,
     )
 
+    return sum_logits / sum_nb_quizzes
+
 
 ######################################################################
 
@@ -398,12 +414,16 @@ accuracy_to_make_quizzes = 0.975
 nb_new_quizzes_for_train = 1000
 nb_new_quizzes_for_test = 100
 
-if args.check:
+if args.dirty_debug:
     accuracy_to_make_quizzes = 0.0
-    nb_new_quizzes_for_train = 10
+    nb_new_quizzes_for_train = 100
     nb_new_quizzes_for_test = 10
 
+desired_average_logits = None
+
 for n_epoch in range(args.nb_epochs):
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
     a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
     log_string(f"current accuracies {a}")
@@ -428,18 +448,31 @@ for n_epoch in range(args.nb_epochs):
     # test it
     run_tests(model, task, deterministic_synthesis=False)
 
+    log_string(
+        f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+    )
+
     if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes:
         other_models = models.copy()
         other_models.remove(model)
 
-        create_quizzes(
+        average_logits = create_quizzes(
             model,
             other_models,
             task,
             nb_for_train=nb_new_quizzes_for_train,
             nb_for_test=nb_new_quizzes_for_test,
+            desired_average_logits=desired_average_logits,
        )
 
+        # We keep the first average logits as a reference
+        if desired_average_logits is None:
+            desired_average_logits = average_logits
+        else:
+            log_string(
+                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
+            )
+
     # We update everyone
     for model in models:
         run_tests(model, task, deterministic_synthesis=False)
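Note on the new bookkeeping: create_quizzes now accumulates a size-weighted average of the logits of the generated quizzes and returns it, and the training loop keeps the first returned value as desired_average_logits, which later calls receive as a target, presumably so that later generations stay at comparable confidence rather than drifting as the models improve. Below is a minimal standalone sketch of that logic; the (batch size, mean logit) pairs are hypothetical stand-ins for what successive task.create_new_quizzes calls would report.

# Sketch of the size-weighted logit average kept by create_quizzes and
# of the reference capture done by the training loop. All numeric values
# are hypothetical.
def weighted_average_logits(batches):
    sum_logits, sum_nb_quizzes = 0, 0
    for nb_quizzes, average_logits in batches:
        sum_logits += nb_quizzes * average_logits  # weight each batch by its size
        sum_nb_quizzes += nb_quizzes
    return sum_logits / sum_nb_quizzes

desired_average_logits = None

for epoch_batches in [
    [(4400, -1.25), (4400, -1.10)],  # first epoch that creates quizzes
    [(4400, -0.95)],                 # a later epoch
]:
    average_logits = weighted_average_logits(epoch_batches)
    if desired_average_logits is None:
        # We keep the first average logits as a reference
        desired_average_logits = average_logits
    else:
        print(f"desired_average_logits {desired_average_logits} average_logits {average_logits}")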