X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=ee4e9e5b3aea5fb5f6dcd426bbecbd7687bfbb35;hb=694923fcfa606cf8fc9ee6066ef4bbdea27003ce;hp=45fa68c9eeaf0a0c802d0825ad82c4c1833614e6;hpb=9df1bd18f930f6b4a30b94fed6de684d5ceae3b7;p=culture.git

diff --git a/main.py b/main.py
index 45fa68c..ee4e9e5 100755
--- a/main.py
+++ b/main.py
@@ -183,8 +183,8 @@ for n in vars(args):
 ######################################################################
 
 if args.check:
-    args.nb_train_samples = 500
-    args.nb_test_samples = 100
+    args.nb_train_samples = 25000
+    args.nb_test_samples = 1000
 
 if args.physical_batch_size is None:
     args.physical_batch_size = args.batch_size
@@ -335,23 +335,31 @@ def create_quizzes(
     task,
     nb_for_train=1000,
     nb_for_test=100,
+    desired_average_logits=None,
 ):
     kept = []
+    sum_logits = 0
+
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        new_quizzes, nb_correct = task.create_new_quizzes(
+        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
+        new_quizzes, nb_correct, _sum_logits = task.create_new_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
-            nb=4 * (nb_for_train + nb_for_test),
+            nb=nb_to_generate,
             model=model,
             other_models=other_models,
+            desired_average_logits=desired_average_logits,
         )
 
-        print(nb_correct)
+        sum_logits += _sum_logits
 
         to_keep = new_quizzes[nb_correct == len(other_models) - 1]
-        log_string(f"keep {to_keep.size(0)} quizzes")
+        log_string(
+            f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
+        )
         kept.append(to_keep)
 
     new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
 
@@ -366,6 +374,8 @@ def create_quizzes(
         log_string,
     )
 
+    return sum_logits / new_quizzes.size(0)
+
 
 ######################################################################
 
@@ -400,11 +410,15 @@ nb_new_quizzes_for_test = 100
 
 if args.check:
     accuracy_to_make_quizzes = 0.0
-    nb_new_quizzes_for_train = 10
+    nb_new_quizzes_for_train = 100
     nb_new_quizzes_for_test = 10
 
+desired_average_logits = None
+
 for n_epoch in range(args.nb_epochs):
-    a = [(model.id, model.main_test_accuracy) for model in models]
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
+    a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
     log_string(f"current accuracies {a}")
 
@@ -419,6 +433,8 @@ for n_epoch in range(args.nb_epochs):
     # improve it
     one_epoch(model, task)
 
+    task.renew_samples(args.nb_train_samples // args.nb_gpts)
+
     log_string(
         f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
     )
@@ -426,18 +442,31 @@ for n_epoch in range(args.nb_epochs):
     # test it
     run_tests(model, task, deterministic_synthesis=False)
 
-    if model.main_test_accuracy >= accuracy_to_make_quizzes:
+    log_string(
+        f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+    )
+
+    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes:
         other_models = models.copy()
         other_models.remove(model)
 
-        create_quizzes(
+        average_logits = create_quizzes(
             model,
             other_models,
             task,
             nb_for_train=nb_new_quizzes_for_train,
             nb_for_test=nb_new_quizzes_for_test,
+            desired_average_logits=desired_average_logits,
         )
 
+        # We keep the first average logits as a reference
+        if desired_average_logits is None:
+            desired_average_logits = average_logits
+        else:
+            log_string(
+                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
+            )
+
         # We update everyone
         for model in models:
             run_tests(model, task, deterministic_synthesis=False)
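
Editor's note, a minimal sketch that is not part of the commit: the diff threads a
desired_average_logits value through quiz creation. create_quizzes() now sums the
logits reported by task.create_new_quizzes() and returns their average over the
generated quizzes; the main loop stores the first such average as a reference and
passes it back on every later call, so generation can be steered toward a
comparable logit level across epochs. The stub below is hypothetical (the real
sampling lives in task.create_new_quizzes(), which is not shown in this diff);
only the feedback structure mirrors the commit.

    import torch

    def create_new_quizzes_stub(nb, desired_average_logits=None):
        # Hypothetical stand-in for task.create_new_quizzes(): returns
        # generated quizzes, a per-quiz count of models that solved them,
        # and the summed logits of the samples. Values here are random
        # placeholders, not real model output.
        new_quizzes = torch.randn(nb, 8)
        nb_correct = torch.randint(0, 3, (nb,))
        sum_logits = new_quizzes.sum().item()
        return new_quizzes, nb_correct, sum_logits

    desired_average_logits = None

    for n_epoch in range(3):
        nb = 16
        quizzes, nb_correct, sum_logits = create_new_quizzes_stub(
            nb, desired_average_logits=desired_average_logits
        )
        average_logits = sum_logits / nb

        # As in the diff: the first measured average becomes the reference
        # for all subsequent generations; later epochs only log the drift.
        if desired_average_logits is None:
            desired_average_logits = average_logits
        else:
            print(
                f"desired_average_logits {desired_average_logits} "
                f"average_logits {average_logits}"
            )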