X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=a021a71c76ed7525c40db2b01ef11b6e9b1e9059;hb=2a9d74f8b24441dc56505f9bdce8a2974e2552cb;hp=b6f278359f149446b416cbb4a7d6f632f5e13f96;hpb=36a1440d01cc15643849f5ba421f89ac403ccd82;p=culture.git

diff --git a/main.py b/main.py
index b6f2783..a021a71 100755
--- a/main.py
+++ b/main.py
@@ -183,7 +183,7 @@ for n in vars(args):
 ######################################################################

 if args.check:
-    args.nb_train_samples = 500
+    args.nb_train_samples = 2500
     args.nb_test_samples = 100

 if args.physical_batch_size is None:
@@ -335,23 +335,30 @@ def create_quizzes(
     task,
     nb_for_train=1000,
     nb_for_test=100,
+    desired_average_logits=None,
 ):
     kept = []

+    nb_generated_tokens, sum_logits = 0, 0
+
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
-        new_quizzes, nb_correct = task.create_new_quizzes(
+        nb_to_generate = 4 * (nb_for_train + nb_for_test)
+        new_quizzes, nb_correct, average_logits = task.create_new_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
-            nb=4 * (nb_for_train + nb_for_test),
+            nb=nb_to_generate,
             model=model,
             other_models=other_models,
+            desired_average_logits=desired_average_logits,
         )

-        print(nb_correct)
+        nb_generated_tokens += new_quizzes.numel()
+        sum_logits += average_logits * new_quizzes.numel()

         to_keep = new_quizzes[nb_correct == len(other_models) - 1]
-        log_string(f"keep {to_keep.size(0)} quizzes")
+        log_string(
+            f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
+        )
         kept.append(to_keep)

     new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
@@ -366,6 +373,8 @@ def create_quizzes(
         log_string,
     )

+    return sum_logits / nb_generated_tokens
+

 ######################################################################

@@ -403,8 +412,10 @@ if args.check:
     nb_new_quizzes_for_train = 10
     nb_new_quizzes_for_test = 10

+desired_average_logits = None
+
 for n_epoch in range(args.nb_epochs):
-    a = [(model.id, model.main_test_accuracy.item()) for model in models]
+    a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
     log_string(f"current accuracies {a}")

@@ -419,6 +430,8 @@ for n_epoch in range(args.nb_epochs):
     # improve it
     one_epoch(model, task)

+    task.renew_samples(args.nb_train_samples // args.nb_gpts)
+
     log_string(
         f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
     )
@@ -426,18 +439,31 @@ for n_epoch in range(args.nb_epochs):
     # test it
     run_tests(model, task, deterministic_synthesis=False)

-    if model.main_test_accuracy >= accuracy_to_make_quizzes:
+    log_string(
+        f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+    )
+
+    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes:
         other_models = models.copy()
         other_models.remove(model)

-        create_quizzes(
+        average_logits = create_quizzes(
             model,
             other_models,
             task,
             nb_for_train=nb_new_quizzes_for_train,
             nb_for_test=nb_new_quizzes_for_test,
+            desired_average_logits=desired_average_logits,
         )

+        # We keep the first average logits as a reference
+        if desired_average_logits is None:
+            desired_average_logits = average_logits
+        else:
+            log_string(
+                f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
+            )
+
         # We update everyone
         for model in models:
             run_tests(model, task, deterministic_synthesis=False)
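
Two details of this patch are easy to miss. First, the filter that decides which generated quizzes survive: a candidate is kept only when nb_correct equals len(other_models) - 1, i.e. when all but one of the validating models answer it correctly, so a quiz must be hard enough to defeat one model yet easy enough for the rest. A minimal standalone sketch of that selection and of the new acceptance-rate log line; nb_other_models and the tensor values below are invented for illustration:

import torch

nb_other_models = 5                                   # stands in for len(other_models)
new_quizzes = torch.arange(8).unsqueeze(1)            # 8 fake candidate quizzes
nb_correct = torch.tensor([5, 4, 4, 3, 5, 4, 0, 4])   # per-quiz count of correct models

# keep exactly the quizzes that all but one of the other models solve
to_keep = new_quizzes[nb_correct == nb_other_models - 1]

print(
    f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes "
    f"({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
)
# -> keep 4/8 quizzes (50.00%)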
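
Second, the value create_quizzes now returns, sum_logits / nb_generated_tokens, is a token-weighted mean of the per-batch average logits, and the main loop pins the first value it ever receives as desired_average_logits, the fixed reference passed to every later generation round. A toy sketch of that feedback loop, assuming this reading of the patch; run_round and its fake batch statistics are hypothetical stand-ins for create_quizzes and the model:

import random

def run_round(desired_average_logits=None):
    # Stand-in for create_quizzes: accumulate a token-weighted sum of the
    # per-batch average logits and return sum_logits / nb_generated_tokens.
    # The argument plays the role of the target handed down to
    # task.create_new_quizzes.
    nb_generated_tokens, sum_logits = 0, 0
    for _ in range(4):                                    # a few generation batches
        batch_tokens = 100                                # tokens in this batch
        batch_average_logits = random.uniform(5.0, 10.0)  # fake model statistic
        nb_generated_tokens += batch_tokens
        sum_logits += batch_average_logits * batch_tokens
    return sum_logits / nb_generated_tokens

desired_average_logits = None  # mirrors the new module-level variable

for n_epoch in range(3):
    average_logits = run_round(desired_average_logits)
    if desired_average_logits is None:
        # the first measured value becomes the fixed reference, as in the patch
        desired_average_logits = average_logits
    else:
        print(
            f"desired_average_logits {desired_average_logits} "
            f"average_logits {average_logits}"
        )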