X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=main.py;h=803383626669647984f3a542421f5b2c57d461c2;hb=779e3675414e061ad294c6b5599a7843d9e887bc;hp=a021a71c76ed7525c40db2b01ef11b6e9b1e9059;hpb=17c63771f2ca82ce39d8406e377ace2015fe69fc;p=culture.git

diff --git a/main.py b/main.py
index a021a71..8033836 100755
--- a/main.py
+++ b/main.py
@@ -73,7 +73,7 @@ parser.add_argument("--deterministic_synthesis", action="store_true", default=Fa
 
 parser.add_argument("--nb_gpts", type=int, default=5)
 
-parser.add_argument("--check", action="store_true", default=False)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
 
 ######################################################################
 
@@ -182,7 +182,7 @@ for n in vars(args):
 
 ######################################################################
 
-if args.check:
+if args.dirty_debug:
     args.nb_train_samples = 2500
     args.nb_test_samples = 100
 
@@ -338,10 +338,12 @@ def create_quizzes(
     desired_average_logits=None,
 ):
     kept = []
-    nb_generated_tokens, sum_logits = 0, 0
+
+    sum_logits, sum_nb_quizzes = 0, 0
 
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
         nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
         new_quizzes, nb_correct, average_logits = task.create_new_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
@@ -352,13 +354,18 @@ def create_quizzes(
             desired_average_logits=desired_average_logits,
         )
 
-        nb_generated_tokens += new_quizzes.numel()
-        sum_logits += average_logits * new_quizzes.numel()
+        sum_logits += new_quizzes.size(0) * average_logits
+        sum_nb_quizzes += new_quizzes.size(0)
 
         to_keep = new_quizzes[nb_correct == len(other_models) - 1]
+
+        if args.dirty_debug:
+            to_keep = new_quizzes
+
         log_string(
             f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
         )
+
         kept.append(to_keep)
 
     new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
@@ -373,7 +380,7 @@ def create_quizzes(
         log_string,
     )
 
-    return sum_logits / nb_generated_tokens
+    return sum_logits / sum_nb_quizzes
 
 ######################################################################
 
@@ -407,14 +414,16 @@ accuracy_to_make_quizzes = 0.975
 nb_new_quizzes_for_train = 1000
 nb_new_quizzes_for_test = 100
 
-if args.check:
+if args.dirty_debug:
     accuracy_to_make_quizzes = 0.0
-    nb_new_quizzes_for_train = 10
+    nb_new_quizzes_for_train = 100
     nb_new_quizzes_for_test = 10
 
 desired_average_logits = None
 
 for n_epoch in range(args.nb_epochs):
+    log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
     a = [(model.id, float(model.main_test_accuracy)) for model in models]
     a.sort(key=lambda p: p[0])
     log_string(f"current accuracies {a}")
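
For reference, a minimal standalone sketch of the per-quiz running average computed in create_quizzes() above, assuming average_logits is the scalar that task.create_new_quizzes() returns for each generated batch; the tensor shapes, values, and loop count below are made up for illustration only.

import torch

# Standalone sketch of the per-quiz running average of logits.
# Shapes and values here are hypothetical, not taken from the repository.
sum_logits, sum_nb_quizzes = 0, 0

for _ in range(3):  # stand-in for repeated calls to task.create_new_quizzes()
    new_quizzes = torch.zeros(8, 16, dtype=torch.long)  # 8 quizzes x 16 tokens (made up)
    average_logits = float(torch.randn(()))             # per-batch average logit (made up)

    # Weight each batch by its number of quizzes, not by its token count
    # (the removed code divided by new_quizzes.numel() instead).
    sum_logits += new_quizzes.size(0) * average_logits
    sum_nb_quizzes += new_quizzes.size(0)

print(sum_logits / sum_nb_quizzes)  # overall average logit per quiz

Weighting each batch by new_quizzes.size(0) makes the denominator the number of quizzes rather than the number of generated tokens, matching the renamed accumulators sum_logits and sum_nb_quizzes in the diff.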