parser.add_argument("--nb_gpts", type=int, default=5)
-parser.add_argument("--check", action="store_true", default=False)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
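+# dirty_debug mode: shrink the sample and quiz counts and keep every generated quiz, for fast end-to-end checks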
######################################################################
######################################################################
-if args.check:
+if args.dirty_debug:
args.nb_train_samples = 2500
args.nb_test_samples = 100
task,
nb_for_train=1000,
nb_for_test=100,
+ desired_average_logits=None,
):
kept = []
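+ # running totals used to report the average logits over all generated quizzes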
+ sum_logits, sum_nb_quizzes = 0, 0
+
while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
- new_quizzes, nb_correct = task.create_new_quizzes(
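+ # over-generate: four times more candidates than will ultimately be kept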
+ nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
+ new_quizzes, nb_correct, average_logits = task.create_new_quizzes(
n_epoch=n_epoch,
result_dir=args.result_dir,
logger=log_string,
- nb=4 * (nb_for_train + nb_for_test),
+ nb=nb_to_generate,
model=model,
other_models=other_models,
+ desired_average_logits=desired_average_logits,
)
- print(nb_correct)
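+ # accumulate the per-batch average logits, weighted by batch size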
+ sum_logits += new_quizzes.size(0) * average_logits
+ sum_nb_quizzes += new_quizzes.size(0)
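+ # keep only the quizzes that all but one of the other models answer correctly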
to_keep = new_quizzes[nb_correct == len(other_models) - 1]
- log_string(f"keep {to_keep.size(0)} quizzes")
+
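+ # dirty-debug mode: keep every candidate so the loop finishes quickly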
+ if args.dirty_debug:
+ to_keep = new_quizzes
+
+ log_string(
+ f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)"
+ )
+
kept.append(to_keep)
new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
log_string,
)
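+ # overall average logits across all generated quizzes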
+ return sum_logits / sum_nb_quizzes
+
######################################################################
nb_new_quizzes_for_train = 1000
nb_new_quizzes_for_test = 100
-if args.check:
+if args.dirty_debug:
accuracy_to_make_quizzes = 0.0
- nb_new_quizzes_for_train = 10
+ nb_new_quizzes_for_train = 100
nb_new_quizzes_for_test = 10
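+# target average logits for quiz generation, fixed by the first generation round below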
+desired_average_logits = None
+
for n_epoch in range(args.nb_epochs):
+ log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
a = [(model.id, float(model.main_test_accuracy)) for model in models]
a.sort(key=lambda p: p[0])
log_string(f"current accuracies {a}")
# evaluate the model on the test set
run_tests(model, task, deterministic_synthesis=False)
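+ # log how the test set splits between world samples and generated quizzes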
+ log_string(
+ f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+ )
+
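+ # generate new quizzes only once every model clears the accuracy threshold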
if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes:
other_models = models.copy()
other_models.remove(model)
- create_quizzes(
+ average_logits = create_quizzes(
model,
other_models,
task,
nb_for_train=nb_new_quizzes_for_train,
nb_for_test=nb_new_quizzes_for_test,
+ desired_average_logits=desired_average_logits,
)
+ # the first generation round fixes the reference average logits; later rounds log how far they drift from it
+ if desired_average_logits is None:
+ desired_average_logits = average_logits
+ else:
+ log_string(
+ f"desired_average_logits {desired_average_logits} average_logits {average_logits}"
+ )
+
# re-evaluate every model to refresh its test accuracy
for model in models:
run_tests(model, task, deterministic_synthesis=False)