parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--reverse_cleanup", action="store_true", default=False)
+parser.add_argument("--both_directions", action="store_true", default=False)
parser.add_argument("--problem", type=str, default="sky")
nb_test_samples += input.size(0)
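# Keep the test accuracy on the model object itself, so the weakest model can
# be selected at each epoch.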
- main_test_accuracy = quizz_machine.produce_results(
+ model.main_test_accuracy = quizz_machine.produce_results(
n_epoch=n_epoch,
model=model,
result_dir=args.result_dir,
log_string(f"test_perplexity {n_epoch} {test_perplexity}")
- model.main_test_accuracy = main_test_accuracy
-
######################################################################
):
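# Accumulate batches of candidate c_quizzes (with their correctness counts)
# until enough of them have been validated.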
recorded = []
- sum_logits, sum_nb_c_quizzes = 0, 0
-
nb_to_create = nb_for_train + nb_for_test
# ------------------------------------------------------------
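# Draw candidate c_quizzes from the generator model; ave_seq_logproba is the
# average sequence log-probability of the generated samples.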
c_quizzes, ave_seq_logproba = quizz_machine.generate_quizzes(
nb_to_create,
model_for_generation=model_for_generation,
- reverse_cleanup=args.reverse_cleanup,
)
- sum_logits += c_quizzes.size(0) * ave_seq_logproba
- sum_nb_c_quizzes += c_quizzes.size(0)
-
- nb_correct = quizz_machine.compute_correctness(c_quizzes, models)
+ nb_correct = quizz_machine.compute_correctness(
+ c_quizzes, models, both_directions=args.both_directions
+ )
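# dirty_debug: replace the computed correctness counts with random values
# (fast path for exercising the pipeline).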
if args.dirty_debug:
nb_correct = torch.randint(
nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)
log_string(
- f"keep c_quizzes kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
+ f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
)
# store the new c_quizzes that have been validated
else ""
)
- quizz_machine.problem.save_quizzes(
- valid_c_quizzes(recorded, criteria=lambda nb_correct: nb_correct == n)[:72],
- args.result_dir,
- f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
- )
+ q = valid_c_quizzes(recorded, criteria=lambda nb_correct: nb_correct == n)[:72]
+ if q.size(0) > 0:
+ quizz_machine.save_quizzes(
+ args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q
+ )
- return sum_logits / sum_nb_c_quizzes
######################################################################
for n_epoch in range(args.nb_epochs):
log_string(f"--- epoch {n_epoch} ----------------------------------------")
+ # Select, improve, and evaluate the weakest model
+
weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))
log_string(
f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}"
)
- # improve it
one_epoch(weakest_model, quizz_machine)
log_string(
f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
)
- # test it
run_tests(weakest_model, quizz_machine, deterministic_synthesis=False)
log_string(
f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
)
- cta = " ".join([f"{float(m.main_test_accuracy):.02f}" for m in models])
+ cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
log_string(f"current_test_accuracies {cta}")
- # replace a fraction of the w_quizzes with a fresh ones
+ # Replace a fraction of the w_quizzes with fresh ones
+
quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+ # If all the models are good enough, generate new quizzes and
+ # re-compute the test errors
+
if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
create_c_quizzes(
models,
nb_for_test=nb_new_c_quizzes_for_test,
)
- # We update everyone
for model in models:
run_tests(model, quizz_machine, deterministic_synthesis=False)