From 82d46926a10776910886aa1123c27a3867ecc721 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Wed, 31 Jul 2024 09:10:51 +0200
Subject: [PATCH] Update.

---
 main.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/main.py b/main.py
index 3cc536c..4903585 100755
--- a/main.py
+++ b/main.py
@@ -674,6 +674,13 @@ def record_new_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=10
 
 ######################################################################
 
+# The generator is very similar to a "solving GPT" except that it
+# deals with quizzes prologued with one token per solving GPT that
+# indicates if the said model solves it or not.
+#
+# There are three levels of solving 0->proba<=proba_not_understands,
+# 2->proba>=proba_understands and 1 otherwise.
+
 
 def generate_c_quizz_with_generator(generator, quiz_machine, nb):
     generator.to(main_device)
@@ -1024,6 +1031,17 @@ if args.dirty_debug:
 ######################################################################
 
 if args.test_generator:
+    filename = f"generator.pth"
+
+    try:
+        d = torch.load(os.path.join(args.result_dir, filename))
+        generator.load_state_dict(d[0])
+        generator.main_test_accuracy = d[1]
+        log_string(f"successfully loaded {filename}")
+    except FileNotFoundError:
+        log_string(f"cannot find {filename}")
+        pass
+
     token_prolog_0 = vocabulary_size + 0
     token_prolog_1 = vocabulary_size + 1
     token_prolog_2 = vocabulary_size + 2
-- 
2.20.1
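
The three-level scheme described in the comment added by the first hunk can be summarised as a small Python sketch. This sketch is not part of the commit: the threshold names proba_not_understands / proba_understands and the token_prolog_* values are taken from the names used in the patch, while the helper function itself is hypothetical.

def prolog_token_for_model(proba, proba_not_understands, proba_understands,
                           token_prolog_0, token_prolog_1, token_prolog_2):
    # Level 0: the solving GPT is deemed not to solve the quiz.
    if proba <= proba_not_understands:
        return token_prolog_0
    # Level 2: the solving GPT is deemed to solve the quiz reliably.
    if proba >= proba_understands:
        return token_prolog_2
    # Level 1: intermediate, neither clearly solved nor clearly failed.
    return token_prolog_1

Each quiz given to the generator would then be prologued with one such token per solving GPT, ahead of the quiz tokens themselves.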