Update.
author: François Fleuret <francois@fleuret.org>
Wed, 31 Jul 2024 07:10:51 +0000 (09:10 +0200)
committer: François Fleuret <francois@fleuret.org>
Wed, 31 Jul 2024 07:10:51 +0000 (09:10 +0200)
main.py

diff --git a/main.py b/main.py
index 3cc536c..4903585 100755 (executable)
--- a/main.py
+++ b/main.py
@@ -674,6 +674,13 @@ def record_new_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=10
 
 ######################################################################
 
+# The generator is very similar to a "solving GPT", except that it
+# deals with quizzes prologued with one token per solving GPT
+# indicating whether that model solves the quiz or not.
+#
+# There are three levels of solving: 0 -> proba <= proba_not_understands,
+# 2 -> proba >= proba_understands, and 1 otherwise.
+
 
 def generate_c_quizz_with_generator(generator, quiz_machine, nb):
     generator.to(main_device)
@@ -1024,6 +1031,17 @@ if args.dirty_debug:
 ######################################################################
 
 if args.test_generator:
+    filename = f"generator.pth"
+
+    try:
+        d = torch.load(os.path.join(args.result_dir, filename))
+        generator.load_state_dict(d[0])
+        generator.main_test_accuracy = d[1]
+        log_string(f"successfully loaded {filename}")
+    except FileNotFoundError:
+        log_string(f"cannot find {filename}")
+        pass
+
     token_prolog_0 = vocabulary_size + 0
     token_prolog_1 = vocabulary_size + 1
     token_prolog_2 = vocabulary_size + 2