Update.
author      François Fleuret <francois@fleuret.org>
Tue, 25 Jun 2024 21:41:17 +0000 (23:41 +0200)
committer   François Fleuret <francois@fleuret.org>
Tue, 25 Jun 2024 21:41:17 +0000 (23:41 +0200)
main.py

diff --git a/main.py b/main.py
index 7f9d521..cb28a7d 100755
--- a/main.py
+++ b/main.py
@@ -58,7 +58,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=1e-3)
 
 ########################################
 
@@ -103,7 +103,7 @@ if args.dirty_debug:
 default_args = {
     "model": "37M",
     "batch_size": 100,
-    "nb_train_samples": 250000,
+    "nb_train_samples": 100000,
     "nb_test_samples": 10000,
 }
 
@@ -478,12 +478,12 @@ for n_epoch in range(args.nb_epochs):
         )
 
         # We keep the first average logits as a reference
-        if min_ave_seq_logproba is None:
-            min_ave_seq_logproba = ave_seq_logproba
-        else:
-            log_string(
-                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
-            )
+        # if min_ave_seq_logproba is None:
+        #     min_ave_seq_logproba = ave_seq_logproba
+        # else:
+        #     log_string(
+        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+        #     )
 
         # We update everyone
         for model in models:
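
The last hunk comments out the bookkeeping that kept the first average sequence log-probability as a reference and logged later values against it. Below is a minimal, self-contained sketch of that pattern, reconstructed from the removed lines; the evaluate_models stand-in, the loop bound, and the log_string stub are placeholders for this sketch, not code from main.py.

import random

def log_string(s):
    # Stand-in for main.py's log_string helper; assumed to emit one log line.
    print(s)

def evaluate_models():
    # Hypothetical placeholder for whatever computes ave_seq_logproba.
    return -random.uniform(10.0, 20.0)

min_ave_seq_logproba = None

for n_epoch in range(5):
    ave_seq_logproba = evaluate_models()

    # Keep the first average sequence log-probability as a reference,
    # then report subsequent values against it (the behavior this commit
    # comments out).
    if min_ave_seq_logproba is None:
        min_ave_seq_logproba = ave_seq_logproba
    else:
        log_string(
            f"min_ave_seq_logproba {min_ave_seq_logproba} "
            f"ave_seq_logproba {ave_seq_logproba}"
        )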
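
Going back to the second hunk: it lowers the fallback value for nb_train_samples in the default_args dictionary from 250000 to 100000. The mechanism that applies these fallbacks is not visible in this diff; the sketch below is only a guess at the usual pattern, where any argparse option left at None is filled in from the dictionary. The add_argument calls for --model and --batch_size are assumed here for completeness.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default=None)
parser.add_argument("--batch_size", type=int, default=None)
parser.add_argument("--nb_train_samples", type=int, default=None)
parser.add_argument("--nb_test_samples", type=int, default=None)
args = parser.parse_args([])  # empty list so the sketch runs without CLI flags

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

# Fill in every option the user left unset with its fallback value.
for name, value in default_args.items():
    if getattr(args, name) is None:
        setattr(args, name, value)

print(vars(args))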