Update.
[culture.git] / main.py
diff --git a/main.py b/main.py
index 2d5b148..c5acea7 100755
--- a/main.py
+++ b/main.py
@@ -58,7 +58,7 @@ parser.add_argument("--nb_train_samples", type=int, default=None)
 
 parser.add_argument("--nb_test_samples", type=int, default=None)
 
-parser.add_argument("--learning_rate", type=float, default=1e-4)
+parser.add_argument("--learning_rate", type=float, default=1e-3)
 
 ########################################
 
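The raised default only applies when the flag is omitted; argparse still accepts the old value explicitly. A minimal, self-contained check (the parser line is copied from the hunk above, the asserts are illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--learning_rate", type=float, default=1e-3)

    # Omitting the flag now yields the new default ...
    assert parser.parse_args([]).learning_rate == 1e-3
    # ... while the previous setting remains one flag away.
    assert parser.parse_args(["--learning_rate", "1e-4"]).learning_rate == 1e-4
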
@@ -103,7 +103,7 @@ if args.dirty_debug:
 default_args = {
     "model": "37M",
     "batch_size": 100,
-    "nb_train_samples": 250000,
+    "nb_train_samples": 100000,
     "nb_test_samples": 10000,
 }
 
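Both sample-count flags default to None at the parser level (first hunk), so the per-model default_args dict presumably takes effect only for values the user left unset. The application loop lives outside this hunk; a hypothetical sketch of the usual pattern, assuming args is the parsed namespace:

    # Hypothetical: fill in per-model defaults only where the command
    # line supplied nothing (the parser default is None).
    for name, value in default_args.items():
        if getattr(args, name) is None:
            setattr(args, name, value)
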
@@ -211,7 +211,7 @@ assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
 quizz_machine = quizz_machine.QuizzMachine(
-    sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
+    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
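Switching the first argument to the problem= keyword keeps this call site valid even if QuizzMachine gains or reorders leading parameters. A sketch of the constructor shape this call assumes (inferred from the call site alone, not from quizz_machine.py):

    class QuizzMachine:
        # Parameter names inferred from the call above; illustrative only.
        def __init__(self, problem, nb_train_samples, nb_test_samples,
                     batch_size, **kwargs):
            self.problem = problem
            self.nb_train_samples = nb_train_samples
            self.nb_test_samples = nb_test_samples
            self.batch_size = batch_size
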
@@ -394,7 +394,10 @@ def create_c_quizzes(
             f"keep c_quizzes {to_keep.size(0)}/{new_c_quizzes.size(0)} ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%) total {sum([ x.size(0) for x in kept])}/{nb_to_generate}"
         )
 
-    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+    new_c_quizzes = torch.cat(kept, dim=0)
+    new_c_quizzes = new_c_quizzes[
+        torch.randperm(new_c_quizzes.size(0))[: nb_for_train + nb_for_test]
+    ]
 
     quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
     quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
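The replaced line always took the first nb_for_train + nb_for_test kept quizzes, so the training split was drawn entirely from the earliest generation rounds and the test split from the latest ones. Indexing through torch.randperm first makes both splits uniform over everything kept. A self-contained illustration with dummy data:

    import torch

    kept = [torch.arange(i * 10, (i + 1) * 10) for i in range(5)]  # 5 rounds
    pool = torch.cat(kept, dim=0)

    head = pool[:20]                                  # old: earliest 20 items
    sample = pool[torch.randperm(pool.size(0))[:20]]  # new: random 20 items

    print(head)    # deterministic 0..19, all from the first two rounds
    print(sample)  # mixes items from every round
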
@@ -478,12 +481,12 @@ for n_epoch in range(args.nb_epochs):
         )
 
         # We keep the first average logits as a reference
-        if min_ave_seq_logproba is None:
-            min_ave_seq_logproba = ave_seq_logproba
-        else:
-            log_string(
-                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
-            )
+        # if min_ave_seq_logproba is None:
+        #     min_ave_seq_logproba = ave_seq_logproba
+        # else:
+        #     log_string(
+        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+        #     )
 
         # We update everyone
         for model in models:
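
The disabled block followed a keep-the-first-value-as-reference pattern: min_ave_seq_logproba is set once, on the first pass, and later passes only log the comparison. A standalone sketch of that pattern with dummy values (print standing in for log_string):

    min_ave_seq_logproba = None

    for ave_seq_logproba in (-1.7, -1.2, -0.9):  # dummy per-epoch values
        if min_ave_seq_logproba is None:
            # First epoch: record the reference value.
            min_ave_seq_logproba = ave_seq_logproba
        else:
            # Later epochs: report how the current value compares.
            print(
                f"min_ave_seq_logproba {min_ave_seq_logproba} "
                f"ave_seq_logproba {ave_seq_logproba}"
            )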