Update.
author: François Fleuret <francois@fleuret.org>
Thu, 11 Jul 2024 20:43:35 +0000 (22:43 +0200)
committer: François Fleuret <francois@fleuret.org>
Thu, 11 Jul 2024 20:43:35 +0000 (22:43 +0200)
grids.py
main.py
quiz_machine.py

index 20a964b..7aec62c 100755 (executable)
--- a/grids.py
+++ b/grids.py
@@ -869,6 +869,74 @@ class Grids(problem.Problem):
     # i,j=q%self.height,q//self.height
     # if
 
+    # @torch.compile
+    def task_puzzle(self, A, f_A, B, f_B):
+        S = 4
+        i0, j0 = (self.height - S) // 2, (self.width - S) // 2
+        c = torch.randperm(len(self.colors) - 1)[:4] + 1
+        for X, f_X in [(A, f_A), (B, f_B)]:
+            while True:
+                f_X[...] = 0
+                h = list(torch.randperm(c.size(0)))
+                n = torch.zeros(c.max() + 1)
+                for _ in range(2):
+                    k = torch.randperm(S * S)
+                    for q in k:
+                        i, j = q % S + i0, q // S + j0
+                        if f_X[i, j] == 0:
+                            r, s, t, u = (
+                                f_X[i - 1, j],
+                                f_X[i, j - 1],
+                                f_X[i + 1, j],
+                                f_X[i, j + 1],
+                            )
+                            r, s, t, u = torch.tensor([r, s, t, u])[torch.randperm(4)]
+                            if r > 0 and n[r] < 6:
+                                n[r] += 1
+                                f_X[i, j] = r
+                            elif s > 0 and n[s] < 6:
+                                n[s] += 1
+                                f_X[i, j] = s
+                            elif t > 0 and n[t] < 6:
+                                n[t] += 1
+                                f_X[i, j] = t
+                            elif u > 0 and n[u] < 6:
+                                n[u] += 1
+                                f_X[i, j] = u
+                            else:
+                                if len(h) > 0:
+                                    d = c[h.pop()]
+                                    n[d] += 1
+                                    f_X[i, j] = d
+
+                if n.sum() == S * S:
+                    break
+
+            k = 0
+            for d in range(4):
+                while True:
+                    ii, jj = torch.randint(self.height, (1,)), torch.randint(
+                        self.width, (1,)
+                    )
+                    e = 0
+                    for i in range(S):
+                        for j in range(S):
+                            if (
+                                ii + i >= self.height
+                                or jj + j >= self.width
+                                or (
+                                    f_X[i + i0, j + j0] == c[d]
+                                    and X[ii + i, jj + j] > 0
+                                )
+                            ):
+                                e = 1
+                    if e == 0:
+                        break
+                for i in range(S):
+                    for j in range(S):
+                        if f_X[i + i0, j + j0] == c[d]:
+                            X[ii + i, jj + j] = c[d]
+
     ######################################################################
 
     def all_tasks(self):
@@ -976,12 +1044,12 @@ if __name__ == "__main__":
     # nb, nrow = 8, 2
 
     # for t in grids.all_tasks():
-    for t in [grids.task_path]:
+    for t in [grids.task_puzzle]:
         print(t.__name__)
         prompts, answers = grids.generate_prompts_and_answers_(nb, tasks=[t])
         grids.save_quizzes("/tmp", t.__name__, prompts[:nb], answers[:nb], nrow=nrow)
 
-    exit(0)
+    exit(0)
 
     nb = 1000
 
diff --git a/main.py b/main.py
index a7338c7..8d18119 100755 (executable)
--- a/main.py
+++ b/main.py
@@ -347,8 +347,6 @@ def one_epoch(model, quiz_machine, local_device=None):
 
     run_tests(model, quiz_machine, deterministic_synthesis=False)
 
-    model.TRAINING_LOCK.release()
-
 
 ######################################################################
 
@@ -449,7 +447,6 @@ for k in range(args.nb_gpts):
 
     model.main_test_accuracy = 0.0
     model.id = k
-    model.TRAINING_LOCK = threading.Lock()
 
     model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples)
     quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)
@@ -547,20 +544,21 @@ for n_epoch in range(args.nb_epochs):
 
     weakest_models = ranked_models[: args.nb_gpus]
 
+    threads = []
+
     for gpu_id, model in enumerate(weakest_models):
-        model.TRAINING_LOCK.acquire()
+        log_string(f"training model {model.id}")
 
-        log_string(
-            f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+        t = threading.Thread(
+            target=one_epoch, daemon=True, args=(model, quiz_machine, f"cuda:{gpu_id}")
         )
 
-        threading.Thread(
-            target=one_epoch, daemon=True, args=(model, quiz_machine, f"cuda:{gpu_id}")
-        ).start()
+        threads.append(t)
 
-    for model in weakest_models:
-        model.TRAINING_LOCK.acquire()
-        model.TRAINING_LOCK.release()
+        t.start()
+
+    for t in threads:
+        t.join()
 
     ##################################################
     # Replace a fraction of the w_quizzes with fresh ones
index 8ab5696..4f704a0 100755 (executable)
@@ -368,11 +368,7 @@ class QuizMachine:
                 backward_nb_total = correct[n_backward].size(0)
 
                 self.logger(
-                    f"{log_prefix}_forward_accuracy {n_epoch} model {model.id} nb_correct {forward_nb_correct} / {forward_nb_total} ({forward_nb_correct*100/forward_nb_total} %)"
-                )
-
-                self.logger(
-                    f"{log_prefix}_backward_accuracy {n_epoch} model {model.id} nb_correct {backward_nb_correct} / {backward_nb_total} ({backward_nb_correct*100/backward_nb_total} %)"
+                    f"{log_prefix}_accuracy {n_epoch} model {model.id} forward {forward_nb_correct} / {forward_nb_total} backward {backward_nb_correct} / {backward_nb_total}"
                 )
 
             return result, correct