Keep quizzes in CPU memory, move batches to the compute device only when needed.

The stored quiz tensors (fresh w_quizzes, train/test c_quizzes, and the
generated c_quizzes) now live on the CPU. compute_accuracy and
logproba_of_solutions move each batch to self.device on entry;
logproba_of_solutions additionally runs under torch.autograd.no_grad()
with the model switched to eval mode and its training mode restored
afterwards, and returns its result on the CPU.
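
Throughout, the pattern is to keep the large quiz pools in CPU RAM and to
transfer only the batch currently being processed. A minimal sketch of the
idea, assuming a PyTorch setup; store and process_batch are hypothetical
names, not from quiz_machine.py:

    import torch

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Hypothetical stand-in for a model forward pass.
    def process_batch(x):
        return x.float().mean(dim=1)

    # The large pool of token sequences stays in CPU RAM.
    store = torch.randint(0, 100, (10_000, 64))

    results = []
    for batch in store.split(256):
        batch = batch.to(device)       # move only the current batch
        out = process_batch(batch)     # compute on the device
        results.append(out.to("cpu"))  # results return to CPU storage
    results = torch.cat(results)
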
diff --git a/quiz_machine.py b/quiz_machine.py
index 1f1046d..8ab5696 100755
@@ -327,6 +327,7 @@ class QuizMachine:
         self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
     ):
         def compute_accuracy(input, log_prefix=None):
+            input = input.to(self.device)  # batches now arrive on the CPU
             ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
             seq_logproba = torch.empty(input.size(0), device=self.device)
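
With the quizzes held on the CPU, compute_accuracy must move each batch to
the model's device on entry. Tensor.to returns self when the tensor already
has the target device and dtype, so the added line is also harmless for
callers that still pass device tensors; a self-contained check:

    import torch

    x = torch.zeros(4)        # a CPU tensor
    assert x.to("cpu") is x   # no copy: .to is a no-op when nothing changes
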
@@ -404,35 +405,44 @@ class QuizMachine:
         input[:-nb] = input[nb:].clone()
         fresh_w_quizzes = self.generate_token_sequences(nb)
         self.reverse_random_half_in_place(fresh_w_quizzes)
-        input[-nb:] = fresh_w_quizzes.to(self.device)
+        input[-nb:] = fresh_w_quizzes.to("cpu")
 
     ######################################################################
 
     def store_c_quizzes(self, new_c_quizzes, for_train=True):
         with self.LOCK_C_QUIZZES:
             if for_train:
-                self.train_c_quizzes.append(new_c_quizzes)
+                self.train_c_quizzes.append(new_c_quizzes.to("cpu"))
             else:
-                self.test_c_quizzes.append(new_c_quizzes)
+                self.test_c_quizzes.append(new_c_quizzes.to("cpu"))
 
     ######################################################################
 
     def logproba_of_solutions(self, models, c_quizzes):
-        logproba = c_quizzes.new_zeros(c_quizzes.size(0), len(models))
+        logproba = c_quizzes.new_zeros(
+            c_quizzes.size(0), len(models), device=self.device, dtype=torch.float32
+        )
 
         for model in models:
-            for input, l in zip(
-                c_quizzes.split(self.batch_size), logproba.split(self.batch_size)
-            ):
-                ar_mask = self.make_ar_mask(input)
-                output = model(mygpt.BracketedSequence(input)).x
-                ce = (
-                    F.cross_entropy(output.transpose(1, 2), input, reduction="none")
-                    * ar_mask
-                )
-                l[:, model.id] = -ce.sum(dim=-1)
-
-        return logproba
+            with torch.autograd.no_grad():
+                t = model.training
+                model.eval()
+
+                for input, l in zip(
+                    c_quizzes.split(self.batch_size), logproba.split(self.batch_size)
+                ):
+                    input = input.to(self.device)
+                    ar_mask = self.make_ar_mask(input)
+                    output = model(mygpt.BracketedSequence(input)).x
+                    ce = (
+                        F.cross_entropy(output.transpose(1, 2), input, reduction="none")
+                        * ar_mask
+                    )
+                    l[:, model.id] = -ce.sum(dim=-1)
+
+                model.train(t)
+
+        return logproba.to("cpu")
 
     ###############################################################
 
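
The score computed in logproba_of_solutions is the token-level cross-entropy,
masked by ar_mask, summed over positions, and negated. A minimal sketch of
that computation with made-up shapes (B, T, V and the random tensors are
illustrative, not from this file):

    import torch
    import torch.nn.functional as F

    B, T, V = 2, 5, 10                    # batch, sequence length, vocabulary
    logits = torch.randn(B, T, V)         # stand-in for the model output
    tokens = torch.randint(0, V, (B, T))  # target token sequence
    mask = torch.ones(B, T)               # 1.0 at the positions that count

    # cross_entropy over sequences expects logits as (B, V, T)
    ce = F.cross_entropy(logits.transpose(1, 2), tokens, reduction="none")
    logproba = -(ce * mask).sum(dim=-1)   # one log-probability per sequence
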
@@ -561,4 +571,4 @@ class QuizMachine:
             device=self.device,
         )
 
-        return c_quizzes
+        return c_quizzes.to("cpu")
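
The mode handling around the loop above is the usual save/restore idiom, and
torch.autograd.no_grad is the same context manager as torch.no_grad. A
minimal sketch on an arbitrary module (the Linear layer is illustrative):

    import torch

    model = torch.nn.Linear(4, 4)

    was_training = model.training
    model.eval()                     # disable dropout / batch-norm updates
    with torch.no_grad():            # no autograd graph is built
        _ = model(torch.randn(2, 4))
    model.train(was_training)        # restore the caller's mode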