Update.
[picoclvr.git] / tasks.py
index 183c3cf..066f1bb 100755 (executable)
--- a/tasks.py
+++ b/tasks.py
@@ -1550,3 +1550,94 @@ class Grid(Task):
 
 
 ######################################################################
+
+import qmlp
+
+
+class QMLP(Task):
+
+    ######################
+
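+    # Each sequence encodes a small quantized MLP: the nb_samples_per_mlp
+    # input/output samples of its training set (3 tokens per sample),
+    # a separator token, then the MLP's quantized parameters. This layout
+    # is inferred from the slicing done in produce_results.
+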
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        logger=None,
+        device=torch.device("cpu"),
+    ):
+        super().__init__()
+
+        self.device = device
+        self.batch_size = batch_size
+        self.nb_samples_per_mlp = 256
+
+        if logger is not None:
+            logger(
+                f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
+            )
+
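+        # Assumption: this helper lives in qmlp.py; it trains nb_mlps small
+        # MLPs and returns their token sequences together with the held-out
+        # test sets consumed later by qmlp.evaluate_q_params.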
+        seq, q_test_set = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
+            nb_samples=self.nb_samples_per_mlp,
+            device=self.device,
+            batch_size=64,
+            nb_epochs=250,
+            nb_mlps_per_batch=1024,
+        )
+
+        self.train_input = seq[:nb_train_samples]
+        self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.test_input = seq[nb_train_samples:]
+        self.test_q_test_set = q_test_set[nb_train_samples:]
+
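+        # The token vocabulary must cover every value occurring in the data.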
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+    def batches(self, split="train"):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+        ):
+            yield self.trim(batch)
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def produce_results(
+        self, n_epoch, model, result_dir, logger, deterministic_synthesis
+    ):
+        correct = self.test_input[:1000]
+        result = correct.clone()
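+        # Keep the prefix encoding the train set (3 tokens per sample plus
+        # the separator) and regenerate everything after it autoregressively.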
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :].expand_as(result)
+        result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
+
+        logger(f"----------------------------------------------------------")
+
+        for e in self.tensor2str(result[:10]):
+            logger(f"test_before {e}")
+
+        masked_inplace_autoregression(
+            model,
+            self.batch_size,
+            result,
+            ar_mask,
+            deterministic_synthesis,
+            device=self.device,
+        )
+
+        logger(f"----------------------------------------------------------")
+
+        for e in self.tensor2str(result[:10]):
+            logger(f"test_after  {e}")
+
+        logger(f"----------------------------------------------------------")
+
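+        # Split the generated sequences back into the train-set prefix and
+        # the quantized parameters, then measure the parameters' error on
+        # the matching held-out test sets.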
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(
+            q_params, self.test_q_test_set[:1000], nb_mlps_per_batch=17
+        )
+
+        logger(f"{error_test=}")
+
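+# Hypothetical wiring in main.py, mirroring how the other tasks are
+# instantiated (argument names assumed):
+#
+#   task = tasks.QMLP(
+#       nb_train_samples=args.nb_train_samples,
+#       nb_test_samples=args.nb_test_samples,
+#       batch_size=args.batch_size,
+#       logger=log_string,
+#       device=device,
+#   )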
+
+######################################################################