X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=0858282fa3b5cab185d14025d5ad758de44411c2;hb=16e7952b7cc32ca21498fa3a12fb79f679ea8c21;hp=066f1bbec05fcc0d65365823ea60931010e118cb;hpb=26ef53ee3769c3b6b92b85d15b5a43cbd18ede07;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 066f1bb..0858282 100755
--- a/tasks.py
+++ b/tasks.py
@@ -110,13 +110,14 @@ class SandBox(Task):
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
+        # A bit of paranoia never hurts
         assert (
             self.nb_codes <= max_nb_codes
             and self.train_input.min() >= 0
             and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
+            and tuple(x.item() for x in self.train_ar_mask.unique()) in { (0,), (1,), (0,1) }
+            and tuple(x.item() for x in self.test_ar_mask.unique()) in { (0,), (1,), (0,1) }
         )
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
@@ -160,8 +161,10 @@ class SandBox(Task):
                     f" {n_epoch} ground truth {self.problem.seq2str(st)}"
                 )
 
-            nb_total = ar_mask.sum().item()
-            nb_correct = ((result == input).long() * ar_mask).sum().item()
+            nb_total, nb_correct = self.problem.compute_nb_correct(input, ar_mask, result)
+
+            # nb_total = ar_mask.sum().item()
+            # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
             return nb_total, nb_correct
 
@@ -1555,7 +1558,6 @@ import qmlp
 
 
 class QMLP(Task):
-
     ######################
 
     def __init__(
@@ -1563,6 +1565,7 @@ class QMLP(Task):
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        result_dir,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1577,19 +1580,31 @@ class QMLP(Task):
             f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
         )
 
-        seq, q_test_set = generate_sequence_and_test_set(
-            nb_mlps=nb_train_samples+nb_test_samples,
+        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
             nb_samples=self.nb_samples_per_mlp,
             device=self.device,
             batch_size=64,
             nb_epochs=250,
-            nb_mlps_per_batch=1024
+            nb_mlps_per_batch=1024,
         )
 
         self.train_input = seq[:nb_train_samples]
         self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
         self.test_input = seq[nb_train_samples:]
         self.test_q_test_set = q_test_set[nb_train_samples:]
+        self.test_ref_test_errors = test_error[nb_train_samples:]
+
+        filename = os.path.join(result_dir, f"train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
+
+        filename = os.path.join(result_dir, f"test_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.test_ref_test_errors:
+                f.write(f"{e}\n")
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
@@ -1599,7 +1614,7 @@ class QMLP(Task):
         for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
         ):
-            yield self.trim(batch)
+            yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
@@ -1609,14 +1624,13 @@ class QMLP(Task):
     ):
         correct = self.test_input[:1000]
         result = correct.clone()
-        ar_mask = torch.arange(result.size(1)) > self.nb_samples_per_mlp * 3 + 1
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_before {e}")
-
         masked_inplace_autoregression(
             model,
             self.batch_size,
@@ -1626,18 +1640,14 @@ class QMLP(Task):
             device=self.device,
         )
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_after {e}")
-
-        logger(f"----------------------------------------------------------")
-
-        q_train_set = result[:, : nb_samples * 3]
-        q_params = result[:, nb_samples * 3 + 1 :]
-        error_test = evaluate_q_params(q_params, q_test_set, nb_mlps_per_batch=17)
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
 
-        logger(f"{error_test=}")
+        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
+        with open(filename, "w") as f:
+            for e in error_test:
+                f.write(f"{e}\n")
 
 
 ######################################################################
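
Editor's note: a minimal standalone sketch of what the reworked ar_mask construction in QMLP.produce_results computes, assuming standard PyTorch broadcasting semantics; the tensor shapes and values below are illustrative placeholders, not taken from the repository.

import torch

# Hypothetical, self-contained shapes; nothing here is read from picoclvr.
nb_samples_per_mlp = 4
result = torch.randint(10, (2, 20))  # (batch, sequence) of token codes

# 1 at the positions to be regenerated, 0 at the prompt positions.
ar_mask = (
    torch.arange(result.size(1), device=result.device)
    > nb_samples_per_mlp * 3 + 1
).long()[None, :]
ar_mask = ar_mask.expand_as(result)  # broadcast over the batch, no copy

result *= 1 - ar_mask  # zero the masked positions before generation

The [None, :] plus expand_as pair turns the 1-D per-position mask into a (batch, sequence) view without allocating a copy, and result *= 1 - ar_mask clears exactly the positions that masked_inplace_autoregression then fills in.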