X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=7a4abbeea23d9494d835cf6a040a36ab2eb53cc2;hb=8d9cd6a2c09da2105ca17b04df94fcf84e8de954;hp=066f1bbec05fcc0d65365823ea60931010e118cb;hpb=26ef53ee3769c3b6b92b85d15b5a43cbd18ede07;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 066f1bb..7a4abbe 100755
--- a/tasks.py
+++ b/tasks.py
@@ -111,13 +111,19 @@ class SandBox(Task):
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
         # A bit of paranoia never hurts
-        assert (
-            self.nb_codes <= max_nb_codes
-            and self.train_input.min() >= 0
-            and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
-        )
+        assert self.nb_codes <= max_nb_codes
+        assert self.train_input.min() >= 0
+        assert self.test_input.min() >= 0
+        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
+        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
@@ -151,17 +157,24 @@ class SandBox(Task):
             device=self.device,
         )
 
+        log_ground_truth = ar_mask.min() == 0
+
         if logger is not None:
             for sp, st in zip(result[:10], input[:10]):
                 logger(
                     f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
                 )
-                logger(
-                    f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
-                )
+                if log_ground_truth:
+                    logger(
+                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                    )
 
-        nb_total = ar_mask.sum().item()
-        nb_correct = ((result == input).long() * ar_mask).sum().item()
+        nb_total, nb_correct = self.problem.compute_nb_correct(
+            input, ar_mask, result
+        )
+
+        # nb_total = ar_mask.sum().item()
+        # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
         return nb_total, nb_correct
 
@@ -1555,7 +1568,6 @@ import qmlp
 
 
 class QMLP(Task):
-
     ######################
 
     def __init__(
@@ -1563,6 +1575,7 @@ class QMLP(Task):
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        result_dir,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1577,19 +1590,31 @@ class QMLP(Task):
             f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
         )
 
-        seq, q_test_set = generate_sequence_and_test_set(
-            nb_mlps=nb_train_samples+nb_test_samples,
+        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
             nb_samples=self.nb_samples_per_mlp,
             device=self.device,
             batch_size=64,
             nb_epochs=250,
-            nb_mlps_per_batch=1024
+            nb_mlps_per_batch=1024,
         )
 
         self.train_input = seq[:nb_train_samples]
         self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
         self.test_input = seq[nb_train_samples:]
         self.test_q_test_set = q_test_set[nb_train_samples:]
+        self.test_ref_test_errors = test_error[nb_train_samples:]
+
+        filename = os.path.join(result_dir, f"train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
+
+        filename = os.path.join(result_dir, f"test_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.test_ref_test_errors:
+                f.write(f"{e}\n")
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
@@ -1599,7 +1624,7 @@ class QMLP(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
         ):
-            yield self.trim(batch)
+            yield batch
 
     def vocabulary_size(self):
         return self.nb_codes
 
@@ -1609,14 +1634,13 @@ class QMLP(Task):
     ):
         correct = self.test_input[:1000]
         result = correct.clone()
-        ar_mask = torch.arange(result.size(1)) > self.nb_samples_per_mlp * 3 + 1
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_before {e}")
-
         masked_inplace_autoregression(
             model,
             self.batch_size,
             result,
             ar_mask,
             deterministic_synthesis,
             device=self.device,
         )
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_after {e}")
-
-        logger(f"----------------------------------------------------------")
-
-        q_train_set = result[:, : nb_samples * 3]
-        q_params = result[:, nb_samples * 3 + 1 :]
-        error_test = evaluate_q_params(q_params, q_test_set, nb_mlps_per_batch=17)
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
 
-        logger(f"{error_test=}")
+        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
+        with open(filename, "w") as f:
+            for e in error_test:
+                f.write(f"{e}\n")
 
 ######################################################################
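
Note on the SandBox hunk: the patch moves accuracy scoring out of the task and into the problem object, via self.problem.compute_nb_correct(input, ar_mask, result), so each Problem can define its own notion of a correct completion. Below is a minimal sketch of a compatible default implementation; only the call signature comes from the diff, and the body simply reproduces the commented-out computation the commit keeps as a reference.

import torch

class Problem:
    # Hypothetical base implementation: the signature matches the call site
    # in SandBox.produce_results; the body mirrors the commented-out lines
    # left in the diff. Per-problem subclasses may override it.
    def compute_nb_correct(self, input, ar_mask, result):
        # Score only the positions the model had to generate (mask == 1).
        nb_total = ar_mask.sum().item()
        nb_correct = ((result == input).long() * ar_mask).sum().item()
        return nb_total, nb_correct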
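
Note on the QMLP.produce_results hunk: the mask becomes an integer row vector broadcast over the batch instead of a bare 1-D boolean tensor, which makes both the blanking step (result *= 1 - ar_mask) and the in-place autoregression operate per sequence. A standalone sketch of that pattern with toy sizes; the mask construction is taken from the diff, the sizes and values are illustrative only.

import torch

nb_samples_per_mlp = 2  # toy value; the real task uses a larger count
result = torch.randint(10, (3, 12))  # (batch, sequence) of token codes

# Positions strictly after nb_samples_per_mlp * 3 + 1 must be generated:
# flag them with 1, replicate the row mask over the batch, then blank them.
ar_mask = (
    torch.arange(result.size(1), device=result.device)
    > nb_samples_per_mlp * 3 + 1
).long()[None, :]
ar_mask = ar_mask.expand_as(result)
result *= 1 - ar_mask

print(ar_mask[0])  # tensor([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1])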