X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=7a4abbeea23d9494d835cf6a040a36ab2eb53cc2;hb=8d9cd6a2c09da2105ca17b04df94fcf84e8de954;hp=ea10d7cbfc758373e8e75f7d419b45bec16d3ad6;hpb=f44ab6863f93ae348e66ffbf52251d96d3b5453c;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index ea10d7c..7a4abbe 100755
--- a/tasks.py
+++ b/tasks.py
@@ -111,13 +111,19 @@ class SandBox(Task):
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
         # A bit of paranoia never hurts
-        assert (
-            self.nb_codes <= max_nb_codes
-            and self.train_input.min() >= 0
-            and self.test_input.min() >= 0
-            and tuple(self.train_ar_mask.unique()) == (0, 1)
-            and tuple(self.test_ar_mask.unique()) == (0, 1)
-        )
+        assert self.nb_codes <= max_nb_codes
+        assert self.train_input.min() >= 0
+        assert self.test_input.min() >= 0
+        assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
+        assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
+            (0,),
+            (1,),
+            (0, 1),
+        }
 
     def batches(self, split="train", nb_to_use=-1, desc=None):
         assert split in {"train", "test"}
@@ -151,17 +157,24 @@ class SandBox(Task):
             device=self.device,
         )
 
+        log_ground_truth = ar_mask.min() == 0
+
         if logger is not None:
             for sp, st in zip(result[:10], input[:10]):
                 logger(
                     f"test_sequences {n_epoch} prediction   {self.problem.seq2str(sp)}"
                 )
-                logger(
-                    f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
-                )
+                if log_ground_truth:
+                    logger(
+                        f"               {n_epoch} ground truth {self.problem.seq2str(st)}"
+                    )
+
+        nb_total, nb_correct = self.problem.compute_nb_correct(
+            input, ar_mask, result
+        )
 
-        nb_total = ar_mask.sum().item()
-        nb_correct = ((result == input).long() * ar_mask).sum().item()
+        # nb_total = ar_mask.sum().item()
+        # nb_correct = ((result == input).long() * ar_mask).sum().item()
 
         return nb_total, nb_correct
 
@@ -1555,7 +1568,6 @@ import qmlp
 
 
 class QMLP(Task):
-
     ######################
 
     def __init__(
@@ -1563,6 +1575,7 @@ class QMLP(Task):
         nb_train_samples,
         nb_test_samples,
         batch_size,
+        result_dir,
         logger=None,
         device=torch.device("cpu"),
     ):
@@ -1570,39 +1583,40 @@ class QMLP(Task):
         self.device = device
         self.batch_size = batch_size
+        self.nb_samples_per_mlp = 256
 
         if logger is not None:
             logger(
                 f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
             )
 
-        self.train_descr = self.grid_factory.generate_samples(
-            nb_train_samples, lambda r: tqdm.tqdm(r)
-        )
-        self.test_descr = self.grid_factory.generate_samples(
-            nb_test_samples, lambda r: tqdm.tqdm(r)
+        seq, q_test_set, test_error = qmlp.generate_sequence_and_test_set(
+            nb_mlps=nb_train_samples + nb_test_samples,
+            nb_samples=self.nb_samples_per_mlp,
+            device=self.device,
+            batch_size=64,
+            nb_epochs=250,
+            nb_mlps_per_batch=1024,
         )
 
-        # Build the tokenizer
-        tokens = set()
-        for d in [self.train_descr, self.test_descr]:
-            for s in d:
-                for t in s.strip().split(" "):
-                    tokens.add(t)
-        # make this set a sorted list to get the same tensors given
-        # the same descr
-        tokens = list(tokens)
-        tokens.sort()
-        tokens = ["#"] + tokens
-        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
-        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
-        self.t_nul = self.token2id["#"]
-        self.t_true = self.token2id["true"]
-        self.t_false = self.token2id["false"]
+        self.train_input = seq[:nb_train_samples]
+        self.train_q_test_set = q_test_set[:nb_train_samples]
+        self.train_ref_test_errors = test_error[:nb_train_samples]
+        self.test_input = seq[nb_train_samples:]
+        self.test_q_test_set = q_test_set[nb_train_samples:]
+        self.test_ref_test_errors = test_error[nb_train_samples:]
 
-        # Tokenize the train and test sets
-        self.train_input = self.str2tensor(self.train_descr)
-        self.test_input = self.str2tensor(self.test_descr)
+        filename = os.path.join(result_dir, f"train_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.train_ref_test_errors:
+                f.write(f"{e}\n")
+
+        filename = os.path.join(result_dir, f"test_errors_ref.dat")
+        with open(filename, "w") as f:
+            for e in self.test_ref_test_errors:
+                f.write(f"{e}\n")
+
+        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
     def batches(self, split="train"):
         assert split in {"train", "test"}
@@ -1610,24 +1624,23 @@ class QMLP(Task):
         for batch in tqdm.tqdm(
             input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
         ):
-            yield self.trim(batch)
+            yield batch
 
     def vocabulary_size(self):
-        return len(self.token2id)
+        return self.nb_codes
 
     def produce_results(
         self, n_epoch, model, result_dir, logger, deterministic_synthesis
     ):
         correct = self.test_input[:1000]
         result = correct.clone()
-        ar_mask = torch.logical_or(result == self.t_true, result == self.t_false).long()
+        ar_mask = (
+            torch.arange(result.size(1), device=result.device)
+            > self.nb_samples_per_mlp * 3 + 1
+        ).long()[None, :]
+        ar_mask = ar_mask.expand_as(result)
         result *= 1 - ar_mask  # paraaaaanoiaaaaaaa
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_before {e}")
-
         masked_inplace_autoregression(
             model,
             self.batch_size,
@@ -1637,18 +1650,14 @@ class QMLP(Task):
             device=self.device,
         )
 
-        logger(f"----------------------------------------------------------")
-
-        for e in self.tensor2str(result[:10]):
-            logger(f"test_after  {e}")
-
-        logger(f"----------------------------------------------------------")
+        q_train_set = result[:, : self.nb_samples_per_mlp * 3]
+        q_params = result[:, self.nb_samples_per_mlp * 3 + 1 :]
+        error_test = qmlp.evaluate_q_params(q_params, self.test_q_test_set)
 
-        nb_total = ar_mask.sum().item()
-        nb_correct = ((correct == result).long() * ar_mask).sum().item()
-
-        logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
-        logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
+        filename = os.path.join(result_dir, f"test_errors_{n_epoch:04d}.dat")
+        with open(filename, "w") as f:
+            for e in error_test:
+                f.write(f"{e}\n")
 
 
 ######################################################################
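
SandBox: the combined assertion is split into independent asserts so a failure pinpoints the offending condition, and the mask check now accepts all-zero, all-one, or mixed masks. The `x.item()` conversion matters because `unique()` yields 0-d tensors, whose hashes never match those of Python ints, so a tuple of tensors would always fail the set-membership test. `log_ground_truth = ar_mask.min() == 0` skips echoing the ground truth when the mask covers the whole sequence, where the freshly generated sample shares no conditioning prefix with it and the side-by-side comparison is uninformative. Accuracy counting is delegated to `self.problem.compute_nb_correct`, with the old inline computation kept as comments. A problem wanting the previous per-token behaviour could implement the hook as below; this body is a sketch reconstructed from the commented-out lines, not the actual implementation in the repository:

    class Problem:
        # Sketch of the hook SandBox.produce_results now calls. The body
        # reproduces the old inline per-token count; concrete problems may
        # instead count whole sequences as correct or incorrect.
        def compute_nb_correct(self, input, ar_mask, result):
            nb_total = ar_mask.sum().item()
            nb_correct = ((result == input).long() * ar_mask).sum().item()
            return nb_total, nb_correct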
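
QMLP: each sequence now encodes a quantized MLP training set followed by the quantized parameters of an MLP trained on it: `nb_samples_per_mlp` samples at three tokens each, a separator region, then the parameter tokens. The new `ar_mask` is therefore purely positional, flagging every index greater than `nb_samples_per_mlp * 3 + 1` for regeneration, where the old mask matched the `true`/`false` token values. A minimal sketch of the layout and mask with toy sizes (the sizes and the zero-filled batch are illustrative only, not values from the repository):

    import torch

    nb_samples_per_mlp = 4  # the task uses 256; a toy value keeps the sketch small
    nb_param_tokens = 8  # illustrative number of quantized parameters
    seq_len = nb_samples_per_mlp * 3 + 1 + nb_param_tokens

    result = torch.zeros(2, seq_len, dtype=torch.long)  # stand-in batch of 2 sequences

    # Flag every position with index > nb_samples_per_mlp * 3 + 1 for
    # generation, mirroring the diff; earlier positions (the quantized
    # training set and separator region) are kept as conditioning.
    ar_mask = (
        torch.arange(result.size(1), device=result.device)
        > nb_samples_per_mlp * 3 + 1
    ).long()[None, :]
    ar_mask = ar_mask.expand_as(result)

    result = result * (1 - ar_mask)  # blank out what will be regenerated

    # After generation, the same offsets split the sequence back into its
    # two regions, exactly as produce_results does:
    q_train_set = result[:, : nb_samples_per_mlp * 3]
    q_params = result[:, nb_samples_per_mlp * 3 + 1 :]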