+class TaskFromFile(Task):
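+    # Convert a list of (sequence, mask) string pairs into two 2D integer
+    # tensors, right-padding every sequence with "#" and every mask with
+    # "0" up to the length of the longest sequence. For instance, with a
+    # hypothetical vocabulary, [("ab", "01"), ("abcd", "0112")] gives a
+    # 2x4 input tensor and the mask tensor [[0, 1, 0, 0], [0, 1, 1, 2]].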
+ def tensorize(self, pairs, shuffle):
+ len_max = max([len(x[0]) for x in pairs])
+
+        # Encode every sequence as token ids, right-padded with "#" to len_max
+        input = torch.tensor(
+            [
+                [self.char2id[c] for c in s[0] + "#" * (len_max - len(s[0]))]
+                for s in pairs
+            ]
+        ).to("cpu")
+
+        # Encode the prediction masks as integers, right-padded with "0"
+        pred_mask = torch.tensor(
+            [
+                [int(c) for c in s[1] + "0" * (len_max - len(s[1]))]
+                for s in pairs
+            ]
+        ).to("cpu")
+
+ if shuffle:
+ print("SHUFFLING!")
+ i = torch.randperm(input.size(0))
+ input = input[i].contiguous()
+ pred_mask = pred_mask[i].contiguous()
+
+ return input, pred_mask
+
+    # Trim the tensor z by removing, on the left and on the right, the
+    # columns that contain only padding tokens. If z is a tuple of
+    # tensors, all its elements are trimmed according to the trimming
+    # computed for the first one.
+    def trim(self, z, token="#"):
+        n = self.char2id[token]
+        if isinstance(z, tuple):
+            x = z[0]
+            # a is the index of the first column of x holding at least one
+            # non-padding token, b is one past the last such column (the
+            # extra column padded on each side makes the indices line up)
+            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
+            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
+            return tuple([t[:, a:b] for t in z])
+        else:
+            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
+            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
+            return z[:, a:b]
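+
+    # A hypothetical illustration: with char2id["#"] == 0, the batch
+    # [[0, 3, 5, 0], [0, 7, 0, 0]] has its first and last columns made only
+    # of padding, so trim returns [[3, 5], [7, 0]]; columns that are padding
+    # in only some of the rows are kept.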
+
+ def __init__(
+ self,
+ train_filename,
+ test_filename,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ shuffle=False,
+ device=torch.device("cpu"),
+ ):
+ self.batch_size = batch_size
+ self.device = device
+
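+        # Data file format, as inferred from the parsing below: pairs of
+        # lines, the first carrying the token sequence, the second a
+        # prediction mask of the same length over the characters "0", "1"
+        # and "2", e.g. (hypothetical content)
+        #
+        #   011010
+        #   000122
+        #
+        # Positions marked "1" or "2" are regenerated by the model, and
+        # those marked "2" are the ones scored in produce_results.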
+ def read_file(filename, nb=-1):
+ pairs = []
+ with open(filename, "r") as f:
+ while True:
+ sequence = f.readline().strip()
+ if not sequence:
+ break
+ pred_mask = f.readline().strip()
+ assert len(sequence) == len(pred_mask)
+ assert set(pred_mask).issubset({"0", "1", "2"}), f"{set(pred_mask)}"
+ pairs.append((sequence, pred_mask))
+ if len(pairs) == nb:
+ break
+
+ if nb > 0:
+ pairs = pairs[:nb]
+ assert len(pairs) == nb
+
+ return pairs
+
+ train_pairs = read_file(train_filename, nb_train_samples)
+ test_pairs = read_file(test_filename, nb_test_samples)
+
+ symbols = ["#"] + list(
+ set("".join([x[0] for x in train_pairs + test_pairs])) - set(["#"])
+ )
+ self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
+ self.id2char = dict([(n, c) for c, n in self.char2id.items()])
+
+ self.train_input, self.train_pred_masks = self.tensorize(
+ train_pairs, shuffle=shuffle
+ )
+ self.test_input, self.test_pred_masks = self.tensorize(
+ test_pairs, shuffle=shuffle
+ )
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield self.trim(batch).to(self.device)
+
+ def vocabulary_size(self):
+ return len(self.char2id)
+
+ def tensor2str(self, t):
+ return ["".join([self.id2char[x.item()] for x in s]) for s in t]
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis
+ ):
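+        # Take the first 1000 test sequences, erase the positions flagged in
+        # their prediction masks, let the model regenerate them, and count how
+        # many of the positions marked "2" are recovered exactly.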
+ correct = self.trim(self.test_input[:1000]).to(self.device)
+ result = correct.clone()
+ pred_mask = self.test_pred_masks[:1000, : result.size(1)].to(self.device)
+ ar_mask = (pred_mask > 0).long()
+        result *= 1 - ar_mask  # erase the tokens to be generated (paranoia: they should be overwritten anyway)
+
+ logger(f"----------------------------------------------------------")
+
+ for e in self.tensor2str(result[:50]):
+ logger(f"test_before {e}")
+
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ )
+
+ logger(f"----------------------------------------------------------")
+
+ for e, c in zip(self.tensor2str(result[:50]), self.tensor2str(correct[:50])):
+ logger(f"test_after {e}")
+ logger(f"correct {c}")
+
+ logger(f"----------------------------------------------------------")
+
+ err_mask = (pred_mask == 2).long()
+ nb_total = err_mask.sum().item()
+ nb_correct = ((correct == result).long() * err_mask).sum().item()
+
+ logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
+ logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
+
+
+####################
+
+import problems
+
+
+class SandBox(Task):
+ def __init__(
+ self,
+ problem,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ logger=None,
+ device=torch.device("cpu"),
+ max_nb_codes=1024,
+ ):
+ super().__init__()
+
+ self.batch_size = batch_size
+ self.device = device
+ self.problem = problem
+
+ self.train_input, self.train_ar_mask = self.problem.generate_sequences(
+ nb_train_samples
+ )
+ self.test_input, self.test_ar_mask = self.problem.generate_sequences(
+ nb_test_samples
+ )
+
+ self.train_input, self.train_ar_mask = self.train_input.to(
+ device
+ ), self.train_ar_mask.to(device)
+ self.test_input, self.test_ar_mask = self.test_input.to(
+ device
+ ), self.test_ar_mask.to(device)
+
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ # A bit of paranoia never hurts
+ assert self.nb_codes <= max_nb_codes
+ assert self.train_input.min() >= 0
+ assert self.test_input.min() >= 0
+ assert tuple(x.item() for x in self.train_ar_mask.unique()) in {
+ (0,),
+ (1,),
+ (0, 1),
+ }
+ assert tuple(x.item() for x in self.test_ar_mask.unique()) in {
+ (0,),
+ (1,),
+ (0, 1),
+ }
+
+ if logger is not None:
+ for s, a in zip(self.train_input[:100], self.train_ar_mask[:100]):
+ logger(f"train_sequences {self.problem.seq2str(s)}")
+ a = "".join(["01"[x.item()] for x in a])
+ logger(f" {a}")
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield batch
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ ):
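+        # Erase the positions flagged in ar_mask, let the model regenerate
+        # them autoregressively, and delegate the scoring to the problem
+        # object.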
+ def compute_accuracy(input, ar_mask, logger=None):
+ input, ar_mask = input[:nmax], ar_mask[:nmax]
+ result = input.clone() * (1 - ar_mask)
+
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ progress_bar_desc=None,
+ device=self.device,
+ )
+
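+            # Log the ground truth only when some positions are given as
+            # context (the mask has zeros); when every token is generated,
+            # the input is not a target the samples are expected to match.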
+ log_ground_truth = ar_mask.min() == 0
+
+ if logger is not None:
+ for sp, st in zip(result[:10], input[:10]):
+ logger(
+ f"test_sequences {n_epoch} prediction {self.problem.seq2str(sp)}"
+ )
+ if log_ground_truth:
+ logger(
+ f" {n_epoch} ground truth {self.problem.seq2str(st)}"
+ )
+
+ nb_total, nb_correct = self.problem.compute_nb_correct(
+ input, ar_mask, result
+ )
+
+ # nb_total = ar_mask.sum().item()
+ # nb_correct = ((result == input).long() * ar_mask).sum().item()
+
+ return nb_total, nb_correct
+
+ train_nb_total, train_nb_correct = compute_accuracy(
+ self.train_input, self.train_ar_mask
+ )
+
+ logger(
+ f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ )
+
+ test_nb_total, test_nb_correct = compute_accuracy(
+ self.test_input, self.test_ar_mask, logger
+ )
+
+ logger(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
+ logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
+ if save_attention_image is not None:
+ for k in range(10):
+ ns = torch.randint(self.test_input.size(0), (1,)).item()
+ input = self.test_input[ns : ns + 1].clone()
+
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+ # model.record_attention(True)
+ model(BracketedSequence(input))
+ model.train(t)
+ # ram = model.retrieve_attention()
+ # model.record_attention(False)
+
+ # tokens_output = [c for c in self.problem.seq2str(input[0])]
+ # tokens_input = ["n/a"] + tokens_output[:-1]
+ # for n_head in range(ram[0].size(1)):
+ # filename = os.path.join(
+ # result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+ # )
+ # attention_matrices = [m[0, n_head] for m in ram]
+ # save_attention_image(
+ # filename,
+ # tokens_input,
+ # tokens_output,
+ # attention_matrices,
+ # k_top=10,
+ ##min_total_attention=0.9,
+ # token_gap=12,
+ # layer_gap=50,
+ # )
+ # logger(f"wrote {filename}")
+
+
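+# A minimal usage sketch of TaskFromFile, with hypothetical file names and
+# sample counts, assuming the rest of the module (the Task base class and
+# masked_inplace_autoregression) is set up as above:
+#
+#   task = TaskFromFile(
+#       "data/train.txt",
+#       "data/test.txt",
+#       nb_train_samples=10000,
+#       nb_test_samples=1000,
+#       batch_size=25,
+#   )
+#   for input in task.batches(split="train"):
+#       ...  # forward pass / loss on the batch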