class ProblemLevel0(Problem):
def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
- self.seq = torch.randint(10, (nb_seq, len_prompt + 1 + len_result))
+ self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
self.seq[:, len_prompt] = 10
    def generate_sequences(self, nb):
        nb_operators = torch.randint(self.operators.size(0), (nb,))
        operators = self.operators[nb_operators]
        nb_operators = (
            nb_operators[:, None] // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
        ) % 10
marker1 = torch.full((nb, 1), 10)
- source = torch.randint(10, (nb, self.len_source))
+ # source = torch.randint(10, (nb, self.len_source))
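+        # the indices that sort a random vector form a uniformly random
+        # permutation of 0..9, so the len_source digits are all distinct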
+ source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
marker2 = torch.full((nb, 1), 11)
result = operators.bmm(source[:, :, None]).squeeze(-1)
print(f"{nb_operators.dtype=} {marker1.dtype=}")

        operators = F.one_hot(
            torch.rand(nb, self.len_result, self.len_source).argmax(-1),
            num_classes=self.len_source,
        )
- source1 = torch.randint(10, (nb, self.len_source))
+ source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
+ # source1 = torch.randint(10, (nb, self.len_source))
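+        # same trick as above: distinct digits from a random permutation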
marker1 = torch.full((nb, 1), 10)
result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
marker2 = torch.full((nb, 1), 11)
######################################################################
+######################################################################
+
+import rpl
+
+
+class RPL(Task):
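+    # Every generated sample mixes input/output stack examples with the
+    # program that produced them, placed after a <prog> marker; evaluation
+    # erases everything past <prog> and lets the model regenerate it.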
+    def tensorize(self, sequences):
+        # pad all sequences to the length of the longest one with <nul>
+        # and map every token to its id
+        len_max = max(len(x) for x in sequences)
+        return torch.tensor(
+            [
+                [self.token2id[str(c)] for c in s + ["<nul>"] * (len_max - len(s))]
+                for s in sequences
+            ]
+        )
+
+ def seq2str(self, seq):
+ return " ".join([self.id2token[i] for i in seq])
+
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ nb_starting_values=3,
+ max_input=9,
+ prog_len=6,
+ nb_runs=5,
+ logger=None,
+ device=torch.device("cpu"),
+ ):
+ super().__init__()
+
+ self.batch_size = batch_size
+ self.device = device
+
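+        # every sample is a list of tokens (integer values and marker
+        # strings) produced by rpl.generate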
+ train_sequences = [
+ rpl.generate(
+ nb_starting_values=nb_starting_values,
+ max_input=max_input,
+ prog_len=prog_len,
+ nb_runs=nb_runs,
+ )
+ for _ in tqdm.tqdm(range(nb_train_samples), desc="train-data")
+ ]
+
+ test_sequences = [
+ rpl.generate(
+ nb_starting_values=nb_starting_values,
+ max_input=max_input,
+ prog_len=prog_len,
+ nb_runs=nb_runs,
+ )
+ for _ in tqdm.tqdm(range(nb_test_samples), desc="test-data")
+ ]
+
+        # the vocabulary is the set of symbolic tokens, sorted, followed by
+        # the string form of every integer that can appear in a sequence
+        symbols = list(
+            set(["<nul>"] + [x for l in train_sequences + test_sequences for x in l])
+        )
+        val_max = max([x if type(x) is int else 0 for x in symbols])
+        symbols = list(filter(lambda x: type(x) is str, symbols))
+        symbols.sort()
+        symbols += [str(n) for n in range(val_max + 1)]
+ self.token2id = dict([(c, n) for n, c in enumerate(symbols)])
+ self.id2token = dict([(n, c) for c, n in self.token2id.items()])
+
+ self.t_nul, self.t_prog = self.token2id["<nul>"], self.token2id["<prog>"]
+
+ self.train_input = self.tensorize(train_sequences)
+ self.test_input = self.tensorize(test_sequences)
+
+ if logger is not None:
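+            # log a few training sequences, trimmed at their last
+            # non-<nul> token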
+ for x in self.train_input[:25]:
+ end = (x != self.t_nul).nonzero().max().item() + 1
+ seq = [self.id2token[i.item()] for i in x[:end]]
+ s = " ".join(seq)
+ logger(f"example_seq {s}")
+
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
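+            # drop the all-<nul> columns on the right, keeping a small
+            # margin of <nul> tokens (hence the + 3)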
+ last = (batch != self.t_nul).max(0).values.nonzero().max() + 3
+ batch = batch[:, :last].to(self.device)
+ yield batch
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(
+ self, n_epoch, model, result_dir, logger, deterministic_synthesis
+ ):
+ # --------------------------------------------------------------------
+ def compute_nb_errors(input, nb_to_log=0):
+ result = input.clone()
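+            # mask everything strictly after the first <prog> token, erase
+            # it to <nul>, and let the model regenerate it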
+ s = (result == self.t_prog).long()
+ ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+ result = (1 - ar_mask) * result + ar_mask * self.t_nul
+
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ )
+
+ sum_nb_total, sum_nb_errors = 0, 0
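+            # count erroneous sequences, not individual token errors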
+ for x, y in zip(input, result):
+ seq = [self.id2token[i.item()] for i in y]
+ nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
+ sum_nb_total += 1
+ sum_nb_errors += 0 if nb_errors == 0 else 1
+ if nb_to_log > 0:
+ gt_seq = [self.id2token[i.item()] for i in x]
+ _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
+ gt_prog = " ".join([str(x) for x in gt_prog])
+ prog = " ".join([str(x) for x in prog])
+ comment = "*" if nb_errors == 0 else "-"
+ logger(f"{comment} PROG [{gt_prog}] PREDICTED [{prog}]")
+ for start_stack, target_stack, result_stack, correct in stacks:
+ comment = "*" if correct else "-"
+ start_stack = " ".join([str(x) for x in start_stack])
+ target_stack = " ".join([str(x) for x in target_stack])
+ result_stack = " ".join([str(x) for x in result_stack])
+ logger(
+ f" {comment} [{start_stack}] -> [{target_stack}] PREDICTED [{result_stack}]"
+ )
+ nb_to_log -= 1
+
+ return sum_nb_total, sum_nb_errors
+
+ # --------------------------------------------------------------------
+
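+        # evaluate on (at most) the first 1000 test sequences, logging a
+        # few predicted programs with their execution traces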
+ test_nb_total, test_nb_errors = compute_nb_errors(
+ self.test_input[:1000].to(self.device), nb_to_log=10
+ )
+
+ logger(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
+ )
+
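+# A minimal usage sketch (hypothetical parameter values, print as logger):
+#
+#   task = RPL(
+#       nb_train_samples=10000,
+#       nb_test_samples=1000,
+#       batch_size=25,
+#       logger=print,
+#   )
+#   for input in task.batches(split="train"):
+#       ...  # (batch_size, seq_len) tensor of token ids on task.device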
+
######################################################################