+    def compute_nb_correct(self, input, ar_mask, result):
+        nb_total = ar_mask.sum().item()
+        nb_correct = ((result == input).long() * ar_mask).sum().item()
+        return nb_total, nb_correct
+
+
+####################
+
+
+class ProblemDegradation(Problem):
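+    """Mass-degradation sequences.
+
+    A total of value_max units starts concentrated on one of
+    nb_state_tokens cells. At every time step a randomly chosen cell
+    holding at least two units gives away a bounded random amount, split
+    between its two circular neighbours. A sequence is the concatenation
+    of the successive states, reversed when hard=True so that the states
+    run from the most degraded one back to the initial concentrated one.
+    """
+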
+    def __init__(self, nb_state_tokens=5, nb_time_steps=12, value_max=25, hard=False):
+        assert value_max // nb_state_tokens >= 2
+        self.nb_state_tokens = nb_state_tokens
+        self.nb_time_steps = nb_time_steps
+        self.value_max = value_max
+        self.hard = hard
+
+    def generate_sequences(self, nb):
+        # Initial state: all value_max units on a uniformly chosen cell.
+        x = (
+            torch.rand(nb, self.nb_state_tokens).sort(dim=-1).indices == 0
+        ).long() * self.value_max
+        seq = [x]
+
+        for t in range(self.nb_time_steps - 1):
+            # u is a per-row one-hot selection of a random cell holding at
+            # least two units.
+            v = (torch.rand(x.size()).sort(dim=-1).indices + 1) * (x >= 2).long()
+            u = (v.max(dim=-1, keepdim=True).values == v).long()
+            # n units leave the selected cell, capped by a random threshold
+            # in [2, value_max // 4 - 1].
+            n = (
+                (u * x)
+                .minimum(2 + torch.randint(self.value_max // 4 - 2, x.size()))
+                .sum(dim=-1, keepdim=True)
+            )
+            # m of those units go to the left neighbour, n - m to the right
+            # one (circularly), so the total amount is conserved.
+            m = 1 + ((n - 1) * torch.rand(n.size())).long()
+            x = (
+                x
+                + m * u.roll(shifts=-1, dims=-1)
+                - n * u
+                + (n - m) * u.roll(shifts=1, dims=-1)
+            )
+            seq.append(x)
+
+        if self.hard:
+            seq.reverse()
+
+        seq = torch.cat(seq, dim=1)
+        return seq, seq.new_full(seq.size(), 1, dtype=torch.int64)
+
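+    # A sequence counts as correct when its first state (last one if
+    # hard=True) is exactly value_max on a single cell, and every
+    # transition moves a positive amount out of one cell, splitting it
+    # between that cell's two circular neighbours and leaving the other
+    # cells untouched.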
+    def compute_nb_correct(self, input, ar_mask, result):
+        nb_total = result.size(0)
+        nb_correct = 0
+        e = result.new_zeros(self.nb_state_tokens)
+
+        for seq in result:
+            states = list(seq.split(self.nb_state_tokens))
+            if self.hard:
+                states.reverse()
+
+            d = states[0]
+            j = d.sort(descending=True).indices[0]
+            e.zero_()
+            e[j] = self.value_max
+            if (d - e).abs().sum() == 0:
+                nb_errors = 0
+                for k in range(len(states) - 1):
+                    d = states[k + 1] - states[k]
+                    j = d.sort(descending=False).indices[0]
+                    if (
+                        d[j] == 0
+                        or d[j] > self.value_max // 4
+                        or d[(j + 1) % e.size(0)] <= 0
+                        or d[(j + 1) % e.size(0)] >= -d[j]
+                    ):
+                        nb_errors += 1
+                    else:
+                        e.zero_()
+                        e[j] = d[j]
+                        e[(j + 1) % e.size(0)] = d[(j + 1) % e.size(0)]
+                        e[(j - 1) % e.size(0)] = -d[(j + 1) % e.size(0)] - d[j]
+                        if (d - e).abs().sum() > 0:
+                            nb_errors += 1
+                if nb_errors == 0:
+                    nb_correct += 1
+
+        return nb_total, nb_correct
+
+    def seq2str(self, seq):
+        return " | ".join(
+            [" ".join([f"{x:02d}" for x in s]) for s in seq.split(self.nb_state_tokens)]
+        )
+
+
+####################
+
+
+class ProblemTwoTargets(Problem):
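+    """Copy-two-substrings sequences.
+
+    Each sequence is a string of random digits in which two
+    non-overlapping "targets" of length len_targets are embedded, the
+    first one delimited by token 10 ('-') and the second by token 11
+    ('+'). The prompt ends with separator token 12 ('|'), and the part
+    to generate is the inner content of the first target, a separator,
+    the inner content of the second target, and a final separator.
+    """
+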
+    def __init__(self, len_total=10, len_targets=3):
+        assert len_targets >= 3
+        assert len_total >= 3 * len_targets - 1
+        self.len_total = len_total
+        self.len_targets = len_targets
+
+    def generate_sequences(self, nb):
+        k = torch.arange(self.len_total)[None, :]
+        s = torch.randint(10, (nb, self.len_total))
+        # Pick the start k1 of the first target uniformly among the
+        # positions where it fits entirely, and mark its two end
+        # positions with token 10.
+        l = torch.rand(nb, self.len_total)
+        l = l * (k <= self.len_total - self.len_targets).long()
+        k1 = l.argmax(dim=1, keepdim=True)
+        m = (k != k1).long() * (k != k1 + self.len_targets - 1).long()
+        s = s * m + 10 * (1 - m)
+        # Pick the start k2 of the second target among the remaining
+        # positions whose span does not overlap the first target, and
+        # mark its two end positions with token 11.
+        l = l * (
+            1
+            - (k + self.len_targets - 1 >= k1).long()
+            * (k < k1 + self.len_targets).long()
+        )
+        k2 = l.argmax(dim=1, keepdim=True)
+        m = (k != k2).long() * (k != k2 + self.len_targets - 1).long()
+        s = s * m + 11 * (1 - m)
+        # a1 and a2 are the digits strictly inside each target.
+        a1 = s.gather(dim=1, index=k1 + 1 + torch.arange(self.len_targets - 2)[None, :])
+        a2 = s.gather(dim=1, index=k2 + 1 + torch.arange(self.len_targets - 2)[None, :])
+        # Full sequence: prompt, separator (12), first content, separator,
+        # second content, separator.
+        sequences = torch.cat(
+            (
+                s,
+                torch.full((nb, 1), 12),
+                a1,
+                torch.full((nb, 1), 12),
+                a2,
+                torch.full((nb, 1), 12),
+            ),
+            1,
+        )
+        # Everything strictly after the first separator has to be generated.
+        ar_mask = (sequences == 12).long()
+        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
+        return sequences, ar_mask
+
+    def seq2str(self, seq):
+        return "".join("0123456789-+|"[x.item()] for x in seq)
+