from torch import nn
from torch.nn import functional as F
+from mygpt import BracketedSequence
+import problems
+
+try:
+    from graph import save_attention_image
+except ImportError:
+    save_attention_image = None
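+# save_attention_image stays None when graph.py (or its plotting
+# dependencies) is unavailable; the attention dump further below is
+# skipped in that case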
+
######################################################################
        pass
-######################################################################
-
-
-class Problem:
-    def generate_sequences(self, nb):
-        pass
-
-    def seq2str(self, seq):
-        return "[NOT IMPLEMENTED]"
-
-
-####################
-
-
-class ProblemLevel0(Problem):
-    def __init__(self, nb_sentences=100, len_prompt=5, len_result=5):
-        self.seq = torch.randint(10, (nb_sentences, len_prompt + 1 + len_result))
-        self.seq[:, len_prompt] = 10
-
-    def generate_sequences(self, nb):
-        sequences = self.seq[torch.randint(self.seq.size(0), (nb,))]
-        ar_mask = (sequences == 10).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-
-class ProblemLevel1(Problem):
-    def __init__(self, nb_operators=100, len_source=5, len_result=8):
-        self.len_source = len_source
-        self.len_result = len_result
-        self.len_nb_operator = int(math.log(nb_operators) / math.log(10)) + 1
-        self.operators = F.one_hot(
-            torch.rand(nb_operators, len_result, len_source).argmax(-1),
-            num_classes=len_source,
-        )
-
-    def generate_sequences(self, nb):
-        nb_operators = torch.randint(self.operators.size(0), (nb,))
-        operators = self.operators[nb_operators]
-        nb_operators = (
-            nb_operators[:, None]
-            // 10 ** torch.arange(self.len_nb_operator - 1, -1, -1)
-        ) % 10
-        marker1 = torch.full((nb, 1), 10)
-        # source = torch.randint(10, (nb, self.len_source))
-        source = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        marker2 = torch.full((nb, 1), 11)
-        result = operators.bmm(source[:, :, None]).squeeze(-1)
-        sequences = torch.cat((nb_operators, marker1, source, marker2, result), 1)
-        ar_mask = (sequences == 11).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join("0123456789|>"[x.item()] for x in seq)
-
-
-class ProblemLevel2(Problem):
-    def __init__(self, len_source=5, len_result=8):
-        self.len_source = len_source
-        self.len_result = len_result
-
-    def generate_sequences(self, nb):
-        operators = F.one_hot(
-            torch.rand(nb, self.len_result, self.len_source).argmax(-1),
-            num_classes=self.len_source,
-        )
-        source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
-        marker1 = torch.full((nb, 1), 10)
-        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
-        marker2 = torch.full((nb, 1), 11)
-        source2 = torch.randint(10, (nb, self.len_source))
-        marker3 = torch.full((nb, 1), 12)
-        result2 = operators.bmm(source2[:, :, None]).squeeze(-1)
-
-        sequences = torch.cat(
-            (source1, marker1, result1, marker2, source2, marker3, result2), 1
-        )
-        ar_mask = (sequences == 12).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join("0123456789>|~"[x.item()] for x in seq)
-
-
-####################
-
-
-class ProblemAddition(Problem):
-    def __init__(self, nb_digits=10, zero_padded=False, inverted_result=False):
-        self.nb_digits = nb_digits
-        self.zero_padded = zero_padded
-        self.inverted_result = inverted_result
-        self.char2id = dict([(c, n) for n, c in enumerate("0123456789+=$")])
-        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
-
-    def tensorize(self, strings):
-        len_max = max([len(x) for x in strings])
-        return torch.cat(
-            [
-                torch.tensor(
-                    [
-                        [self.char2id[c] for c in s + "$" * (len_max - len(s))]
-                        for s in strings
-                    ]
-                )
-            ],
-            0,
-        )
-
-    def generate_sequences(self, nb):
-        sequences = []
-        for k in range(nb):
-            a, b = torch.randint(10**self.nb_digits, (2,))
-            c = a + b
-            a, b, c = str(a.item()), str(b.item()), str(c.item())
-            if self.zero_padded:
-                a = "0" * (self.nb_digits - len(a)) + a
-                b = "0" * (self.nb_digits - len(b)) + b
-                c = "0" * (self.nb_digits + 1 - len(c)) + c
-            if self.inverted_result:
-                c = c[::-1]
-            sequences.append(f"{a}+{b}={c}$")
-
-        sequences = self.tensorize(sequences)
-        ar_mask = (sequences == self.char2id["="]).long()
-        ar_mask = (ar_mask.cumsum(1) - ar_mask).clamp(max=1)
-        return sequences, ar_mask
-
-    def seq2str(self, seq):
-        return "".join(self.id2char[x.item()] for x in seq)
-
-
-# class ProblemUnion(Problem):
-#     problems = [ProblemByheart()]
-#     nb_common_codes = 100
-
-#     def generate_sequences(nb_samples):
-#         problem_indexes = torch.randint(len(problems), (nb_samples,))
-#         nb_samples_per_problem = torch.one_hot(problem_indexes).sum(0)
-#         print(f"{nb_samples_per_problem}")
-#         all_seq = []
-#         for nb, p in zip(nb_samples_per_problem, problems):
-#             all_seq.append(p.generate_sequences(nb_samples_per_problem[nb]))
-#         return all_seq
-
-#     for strain, stest in zip(train_seq, test_seq):
-#         s = torch.cat((strain, stest), 0)
-
####################
class SandBox(Task):
    def __init__(
        self.id2token = dict([(n, c) for c, n in self.token2id.items()])
        self.t_nul = self.token2id["<nul>"]
-        self.t_input = self.token2id["<input>"]
-        self.t_output = self.token2id["<output>"]
-        self.t_prog = self.token2id["<prog>"]
+        self.t_input = self.token2id["<in>"]
+        self.t_output = self.token2id["<out>"]
+        self.t_prog = self.token2id["<prg>"]
        self.t_end = self.token2id["<end>"]
        self.train_input = self.tensorize(train_sequences)
        self.test_input = self.tensorize(test_sequences)
        if no_prog:
+            # Excise the program from every train and test example
            k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
                None, :
            ]
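+            # k is a (1, T) grid of positions, presumably compared against the
+            # position of the <prg> marker to blank out the program tokens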
        )
        sum_nb_total, sum_nb_errors = 0, 0
-        for x, y in zip(input, result):
-            seq = [self.id2token[i.item()] for i in y]
+        for one_input, one_result in zip(input, result):
+            seq = [self.id2token[i.item()] for i in one_result]
            nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
            sum_nb_total += 1
            sum_nb_errors += 0 if nb_errors == 0 else 1
            if nb_to_log > 0:
-                gt_seq = [self.id2token[i.item()] for i in x]
+                gt_seq = [self.id2token[i.item()] for i in one_input]
                _, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
                gt_prog = " ".join([str(x) for x in gt_prog])
                prog = " ".join([str(x) for x in prog])
        )
        sum_nb_total, sum_nb_errors = 0, 0
-        for x, y, i, j in zip(input, result, last_output_idx, first_prog_idx):
-            seq = [self.id2token[i.item()] for i in y]
+        for one_input, one_result, i, j in zip(
+            input, result, last_output_idx, first_prog_idx
+        ):
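+            # per their names, i and j are the positions of the last output
+            # token and of the first program token; the slices [i : j + 1]
+            # below extract the tokens lying between the two for logging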
+            seq = [self.id2token[t.item()] for t in one_result]
            sum_nb_total += 1
-            correct = (x - y).abs().max() == 0
+            correct = (one_input - one_result).abs().max() == 0
            sum_nb_errors += 0 if correct else 1
            if nb_to_log > 0:
-                result_stack = [self.id2token[i.item()] for i in y[i : j + 1]]
-                target_stack = [self.id2token[i.item()] for i in x[i : j + 1]]
+                result_stack = [
+                    self.id2token[t.item()] for t in one_result[i : j + 1]
+                ]
+                target_stack = [
+                    self.id2token[t.item()] for t in one_input[i : j + 1]
+                ]
                comment = "*" if correct else "-"
                result_stack = " ".join([str(x) for x in result_stack])
                target_stack = " ".join([str(x) for x in target_stack])
f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
)
+        if save_attention_image is not None:
+            # pick one test sequence at random and truncate it shortly after
+            # its last non-<nul> token
+            ns = torch.randint(self.test_input.size(0), (1,)).item()
+            input = self.test_input[ns : ns + 1].clone()
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
+
+            with torch.autograd.no_grad():
+                # run one forward pass in eval mode with attention recording
+                # on, then restore the model's training mode
+                t = model.training
+                model.eval()
+                model.record_attention(True)
+                model(BracketedSequence(input))
+                model.train(t)
+                ram = model.retrieve_attention()
+                model.record_attention(False)
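+            # ram presumably holds one attention tensor per layer, of shape
+            # (batch, n_heads, T, T); m[0, n_head] below extracts the T x T
+            # map of a single head for our one-sequence batch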
+
+            tokens_output = [self.id2token[i.item()] for i in input[0]]
+            tokens_input = ["n/a"] + tokens_output[:-1]
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
+                attention_matrices = [m[0, n_head] for m in ram]
+                save_attention_image(
+                    filename,
+                    tokens_input,
+                    tokens_output,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
+                    token_gap=12,
+                    layer_gap=50,
+                )
+                logger(f"wrote {filename}")
+
######################################################################