from torch import nn
from torch.nn import functional as F
+from mygpt import BracketedSequence
+
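+# Attention visualization is optional: fall back gracefully when the
+# graph module is not available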
+try:
+ from graph import save_attention_image
+except ImportError:
+ save_attention_image = None
+
######################################################################
self.id2token = dict([(n, c) for c, n in self.token2id.items()])
self.t_nul = self.token2id["<nul>"]
- self.t_input = self.token2id["<input>"]
- self.t_output = self.token2id["<output>"]
- self.t_prog = self.token2id["<prog>"]
+ self.t_input = self.token2id["<in>"]
+ self.t_output = self.token2id["<out>"]
+ self.t_prog = self.token2id["<prg>"]
self.t_end = self.token2id["<end>"]
self.train_input = self.tensorize(train_sequences)
self.test_input = self.tensorize(test_sequences)
if no_prog:
+ # Excise the program from every train and test example
k = torch.arange(self.train_input.size(1), device=self.train_input.device)[
None, :
]
)
sum_nb_total, sum_nb_errors = 0, 0
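+ # Count errors at the sequence level: a sequence counts as wrong as
+ # soon as compute_nb_errors reports any mistake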
- for x, y in zip(input, result):
- seq = [self.id2token[i.item()] for i in y]
+ for one_input, one_result in zip(input, result):
+ seq = [self.id2token[i.item()] for i in one_result]
nb_total, nb_errors, prog, stacks = rpl.compute_nb_errors(seq)
sum_nb_total += 1
sum_nb_errors += 0 if nb_errors == 0 else 1
if nb_to_log > 0:
- gt_seq = [self.id2token[i.item()] for i in x]
+ gt_seq = [self.id2token[i.item()] for i in one_input]
_, _, gt_prog, _ = rpl.compute_nb_errors(gt_seq)
gt_prog = " ".join([str(x) for x in gt_prog])
prog = " ".join([str(x) for x in prog])
)
sum_nb_total, sum_nb_errors = 0, 0
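+ # Output accuracy is exact match: a sequence is correct only if every
+ # regenerated token equals the original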
- for x, y, i, j in zip(input, result, last_output_idx, first_prog_idx):
- seq = [self.id2token[i.item()] for i in y]
+ for one_input, one_result, i, j in zip(
+ input, result, last_output_idx, first_prog_idx
+ ):
+ seq = [self.id2token[u.item()] for u in one_result]
sum_nb_total += 1
- correct = (x - y).abs().max() == 0
+ correct = (one_input - one_result).abs().max() == 0
sum_nb_errors += 0 if correct else 1
if nb_to_log > 0:
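+ # Log the slice between the last output token and the first
+ # program token, i.e. the stacks to be reproduced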
- result_stack = [self.id2token[i.item()] for i in y[i : j + 1]]
- target_stack = [self.id2token[i.item()] for i in x[i : j + 1]]
+ result_stack = [
+ self.id2token[u.item()] for u in one_result[i : j + 1]
+ ]
+ target_stack = [
+ self.id2token[u.item()] for u in one_input[i : j + 1]
+ ]
comment = "*" if correct else "-"
result_stack = " ".join([str(x) for x in result_stack])
target_stack = " ".join([str(x) for x in target_stack])
f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
)
+ if save_attention_image is not None:
+ input = self.test_input[:10]
+ result = input.clone()
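+ # Mask every token after the first <prg> with <nul> so that the
+ # model has to regenerate the program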
+ s = (result == self.t_prog).long()
+ ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
+ result = (1 - ar_mask) * result + ar_mask * self.t_nul
+
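+ # Fill the masked positions back in autoregressively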
+ masked_inplace_autoregression(
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ deterministic_synthesis,
+ device=self.device,
+ )
+
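+ # Single forward pass in eval mode with attention recording on,
+ # restoring the training state afterwards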
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+ model.record_attention(True)
+ model(BracketedSequence(result))
+ model.train(t)
+ attention = model.retrieve_attention()
+ model.record_attention(False)
+
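+ # Visualize the first sample; at position t the model reads the
+ # token emitted at t - 1, hence the shift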
+ n_sample = 0
+ tokens_output = [self.id2token[i.item()] for i in result[n_sample]]
+ tokens_input = ["n/a"] + tokens_output[:-1]
+ for n_head in range(attention[0].size(1)):
+ filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+ save_attention_image(
+ filename,
+ tokens_input,
+ tokens_output,
+ attention,
+ n_sample=n_sample,
+ n_head=n_head,
+ token_gap=12,
+ layer_gap=40,
+ # k_top=2,
+ )
+ logger(f"wrote {filename}")
+
######################################################################