            num_classes=self.len_source,
        )
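+        # first len_source entries of a random permutation of 0..9, i.e. distinct digits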
        source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
        marker1 = torch.full((nb, 1), 10)
        result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
        marker2 = torch.full((nb, 1), 11)
        )
        if save_attention_image is not None:
-            input = self.test_input[:10]
-            result = input.clone()
-            s = (result == self.t_prog).long()
-            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
-            result = (1 - ar_mask) * result + ar_mask * self.t_nul
-
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
+            input = self.test_input[:1].clone()
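+            # cut just past the last non-nul token, with a 3-position margin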
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
            with torch.autograd.no_grad():
                t = model.training
                model.eval()
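+                # have the model record its attention maps during the forward pass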
                model.record_attention(True)
-                model(BracketedSequence(result))
+                model(BracketedSequence(input))
                model.train(t)
-                attention = model.retrieve_attention()
+                ram = model.retrieve_attention()
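+                # ram: per-layer attention tensors, indexed (sample, head, ...) below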
                model.record_attention(False)
-            n_sample = 0
-            tokens_output = [self.id2token[i.item()] for i in result[n_sample]]
+            tokens_output = [self.id2token[i.item()] for i in input[0]]
            tokens_input = ["n/a"] + tokens_output[:-1]
-            for n_head in range(attention[0].size(1)):
-                filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
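+                # for this head: one matrix per layer, taken from the single sample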
+                attention_matrices = [m[0, n_head] for m in ram]
                save_attention_image(
                    filename,
                    tokens_input,
                    tokens_output,
-                    attention,
-                    n_sample=n_sample,
-                    n_head=n_head,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
                    token_gap=12,
-                    layer_gap=40,
-                    # k_top=2,
+                    layer_gap=50,
                )
                logger(f"wrote {filename}")