def save_attention_image(
- filename,
+ filename, # image to save
tokens_input,
tokens_output,
- # An iterable set of BxHxTxT attention matrices
- attention_matrices,
- pixel_scale=8,
- token_gap=15,
- layer_gap=25,
- y_eps=0.5,
- padding=10,
+ attention_matrices, # list of 2d tensors T1xT2, T2xT3, ..., T(k-1)xTk
# do not draw links with a lesser attention
min_link_attention=0,
# draw only the strongest links necessary to reach
# min_total_attention
min_total_attention=None,
# draw only the top k links
k_top=None,
curved=True,
+ pixel_scale=8,
+ token_gap=15,
+ layer_gap=25,
+ y_eps=0.5,
+ padding=10,
):
if k_top is not None:
am = []
nb_heads=2,
nb_blocks=5,
dropout=0.1,
- #causal=True,
+ causal=True,
)
model.eval()
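# keep, for every block, the attention of sample 0, head 0 (tensors are BxHxTxT)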
attention_matrices = [m[0, 0] for m in model.retrieve_attention()]
-
-
# attention_matrices = [ torch.rand(3,5), torch.rand(8,3), torch.rand(5,8) ]
# for a in attention_matrices: a=a/a.sum(-1,keepdim=True)
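# A hypothetical, self-contained variant of the sanity check above (file name,
# token labels and sizes are made up): square TxT matrices, row-normalized so
# each token's outgoing attention sums to 1.
# attention_matrices = [torch.rand(5, 5) for _ in range(3)]
# attention_matrices = [a / a.sum(-1, keepdim=True) for a in attention_matrices]
# save_attention_image(
#     "random_attention.pdf",
#     ["i0", "i1", "i2", "i3", "i4"],
#     ["o0", "o1", "o2", "o3", "o4"],
#     attention_matrices,
#     k_top=3,
# )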
num_classes=self.len_source,
)
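# sample len_source distinct digits in 0..9 per sequence (argsort of random values gives a random permutation)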
source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
- # source1 = torch.randint(10, (nb, self.len_source))
marker1 = torch.full((nb, 1), 10)
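# batched matrix-vector product: result1[b] = operators[b] @ source1[b]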
result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
marker2 = torch.full((nb, 1), 11)
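# token strings for the first sequence of the batch; the input row is the output shifted right by one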
tokens_output = [self.id2token[i.item()] for i in result[0]]
tokens_input = ["n/a"] + tokens_output[:-1]
for n_head in range(ram[0].size(1)):
- filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+ filename = os.path.join(
+ result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+ )
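# per-block attention for sample 0, head n_head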
attention_matrices = [m[0, n_head] for m in ram]
save_attention_image(
filename,
tokens_input,
tokens_output,
attention_matrices,
- token_gap=12,
- layer_gap=50,
k_top=10,
# min_total_attention=0.9,
+ token_gap=12,
+ layer_gap=50,
)
logger(f"wrote {filename}")