attention_matrices, # list of 2d tensors of shapes T1xT2, T2xT3, ..., T(k-1)xTk
# do not draw links whose attention is below this value
min_link_attention=0,
- # draw only the strongest links necessary to reache
- # min_total_attention
+ # draw only the strongest links necessary so that their summed
+ # attention is above min_total_attention
min_total_attention=None,
# draw only the top k links
k_top=None,
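# A minimal sketch of one way the three filters above could combine on a
# flattened vector of attention values; `filter_links` is a hypothetical
# helper, not this function's actual implementation:

import torch

def filter_links(at, min_link_attention=0.0, min_total_attention=None, k_top=None):
    # at: 1d tensor, one attention value per candidate link
    keep = at >= min_link_attention
    ranks = at.argsort(descending=True)  # link indices, strongest first
    if k_top is not None:
        mask = torch.zeros_like(keep)
        mask[ranks[:k_top]] = True  # keep at most the k_top strongest links
        keep &= mask
    if min_total_attention is not None:
        cum = at[ranks].cumsum(0)
        # smallest prefix of strongest links whose summed attention
        # reaches min_total_attention
        n = int((cum < min_total_attention).sum().item()) + 1
        mask = torch.zeros_like(keep)
        mask[ranks[:n]] = True
        keep &= mask
    return keep

# e.g. keep = filter_links(at, 0.01, min_total_attention=0.5, k_top=16)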
ctx.set_line_width(0.25)
for d in range(len(attention_matrices)):
- at = attention_matrices[d]
+ at = attention_matrices[d].to("cpu")
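# ni and nj give the row and column index of every entry of `at`,
# so after flattening each position describes one candidate link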
ni = torch.arange(at.size(0))[:, None].expand_as(at)
nj = torch.arange(at.size(1))[None, :].expand_as(at)
at = at.flatten()
x_advance,
y_advance,
) = ctx.text_extents(s)
- ctx.move_to(k * token_gap - width_t / 2, token_gap / 5 - y_bearing)
+ ctx.move_to(k * token_gap - width_t / 2, 2 * token_gap / 5)
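# fix the baseline at 2/5 of token_gap so all labels share a common
# baseline, independent of each glyph's y bearing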
ctx.show_text(s)
for k, t in enumerate(tokens_output):
if __name__ == "__main__":
import mygpt
- tokens_output = ["<wat>", 2, 3, 4, "<end>"]
+ tokens_output = ["<wat>", "-", 3, 4, "<end>"]
tokens_input = [""] + tokens_output[:-1]
vocabulary_size = 3
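# Hypothetical continuation of the demo: random row-normalized matrices
# stand in for attention collected from a mygpt model, and
# `save_attention_image` (name and argument order assumed) is the drawing
# function whose parameters appear above.
attention_matrices = [
    torch.rand(len(tokens_input), len(tokens_input)).softmax(dim=-1)
    for _ in range(3)
]
save_attention_image(
    "attention.pdf",
    tokens_input,
    tokens_output,
    attention_matrices,
    min_total_attention=0.9,  # illustrative value
)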