+ ctx.set_line_width(0.25)
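+ # Draw the attention links, one attention matrix at a time, advancing one row of tokens per matrix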
+ for d in range(len(attention_matrices)):
+     at = attention_matrices[d].to("cpu")
+     # Index grids aligned with the attention matrix: ni[i, j] = i, nj[i, j] = j
+     ni = torch.arange(at.size(0))[:, None].expand_as(at)
+     nj = torch.arange(at.size(1))[None, :].expand_as(at)
+     # Flatten and sort by attention weight, so the strongest links are drawn last and end up on top
+     at = at.flatten()
+     o = at.sort().indices
+     at = at[o]
+     ni = ni.flatten()[o]
+     nj = nj.flatten()[o]
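+     # Each link joins token j in the current row to token i in the next row; its grey level encodes the attention weight (darker = stronger)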
+     for i, j, a in zip(ni, nj, at):
+         if a > 0 and a >= min_link_attention:
+             c = 1 - a.item()
+             ctx.set_source_rgb(c, c, c)
+             ax, ay = j * token_gap, y - y_eps
+             ctx.move_to(ax, ay)
+             dx, dy = i * token_gap, y - layer_gap + y_eps
+             if curved:
+                 bx, by = ax, ay - layer_gap * 0.5
+                 cx, cy = dx, dy + layer_gap * 0.5
+                 ctx.curve_to(bx, by, cx, cy, dx, dy)
+             else:
+                 ctx.line_to(dx, dy)
+             ctx.stroke()
+     y -= layer_gap
+
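+ # Draw a node for every token position in every row (len(attention_matrices) + 1 rows in total)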
+ for d in range(len(attention_matrices) + 1):
+     nb_tokens = (
+         attention_matrices[0].size(-1)
+         if d == 0
+         else attention_matrices[d - 1].size(-2)
+     )
+     for n in range(nb_tokens):
+         xc, yc = n * token_gap, -d * layer_gap
+         # A larger white disc under a smaller black one, so the node stands out against the links
+         ctx.set_source_rgb(1.0, 1.0, 1.0)
+         ctx.arc(xc, yc, token_gap / 10, 0, 2 * math.pi)
+         ctx.fill()
+         ctx.set_source_rgb(0.0, 0.0, 0.0)
+         ctx.arc(xc, yc, token_gap / 20, 0, 2 * math.pi)
+         ctx.fill()
+
+ ctx.set_source_rgb(0.0, 0.0, 0.0)
+
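+ # Write the input tokens as text labels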
+ for k, t in enumerate(tokens_input):
+     s = str(t)