import math

import torch, torchvision

from torch.nn import functional as F

import cairo
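
# drawing is done with pycairo; the figure is written out as a PDF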
######################################################################


def save_attention_image(
    filename,
    tokens_input,
    tokens_output,
    attention,  # list of (N, nb_heads, T, T) attention tensors, one per layer
    n_sample=0,
    n_head=0,
    pixel_scale=8,
    token_gap=10,
    layer_gap=25,
    y_eps=0.5,
    padding=10,
    # do not draw links with a lesser attention
    min_att=0,
    # draw only the strongest links, discarding the weakest ones as long
    # as the attention mass they carry remains below residual
    residual=None,
    # draw only the top k links of each token
    k_top=None,
):
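    # stack the chosen sample and head of every layer into a single
    # (nb_layers, T, T) tensor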
    attention = torch.cat(
        [x[n_sample : n_sample + 1, n_head] for x in attention], dim=0
    )

    if k_top is not None:
        # keep, for each token, only its k_top strongest links; the rank of
        # an entry is the argsort of the sorting permutation
        ranks = attention.sort(dim=-1, descending=True).indices.argsort(dim=-1)
        attention = attention * (ranks < k_top)
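        # e.g. with k_top=2, a row of links [0.5, 0.9, 0.8] becomes [0.0, 0.9, 0.8]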

    if residual is not None:
        s = attention.sort(dim=-1)
        m = 1 - (s.values.cumsum(-1) < residual).long()
        b = m.new(attention.size()).scatter_(dim=-1, index=s.indices, src=m)
        attention = attention * b
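        # e.g. with residual=0.2, a row [0.05, 0.1, 0.6, 0.25] becomes
        # [0.0, 0.0, 0.6, 0.25]: dropping the two weakest links removes 0.15
        # of attention mass, and dropping 0.25 as well would exceed residual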

    surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, None)

    ctx = cairo.Context(surface)
    ctx.scale(pixel_scale, pixel_scale)

    ctx.set_source_rgb(0.0, 0.0, 0.0)
    ctx.set_font_size(4.0)
    # ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
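
    # first pass: draw the links between consecutive layers, with darker
    # lines for stronger attention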
    y = 0

    for d in range(attention.size(0)):
        at = attention[d]
        ni = torch.arange(at.size(0))[:, None].expand_as(at)
        nj = torch.arange(at.size(1))[None, :].expand_as(at)
        # draw the weakest links first, so that the strongest end up on top
        at, ni, nj = at.flatten(), ni.flatten(), nj.flatten()
        o = at.sort().indices
        at, ni, nj = at[o], ni[o], nj[o]
        for i, j, a in zip(ni, nj, at):
            if a > 0 and a >= min_att:
                c = 1 - a.item()
                ctx.set_source_rgb(c, c, c)
                ctx.set_line_width(0.5)
                ctx.move_to(j * token_gap, y - y_eps)
                ctx.line_to(i * token_gap, y - layer_gap + y_eps)
                ctx.stroke()
        y -= layer_gap
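
    # second pass: draw a node for every token at every layer, as a black
    # dot on a slightly larger white disc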
    for d in range(0, attention.size(0) + 1):
        for n in range(attention.size(-1)):
            xc, yc = n * token_gap, -d * layer_gap
            ctx.set_source_rgb(1.0, 1.0, 1.0)
            ctx.arc(xc, yc, token_gap / 10, 0, 2 * math.pi)
            ctx.fill()
            ctx.set_source_rgb(0.0, 0.0, 0.0)
            ctx.arc(xc, yc, token_gap / 20, 0, 2 * math.pi)
            ctx.fill()

    ctx.set_source_rgb(0.0, 0.0, 0.0)

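    # write each input token under its column of nodes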
    for k, t in enumerate(tokens_input):
        s = str(t)
        (
            x_bearing,
            y_bearing,
            width_t,
            height_t,
            x_advance,
            y_advance,
        ) = ctx.text_extents(s)
        ctx.move_to(k * token_gap - width_t / 2, token_gap / 5 - y_bearing)
        ctx.show_text(s)
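
    # and each output token above the top layer of nodes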
    for k, t in enumerate(tokens_output):
        s = str(t)
        (
            x_bearing,
            y_bearing,
            width_t,
            height_t,
            x_advance,
            y_advance,
        ) = ctx.text_extents(s)
        ctx.move_to(
            k * token_gap - width_t / 2, -token_gap / 5 - attention.size(0) * layer_gap
        )
        ctx.show_text(s)
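
    # the recording surface makes it possible to measure the drawing after
    # the fact, and to size the PDF page to the inked area plus some padding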
    x, y, width, height = surface.ink_extents()
    x -= padding
    y -= padding
    width += 2 * padding
    height += 2 * padding
    pdf_surface = cairo.PDFSurface(filename, width, height)
    ctx_pdf = cairo.Context(pdf_surface)
    ctx_pdf.set_source_surface(surface, -x, -y)
    ctx_pdf.paint()
    pdf_surface.finish()


######################################################################

if __name__ == "__main__":
    import mygpt

    tokens_output = ["bluh", 2, 3, 4, "blih"]
    tokens_input = ["n/a"] + tokens_output[:-1]

    vocabulary_size = 3
    x = torch.randint(vocabulary_size, (1, len(tokens_input)))

    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        # tiny dimensions, since this only exercises the drawing code
        dim_model=4,
        dim_keys=2,
        dim_hidden=2,
        nb_heads=2,
        nb_blocks=3,
        dropout=0.1,
        causal=True,
    )

    model.eval()
    model.record_attention()

    y1 = model(mygpt.BracketedSequence(x)).x

    attention = model.retrieve_attention()

    save_attention_image("attention.pdf", tokens_input, tokens_output, attention)
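
    # the filtering options can be exercised the same way, e.g.
    # save_attention_image(
    #     "attention_top2.pdf", tokens_input, tokens_output, attention, k_top=2
    # )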