import math

import torch, torchvision
from torch.nn import functional as F

import cairo
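# "cairo" is the pycairo binding (pip install pycairo), used for the drawing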

######################################################################


def save_attention_image(
    filename,  # image to save
    tokens_input,  # tokens labelling the bottom row
    tokens_output,  # tokens labelling the top row
    attention_matrices,  # list of 2d tensors T2xT1, T3xT2, ..., Tk x Tk-1
    # do not draw links with a lesser attention
    min_link_attention=0,
    # draw only the strongest links necessary so that their summed
    # attention is above min_total_attention
    min_total_attention=None,
    # draw only the top k links
    k_top=None,
    curved=True,  # draw the links as Bezier curves rather than segments
    pixel_scale=8, token_gap=15, layer_gap=25, y_eps=0.5, padding=10,
):

    if k_top is not None:
        am = []
        for m in attention_matrices:
            # rank of each link in its row (0 = strongest), keep the k_top best
            ranks = m.sort(dim=-1, descending=True).indices.argsort(dim=-1)
            am.append(m * (ranks < k_top))
        attention_matrices = am
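
    # e.g. with k_top=2, a row [.1, .7, .2] has ranks [2, 0, 1], so the
    # mask (ranks < 2) keeps .7 and .2 and zeroes out .1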

    if min_total_attention is not None:
        am = []
        for m in attention_matrices:
            s = m.sort(dim=-1)  # ascending
            # keep, per row, the strongest links summing to min_total_attention
            kept = 1 - (s.values.cumsum(-1) < 1 - min_total_attention).long()
            # scatter the sorted-order mask back to the original column order
            b = kept.new(kept.size()).scatter_(dim=-1, index=s.indices, src=kept)
            am.append(m * b)
        attention_matrices = am
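
    # e.g. with min_total_attention=0.9, a row [.05, .7, .25] has ascending
    # cumulative sums [.05, .3, 1.]; only .05 < 1 - 0.9, so the weakest link
    # is dropped and the kept links sum to 0.95 >= 0.9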

    # record the drawing; the inked extent will size the final PDF
    surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, None)

    ctx = cairo.Context(surface)
    ctx.scale(pixel_scale, pixel_scale)

    ctx.set_source_rgb(0.0, 0.0, 0.0)
    ctx.set_font_size(4.0)
    # ctx.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)

    x, y = 0, 0  # y tracks the vertical position of the current layer

    ctx.set_line_width(0.25)
    # draw the attention links, layer by layer
    for d in range(len(attention_matrices)):
        at = attention_matrices[d].to("cpu")
        ni = torch.arange(at.size(0))[:, None].expand_as(at)
        nj = torch.arange(at.size(1))[None, :].expand_as(at)
        # flatten and sort by attention so the darkest links are drawn last
        at = at.flatten()
        o = at.sort().indices
        at, ni, nj = at[o], ni.flatten()[o], nj.flatten()[o]
        for i, j, a in zip(ni, nj, at):
            if a > 0 and a >= min_link_attention:
                c = 1 - a.item()  # grey level, attention 1 is black
                ctx.set_source_rgb(c, c, c)
                ax, ay = j * token_gap, y - y_eps
                ctx.move_to(ax, ay)
                dx, dy = i * token_gap, y - layer_gap + y_eps
                if curved:
                    # control points placed so the curve is vertical at both ends
                    bx, by = ax, ay - layer_gap * 0.5
                    cx, cy = dx, dy + layer_gap * 0.5
                    ctx.curve_to(bx, by, cx, cy, dx, dy)
                else:
                    ctx.line_to(dx, dy)
                ctx.stroke()
        y -= layer_gap

    # draw the token nodes on top of the links
    for d in range(0, len(attention_matrices) + 1):
        n = (
            attention_matrices[0].size(-1)
            if d == 0
            else attention_matrices[d - 1].size(-2)
        )
        for t in range(n):
            xc, yc = t * token_gap, -d * layer_gap
            # white disc to erase the link ends, then the node itself
            ctx.set_source_rgb(1.0, 1.0, 1.0)
            ctx.arc(xc, yc, token_gap / 10, 0, 2 * math.pi)
            ctx.fill()
            ctx.set_source_rgb(0.0, 0.0, 0.0)
            ctx.arc(xc, yc, token_gap / 20, 0, 2 * math.pi)
            ctx.fill()

    ctx.set_source_rgb(0.0, 0.0, 0.0)

    # input tokens, centered under the bottom row of nodes
    for k, t in enumerate(tokens_input):
        s = str(t)
        _, y_bearing, width_t, _, _, _ = ctx.text_extents(s)
        ctx.move_to(k * token_gap - width_t / 2, token_gap / 5 - y_bearing)
        ctx.show_text(s)

    # output tokens, centered above the top row of nodes
    for k, t in enumerate(tokens_output):
        s = str(t)
        _, _, width_t, _, _, _ = ctx.text_extents(s)
        ctx.move_to(
            k * token_gap - width_t / 2,
            -token_gap / 5 - len(attention_matrices) * layer_gap,
        )
        ctx.show_text(s)

    # size the PDF to the inked area plus padding
    x, y, width, height = surface.ink_extents()
    x -= padding
    y -= padding
    width += 2 * padding
    height += 2 * padding
    pdf_surface = cairo.PDFSurface(filename, width, height)
    ctx_pdf = cairo.Context(pdf_surface)
    ctx_pdf.set_source_surface(surface, -x, -y)
    ctx_pdf.paint()
    pdf_surface.finish()
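
    # a cairo.SVGSurface (or an ImageSurface plus Surface.write_to_png) could
    # be substituted for the PDFSurface above to produce other output formats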

######################################################################

if __name__ == "__main__":
    import mygpt

    tokens_output = ["<wat>", 2, 3, 4, "<end>"]
    tokens_input = [""] + tokens_output[:-1]

    vocabulary_size = 3
    x = torch.randint(vocabulary_size, (1, len(tokens_input)))

    # small arbitrary hyper-parameters, any values work for this smoke test
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=4,
        dim_keys=2,
        dim_hidden=2,
        nb_heads=2,
        nb_blocks=3,
        dropout=0.1,
        causal=True,
    )

    model.eval()
    model.record_attention()

    y1 = model(mygpt.BracketedSequence(x)).x
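
    # the model consumes and returns BracketedSequence wrappers, which mygpt
    # uses for incremental evaluation; .x recovers the plain tensor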

    # first head of the first sample, for each block
    attention_matrices = [m[0, 0] for m in model.retrieve_attention()]

    # synthetic matrices to test the layout without a model:
    # attention_matrices = [torch.rand(3, 5), torch.rand(8, 3), torch.rand(5, 8)]
    # attention_matrices = [a / a.sum(-1, keepdim=True) for a in attention_matrices]

    save_attention_image(
        "attention.pdf",
        tokens_input,
        tokens_output,
        attention_matrices,
        # k_top=2,
        min_total_attention=0.9,
    )
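
    # note: k_top and min_total_attention can also be combined, in which case
    # the top-k filter is applied first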