Update.
diff --git a/graph.py b/graph.py
index a819283..2c7caf8 100755
--- a/graph.py
+++ b/graph.py
@@ -14,24 +14,23 @@ import cairo
 
 
 def save_attention_image(
-    filename,
+    filename,  # image to save
     tokens_input,
     tokens_output,
-    # An iterable set of BxHxTxT attention matrices
-    attention_matrices,
-    pixel_scale=8,
-    token_gap=15,
-    layer_gap=25,
-    y_eps=0.5,
-    padding=10,
+    attention_matrices,  # list of 2d tensors T1xT2, T2xT3, ..., Tk-1xTk
     # do not draw links with a lesser attention
     min_link_attention=0,
-    # draw only the strongest links necessary to reache
-    # min_total_attention
+    # draw only the strongest links necessary so that their summed
+    # attention is above min_total_attention
     min_total_attention=None,
     # draw only the top k links
     k_top=None,
     curved=True,
+    pixel_scale=8,
+    token_gap=15,
+    layer_gap=25,
+    y_eps=0.5,
+    padding=10,
 ):
     if k_top is not None:
         am = []
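
This hunk moves the semantic arguments first and the layout parameters (pixel_scale, token_gap, layer_gap, y_eps, padding) to the end, and changes attention_matrices from BxHxTxT batches to a chained list of 2d tensors. A minimal usage sketch of the new signature (hypothetical token lists, random matrices, and output path; assumes torch is installed and that the function writes the image to the given path):

import torch

from graph import save_attention_image

tokens_input = ["<s>", "the", "cat", "sat"]
tokens_output = ["the", "cat", "sat", "down"]

# Chained 2d matrices T1xT2, T2xT3, row-normalized so that each token
# distributes one unit of attention over the previous layer's tokens.
attention_matrices = [
    torch.softmax(torch.randn(4, 4), dim=1),
    torch.softmax(torch.randn(4, 4), dim=1),
]

save_attention_image(
    "attention.pdf",  # hypothetical output path
    tokens_input,
    tokens_output,
    attention_matrices,
    min_total_attention=0.9,  # keep the strongest links summing above 0.9
)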
@@ -60,7 +59,7 @@ def save_attention_image(
 
     ctx.set_line_width(0.25)
     for d in range(len(attention_matrices)):
-        at = attention_matrices[d]
+        at = attention_matrices[d].to("cpu")
         ni = torch.arange(at.size(0))[:, None].expand_as(at)
         nj = torch.arange(at.size(1))[None, :].expand_as(at)
         at = at.flatten()
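
The ni/nj lines in the context above pair every attention value with its (row, column) coordinates before everything is flattened, so links can be filtered or sorted as flat lists. A standalone sketch of the idiom with a toy tensor (thresholds are hypothetical, not from the patch):

import torch

at = torch.rand(3, 5)  # toy T1xT2 attention matrix

# Broadcast row and column indices to the shape of the matrix.
ni = torch.arange(at.size(0))[:, None].expand_as(at)
nj = torch.arange(at.size(1))[None, :].expand_as(at)

# Flatten values and coordinates together; element k of each tensor
# still refers to the same cell (ni[k], nj[k]).
at, ni, nj = at.flatten(), ni.flatten(), nj.flatten()

# min_link_attention-style cutoff: drop weak links outright.
keep = at > 0.25
at, ni, nj = at[keep], ni[keep], nj[keep]

# One plausible reading of min_total_attention: keep the strongest links
# until their summed attention reaches the target.
v, order = at.sort(descending=True)
n = int((v.cumsum(0) < 0.9).long().sum()) + 1
strongest = order[:n]
print(list(zip(ni[strongest].tolist(), nj[strongest].tolist(), at[strongest].tolist())))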