ctx.set_line_width(0.25)
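# ctx is presumably a cairo-style drawing context; a thin stroke keeps the
# per-cell attention links readable in the image drawn below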
for d in range(len(attention_matrices)):
-    at = attention_matrices[d]
+    at = attention_matrices[d].to("cpu")
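    # assumed rationale for the change: the drawing code reads individual
    # weights, so moving each matrix to the CPU once avoids repeated
    # per-element device transfers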
    ni = torch.arange(at.size(0))[:, None].expand_as(at)
    nj = torch.arange(at.size(1))[None, :].expand_as(at)
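    # ni/nj hold the row/column index of every cell of at; at itself is
    # flattened below so the weights can be processed cell by cell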
    at = at.flatten()
if save_attention_image is not None:
    input = self.test_input[:1].clone()
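    # keep everything up to the last token that differs from self.t_nul,
    # plus a fixed margin of 3 positions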
    last = (input != self.t_nul).max(0).values.nonzero().max() + 3
-    input = input[:, :last]
+    input = input[:, :last].to(self.device)
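    # the change above returns the truncated prompt to the model's device;
    # the forward pass below runs without autograd, and the training flag is
    # saved, presumably so the mode can be restored after evaluation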
    with torch.autograd.no_grad():
        t = model.training