X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=42d912674641db0fec26a48ce5687b7040cba5cb;hb=291c38d093894d46fba6eb45f82e5b65a2a1cb8b;hp=0eed2aa7192dc8cc9265fdf4c1b3ee805252b1e7;hpb=ef3bef5253ff719953dfffff28d4122c19acdd77;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 0eed2aa..42d9126 100755
--- a/tasks.py
+++ b/tasks.py
@@ -140,7 +140,6 @@ class ProblemLevel2(Problem):
             num_classes=self.len_source,
         )
         source1 = torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source]
-        # source1 = torch.randint(10, (nb, self.len_source))
         marker1 = torch.full((nb, 1), 10)
         result1 = operators.bmm(source1[:, :, None]).squeeze(-1)
         marker2 = torch.full((nb, 1), 11)
@@ -1284,45 +1283,35 @@ class RPL(Task):
         )
 
         if save_attention_image is not None:
-            input = self.test_input[:10]
-            result = input.clone()
-            s = (result == self.t_prog).long()
-            ar_mask = (s.cumsum(dim=1) - s).clamp(min=0, max=1)
-            result = (1 - ar_mask) * result + ar_mask * self.t_nul
-
-            masked_inplace_autoregression(
-                model,
-                self.batch_size,
-                result,
-                ar_mask,
-                deterministic_synthesis,
-                device=self.device,
-            )
+            input = self.test_input[:1].clone()
+            last = (input != self.t_nul).max(0).values.nonzero().max() + 3
+            input = input[:, :last].to(self.device)
 
             with torch.autograd.no_grad():
                 t = model.training
                 model.eval()
                 model.record_attention(True)
-                model(BracketedSequence(result))
+                model(BracketedSequence(input))
                 model.train(t)
-                attention = model.retrieve_attention()
+                ram = model.retrieve_attention()
                 model.record_attention(False)
 
-            n_sample = 0
-            tokens_output = [self.id2token[i.item()] for i in result[n_sample]]
+            tokens_output = [self.id2token[i.item()] for i in input[0]]
             tokens_input = ["n/a"] + tokens_output[:-1]
 
-            for n_head in range(attention[0].size(1)):
-                filename = f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+            for n_head in range(ram[0].size(1)):
+                filename = os.path.join(
+                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
+                )
+                attention_matrices = [m[0, n_head] for m in ram]
                 save_attention_image(
                     filename,
                     tokens_input,
                     tokens_output,
-                    attention,
-                    n_sample=n_sample,
-                    n_head=n_head,
+                    attention_matrices,
+                    k_top=10,
+                    # min_total_attention=0.9,
                     token_gap=12,
-                    layer_gap=40,
-                    # k_top=2,
+                    layer_gap=50,
                 )
                 logger(f"wrote {filename}")
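
Note on the first hunk: the surviving line, torch.rand(nb, 10).sort(dim=1).indices[:, : self.len_source], draws a random permutation of 0..9 per row and keeps its first len_source entries, i.e. it samples the source tokens without replacement; the deleted torch.randint variant sampled with replacement and could produce duplicates. A minimal standalone sketch of the difference (nb and len_source are stand-in values here, not taken from the task configuration):

    import torch

    nb, len_source = 4, 6

    # Argsort of i.i.d. uniform noise gives a random permutation of 0..9 per
    # row; the first len_source columns are therefore distinct tokens.
    source_distinct = torch.rand(nb, 10).sort(dim=1).indices[:, :len_source]

    # The removed, commented-out variant: i.i.d. draws, duplicates possible.
    source_iid = torch.randint(10, (nb, len_source))

    assert all(row.unique().numel() == len_source for row in source_distinct)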
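Note on the second hunk: the attention dump no longer completes the test sequences with masked_inplace_autoregression; it takes a single test sequence, truncates it a few tokens past the last non-t_nul position, runs one forward pass with attention recording enabled, and slices sample 0 / head n_head out of each per-layer tensor returned by model.retrieve_attention() before calling save_attention_image, with the PDFs now written under result_dir. A rough sketch of that slicing step, assuming each entry of ram has shape (batch, heads, seq, seq) as the indexing m[0, n_head] and ram[0].size(1) suggest; the tensors below are fabricated stand-ins, not the model's real attention:

    import torch

    # Stand-in for model.retrieve_attention(): one tensor per layer.
    num_layers, batch, heads, seq = 3, 1, 4, 7
    ram = [
        torch.rand(batch, heads, seq, seq).softmax(dim=-1)
        for _ in range(num_layers)
    ]

    n_head = 0
    # Same slicing as in the hunk: sample 0, one head per layer.
    attention_matrices = [m[0, n_head] for m in ram]

    for layer, a in enumerate(attention_matrices):
        # Each row of a is a distribution over source positions.
        assert torch.allclose(a.sum(dim=-1), torch.ones(seq), atol=1e-5)
        print(f"layer {layer}: head {n_head}, shape {tuple(a.shape)}")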