X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=tasks.py;h=5019aed3b0953a7037bc213296ca7371d1b3c279;hb=960c93d7c0aea41d180814c46d3a05686a426764;hp=038a8ac55dc59d94a44787c542c23354446f22e1;hpb=6c8bed86221baae24a7c2aaaa41c009444efb5c9;p=picoclvr.git

diff --git a/tasks.py b/tasks.py
index 038a8ac..5019aed 100755
--- a/tasks.py
+++ b/tasks.py
@@ -181,37 +181,42 @@ class SandBox(Task):
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
-        if save_attention_image is not None:
-            ns = torch.randint(self.test_input.size(0), (1,)).item()
-            input = self.test_input[ns : ns + 1].clone()
-
-            with torch.autograd.no_grad():
-                t = model.training
-                model.eval()
-                model.record_attention(True)
-                model(BracketedSequence(input))
-                model.train(t)
-                ram = model.retrieve_attention()
-                model.record_attention(False)
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
 
-            tokens_output = [c for c in self.problem.seq2str(input[0])]
-            tokens_input = ["n/a"] + tokens_output[:-1]
-            for n_head in range(ram[0].size(1)):
-                filename = os.path.join(
-                    result_dir, f"rpl_attention_{n_epoch}_h{n_head}.pdf"
-                )
-                attention_matrices = [m[0, n_head] for m in ram]
-                save_attention_image(
-                    filename,
-                    tokens_input,
-                    tokens_output,
-                    attention_matrices,
-                    k_top=10,
-                    # min_total_attention=0.9,
-                    token_gap=12,
-                    layer_gap=50,
-                )
-                logger(f"wrote {filename}")
+        if save_attention_image is None:
+            logger("no save_attention_image (is pycairo installed?)")
+        else:
+            for k in range(10):
+                ns = torch.randint(self.test_input.size(0), (1,)).item()
+                input = self.test_input[ns : ns + 1].clone()
+
+                with torch.autograd.no_grad():
+                    t = model.training
+                    model.eval()
+                    model.record_attention(True)
+                    model(BracketedSequence(input))
+                    model.train(t)
+                    ram = model.retrieve_attention()
+                    model.record_attention(False)
+
+                tokens_output = [c for c in self.problem.seq2str(input[0])]
+                tokens_input = ["n/a"] + tokens_output[:-1]
+                for n_head in range(ram[0].size(1)):
+                    filename = os.path.join(
+                        result_dir, f"sandbox_attention_{k}_h{n_head}.pdf"
+                    )
+                    attention_matrices = [m[0, n_head] for m in ram]
+                    save_attention_image(
+                        filename,
+                        tokens_input,
+                        tokens_output,
+                        attention_matrices,
+                        k_top=10,
+                        # min_total_attention=0.9,
+                        token_gap=12,
+                        layer_gap=50,
+                    )
+                    logger(f"wrote {filename}")
 
 ######################################################################
 
@@ -368,6 +373,10 @@ class PicoCLVR(Task):
             f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
         )
 
+        logger(
+            f"main_test_accuracy {n_epoch} {1-nb_missing_properties/nb_requested_properties}"
+        )
+
     ######################################################################
 
     def produce_results(
@@ -638,6 +647,8 @@ class Maze(Task):
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
         if count is not None:
             proportion_optimal = count.diagonal().sum().float() / count.sum()
             logger(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
@@ -777,6 +788,8 @@ class Snake(Task):
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
 
 ######################################################################
 
@@ -886,6 +899,8 @@ class Stack(Task):
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
         ##############################################################
         # Log a few generated sequences
         input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
@@ -1158,6 +1173,8 @@ class RPL(Task):
             f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
         )
 
+        logger(f"main_test_accuracy {n_epoch} {1-test_nb_errors/test_nb_total}")
+
         test_nb_total, test_nb_errors = compute_nb_errors_output(
             self.test_input[:1000].to(self.device), nb_to_log=10
         )
@@ -1166,7 +1183,9 @@ class RPL(Task):
             f"accuracy_output_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
         )
 
-        if save_attention_image is not None:
+        if save_attention_image is None:
+            logger("no save_attention_image (is pycairo installed?)")
+        else:
             ns = torch.randint(self.test_input.size(0), (1,)).item()
             input = self.test_input[ns : ns + 1].clone()
             last = (input != self.t_nul).max(0).values.nonzero().max() + 3
@@ -1354,6 +1373,8 @@ class Expr(Task):
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
+        logger(f"main_test_accuracy {n_epoch} {test_nb_correct/test_nb_total}")
+
         nb_total = test_nb_delta.sum() + test_nb_missed
         for d in range(test_nb_delta.size(0)):
             logger(
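
######################################################################
#
# A minimal sketch (not part of the diff above) of the attention-dump
# pattern that the new SandBox.produce_results code introduces: record
# attention during a single forward pass, then write one PDF per head.
# It only relies on names that appear in the diff -- BracketedSequence,
# model.record_attention(), model.retrieve_attention(),
# save_attention_image(), problem.seq2str() -- and assumes they behave
# as they are used there; it is not a drop-in addition to tasks.py.

import os

import torch

from mygpt import BracketedSequence  # assumed import, as used by tasks.py


def dump_attention(
    model, problem, test_input, result_dir, save_attention_image, logger, k=0
):
    # Pick one test sequence at random and keep the batch dimension.
    ns = torch.randint(test_input.size(0), (1,)).item()
    input = test_input[ns : ns + 1].clone()

    with torch.autograd.no_grad():
        t = model.training
        model.eval()
        model.record_attention(True)  # ask the model to keep per-layer attention maps
        model(BracketedSequence(input))  # one forward pass over the sampled sequence
        model.train(t)
        ram = model.retrieve_attention()  # one tensor per layer, indexed [batch, head, ...]
        model.record_attention(False)

    # Output tokens are the sequence itself; input tokens are the same
    # sequence shifted right by one position (auto-regressive reading).
    tokens_output = [c for c in problem.seq2str(input[0])]
    tokens_input = ["n/a"] + tokens_output[:-1]

    # One PDF per attention head, with one matrix per layer in the image.
    for n_head in range(ram[0].size(1)):
        filename = os.path.join(result_dir, f"sandbox_attention_{k}_h{n_head}.pdf")
        attention_matrices = [m[0, n_head] for m in ram]
        save_attention_image(
            filename,
            tokens_input,
            tokens_output,
            attention_matrices,
            k_top=10,
            token_gap=12,
            layer_gap=50,
        )
        logger(f"wrote {filename}")


# In the diff, the equivalent inline code runs for k in range(10), i.e.
# ten randomly sampled test sequences per epoch, and is skipped with a
# log message when save_attention_image is None (pycairo not installed).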