+ # --------------------------------------------------------------------
def compute_nb_errors_output(input, nb_to_log=0):
    """Count sequences whose output span the model fails to reproduce.

    Each row of `input` is assumed to be a (seq_len,)-slice of a
    (batch, seq_len) token-id tensor containing a `t_output` marker
    followed later by a `t_prog` marker — TODO confirm against the
    task's sequence layout. The tokens strictly between the last
    `t_output` and the first `t_prog` are blanked to `t_nul` and
    regenerated autoregressively; a sequence counts as an error if
    the regenerated row differs anywhere from the original.

    Args:
        input: token-id tensor of shape (batch, seq_len).
        nb_to_log: number of (target, prediction) pairs to log.

    Returns:
        (sum_nb_total, sum_nb_errors): total sequences scored and how
        many were predicted incorrectly.
    """
    result = input.clone()
    # Column indices 0..seq_len-1, broadcast against the batch dim.
    k = torch.arange(result.size(1), device=result.device)[None, :]
    # Position of the last t_output marker and of the first t_prog
    # marker in each row; (x * k).max picks the largest matching index.
    # NOTE(review): for t_prog this yields the LAST occurrence — fine
    # if t_prog appears once per row, verify against the data format.
    last_output_idx = (
        ((result == self.t_output) * k).max(dim=1, keepdim=True).values
    )
    first_prog_idx = (
        ((result == self.t_prog) * k).max(dim=1, keepdim=True).values
    )
    # 1 exactly on the open interval (last_output_idx, first_prog_idx).
    ar_mask = (k > last_output_idx).long() * (k < first_prog_idx).long()
    # Blank the span the model must fill; everything else is kept.
    result = (1 - ar_mask) * result + ar_mask * self.t_nul

    masked_inplace_autoregression(
        model,
        self.batch_size,
        result,
        ar_mask,
        deterministic_synthesis,
        device=self.device,
    )

    sum_nb_total, sum_nb_errors = 0, 0
    for one_input, one_result, i, j in zip(
        input, result, last_output_idx, first_prog_idx
    ):
        sum_nb_total += 1
        # Unmasked positions were copied verbatim, so a whole-row
        # comparison is equivalent to comparing the regenerated span.
        correct = (one_input - one_result).abs().max() == 0
        sum_nb_errors += 0 if correct else 1
        if nb_to_log > 0:
            # Decode only the span of interest; `t` avoids shadowing
            # the slice index `i` from the zip above.
            result_stack = " ".join(
                str(self.id2token[t.item()]) for t in one_result[i : j + 1]
            )
            target_stack = " ".join(
                str(self.id2token[t.item()]) for t in one_input[i : j + 1]
            )
            comment = "*" if correct else "-"
            logger(
                f"output_test {comment} [{target_stack}] PREDICTED [{result_stack}]"
            )
            nb_to_log -= 1

    return sum_nb_total, sum_nb_errors
+
# --------------------------------------------------------------------

# Score the program-synthesis direction on (at most) the first 1000
# test sequences, unless program prediction is disabled for this run.
if not self.no_prog:
    # `compute_nb_errors_prog` is defined elsewhere in this file —
    # presumably the counterpart that regenerates the program span
    # instead of the output span; confirm against its definition.
    test_nb_total, test_nb_errors = compute_nb_errors_prog(
        self.test_input[:1000].to(self.device), nb_to_log=10
    )

    logger(
        f"accuracy_prog_test {n_epoch} nb_total {test_nb_total} nb_errors {test_nb_errors} accuracy {100.0*(1-test_nb_errors/test_nb_total):.02f}%"
    )
+
+ test_nb_total, test_nb_errors = compute_nb_errors_output(
+ self.test_input[:1000].to(self.device), nb_to_log=10