X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=quizz_machine.py;h=ee7af90b51b0259a99dcfd4fedba375f0d6cf510;hb=c4d4765d75961b8cdfa69ef1c56aeabcf3bdf42a;hp=9b64941edbbf30e11bc6611bafe32485ee6f2cfd;hpb=64abc9f3a07a8211f308271fde7d8f876a968ab5;p=culture.git

diff --git a/quizz_machine.py b/quizz_machine.py
index 9b64941..ee7af90 100755
--- a/quizz_machine.py
+++ b/quizz_machine.py
@@ -105,19 +105,94 @@ def masked_inplace_autoregression(
 
 
 class QuizzMachine:
-    def make_ar_mask(self, input):
-        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
-        return b.long()[None, :].expand_as(input)
+    def indices_forward_and_backward(self, quizzes):
+        i_forward = quizzes[:, 0] == self.token_forward
+        j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
+        i_backward = quizzes[:, 0] == self.token_backward
+        j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
+        assert torch.logical_or(
+            torch.logical_and(i_forward, j_forward),
+            torch.logical_and(i_backward, j_backward),
+        ).all()
+        return i_forward, i_backward
+
+    def reverse_time(self, quizzes):
+        i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+        forward_to_backward = torch.cat(
+            [
+                quizzes[:, 0:1],
+                quizzes[:, 2 + self.prompt_len : 2 + self.prompt_len + self.answer_len],
+                quizzes[:, 1 + self.prompt_len : 1 + self.prompt_len + 1],
+                quizzes[:, 1 : 1 + self.prompt_len],
+            ],
+            dim=1,
+        )
+
+        forward_to_backward[:, 0] = self.token_backward
+        forward_to_backward[:, 1 + self.answer_len] = self.token_backward
+
+        backward_to_forward = torch.cat(
+            [
+                quizzes[:, 0:1],
+                quizzes[:, 2 + self.answer_len :],
+                quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
+                quizzes[:, 1 : 1 + self.answer_len],
+            ],
+            dim=1,
+        )
+
+        backward_to_forward[:, 0] = self.token_forward
+        backward_to_forward[:, 1 + self.prompt_len] = self.token_forward
+
+        m = i_forward.long()[:, None]
+
+        return m * forward_to_backward + (1 - m) * backward_to_forward
+
+    def make_ar_mask(self, quizzes, first=False):
+        i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+        t = torch.arange(quizzes.size(1), device=quizzes.device)
+
+        if first:
+            m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
+            m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
+        else:
+            m_forward = (t >= 2 + self.prompt_len).long()
+            m_backward = (t >= 2 + self.answer_len).long()
+
+        m = i_forward.long()[:, None]
+
+        return m * m_forward + (1 - m) * m_backward
 
     def generate_token_sequences(self, nb):
         prompts, answers = self.problem.generate_prompts_and_answers(nb)
+
+        if self.prompt_len is None:
+            self.prompt_len = prompts.size(1)
+
+        if self.answer_len is None:
+            self.answer_len = answers.size(1)
+
+        assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len
+
         result = []
 
         for prompt, answer in zip(prompts, answers):
             if torch.rand(1) < 0.5:
-                a = [torch.tensor([self.token_forward]), prompt, answer]
+                a = [
+                    torch.tensor([self.token_forward]),
+                    prompt,
+                    torch.tensor([self.token_forward]),
+                    answer,
+                ]
             else:
-                a = [torch.tensor([self.token_backward]), answer, prompt]
+                a = [
+                    torch.tensor([self.token_backward]),
+                    answer,
+                    torch.tensor([self.token_backward]),
+                    prompt,
+                ]
 
             result.append(torch.cat(a, dim=0)[None, :])
 
@@ -128,6 +203,7 @@ class QuizzMachine:
         problem,
         nb_train_samples,
         nb_test_samples,
+        back_accuracy,
         batch_size,
         result_dir,
         logger,
@@ -141,9 +217,12 @@ class QuizzMachine:
         self.nb_token_values = v + 2
 
         self.problem = problem
+        self.back_accuracy = back_accuracy
         self.batch_size = batch_size
         self.device = device
         self.logger = logger
+        self.prompt_len = None
+        self.answer_len = None
 
         self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
             device
@@ -156,22 +235,23 @@ class QuizzMachine:
 
         if result_dir is not None:
             self.save_quizzes(
-                result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
+                result_dir,
+                "culture_w_quizzes",
+                self.train_w_quizzes[:72],
+                prediction=True,
             )
 
     def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
-        l = (quizzes.size(1) - 1) // 2
-        forward = (quizzes[:, 0] == self.token_forward).long()
-        backward = (quizzes[:, 0] == self.token_backward).long()
-        assert forward.equal(1 - backward)
-        first = quizzes[:, 1 : 1 + l]
-        second = quizzes[:, 1 + l : 1 + 2 * l]
-        prompts = forward[:, None] * first + backward[:, None] * second
-        answers = forward[:, None] * second + backward[:, None] * first
+        quizzes = quizzes.clone()
+        forward = quizzes[quizzes[:, 0] == self.token_forward]
+        ib = quizzes[:, 0] == self.token_backward
+        backward = quizzes[ib]
+        assert forward.size(0) + backward.size(0) == quizzes.size(0)
+        quizzes[ib] = self.reverse_time(quizzes[ib])
 
         if prediction:
-            predicted_prompts = backward
-            predicted_answers = forward
+            predicted_prompts = ib
+            predicted_answers = torch.logical_not(ib)
         else:
             predicted_prompts = None
             predicted_answers = None
@@ -179,8 +259,8 @@ class QuizzMachine:
         self.problem.save_quizzes(
             result_dir,
             filename_prefix,
-            prompts,
-            answers,
+            quizzes[:, 1 : 1 + self.prompt_len],
+            quizzes[:, 2 + self.prompt_len :],
             predicted_prompts,
             predicted_answers,
         )
@@ -231,7 +311,6 @@ class QuizzMachine:
         self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
     ):
         def compute_accuracy(input):
-            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
             seq_logproba = torch.empty(input.size(0), device=self.device)
@@ -248,20 +327,65 @@ class QuizzMachine:
                 device=self.device,
             )
 
-            nb_total, nb_correct = (
-                input.size(0),
-                (input == result).long().min(dim=1).values.sum(),
+            #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+            self.save_quizzes(
+                result_dir,
+                f"DEBUG_input_{n_epoch}_{result.size(0):04d}",
+                quizzes=input[:72],
+                prediction=True,
             )
+            self.save_quizzes(
+                result_dir,
+                f"DEBUG_result_{n_epoch}_{result.size(0):04d}",
+                quizzes=result[:72],
+                prediction=True,
+            )
+            #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+            if self.back_accuracy:
+                n_forward = input[:, 0] == self.token_forward
+                nb_total = input[n_forward].size(0)
+                nb_correct = (
+                    (input[n_forward] == result[n_forward])
+                    .long()
+                    .min(dim=1)
+                    .values.sum()
+                    .item()
+                )
+
+                self.logger(
+                    f"back_accuracy {n_epoch=} {model.id=} {nb_correct=} {nb_total=}"
+                )
+
+                n_backward = input[:, 0] == self.token_backward
+                back_input = self.reverse_time(result[n_backward])
+
+                if back_input.size(0) > 0:
+                    back_input[:, 2 + self.prompt_len :] = input[
+                        n_backward, 1 : 1 + self.answer_len
+                    ]
+                    back_nb_total, back_nb_correct = compute_accuracy(back_input)
+                    self.logger(
+                        f"back_accuracy {n_epoch=} {model.id=} {back_nb_correct=} {back_nb_total=}"
+                    )
+                    nb_total += back_nb_total
+                    nb_correct += back_nb_correct
+
+            else:
+                nb_total = input.size(0)
+                nb_correct = (input == result).long().min(dim=1).values.sum()
+
+            exit(0)
 
             return nb_total, nb_correct
 
-        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)
+        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes[:nmax])
 
         self.logger(
             f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
         )
 
-        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)
+        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes[:nmax])
 
         self.logger(
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
@@ -310,35 +434,22 @@ class QuizzMachine:
         else:
             self.test_c_quizzes.append(new_c_quizzes)
 
-    def reverse_time(self, c_quizzes):
-        l = (c_quizzes.size(1) - 1) // 2
-        direction = c_quizzes[:, 0:1]
-        direction = self.token_forward * (
-            direction == self.token_backward
-        ) + self.token_backward * (direction == self.token_forward)
-
-        return torch.cat(
-            [direction, c_quizzes[:, l + 1 :], c_quizzes[:, 1 : l + 1]], dim=1
-        )
-
     def compute_correctness(
         self,
         c_quizzes,
         models_for_validation,
-        both_directions=False,
+        bidirectional_validation=False,
         deterministic_validation=True,
     ):
-        reversed_c_quizzes = self.reverse_time(c_quizzes)
+        if bidirectional_validation:
+            backward_c_quizzes = self.forward_to_backward(c_quizzes)
 
-        ar_mask = self.make_ar_mask(c_quizzes)
         seq_logproba = torch.zeros(
             c_quizzes.size(0),
             max([m.id for m in models_for_validation]) + 1,
             device=self.device,
         )
 
-        # Check how many of models can solve the quizzes in both directions
-
         nb_correct = 0
 
         for model in models_for_validation:
@@ -346,6 +457,8 @@ class QuizzMachine:
 
             seq_logproba[...] = 0.0
 
+            ar_mask = self.make_ar_mask(result)
+
             masked_inplace_autoregression(
                 model=model,
                 batch_size=self.batch_size,
@@ -360,26 +473,28 @@ class QuizzMachine:
 
             correct = (c_quizzes == result).long().min(dim=-1).values
 
-            if both_directions:
-                reversed_result = reversed_c_quizzes.clone()
+            if bidirectional_validation:
+                backward_result = backward_c_quizzes.clone()
+
+                ar_mask = self.make_ar_mask(backward_result)
 
                 masked_inplace_autoregression(
                     model=model,
                     batch_size=self.batch_size,
-                    input=reversed_result,
+                    input=backward_result,
                     ar_mask=ar_mask,
                     seq_logproba=seq_logproba[:, model.id],
                     temperature=1.0,
                     deterministic_synthesis=deterministic_validation,
-                    # progress_bar_desc="solving reversed c_quizzes",
+                    # progress_bar_desc="solving backward c_quizzes",
                     device=self.device,
                 )
 
-                reversed_correct = (
-                    (reversed_c_quizzes == reversed_result).long().min(dim=-1).values
+                backward_correct = (
+                    (backward_c_quizzes == backward_result).long().min(dim=-1).values
                 )
 
-                correct *= reversed_correct
+                correct *= backward_correct
 
             # endif
@@ -394,23 +509,18 @@ class QuizzMachine:
             nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
         )
 
-        ar_mask_first = torch.zeros(c_quizzes.size(), device=self.device)
-        ar_mask_first[:, : ar_mask_first.size(1) // 2 + 1] = 1
-        ar_mask_second = 1 - ar_mask_first
-        ar_mask_first[:, 0] = 0
-        ar_mask_second[:, 0] = 0
-
-        seq_logproba = torch.zeros(ar_mask_first.size(0), device=self.device)
+        seq_logproba = torch.zeros(nb, device=self.device)
 
         # First, we generate the answer at high temperature
 
         c_quizzes[:, 0] = self.token_backward
+        c_quizzes[:, 1 + self.answer_len] = self.token_backward
 
         masked_inplace_autoregression(
             model=model_for_generation,
             batch_size=self.batch_size,
             input=c_quizzes,
-            ar_mask=ar_mask_first,
+            ar_mask=self.make_ar_mask(c_quizzes, first=True),
             seq_logproba=seq_logproba,
             temperature=temperature,
             deterministic_synthesis=False,
@@ -423,7 +533,7 @@ class QuizzMachine:
             model=model_for_generation,
             batch_size=self.batch_size,
             input=c_quizzes,
-            ar_mask=ar_mask_second,
+            ar_mask=self.make_ar_mask(c_quizzes),
             seq_logproba=seq_logproba,
             temperature=1 / temperature,
             deterministic_synthesis=False,
@@ -439,7 +549,7 @@ class QuizzMachine:
             model=model_for_generation,
             batch_size=self.batch_size,
             input=c_quizzes,
-            ar_mask=ar_mask_second,
+            ar_mask=self.make_ar_mask(c_quizzes),
             seq_logproba=seq_logproba,
             temperature=1 / temperature,
             deterministic_synthesis=False,
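
Below, as a reading aid only (it is not part of the patch above), is a minimal, self-contained Python sketch of the token layout this change introduces: a quiz is [direction, prompt, direction, answer] in the forward orientation and [direction, answer, direction, prompt] in the backward one, and reverse_time() swaps the two segments while flipping both direction tokens. The token values, prompt_len and answer_len below are toy assumptions, not the repository's actual constants, and the helper only covers the forward-to-backward case for brevity.

    import torch

    # Toy constants, assumed for illustration only.
    token_forward, token_backward = 0, 1
    prompt_len, answer_len = 4, 2

    def reverse_time(quizzes):
        # Swap the prompt and answer segments and set both direction tokens to
        # "backward", mirroring what QuizzMachine.reverse_time does for quizzes
        # that are currently in the forward orientation.
        prompt = quizzes[:, 1 : 1 + prompt_len]
        answer = quizzes[:, 2 + prompt_len :]
        flipped = torch.cat([quizzes[:, 0:1], answer, quizzes[:, 0:1], prompt], dim=1)
        flipped[:, 0] = token_backward
        flipped[:, 1 + answer_len] = token_backward
        return flipped

    prompt = torch.tensor([[3, 4, 5, 6]])
    answer = torch.tensor([[7, 8]])
    forward_quiz = torch.cat(
        [torch.tensor([[token_forward]]), prompt,
         torch.tensor([[token_forward]]), answer],
        dim=1,
    )
    print(forward_quiz)                # tensor([[0, 3, 4, 5, 6, 0, 7, 8]])
    print(reverse_time(forward_quiz))  # tensor([[1, 7, 8, 1, 3, 4, 5, 6]])

In the patch itself, make_ar_mask(..., first=True) marks the first segment for generation (used when sampling new c_quizzes), while the default marks the second segment (used for prediction), with the segment length chosen according to the direction token.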