X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=quizz_machine.py;h=92b579980ec1a10aec128f8a91927f303417163c;hb=d3d4ce7bb2b799f4bf81a936987e3a8938514af8;hp=8ee022675322547ed1ceb4fbdde4e1fefbcc712f;hpb=07458603bfb24d5a12b530839e52e42fe8b0e6b8;p=culture.git

diff --git a/quizz_machine.py b/quizz_machine.py
index 8ee0226..92b5799 100755
--- a/quizz_machine.py
+++ b/quizz_machine.py
@@ -12,10 +12,48 @@ import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
+import mygpt
 from mygpt import BracketedSequence
 
 ######################################################################
 
+# ar_mask is a tensor of 0s and 1s with the same shape as input, with
+# 1s at the positions where tokens should be generated; the other
+# positions are left unchanged.
+
+
+def one_batch_masked_inplace_autoregression(
+    model,
+    input,
+    ar_mask,
+    seq_logproba,
+    temperature=1.0,
+    deterministic_synthesis=False,
+):
+    to_generate = (ar_mask.sum(0) > 0).nonzero()
+
+    if to_generate.min() > 0:
+        model(
+            BracketedSequence(input, 0, to_generate.min())
+        )  # Needed to initialize the model's cache
+    for s in range(to_generate.min(), to_generate.max() + 1):
+        output = model(BracketedSequence(input, s, 1)).x
+
+        logits = output[:, s]
+
+        logits = (logits / temperature).log_softmax(dim=-1)
+
+        if deterministic_synthesis:
+            t_next = logits.argmax(-1)
+        else:
+            dist = torch.distributions.categorical.Categorical(logits=logits)
+            t_next = dist.sample()
+
+        all_n = torch.arange(t_next.size(0))
+        # logits[all_n, t_next] is already the per-sequence log-proba
+        # of the sampled tokens (shape: batch); summing it over the
+        # batch would corrupt seq_logproba, so accumulate it as-is
+        seq_logproba += logits[all_n, t_next]
+
+        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
+
 
 def masked_inplace_autoregression(
     model,
@@ -51,14 +89,13 @@ def masked_inplace_autoregression(
         model.eval()
 
         for input, ar_mask, seq_logproba in batches:
-            model.masked_inplace_autoregression(
+            one_batch_masked_inplace_autoregression(
+                model=model,
                 input=input,
                 ar_mask=ar_mask,
                 seq_logproba=seq_logproba,
                 temperature=temperature,
                 deterministic_synthesis=deterministic_synthesis,
-                forbidden_tokens=forbidden_tokens,
-                forced_biases=logit_biases,
             )
 
         model.train(t)
@@ -68,39 +105,181 @@ def masked_inplace_autoregression(
 
 class QuizzMachine:
-    def make_ar_mask(self, input):
-        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
-        return b.long()[None, :].expand_as(input)
+    def indices_forward_and_backward(self, quizzes):
+        i_forward = quizzes[:, 0] == self.token_forward
+        j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
+        i_backward = quizzes[:, 0] == self.token_backward
+        j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
+        assert torch.logical_or(
+            torch.logical_and(i_forward, j_forward),
+            torch.logical_and(i_backward, j_backward),
+        ).all()
+        return i_forward, i_backward
+
+    def reverse_time(self, quizzes):
+        i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+        forward_to_backward = torch.cat(
+            [
+                quizzes[:, 0:1],
+                quizzes[:, 2 + self.prompt_len : 2 + self.prompt_len + self.answer_len],
+                quizzes[:, 1 + self.prompt_len : 1 + self.prompt_len + 1],
+                quizzes[:, 1 : 1 + self.prompt_len],
+            ],
+            dim=1,
+        )
+
+        forward_to_backward[:, 0] = self.token_backward
+        forward_to_backward[:, 1 + self.answer_len] = self.token_backward
+
+        backward_to_forward = torch.cat(
+            [
+                quizzes[:, 0:1],
+                quizzes[:, 2 + self.answer_len :],
+                quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
+                quizzes[:, 1 : 1 + self.answer_len],
+            ],
+            dim=1,
+        )
+
+        backward_to_forward[:, 0] = self.token_forward
+        backward_to_forward[:, 1 + self.prompt_len] = self.token_forward
+
+        m = i_forward.long()[:, None]
+
+        return m * forward_to_backward + (1 - m) * backward_to_forward
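+
+    # A quiz is laid out as
+    #
+    #   [token_forward, prompt (prompt_len tokens), token_forward, answer (answer_len tokens)]
+    #
+    # in the forward direction, or
+    #
+    #   [token_backward, answer (answer_len tokens), token_backward, prompt (prompt_len tokens)]
+    #
+    # in the backward direction (see generate_token_sequences below).
+    # reverse_time swaps the two payloads and flips the direction
+    # tokens. E.g. with prompt_len=3 and answer_len=2, make_ar_mask
+    # maps a forward quiz [F, p, p, p, F, a, a] to the mask
+    # [0, 1, 1, 1, 0, 0, 0] when first=True (generate the first
+    # payload) and to [0, 0, 0, 0, 0, 1, 1] otherwise (generate the
+    # second one).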
+
+    def make_ar_mask(self, quizzes, first=False):
+        i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+        t = torch.arange(quizzes.size(1), device=quizzes.device)
+
+        if first:
+            m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
+            m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
+        else:
+            m_forward = (t >= 2 + self.prompt_len).long()
+            m_backward = (t >= 2 + self.answer_len).long()
+
+        m = i_forward.long()[:, None]
+
+        return m * m_forward + (1 - m) * m_backward
+
+    def generate_token_sequences(self, nb):
+        prompts, answers = self.problem.generate_prompts_and_answers(nb)
+
+        if self.prompt_len is None:
+            self.prompt_len = prompts.size(1)
+
+        if self.answer_len is None:
+            self.answer_len = answers.size(1)
+
+        assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len
+
+        result = []
+
+        for prompt, answer in zip(prompts, answers):
+            if torch.rand(1) < 0.5:
+                a = [
+                    torch.tensor([self.token_forward]),
+                    prompt,
+                    torch.tensor([self.token_forward]),
+                    answer,
+                ]
+            else:
+                a = [
+                    torch.tensor([self.token_backward]),
+                    answer,
+                    torch.tensor([self.token_backward]),
+                    prompt,
+                ]
+
+            result.append(torch.cat(a, dim=0)[None, :])
+
+        return torch.cat(result, dim=0)
 
     def __init__(
         self,
         problem,
         nb_train_samples,
         nb_test_samples,
+        back_accuracy,
         batch_size,
-        result_dir=None,
-        logger=None,
+        result_dir,
+        logger,
         device=torch.device("cpu"),
     ):
         super().__init__()
 
+        v = problem.nb_token_values()
+        self.token_forward = v
+        self.token_backward = v + 1
+        self.nb_token_values = v + 2
+
         self.problem = problem
+        self.back_accuracy = back_accuracy
         self.batch_size = batch_size
         self.device = device
+        self.logger = logger
+        self.prompt_len = None
+        self.answer_len = None
 
-        self.train_w_quizzes = self.problem.generate_seq(nb_train_samples).to(device)
-        self.test_w_quizzes = self.problem.generate_seq(nb_test_samples).to(device)
+        self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
+            device
+        )
 
-        self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1
+        self.test_w_quizzes = self.generate_token_sequences(nb_test_samples).to(device)
 
         self.train_c_quizzes = []
         self.test_c_quizzes = []
 
         if result_dir is not None:
-            self.problem.save_quizzes(
-                self.train_w_quizzes[:72], result_dir, "culture_w_quizzes"
+            self.save_quizzes(
+                result_dir,
+                "culture_w_quizzes",
+                self.train_w_quizzes[:72],
+                n_backward=self.train_w_quizzes[:72, 0] == self.token_backward,
             )
 
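+    # Saving normalizes everything to the forward layout: backward
+    # quizzes are flipped with reverse_time, so problem.save_quizzes
+    # always receives the prompt first and the answer second.
+    # predicted_prompts / predicted_answers encode which half was
+    # predicted and, when mistakes is given, whether the prediction
+    # was correct.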
+    def save_quizzes(
+        self,
+        result_dir,
+        filename_prefix,
+        quizzes,
+        n_backward=None,
+        mistakes=None,
+    ):
+        quizzes = quizzes.clone()
+        forward = quizzes[quizzes[:, 0] == self.token_forward]
+        ib = quizzes[:, 0] == self.token_backward
+        backward = quizzes[ib]
+        assert forward.size(0) + backward.size(0) == quizzes.size(0)
+        quizzes[ib] = self.reverse_time(quizzes[ib])
+
+        if n_backward is None:
+            predicted_prompts = None
+            predicted_answers = None
+        else:
+            predicted_prompts = n_backward.long()
+            predicted_answers = 1 - predicted_prompts
+            if mistakes is not None:
+                # 0/-1/+1 ~ not-to-predict / predicted wrong / predicted correct
+                predicted_prompts *= mistakes
+                predicted_answers *= mistakes
+            else:
+                # 0/2 ~ not-to-predict / to predict
+                predicted_prompts *= 2
+                predicted_answers *= 2
+
+        self.problem.save_quizzes(
+            result_dir,
+            filename_prefix,
+            quizzes[:, 1 : 1 + self.prompt_len],
+            quizzes[:, 2 + self.prompt_len :],
+            predicted_prompts,
+            predicted_answers,
+        )
+
     def batches(self, split="train", desc=None):
         assert split in {"train", "test"}
         if split == "train":
@@ -141,13 +320,12 @@ class QuizzMachine:
         yield batch
 
     def vocabulary_size(self):
-        return self.nb_codes
+        return self.nb_token_values
 
     def produce_results(
-        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
     ):
-        def compute_accuracy(input, logger=None):
-            input = input[:nmax]
+        def compute_accuracy(input, log_prefix=None):
             ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
             seq_logproba = torch.empty(input.size(0), device=self.device)
@@ -164,49 +342,56 @@ class QuizzMachine:
                 device=self.device,
             )
 
-            nb_total, nb_correct = (
-                input.size(0),
-                (input == result).long().min(dim=1).values.sum(),
-            )
+            correct = torch.empty(input.size(0), dtype=torch.int64, device=input.device)
 
-            return nb_total, nb_correct
+            n_forward = input[:, 0] == self.token_forward
+            n_backward = input[:, 0] == self.token_backward
 
-        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)
+            correct[n_forward] = (
+                (input[n_forward] == result[n_forward]).long().min(dim=1).values
+            )
 
-        logger(
-            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
-        )
+            if self.back_accuracy and n_backward.any():
+                # a backward quiz B->A* is judged by mapping the
+                # predicted prompt A* forward again and checking
+                # B->A*->B*=B, instead of requiring A*=A
+                back_input = self.reverse_time(result[n_backward])
+                back_input[:, 2 + self.prompt_len :] = input[
+                    n_backward, 1 : 1 + self.answer_len
+                ]
+                result[n_backward], correct[n_backward] = compute_accuracy(back_input)
 
-        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)
+            if log_prefix is not None:
+                forward_nb_correct = correct[n_forward].sum()
+                forward_nb_total = correct[n_forward].size(0)
+                backward_nb_correct = correct[n_backward].sum()
+                backward_nb_total = correct[n_backward].size(0)
 
-        logger(
-            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
-        )
+                self.logger(
+                    f"forward_accuracy {log_prefix} {n_epoch} {model.id=} {forward_nb_correct} / {forward_nb_total}"
+                )
 
-        main_test_accuracy = test_nb_correct / test_nb_total
-        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+                self.logger(
+                    f"backward_accuracy {log_prefix} {n_epoch} {model.id=} {backward_nb_correct} / {backward_nb_total}"
+                )
 
-        ##############################
+            return result, correct
 
-        input = self.test_w_quizzes[:96]
-        ar_mask = self.make_ar_mask(input)
-        result = input.clone() * (1 - ar_mask)
-        seq_logproba = torch.empty(input.size(0), device=self.device)
+        compute_accuracy(self.train_w_quizzes[:nmax], log_prefix="train")
 
-        masked_inplace_autoregression(
-            model=model,
-            batch_size=self.batch_size,
-            input=result,
-            ar_mask=ar_mask,
-            seq_logproba=seq_logproba,
-            temperature=1.0,
-            deterministic_synthesis=deterministic_synthesis,
-            progress_bar_desc=None,
-            device=self.device,
+        test_result, test_correct = compute_accuracy(
+            self.test_w_quizzes[:nmax], log_prefix="test"
        )
 
-        self.problem.save_quizzes(
-            result[:72], result_dir, f"culture_prediction_{n_epoch:04d}_{model.id:02d}"
+        main_test_accuracy = test_correct.sum() / test_correct.size(0)
+        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+
+        ##############################
+
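+        # test_correct is 0/1; 2*c-1 rescales it to -1/+1, the
+        # predicted-wrong / predicted-correct convention expected by
+        # save_quizzes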
f"culture_prediction_{n_epoch:04d}_{model.id:02d}", + quizzes=test_result[:72], + n_backward=self.test_w_quizzes[:72, 0] == self.token_backward, + mistakes=test_correct[:72] * 2 - 1, ) return main_test_accuracy @@ -215,7 +400,7 @@ class QuizzMachine: input = self.train_w_quizzes if for_train else self.test_w_quizzes nb = min(nb, input.size(0)) input[:-nb] = input[nb:].clone() - input[-nb:] = self.problem.generate_seq(nb).to(self.device) + input[-nb:] = self.generate_token_sequences(nb).to(self.device) def store_c_quizzes(self, new_c_quizzes, for_train=True): if for_train: @@ -223,123 +408,126 @@ class QuizzMachine: else: self.test_c_quizzes.append(new_c_quizzes) - def create_c_quizzes( + def compute_correctness( self, - nb, - model_for_generation, + c_quizzes, models_for_validation, - min_ave_seq_logproba, - n_epoch, - result_dir, - logger, + bidirectional_validation=False, + deterministic_validation=True, ): - ############################################################### - # Generate quizzes with model + if bidirectional_validation: + backward_c_quizzes = self.forward_to_backward(c_quizzes) - c_quizzes = torch.empty( - nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64 + seq_logproba = torch.zeros( + c_quizzes.size(0), + max([m.id for m in models_for_validation]) + 1, + device=self.device, ) - ar_mask = torch.full(c_quizzes.size(), 1, device=self.device) - seq_logproba = torch.empty(ar_mask.size(0), device=self.device) + nb_correct = 0 + + for model in models_for_validation: + result = c_quizzes.clone() - temperature = 1 - d_temperature = 1 / 3 + seq_logproba[...] = 0.0 - while True: - seq_logproba[...] = 0 + ar_mask = self.make_ar_mask(result) masked_inplace_autoregression( - model=model_for_generation, + model=model, batch_size=self.batch_size, - input=c_quizzes, + input=result, ar_mask=ar_mask, - seq_logproba=seq_logproba, - temperature=temperature, - deterministic_synthesis=False, - # progress_bar_desc="sampling c_quizzes", + seq_logproba=seq_logproba[:, model.id], + temperature=1.0, + deterministic_synthesis=deterministic_validation, + # progress_bar_desc="solving c_quizzes", device=self.device, ) - ave_seq_logproba = seq_logproba.mean() + correct = (c_quizzes == result).long().min(dim=-1).values - if min_ave_seq_logproba is None: - break + if bidirectional_validation: + backward_result = backward_c_quizzes.clone() - # Oh man that's ugly - if ave_seq_logproba < min_ave_seq_logproba: - if d_temperature > 0: - d_temperature *= -1 / 3 - temperature += d_temperature - elif ave_seq_logproba > min_ave_seq_logproba * 0.99: - if d_temperature < 0: - d_temperature *= -1 / 3 - temperature += d_temperature - else: - break + ar_mask = self.make_ar_mask(backward_result) - logger(f"changing temperature to {temperature}") + masked_inplace_autoregression( + model=model, + batch_size=self.batch_size, + input=backward_result, + ar_mask=ar_mask, + seq_logproba=seq_logproba[:, model.id], + temperature=1.0, + deterministic_synthesis=deterministic_validation, + # progress_bar_desc="solving backward c_quizzes", + device=self.device, + ) - ############################################################### - # Create the reverse quizzes + backward_correct = ( + (backward_c_quizzes == backward_result).long().min(dim=-1).values + ) - token_forward, token_backward = self.problem.direction_tokens() + correct *= backward_correct - l = (c_quizzes.size(1) - 1) // 2 - direction = c_quizzes[:, l : l + 1] - direction = self.problem.token_forward * ( - direction == self.problem.token_backward - ) + 
+        c_quizzes = torch.empty(
+            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
+        )
 
-            masked_inplace_autoregression(
-                model=model,
-                batch_size=self.batch_size,
-                input=result,
-                ar_mask=ar_mask,
-                seq_logproba=seq_logproba,
-                temperature=1.0,
-                deterministic_synthesis=True,
-                # progress_bar_desc="solving c_quizzes",
-                device=self.device,
-            )
+        seq_logproba = torch.zeros(nb, device=self.device)
 
-            correct = (c_quizzes == result).long().min(dim=-1).values
+        # First, we generate the answer at high temperature (the given
+        # temperature, assumed > 1)
 
-            reverse_result = reverse_c_quizzes.clone()
+        c_quizzes[:, 0] = self.token_backward
+        c_quizzes[:, 1 + self.answer_len] = self.token_backward
 
-            masked_inplace_autoregression(
-                model=model,
-                batch_size=self.batch_size,
-                input=reverse_result,
-                ar_mask=ar_mask,
-                seq_logproba=seq_logproba,
-                temperature=1.0,
-                deterministic_synthesis=True,
-                # progress_bar_desc="solving reversed c_quizzes",
-                device=self.device,
-            )
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=self.make_ar_mask(c_quizzes, first=True),
+            seq_logproba=seq_logproba,
+            temperature=temperature,
+            deterministic_synthesis=False,
+            device=self.device,
+        )
 
-            reverse_correct = (
-                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
-            )
+        # Then, we generate the prompt at low temperature
 
-            nb_correct.append((correct * reverse_correct)[None, :])
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=self.make_ar_mask(c_quizzes),
+            seq_logproba=seq_logproba,
+            temperature=1 / temperature,
+            deterministic_synthesis=False,
+            device=self.device,
+        )
 
-        nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)
+        # Then we flip the quizz to the forward direction and
+        # re-generate the answer, now at low temperature
+
+        c_quizzes = self.reverse_time(c_quizzes)
+
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=self.make_ar_mask(c_quizzes),
+            seq_logproba=seq_logproba,
+            temperature=1 / temperature,
+            deterministic_synthesis=False,
+            device=self.device,
+        )
 
-        return c_quizzes, nb_correct, seq_logproba.mean()
+        return c_quizzes
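+
+    # A rough sketch of how a caller is expected to combine the pieces
+    # above (the actual driver lives outside this file, so the names
+    # below are illustrative only):
+    #
+    #   c_quizzes = quizz_machine.generate_quizzes(nb, model_for_generation)
+    #   nb_correct, _ = quizz_machine.compute_correctness(
+    #       c_quizzes, models_for_validation
+    #   )
+    #   keep = nb_correct == len(models_for_validation)
+    #   quizz_machine.store_c_quizzes(c_quizzes[keep])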