diff --git a/quizz_machine.py b/quizz_machine.py
index 239dc68..697f27e 100755
--- a/quizz_machine.py
+++ b/quizz_machine.py
@@ -12,47 +12,11 @@ import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
+import mygpt
 from mygpt import BracketedSequence
 
 ######################################################################
 
-
-class Gang(nn.Module):
-    def __init__(self, models, nb_models_for_generation, mode="groupthink"):
-        super().__init__()
-        self.models = models
-        self.nb_models_for_generation = nb_models_for_generation
-        self.mode = mode
-
-    def forward(self, bs):
-        # If first = 0, we are re-starting an auto-regressive process,
-        # that's the right moment to randomize who gonna do it
-        if bs.first == 0:
-            self.models_to_use = [
-                self.models[k]
-                for k in torch.randperm(len(self.models))[
-                    : self.nb_models_for_generation
-                ]
-            ]
-
-        all_the_logits = torch.cat(
-            [model(bs).x[None] for model in self.models_to_use], dim=0
-        )
-
-        if self.mode == "groupthink":
-            y = all_the_logits.mean(dim=0)
-        elif self.mode == "groupwork":
-            m = torch.rand(all_the_logits.size(), device=all_the_logits.device)
-            m = (m.sort(dim=0).indices == 0).long()
-            y = (y * m).sum(dim=0)
-        else:
-            raise ValueError(f"Invalid mode {self.mode}")
-
-        return BracketedSequence(y, bs.first, bs.nb)
-
-
-######################################################################
-
 # ar_mask is a tensor with 0s and 1s, of same shape as input, with
 # 1s where tokens should be generated. The others are kept
 # unchanged.
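
Note: the ar_mask convention above drives every generation call in this patch. A minimal runnable sketch of the convention, assuming a 50/50 prompt/answer split (the repository builds its actual masks with make_ar_mask):

    import torch

    def demo_ar_mask(input):
        # 1 marks positions to be (re)generated, 0 marks positions kept as-is;
        # here we keep the first half plus the middle token and regenerate the rest.
        ar_mask = torch.zeros_like(input)
        ar_mask[:, input.size(1) // 2 + 1 :] = 1
        result = input.clone() * (1 - ar_mask)  # positions to generate are cleared
        return ar_mask, result
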
@@ -161,8 +125,8 @@ class QuizzMachine:
         nb_train_samples,
         nb_test_samples,
         batch_size,
-        result_dir=None,
-        logger=None,
+        result_dir,
+        logger,
         device=torch.device("cpu"),
     ):
         super().__init__()
@@ -170,10 +134,12 @@ class QuizzMachine:
         self.problem = problem
         self.batch_size = batch_size
         self.device = device
+        self.logger = logger
 
         self.train_w_quizzes = self.problem.generate_token_sequences(
             nb_train_samples
         ).to(device)
+
         self.test_w_quizzes = self.problem.generate_token_sequences(nb_test_samples).to(
             device
         )
@@ -231,9 +197,9 @@ class QuizzMachine:
         return self.nb_codes
 
     def produce_results(
-        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
-        def compute_accuracy(input, logger=None):
+        def compute_accuracy(input):
             input = input[:nmax]
             ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
@@ -260,18 +226,18 @@ class QuizzMachine:
 
         train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)
 
-        logger(
+        self.logger(
             f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
         )
 
-        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)
+        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)
 
-        logger(
+        self.logger(
             f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
         )
 
         main_test_accuracy = test_nb_correct / test_nb_total
-        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
 
         ##############################
@@ -310,10 +276,7 @@ class QuizzMachine:
         else:
             self.test_c_quizzes.append(new_c_quizzes)
 
-    def comput_correctness(self, c_quizzes, models_for_validation):
-        ###############################################################
-        # Create the reverse quizzes
-
+    def reverse_time(self, c_quizzes):
         token_forward, token_backward = self.problem.direction_tokens()
         l = (c_quizzes.size(1) - 1) // 2
@@ -321,18 +284,20 @@ class QuizzMachine:
         direction = self.problem.token_forward * (
             direction == self.problem.token_backward
         ) + self.problem.token_backward * (direction == self.problem.token_forward)
-        reverse_c_quizzes = torch.cat(
-            [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
-        )
+
+        return torch.cat([c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1)
+
+    def compute_correctness(
+        self, c_quizzes, models_for_validation, both_directions=True
+    ):
+        reversed_c_quizzes = self.reverse_time(c_quizzes)
 
         ar_mask = self.make_ar_mask(c_quizzes)
         seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
 
-        ###############################################################
-        # Check how many of the other models can solve them in both
-        # directions
+        # Check how many of the models can solve the quizzes in both directions
 
-        nb_correct = []
+        nb_correct = 0
 
         for model in models_for_validation:
             result = c_quizzes.clone()
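
Note: the per-quiz test used in the loop that follows is exact sequence match, via a min over the token dimension of the elementwise equality. A toy illustration:

    import torch

    c_quizzes = torch.tensor([[1, 2, 3], [4, 5, 6]])
    result = torch.tensor([[1, 2, 3], [4, 0, 6]])

    # 1 iff every token of the regenerated sequence matches, 0 otherwise
    correct = (c_quizzes == result).long().min(dim=-1).values
    print(correct)  # tensor([1, 0]) -> first quiz solved, second not
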
@@ -351,122 +316,106 @@ class QuizzMachine:
 
             correct = (c_quizzes == result).long().min(dim=-1).values
 
-            reverse_result = reverse_c_quizzes.clone()
+            if both_directions:
+                reversed_result = reversed_c_quizzes.clone()
 
-            masked_inplace_autoregression(
-                model=model,
-                batch_size=self.batch_size,
-                input=reverse_result,
-                ar_mask=ar_mask,
-                seq_logproba=seq_logproba,
-                temperature=1.0,
-                deterministic_synthesis=True,
-                # progress_bar_desc="solving reversed c_quizzes",
-                device=self.device,
-            )
+                masked_inplace_autoregression(
+                    model=model,
+                    batch_size=self.batch_size,
+                    input=reversed_result,
+                    ar_mask=ar_mask,
+                    seq_logproba=seq_logproba,
+                    temperature=1.0,
+                    deterministic_synthesis=True,
+                    # progress_bar_desc="solving reversed c_quizzes",
+                    device=self.device,
+                )
 
-            reverse_correct = (
-                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
-            )
+                reversed_correct = (
+                    (reversed_c_quizzes == reversed_result).long().min(dim=-1).values
+                )
 
-            nb_correct.append((correct * reverse_correct)[None, :])
+                correct *= reversed_correct
 
-        return torch.cat(nb_correct, dim=0).sum(dim=0)
+            # endif
 
-    def generate_quizzes(self, nb, model_for_generation, min_ave_seq_logproba):
-        ###############################################################
-        # Generate quizzes with model
+            nb_correct += correct
 
+        return nb_correct
+
+    ###############################################################
+
+    def generate_quizzes(self, nb, model_for_generation, reverse_cleanup=False):
         c_quizzes = torch.empty(
             nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
         )
 
-        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
-        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
+        ar_mask_prompt = torch.zeros(c_quizzes.size(), device=self.device)
+        ar_mask_prompt[:, : ar_mask_prompt.size(1) // 2 + 1] = 1
+        ar_mask_solve = 1 - ar_mask_prompt
+        seq_logproba = torch.empty(ar_mask_prompt.size(0), device=self.device)
+
+        if reverse_cleanup:
+            warnings.warn("very high temperature with reversed cleanup", RuntimeWarning)
+            temperature = 10.0
+        else:
+            temperature = 1.0
+
+        # warnings.warn("noise injection", RuntimeWarning)
+        # noise_std = torch.rand(1).item()
+        # self.logger(f"{noise_std=}")
+
+        # mygpt.set_noise_injection(model_for_generation, noise_std)
+
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=ar_mask_prompt,
+            seq_logproba=seq_logproba,
+            temperature=temperature,
+            deterministic_synthesis=False,
+            device=self.device,
+        )
 
-        # bracketing of the temperature to get the target logproba
+        # mygpt.set_noise_injection(model_for_generation, 0.0)
 
-        temperature = 1
-        d_temperature = 1 / 3
+        ave_seq_logproba = seq_logproba.mean()
 
-        while True:
-            seq_logproba[...] = 0
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=ar_mask_solve,
+            seq_logproba=seq_logproba,
+            temperature=temperature,
+            deterministic_synthesis=True,
+            device=self.device,
+        )
 
+        if reverse_cleanup:
+            c_quizzes = self.reverse_time(c_quizzes)
             masked_inplace_autoregression(
                 model=model_for_generation,
                 batch_size=self.batch_size,
                 input=c_quizzes,
-                ar_mask=ar_mask,
+                ar_mask=ar_mask_solve,
                 seq_logproba=seq_logproba,
                 temperature=temperature,
-                deterministic_synthesis=False,
-                # progress_bar_desc="sampling c_quizzes",
+                deterministic_synthesis=True,
                 device=self.device,
             )
 
-            ave_seq_logproba = seq_logproba.mean()
-
-            # If we do not have target logprobs, get out now
-            if min_ave_seq_logproba is None:
-                break
-
-            # Oh man that's ugly
-            if ave_seq_logproba < min_ave_seq_logproba:
-                if d_temperature > 0:
-                    d_temperature *= -1 / 3
-                temperature += d_temperature
-            elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
-                if d_temperature < 0:
-                    d_temperature *= -1 / 3
-                temperature += d_temperature
-            else:
-                break
-
-            logger(f"changing temperature to {temperature}")
+            c_quizzes = self.reverse_time(c_quizzes)
+            masked_inplace_autoregression(
+                model=model_for_generation,
+                batch_size=self.batch_size,
+                input=c_quizzes,
+                ar_mask=ar_mask_solve,
+                seq_logproba=seq_logproba,
+                temperature=temperature,
+                deterministic_synthesis=True,
+                device=self.device,
+            )
 
         return c_quizzes, seq_logproba.mean()
-
-    ######################################################################
-
-    def create_c_quizzes(
-        self,
-        nb,
-        model_for_generation,
-        models_for_validation,
-        min_ave_seq_logproba,
-        n_epoch,
-        result_dir,
-        logger,
-    ):
-        c_quizzes, ave_seq_logproba = self.generate_quizzes(
-            nb, model_for_generation, min_ave_seq_logproba
-        )
-
-        nb_correct = self.comput_correctness(c_quizzes, models_for_validation)
-
-        return c_quizzes, nb_correct, ave_seq_logproba
-
-    ######################################################################
-
-    def gang_create_c_quizzes(
-        self,
-        nb,
-        nb_models_for_generation,
-        models,
-        mode,
-        min_ave_seq_logproba,
-        n_epoch,
-        result_dir,
-        logger,
-    ):
-        model_for_generation = Gang(models, nb_models_for_generation, mode)
-        models_for_validation = models
-        return self.create_c_quizzes(
-            nb,
-            model_for_generation,
-            models_for_validation,
-            min_ave_seq_logproba,
-            n_epoch,
-            result_dir,
-            logger,
-        )
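
Note: the reverse_cleanup path relies on reverse_time, which swaps the two halves of a quiz around the middle direction token and flips that token, so a forward quiz becomes the equivalent backward one. A toy check of the layout, with illustrative token values (the real forward/backward codes come from problem.direction_tokens()):

    import torch

    token_forward, token_backward = 9, 8
    c_quizzes = torch.tensor([[1, 2, 3, token_forward, 4, 5, 6]])

    l = (c_quizzes.size(1) - 1) // 2
    direction = c_quizzes[:, l : l + 1]
    direction = token_forward * (direction == token_backward) + token_backward * (
        direction == token_forward
    )
    print(torch.cat([c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1))
    # tensor([[4, 5, 6, 8, 1, 2, 3]])
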
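Note: with create_c_quizzes and gang_create_c_quizzes removed, callers now compose the two remaining entry points themselves. A hypothetical caller-side sketch, assuming a QuizzMachine instance qm, a pool of models, and a unanimity acceptance rule (none of which are code from this commit):

    def create_c_quizzes_sketch(qm, nb, models):
        c_quizzes, ave_seq_logproba = qm.generate_quizzes(
            nb, model_for_generation=models[0], reverse_cleanup=True
        )
        # compute_correctness returns, per quiz, how many validating models solved it
        nb_correct = qm.compute_correctness(c_quizzes, models, both_directions=True)
        # keep only the quizzes that every validating model solves
        return c_quizzes[nb_correct == len(models)], ave_seq_logproba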