X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=inline;f=quizz_machine.py;h=84bb558f1a10fc4853c3f65fe136daecb1fc2024;hb=17a885dc2c98bc5370dcc2ebd32493dcebdd4225;hp=49e783586ebc662ac6bad80e34c2ef84169b1667;hpb=bee6e628aabc1380772409f6aabffb024c0e70ab;p=culture.git

diff --git a/quizz_machine.py b/quizz_machine.py
index 49e7835..84bb558 100755
--- a/quizz_machine.py
+++ b/quizz_machine.py
@@ -12,6 +12,7 @@ import torch, torchvision
 from torch import nn
 from torch.nn import functional as F
 
+import mygpt
 from mygpt import BracketedSequence
 
 ######################################################################
@@ -20,7 +21,7 @@ from mygpt import BracketedSequence
 class Gang(nn.Module):
     def __init__(self, models, nb_models_for_generation, mode="groupthink"):
         super().__init__()
-        self.models = models
+        self.models = nn.ModuleList(models)
         self.nb_models_for_generation = nb_models_for_generation
         self.mode = mode
 
@@ -378,48 +379,44 @@ class QuizzMachine:
             nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
         )
 
-        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
-        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
-
-        # bracketing of the temperature to get the target logproba
+        ar_mask_prompt = torch.zeros(c_quizzes.size(), device=self.device)
+        ar_mask_prompt[:, : ar_mask_prompt.size(1) // 2 + 1] = 1
+        ar_mask_solve = 1 - ar_mask_prompt
+        seq_logproba = torch.empty(ar_mask_prompt.size(0), device=self.device)
 
+        warnings.warn("noise injection", RuntimeWarning)
         temperature = 1
-        d_temperature = 1 / 3
+        noise_std = torch.rand(1).item()
+        self.logger(f"{noise_std=}")
 
+        mygpt.set_noise_injection(model_for_generation, noise_std)
 
-        while True:
-            seq_logproba[...] = 0
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=ar_mask_prompt,
+            seq_logproba=seq_logproba,
+            temperature=temperature,
+            deterministic_synthesis=False,
+            # progress_bar_desc="sampling c_quizzes",
+            device=self.device,
+        )
 
-            masked_inplace_autoregression(
-                model=model_for_generation,
-                batch_size=self.batch_size,
-                input=c_quizzes,
-                ar_mask=ar_mask,
-                seq_logproba=seq_logproba,
-                temperature=temperature,
-                deterministic_synthesis=False,
-                # progress_bar_desc="sampling c_quizzes",
-                device=self.device,
-            )
+        ave_seq_logproba = seq_logproba.mean()
+
+        masked_inplace_autoregression(
+            model=model_for_generation,
+            batch_size=self.batch_size,
+            input=c_quizzes,
+            ar_mask=ar_mask_solve,
+            seq_logproba=seq_logproba,
+            temperature=temperature,
+            deterministic_synthesis=True,
+            # progress_bar_desc="sampling c_quizzes",
+            device=self.device,
+        )
 
-            ave_seq_logproba = seq_logproba.mean()
-
-            # If we do not have target logprobs, get out now
-            if min_ave_seq_logproba is None:
-                break
-
-            # Oh man that's ugly
-            if ave_seq_logproba < min_ave_seq_logproba:
-                if d_temperature > 0:
-                    d_temperature *= -1 / 3
-                temperature += d_temperature
-            elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
-                if d_temperature < 0:
-                    d_temperature *= -1 / 3
-                temperature += d_temperature
-            else:
-                break
-
-            self.logger(f"changing temperature to {temperature}")
+        mygpt.set_noise_injection(model_for_generation, 0.0)
 
         return c_quizzes, seq_logproba.mean()
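
Note (not part of the patch): the hunk above replaces the temperature-bracketing loop with a two-pass scheme, sampling the first half of each quizz under noise injection and then completing the rest deterministically. The following is a minimal, self-contained sketch reproducing only the mask construction from the patch on a toy batch, with invented tensor sizes, to show which positions each of the two masked_inplace_autoregression passes regenerates.

# Illustration only (not from the repository): toy prompt/solve mask split.
import torch

nb_quizzes, seq_len = 4, 10  # made-up sizes for the example
c_quizzes = torch.zeros(nb_quizzes, seq_len, dtype=torch.int64)

# First half plus one token is the "prompt" part (sampled stochastically);
# the complement is the "solve" part (completed deterministically).
ar_mask_prompt = torch.zeros(c_quizzes.size())
ar_mask_prompt[:, : ar_mask_prompt.size(1) // 2 + 1] = 1
ar_mask_solve = 1 - ar_mask_prompt

print(ar_mask_prompt[0])  # tensor([1., 1., 1., 1., 1., 1., 0., 0., 0., 0.])
print(ar_mask_solve[0])   # tensor([0., 0., 0., 0., 0., 0., 1., 1., 1., 1.])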