from torch import nn
from torch.nn import functional as F
+import warnings
+import mygpt
from mygpt import BracketedSequence
######################################################################
-
-class Gang(nn.Module):
- def __init__(self, models, nb_models_for_generation, mode="groupthink"):
- super().__init__()
- self.models = models
- self.nb_models_for_generation = nb_models_for_generation
- self.mode = mode
-
- def forward(self, bs):
- # If first = 0, we are re-starting an auto-regressive process,
- # which is the right moment to randomize which models will do it
- if bs.first == 0:
- self.models_to_use = [
- self.models[k]
- for k in torch.randperm(len(self.models))[
- : self.nb_models_for_generation
- ]
- ]
-
- all_the_logits = torch.cat(
- [model(bs).x[None] for model in self.models_to_use], dim=0
- )
-
- if self.mode == "groupthink":
- y = all_the_logits.mean(dim=0)
- elif self.mode == "groupwork":
- m = torch.rand(all_the_logits.size(), device=all_the_logits.device)
- m = (m.sort(dim=0).indices == 0).long()
- y = (all_the_logits * m).sum(dim=0)
- else:
- raise ValueError(f"Invalid mode {self.mode}")
-
- return BracketedSequence(y, bs.first, bs.nb)
-
-
-######################################################################
-
# ar_mask is a tensor with 0s and 1s, of same shape as input, with
# 1s where tokens should be generated. The others are kept
# unchanged.
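+ # A minimal sketch of such a mask (sizes and names are made up for
+ # illustration, this is not code from this module):
+ #
+ #   input = torch.randint(10, (4, 11))
+ #   ar_mask = torch.zeros(input.size(), dtype=torch.int64)
+ #   ar_mask[:, input.size(1) // 2 + 1 :] = 1  # generate the second half
+ #   frozen = input * (1 - ar_mask)            # kept tokens, 0 elsewhere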
nb_train_samples,
nb_test_samples,
batch_size,
- result_dir=None,
- logger=None,
+ result_dir,
+ logger,
device=torch.device("cpu"),
):
super().__init__()
self.problem = problem
self.batch_size = batch_size
self.device = device
+ self.logger = logger
self.train_w_quizzes = self.problem.generate_token_sequences(
nb_train_samples
return self.nb_codes
def produce_results(
- self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
):
- def compute_accuracy(input, logger=None):
+ def compute_accuracy(input):
input = input[:nmax]
ar_mask = self.make_ar_mask(input)
result = input.clone() * (1 - ar_mask)
train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)
- logger(
+ self.logger(
f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
)
- test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)
+ test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)
- logger(
+ self.logger(
f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
main_test_accuracy = test_nb_correct / test_nb_total
- logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+ self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
##############################
else:
self.test_c_quizzes.append(new_c_quizzes)
- def comput_correctness(self, c_quizzes, models_for_validation):
- # Create the reverse quizzes
-
+ def reverse_time(self, c_quizzes):
token_forward, token_backward = self.problem.direction_tokens()
l = (c_quizzes.size(1) - 1) // 2
direction = c_quizzes[:, l : l + 1]
direction = self.problem.token_forward * (
direction == self.problem.token_backward
) + self.problem.token_backward * (direction == self.problem.token_forward)
- reverse_c_quizzes = torch.cat(
- [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
- )
+
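+ # A quiz is laid out as [ prompt (l tokens), direction token,
+ # answer (l tokens) ]; reversing swaps the two halves and flips the
+ # direction token, e.g. [ P, fwd, A ] -> [ A, bwd, P ]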
+ return torch.cat([c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1)
+
+ def compute_correctness(self, c_quizzes, models_for_validation):
+ reversed_c_quizzes = self.reverse_time(c_quizzes)
ar_mask = self.make_ar_mask(c_quizzes)
seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
correct = (c_quizzes == result).long().min(dim=-1).values
- reverse_result = reverse_c_quizzes.clone()
+ reversed_result = reversed_c_quizzes.clone()
masked_inplace_autoregression(
model=model,
batch_size=self.batch_size,
- input=reverse_result,
+ input=reversed_result,
ar_mask=ar_mask,
seq_logproba=seq_logproba,
temperature=1.0,
device=self.device,
)
- reverse_correct = (
- (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
+ reversed_correct = (
+ (reversed_c_quizzes == reversed_result).long().min(dim=-1).values
)
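+ # a quiz is counted as correct only when it is solved in both
+ # time directions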
- nb_correct += correct * reverse_correct
+ nb_correct += correct * reversed_correct
return nb_correct
###############################################################
- def generate_quizzes(self, nb, model_for_generation, min_ave_seq_logproba):
+ def generate_quizzes(self, nb, model_for_generation, reverse_cleanup=False):
c_quizzes = torch.empty(
nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
)
- ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
- seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
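+ # Two complementary masks: ar_mask_prompt selects the first half of
+ # the sequence plus the direction token, ar_mask_solve the remaining
+ # half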
+ ar_mask_prompt = torch.zeros(c_quizzes.size(), device=self.device)
+ ar_mask_prompt[:, : ar_mask_prompt.size(1) // 2 + 1] = 1
+ ar_mask_solve = 1 - ar_mask_prompt
+ seq_logproba = torch.empty(ar_mask_prompt.size(0), device=self.device)
+
+ if reverse_cleanup:
+ warnings.warn("very high temperature with reversed cleanup", RuntimeWarning)
+ temperature = 10.0
+ else:
+ temperature = 1.0
+
+ # warnings.warn("noise injection", RuntimeWarning)
+ # noise_std = torch.rand(1).item()
+ # self.logger(f"{noise_std=}")
+
+ # mygpt.set_noise_injection(model_for_generation, noise_std)
+
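+ # First pass: sample a fresh prompt (first half and direction token)
+ # at the chosen temperature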
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=ar_mask_prompt,
+ seq_logproba=seq_logproba,
+ temperature=temperature,
+ deterministic_synthesis=False,
+ device=self.device,
+ )
- # bracketing of the temperature to get the target logproba
+ # mygpt.set_noise_injection(model_for_generation, 0.0)
- temperature = 1
- d_temperature = 1 / 3
+ ave_seq_logproba = seq_logproba.mean()
- while True:
- seq_logproba[...] = 0
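+ # Second pass: fill in the answer half of the sampled prompt with
+ # deterministic synthesis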
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=ar_mask_solve,
+ seq_logproba=seq_logproba,
+ temperature=temperature,
+ deterministic_synthesis=True,
+ device=self.device,
+ )
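+ # Optional cleanup pass: reverse the quiz in time and solve the
+ # reversed version deterministically, so that its second half (the
+ # original prompt) is re-generated from the answer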
+ if reverse_cleanup:
+ c_quizzes = self.reverse_time(c_quizzes)
masked_inplace_autoregression(
model=model_for_generation,
batch_size=self.batch_size,
input=c_quizzes,
- ar_mask=ar_mask,
+ ar_mask=ar_mask_solve,
seq_logproba=seq_logproba,
temperature=temperature,
- deterministic_synthesis=False,
- # progress_bar_desc="sampling c_quizzes",
+ deterministic_synthesis=True,
device=self.device,
)
- ave_seq_logproba = seq_logproba.mean()
-
- # If we do not have target logprobs, get out now
- if min_ave_seq_logproba is None:
- break
-
- # Oh man that's ugly
- if ave_seq_logproba < min_ave_seq_logproba:
- if d_temperature > 0:
- d_temperature *= -1 / 3
- temperature += d_temperature
- elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
- if d_temperature < 0:
- d_temperature *= -1 / 3
- temperature += d_temperature
- else:
- break
-
- logger(f"changing temperature to {temperature}")
-
return c_quizzes, seq_logproba.mean()
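+ # Hypothetical usage, for illustration only (the caller and the
+ # constants are assumptions, not part of this module):
+ #
+ #   c_quizzes, ave_seq_logproba = quizz_machine.generate_quizzes(
+ #       256, model_for_generation=model, reverse_cleanup=True
+ #   )
+ #   nb_correct = quizz_machine.compute_correctness(
+ #       c_quizzes, models_for_validation
+ #   )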
-
- ######################################################################
-
- def create_c_quizzes(
- self,
- nb,
- model_for_generation,
- models_for_validation,
- min_ave_seq_logproba,
- n_epoch,
- result_dir,
- logger,
- ):
- c_quizzes, ave_seq_logproba = self.generate_quizzes(
- nb, model_for_generation, min_ave_seq_logproba
- )
-
- nb_correct = self.comput_correctness(c_quizzes, models_for_validation)
-
- return c_quizzes, nb_correct, ave_seq_logproba
-
- ######################################################################
-
- def gang_create_c_quizzes(
- self,
- nb,
- nb_models_for_generation,
- models,
- mode,
- min_ave_seq_logproba,
- n_epoch,
- result_dir,
- logger,
- ):
- model_for_generation = Gang(models, nb_models_for_generation, mode)
- models_for_validation = models
- return self.create_c_quizzes(
- nb,
- model_for_generation,
- models_for_validation,
- min_ave_seq_logproba,
- n_epoch,
- result_dir,
- logger,
- )