import torch

from torch import nn
from torch.nn import functional as F
+import mygpt
from mygpt import BracketedSequence
######################################################################
+# ar_mask is a tensor of 0s and 1s, of the same shape as input, with 1s
+# at the positions where tokens should be generated; the other positions
+# are kept unchanged.
+
+
+def one_batch_masked_inplace_autoregression(
+ model,
+ input,
+ ar_mask,
+ seq_logproba,
+ temperature=1.0,
+ deterministic_synthesis=False,
+):
+ to_generate = (ar_mask.sum(0) > 0).nonzero()
+
+ if to_generate.min() > 0:
+ model(
+ BracketedSequence(input, 0, to_generate.min())
+ ) # Needed to initialize the model's cache
+ for s in range(to_generate.min(), to_generate.max() + 1):
+ output = model(BracketedSequence(input, s, 1)).x
+
+ logits = output[:, s]
+
+ logits = (logits / temperature).log_softmax(dim=-1)
+
+ if deterministic_synthesis:
+ t_next = logits.argmax(-1)
+ else:
+ dist = torch.distributions.categorical.Categorical(logits=logits)
+ t_next = dist.sample()
+
+        all_n = torch.arange(t_next.size(0), device=t_next.device)
+        seq_logproba += logits[all_n, t_next]
+
+ input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
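+
+# A minimal usage sketch (hypothetical toy shapes; assumes `model` is a
+# mygpt-style model that takes BracketedSequence inputs and maintains its
+# own cache): keep the first six tokens of each sequence and sample the
+# remaining six in place.
+#
+#   input = torch.randint(10, (4, 12))
+#   ar_mask = (torch.arange(12) >= 6).long()[None, :].expand(4, -1)
+#   seq_logproba = torch.zeros(4)
+#   one_batch_masked_inplace_autoregression(
+#       model, input, ar_mask, seq_logproba, temperature=1.0
+#   )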
+
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
- progress_bar_desc="autoregression",
+ progress_bar_desc=None,
device=torch.device("cpu"),
):
assert input.size() == ar_mask.size()
    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    t = model.training
    model.eval()

for input, ar_mask, seq_logproba in batches:
- model.masked_inplace_autoregression(
+ one_batch_masked_inplace_autoregression(
+ model=model,
input=input,
ar_mask=ar_mask,
seq_logproba=seq_logproba,
temperature=temperature,
deterministic_synthesis=deterministic_synthesis,
- forbidden_tokens=forbidden_tokens,
- forced_biases=logit_biases,
)
model.train(t)
######################################################################
-class Task:
- def batches(self, split="train", nb_to_use=-1, desc=None):
- pass
+class QuizzMachine:
+ def indices_forward_and_backward(self, quizzes):
+ i_forward = quizzes[:, 0] == self.token_forward
+ j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
+ i_backward = quizzes[:, 0] == self.token_backward
+ j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
+ assert torch.logical_or(
+ torch.logical_and(i_forward, j_forward),
+ torch.logical_and(i_backward, j_backward),
+ ).all()
+ return i_forward, i_backward
+
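+    # reverse_time flips a quizz between its two serializations. E.g. with
+    # hypothetical toy sizes prompt_len=2 and answer_len=1, and writing F/B
+    # for token_forward/token_backward, the forward quizz
+    # [F, p0, p1, F, a0] becomes the backward quizz [B, a0, B, p0, p1],
+    # and conversely.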
+ def reverse_time(self, quizzes):
+ i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+ forward_to_backward = torch.cat(
+ [
+ quizzes[:, 0:1],
+ quizzes[:, 2 + self.prompt_len :],
+ quizzes[:, 1 + self.prompt_len : 2 + self.prompt_len],
+ quizzes[:, 1 : 1 + self.prompt_len],
+ ],
+ dim=1,
+ )
+ forward_to_backward[:, 0] = self.token_backward
+ forward_to_backward[:, 1 + self.answer_len] = self.token_backward
+
+ backward_to_forward = torch.cat(
+ [
+ quizzes[:, 0:1],
+ quizzes[:, 2 + self.answer_len :],
+ quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
+ quizzes[:, 1 : 1 + self.answer_len],
+ ],
+ dim=1,
+ )
+
+ backward_to_forward[:, 0] = self.token_forward
+ backward_to_forward[:, 1 + self.prompt_len] = self.token_forward
+
+ m = i_forward.long()[:, None]
- def vocabulary_size(self):
- pass
+ return m * forward_to_backward + (1 - m) * backward_to_forward
- def produce_results(
- self, n_epoch, model, result_dir, logger, deterministic_synthesis
- ):
- pass
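+    # With the same toy sizes, the forward quizz [F, p0, p1, F, a0] gets
+    # ar_mask [0, 0, 0, 0, 1] by default (predict the answer), and
+    # [0, 1, 1, 0, 0] with first=True (generate the prompt itself).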
+ def make_ar_mask(self, quizzes, first=False):
+ i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+ t = torch.arange(quizzes.size(1), device=quizzes.device)
-######################################################################
+ if first:
+ m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
+ m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
+ else:
+ m_forward = (t >= 2 + self.prompt_len).long()
+ m_backward = (t >= 2 + self.answer_len).long()
+
+ m = i_forward.long()[:, None]
+
+ return m * m_forward + (1 - m) * m_backward
+
+ def generate_token_sequences(self, nb):
+ prompts, answers = self.problem.generate_prompts_and_answers(nb)
+
+ if self.prompt_len is None:
+ self.prompt_len = prompts.size(1)
+
+ if self.answer_len is None:
+ self.answer_len = answers.size(1)
-import sky
+ assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len
+ result = []
-class QuizzMachine(Task):
- def save_image(self, input, result_dir, filename, logger):
- img = sky.seq2img(input.to("cpu"), self.height, self.width)
- image_name = os.path.join(result_dir, filename)
- torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=6, padding=4)
- logger(f"wrote {image_name}")
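+        # Each quizz is serialized either forward, as
+        # [token_forward, prompt, token_forward, answer], or backward, as
+        # [token_backward, answer, token_backward, prompt], with equal
+        # probability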
+ for prompt, answer in zip(prompts, answers):
+ if torch.rand(1) < 0.5:
+ a = [
+ torch.tensor([self.token_forward]),
+ prompt,
+ torch.tensor([self.token_forward]),
+ answer,
+ ]
+ else:
+ a = [
+ torch.tensor([self.token_backward]),
+ answer,
+ torch.tensor([self.token_backward]),
+ prompt,
+ ]
- def save_quizzes(self, input, result_dir, filename_prefix, logger):
- self.save_image(input, result_dir, filename_prefix + ".png", logger)
+ result.append(torch.cat(a, dim=0)[None, :])
- def make_ar_mask(self, input):
- b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
- return b.long()[None, :].expand_as(input)
+ return torch.cat(result, dim=0)
def __init__(
self,
+ problem,
nb_train_samples,
nb_test_samples,
batch_size,
- result_dir=None,
- logger=None,
+ result_dir,
+ logger,
device=torch.device("cpu"),
):
super().__init__()
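+
+        # Reserve two token values beyond the problem's vocabulary to mark
+        # the temporal direction of each quizz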
+ v = problem.nb_token_values()
+ self.token_forward = v
+ self.token_backward = v + 1
+ self.nb_token_values = v + 2
+
+ self.problem = problem
self.batch_size = batch_size
self.device = device
- self.height = 6
- self.width = 8
-
- self.train_w_quizzes = sky.generate_seq(
- nb_train_samples, height=self.height, width=self.width
- ).to(device)
+ self.logger = logger
+ self.prompt_len = None
+ self.answer_len = None
- self.test_w_quizzes = sky.generate_seq(
- nb_test_samples, height=self.height, width=self.width
- ).to(device)
+ self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
+ device
+ )
- self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1
+ self.test_w_quizzes = self.generate_token_sequences(nb_test_samples).to(device)
self.train_c_quizzes = []
self.test_c_quizzes = []
if result_dir is not None:
self.save_quizzes(
- self.train_w_quizzes[:72], result_dir, f"culture_w_quizzes", logger
+ result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
)
+
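+    # For display, every quizz is put back in forward order; with
+    # prediction=True, the part that was generated (the prompt of the
+    # originally backward quizzes, the answer of the forward ones) is
+    # flagged for the problem's renderer.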
+    def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
+        quizzes = quizzes.clone()  # do not mutate the caller's tensor
+        forward = quizzes[quizzes[:, 0] == self.token_forward]
+ ib = quizzes[:, 0] == self.token_backward
+ backward = quizzes[ib]
+ assert forward.size(0) + backward.size(0) == quizzes.size(0)
+ quizzes[ib] = self.reverse_time(quizzes[ib])
+
+ if prediction:
+ predicted_prompts = ib
+ predicted_answers = torch.logical_not(ib)
+ else:
+ predicted_prompts = None
+ predicted_answers = None
+
+ self.problem.save_quizzes(
+ result_dir,
+ filename_prefix,
+ quizzes[:, 1 : 1 + self.prompt_len],
+ quizzes[:, 2 + self.prompt_len :],
+ predicted_prompts,
+ predicted_answers,
+ )
+
def batches(self, split="train", desc=None):
assert split in {"train", "test"}
if split == "train":
if len(c_quizzes) > 0:
c_quizzes = torch.cat(c_quizzes, dim=0)
if c_quizzes.size(0) > w_quizzes.size(0) // 2:
- i = torch.randperm(w_quizzes.size(0))[: w_quizzes.size(0) // 2]
+ i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
c_quizzes = c_quizzes[i]
i = torch.randperm(w_quizzes.size(0))[
yield batch
def vocabulary_size(self):
- return self.nb_codes
+ return self.nb_token_values
def produce_results(
- self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
+ self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
):
- def compute_accuracy(input, logger=None):
+ def compute_accuracy(input):
input = input[:nmax]
ar_mask = self.make_ar_mask(input)
result = input.clone() * (1 - ar_mask)
device=self.device,
)
- nb_total, nb_correct = (
- input.size(0),
- (input == result).long().min(dim=1).values.sum(),
- )
+ nb_total = input.size(0)
+ nb_correct = (input == result).long().min(dim=1).values.sum()
return nb_total, nb_correct
train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)
- logger(
+ self.logger(
f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
)
- test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)
+ test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)
- logger(
+ self.logger(
f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
main_test_accuracy = test_nb_correct / test_nb_total
- logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
+ self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
##############################
)
self.save_quizzes(
- result[:72],
result_dir,
f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
- logger,
+ quizzes=result[:72],
+ prediction=True,
)
return main_test_accuracy
input = self.train_w_quizzes if for_train else self.test_w_quizzes
nb = min(nb, input.size(0))
input[:-nb] = input[nb:].clone()
- input[-nb:] = sky.generate_seq(nb, height=self.height, width=self.width).to(
- self.device
- )
+ input[-nb:] = self.generate_token_sequences(nb).to(self.device)
def store_c_quizzes(self, new_c_quizzes, for_train=True):
if for_train:
else:
self.test_c_quizzes.append(new_c_quizzes)
- def create_c_quizzes(
+ def compute_correctness(
self,
- n_epoch,
- result_dir,
- logger,
- nb,
- model,
- other_models,
- min_ave_seq_logproba,
+ c_quizzes,
+ models_for_validation,
+ bidirectional_validation=False,
+ deterministic_validation=True,
):
- ###############################################################
- # Generate quizzes with model
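+        # A model validates a c_quizz when it regenerates the masked part
+        # exactly (greedily if deterministic_validation is set); with
+        # bidirectional_validation, it must also solve the time-reversed
+        # version.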
+ if bidirectional_validation:
+            backward_c_quizzes = self.reverse_time(c_quizzes)
- c_quizzes = torch.empty(
- nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
+ seq_logproba = torch.zeros(
+ c_quizzes.size(0),
+ max([m.id for m in models_for_validation]) + 1,
+ device=self.device,
)
- ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
- seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
+ nb_correct = 0
+
+ for model in models_for_validation:
+ result = c_quizzes.clone()
- temperature = 1
- d_temperature = 1 / 3
+ seq_logproba[...] = 0.0
- while True:
- seq_logproba[...] = 0
+ ar_mask = self.make_ar_mask(result)
masked_inplace_autoregression(
model=model,
batch_size=self.batch_size,
- input=c_quizzes,
+ input=result,
ar_mask=ar_mask,
- seq_logproba=seq_logproba,
- temperature=temperature,
- deterministic_synthesis=False,
- progress_bar_desc="sampling c_quizzes",
+ seq_logproba=seq_logproba[:, model.id],
+ temperature=1.0,
+ deterministic_synthesis=deterministic_validation,
+ # progress_bar_desc="solving c_quizzes",
device=self.device,
)
- ave_seq_logproba = seq_logproba.mean()
+ correct = (c_quizzes == result).long().min(dim=-1).values
- logger(f"{ave_seq_logproba=} {min_ave_seq_logproba=}")
+ if bidirectional_validation:
+ backward_result = backward_c_quizzes.clone()
- if min_ave_seq_logproba is None:
- break
+ ar_mask = self.make_ar_mask(backward_result)
- # Oh man that's ugly
- if ave_seq_logproba < min_ave_seq_logproba * 1.1:
- if d_temperature > 0:
- d_temperature *= -1 / 3
- temperature += d_temperature
- elif ave_seq_logproba > min_ave_seq_logproba:
- if d_temperature < 0:
- d_temperature *= -1 / 3
- temperature += d_temperature
- else:
- break
+ masked_inplace_autoregression(
+ model=model,
+ batch_size=self.batch_size,
+ input=backward_result,
+ ar_mask=ar_mask,
+ seq_logproba=seq_logproba[:, model.id],
+ temperature=1.0,
+ deterministic_synthesis=deterministic_validation,
+ # progress_bar_desc="solving backward c_quizzes",
+ device=self.device,
+ )
- logger(f"chaging temperature to {temperature}")
+ backward_correct = (
+ (backward_c_quizzes == backward_result).long().min(dim=-1).values
+ )
- ###############################################################
- # Create the reverse quizzes
+ correct *= backward_correct
- l = self.height * self.width
- direction = c_quizzes[:, l : l + 1]
- direction = sky.token_forward * (
- direction == sky.token_backward
- ) + sky.token_backward * (direction == sky.token_forward)
- reverse_c_quizzes = torch.cat(
- [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
- )
- ar_mask = self.make_ar_mask(c_quizzes)
- seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
+ nb_correct += correct
- ###############################################################
- # Check how many of the other models can solve them in both
- # directions
+ return nb_correct, seq_logproba
- nb_correct = []
+ ###############################################################
- for m in other_models:
- result = c_quizzes.clone()
+ def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
+ c_quizzes = torch.empty(
+ nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
+ )
- masked_inplace_autoregression(
- model=m,
- batch_size=self.batch_size,
- input=result,
- ar_mask=ar_mask,
- seq_logproba=seq_logproba,
- temperature=1.0,
- deterministic_synthesis=True,
- progress_bar_desc="solving c_quizzes",
- device=self.device,
- )
+ seq_logproba = torch.zeros(nb, device=self.device)
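+
+        # Generation proceeds in three passes over a backward-serialized
+        # quizz: sample the answer at `temperature`, then the prompt at
+        # 1/temperature, then reverse time and re-sample the answer at
+        # 1/temperature.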
- correct = (c_quizzes == result).long().min(dim=-1).values
+ # First, we generate the answer at high temperature
- reverse_result = reverse_c_quizzes.clone()
+ c_quizzes[:, 0] = self.token_backward
+ c_quizzes[:, 1 + self.answer_len] = self.token_backward
- masked_inplace_autoregression(
- model=m,
- batch_size=self.batch_size,
- input=reverse_result,
- ar_mask=ar_mask,
- seq_logproba=seq_logproba,
- temperature=1.0,
- deterministic_synthesis=True,
- progress_bar_desc="solving reversed c_quizzes",
- device=self.device,
- )
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=self.make_ar_mask(c_quizzes, first=True),
+ seq_logproba=seq_logproba,
+ temperature=temperature,
+ deterministic_synthesis=False,
+ device=self.device,
+ )
- reverse_correct = (
- (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
- )
+ # Then, we generate the prompt at low temperature
- nb_correct.append((correct * reverse_correct)[None, :])
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=self.make_ar_mask(c_quizzes),
+ seq_logproba=seq_logproba,
+ temperature=1 / temperature,
+ deterministic_synthesis=False,
+ device=self.device,
+ )
+
+        # Then we reverse the quizz in time and re-generate the answer,
+        # now at low temperature
+
+ c_quizzes = self.reverse_time(c_quizzes)
- nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=self.make_ar_mask(c_quizzes),
+ seq_logproba=seq_logproba,
+ temperature=1 / temperature,
+ deterministic_synthesis=False,
+ device=self.device,
+ )
- return c_quizzes, nb_correct, seq_logproba.mean()
+ return c_quizzes
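+
+
+# A hypothetical end-to-end use of the machinery above (all sizes and
+# names illustrative):
+#
+#   quizz_machine = QuizzMachine(problem, 10000, 1000, 25, result_dir, print)
+#   c_quizzes = quizz_machine.generate_quizzes(100, model_for_generation=model)
+#   nb_correct, _ = quizz_machine.compute_correctness(c_quizzes, other_models)
+#   quizz_machine.store_c_quizzes(c_quizzes[nb_correct == len(other_models)])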