seq_logproba,
temperature=1.0,
deterministic_synthesis=False,
- forbidden_tokens=None,
- forced_biases=None,
):
to_generate = (ar_mask.sum(0) > 0).nonzero()
logits = (logits / temperature).log_softmax(dim=-1)
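+ # Temperature-scaled log-softmax: temperature < 1 sharpens the
+ # distribution, temperature > 1 flattens it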
- if forbidden_tokens is not None:
- logits = logits.masked_fill(forbidden_tokens, float("-inf"))
-
- if forced_biases is not None:
- logits = logits + forced_biases[None, :]
-
if deterministic_synthesis:
t_next = logits.argmax(-1)
else:
seq_logproba=seq_logproba,
temperature=temperature,
deterministic_synthesis=deterministic_synthesis,
- forbidden_tokens=forbidden_tokens,
- forced_biases=logit_biases,
)
model.train(t)
class QuizzMachine:
- def make_ar_mask(self, input):
- b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
- return b.long()[None, :].expand_as(input)
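+ # A quizz is laid out as [direction, segment 1, direction, segment 2]:
+ # forward quizzes store the prompt then the answer, backward quizzes the
+ # answer then the prompt. The helpers below recover the direction of each
+ # quizz and flip between the two layouts.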
+ def indices_forward_and_backward(self, quizzes):
+ i_forward = quizzes[:, 0] == self.token_forward
+ j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
+ i_backward = quizzes[:, 0] == self.token_backward
+ j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
+ assert torch.logical_or(
+ torch.logical_and(i_forward, j_forward),
+ torch.logical_and(i_backward, j_backward),
+ ).all()
+ return i_forward, i_backward
+
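+ # Swaps the two segments of every quizz and rewrites its direction
+ # tokens, so forward quizzes become backward ones and vice versa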
+ def reverse_time(self, quizzes):
+ i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+ forward_to_backward = torch.cat(
+ [
+ quizzes[:, 0:1],
+ quizzes[:, 2 + self.prompt_len :],
+ quizzes[:, 1 + self.prompt_len : 2 + self.prompt_len],
+ quizzes[:, 1 : 1 + self.prompt_len],
+ ],
+ dim=1,
+ )
+ forward_to_backward[:, 0] = self.token_backward
+ forward_to_backward[:, 1 + self.answer_len] = self.token_backward
+
+ backward_to_forward = torch.cat(
+ [
+ quizzes[:, 0:1],
+ quizzes[:, 2 + self.answer_len :],
+ quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
+ quizzes[:, 1 : 1 + self.answer_len],
+ ],
+ dim=1,
+ )
+
+ backward_to_forward[:, 0] = self.token_forward
+ backward_to_forward[:, 1 + self.prompt_len] = self.token_forward
+
+ m = i_forward.long()[:, None]
+
+ return m * forward_to_backward + (1 - m) * backward_to_forward
+
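+ # Returns the autoregressive mask, 1 where tokens must be generated.
+ # With first=True it covers the first segment (generated right after the
+ # initial direction token), otherwise the second segment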
+ def make_ar_mask(self, quizzes, first=False):
+ i_forward, i_backward = self.indices_forward_and_backward(quizzes)
+
+ t = torch.arange(quizzes.size(1), device=quizzes.device)
+
+ if first:
+ m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
+ m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
+ else:
+ m_forward = (t >= 2 + self.prompt_len).long()
+ m_backward = (t >= 2 + self.answer_len).long()
+
+ m = i_forward.long()[:, None]
+
+ return m * m_forward + (1 - m) * m_backward
def generate_token_sequences(self, nb):
prompts, answers = self.problem.generate_prompts_and_answers(nb)
+
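+ # The prompt and answer lengths are fixed by the first generated batch
+ # and must not change afterwards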
+ if self.prompt_len is None:
+ self.prompt_len = prompts.size(1)
+
+ if self.answer_len is None:
+ self.answer_len = answers.size(1)
+
+ assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len
+
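+ # Each quizz gets a random direction: forward stores (prompt, answer),
+ # backward stores (answer, prompt), with the direction token repeated
+ # before each segment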
result = []
for prompt, answer in zip(prompts, answers):
if torch.rand(1) < 0.5:
- a = [torch.tensor([self.token_forward]), prompt, answer]
+ a = [
+ torch.tensor([self.token_forward]),
+ prompt,
+ torch.tensor([self.token_forward]),
+ answer,
+ ]
else:
- a = [torch.tensor([self.token_backward]), answer, prompt]
+ a = [
+ torch.tensor([self.token_backward]),
+ answer,
+ torch.tensor([self.token_backward]),
+ prompt,
+ ]
result.append(torch.cat(a, dim=0)[None, :])
self.batch_size = batch_size
self.device = device
self.logger = logger
+ self.prompt_len = None
+ self.answer_len = None
self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
device
result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
)
+
def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
- print(f"DEBUG {quizzes.size()=}")
- l = (quizzes.size(1) - 1) // 2
- forward = (quizzes[:, 0] == self.token_forward).long()
- backward = (quizzes[:, 0] == self.token_backward).long()
- assert forward.equal(1 - backward)
- first = quizzes[:, 1 : 1 + l]
- second = quizzes[:, 1 + l : 1 + 2 * l]
- prompts = forward[:, None] * first + backward[:, None] * second
- answers = forward[:, None] * second + backward[:, None] * first
+ # Work on a copy, since the in-place flip below would otherwise mutate
+ # the caller's tensor
+ quizzes = quizzes.clone()
+ forward = quizzes[quizzes[:, 0] == self.token_forward]
+ ib = quizzes[:, 0] == self.token_backward
+ backward = quizzes[ib]
+ assert forward.size(0) + backward.size(0) == quizzes.size(0)
+ # Flip the backward quizzes so everything is in (prompt, answer) order
+ quizzes[ib] = self.reverse_time(quizzes[ib])
if prediction:
- predicted_prompts = backward
- predicted_answers = forward
+ predicted_prompts = ib
+ predicted_answers = torch.logical_not(ib)
else:
predicted_prompts = None
predicted_answers = None
self.problem.save_quizzes(
result_dir,
filename_prefix,
- prompts,
- answers,
+ quizzes[:, 1 : 1 + self.prompt_len],
+ quizzes[:, 2 + self.prompt_len :],
predicted_prompts,
predicted_answers,
)
device=self.device,
)
- nb_total, nb_correct = (
- input.size(0),
- (input == result).long().min(dim=1).values.sum(),
- )
+ nb_total = input.size(0)
+ nb_correct = (input == result).long().min(dim=1).values.sum()
return nb_total, nb_correct
else:
self.test_c_quizzes.append(new_c_quizzes)
- def reverse_time(self, c_quizzes):
- l = (c_quizzes.size(1) - 1) // 2
- direction = c_quizzes[:, 0:1]
- direction = self.token_forward * (
- direction == self.token_backward
- ) + self.token_backward * (direction == self.token_forward)
-
- return torch.cat(
- [direction, c_quizzes[:, l + 1 :], c_quizzes[:, 1 : l + 1]], dim=1
- )
-
def compute_correctness(
- self, c_quizzes, models_for_validation, both_directions=True
+ self,
+ c_quizzes,
+ models_for_validation,
+ bidirectional_validation=False,
+ deterministic_validation=True,
):
- reversed_c_quizzes = self.reverse_time(c_quizzes)
+ if bidirectional_validation:
+ backward_c_quizzes = self.reverse_time(c_quizzes)
- ar_mask = self.make_ar_mask(c_quizzes)
- seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
-
- # Check how many of models can solve the quizzes in both directions
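+ # seq_logproba[n, i] accumulates the log-proba of quizz n under the
+ # model of id i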
+ seq_logproba = torch.zeros(
+ c_quizzes.size(0),
+ max([m.id for m in models_for_validation]) + 1,
+ device=self.device,
+ )
nb_correct = 0
for model in models_for_validation:
result = c_quizzes.clone()
+ seq_logproba[...] = 0.0
+
+ ar_mask = self.make_ar_mask(result)
+
masked_inplace_autoregression(
model=model,
batch_size=self.batch_size,
input=result,
ar_mask=ar_mask,
- seq_logproba=seq_logproba,
+ seq_logproba=seq_logproba[:, model.id],
temperature=1.0,
- deterministic_synthesis=True,
+ deterministic_synthesis=deterministic_validation,
# progress_bar_desc="solving c_quizzes",
device=self.device,
)
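+ # A quizz counts as solved only if every regenerated token matches the
+ # original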
correct = (c_quizzes == result).long().min(dim=-1).values
- if both_directions:
- reversed_result = reversed_c_quizzes.clone()
+ if bidirectional_validation:
+ backward_result = backward_c_quizzes.clone()
+
+ ar_mask = self.make_ar_mask(backward_result)
masked_inplace_autoregression(
model=model,
batch_size=self.batch_size,
- input=reversed_result,
+ input=backward_result,
ar_mask=ar_mask,
- seq_logproba=seq_logproba,
+ seq_logproba=seq_logproba[:, model.id],
temperature=1.0,
- deterministic_synthesis=True,
- # progress_bar_desc="solving reversed c_quizzes",
+ deterministic_synthesis=deterministic_validation,
+ # progress_bar_desc="solving backward c_quizzes",
device=self.device,
)
- reversed_correct = (
- (reversed_c_quizzes == reversed_result).long().min(dim=-1).values
+ backward_correct = (
+ (backward_c_quizzes == backward_result).long().min(dim=-1).values
)
- correct *= reversed_correct
+ correct *= backward_correct
# endif
nb_correct += correct
- return nb_correct
+ return nb_correct, seq_logproba
###############################################################
- def generate_quizzes(self, nb, model_for_generation, reverse_cleanup=False):
+ def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
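+ # Generation proceeds in three passes: (1) sample an answer in the
+ # backward direction at the given temperature, (2) sample the matching
+ # prompt at temperature 1/temperature, (3) flip to the forward direction
+ # and re-sample the answer, again at 1/temperature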
c_quizzes = torch.empty(
nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
)
- ar_mask_prompt = torch.zeros(c_quizzes.size(), device=self.device)
- ar_mask_prompt[:, : ar_mask_prompt.size(1) // 2 + 1] = 1
- ar_mask_solve = 1 - ar_mask_prompt
- seq_logproba = torch.empty(ar_mask_prompt.size(0), device=self.device)
-
- if reverse_cleanup:
- warnings.warn("very high temperature with reversed cleanup", RuntimeWarning)
- temperature = 10.0
- else:
- temperature = 1.0
+ seq_logproba = torch.zeros(nb, device=self.device)
- # warnings.warn("noise injection", RuntimeWarning)
- # noise_std = torch.rand(1).item()
- # self.logger(f"{noise_std=}")
+ # First, we generate the answer at high temperature
- # mygpt.set_noise_injection(model_for_generation, noise_std)
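+ # Writing both backward direction tokens makes make_ar_mask select the
+ # answer segment first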
+ c_quizzes[:, 0] = self.token_backward
+ c_quizzes[:, 1 + self.answer_len] = self.token_backward
masked_inplace_autoregression(
model=model_for_generation,
batch_size=self.batch_size,
input=c_quizzes,
- ar_mask=ar_mask_prompt,
+ ar_mask=self.make_ar_mask(c_quizzes, first=True),
seq_logproba=seq_logproba,
temperature=temperature,
deterministic_synthesis=False,
device=self.device,
)
- # mygpt.set_noise_injection(model_for_generation, 0.0)
-
- ave_seq_logproba = seq_logproba.mean()
+ # Then, we generate the prompt at low temperature
masked_inplace_autoregression(
model=model_for_generation,
batch_size=self.batch_size,
input=c_quizzes,
- ar_mask=ar_mask_solve,
+ ar_mask=self.make_ar_mask(c_quizzes),
seq_logproba=seq_logproba,
- temperature=temperature,
- deterministic_synthesis=True,
+ temperature=1 / temperature,
+ deterministic_synthesis=False,
device=self.device,
)
- if reverse_cleanup:
- c_quizzes = self.reverse_time(c_quizzes)
- masked_inplace_autoregression(
- model=model_for_generation,
- batch_size=self.batch_size,
- input=c_quizzes,
- ar_mask=ar_mask_solve,
- seq_logproba=seq_logproba,
- temperature=temperature,
- deterministic_synthesis=True,
- device=self.device,
- )
+ # Then we flip the quizz to the forward direction and re-generate the
+ # answer, now at low temperature
- c_quizzes = self.reverse_time(c_quizzes)
- masked_inplace_autoregression(
- model=model_for_generation,
- batch_size=self.batch_size,
- input=c_quizzes,
- ar_mask=ar_mask_solve,
- seq_logproba=seq_logproba,
- temperature=temperature,
- deterministic_synthesis=True,
- device=self.device,
- )
+ c_quizzes = self.reverse_time(c_quizzes)
+
+ masked_inplace_autoregression(
+ model=model_for_generation,
+ batch_size=self.batch_size,
+ input=c_quizzes,
+ ar_mask=self.make_ar_mask(c_quizzes),
+ seq_logproba=seq_logproba,
+ temperature=1 / temperature,
+ deterministic_synthesis=False,
+ device=self.device,
+ )
- return c_quizzes, seq_logproba.mean()
+ return c_quizzes