# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################
# ar_mask is a tensor with 0s and 1s, of same shape as input, with
# 1s where tokens should be generated. The others are kept
# unchanged.
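#
# For instance (illustrative values only, not taken from an actual
# problem), with
#     input   = [[d, p1, p2, d, a1, a2]]
#     ar_mask = [[0,  0,  0, 0,  1,  1]]
# only the last two positions (the answer) are re-sampled in place; the
# direction token and the prompt tokens are left untouched.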


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature=1.0,
    deterministic_synthesis=False,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache
    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        all_n = torch.arange(t_next.size(0))
        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
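

# Applies one_batch_masked_inplace_autoregression mini-batch by
# mini-batch: the wrapper below splits input / ar_mask / seq_logproba
# into chunks of batch_size, optionally wraps them in a tqdm progress
# bar, and runs the whole generation without gradient tracking.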
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
            )


######################################################################
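

# The methods below belong to the quiz-machine class and operate on
# "quizzes": token sequences of the form
#     [token_forward,  prompt...,  token_forward,  answer...]
# for forward quizzes, or
#     [token_backward, answer...,  token_backward, prompt...]
# for backward quizzes.  indices_forward_and_backward returns, for a
# batch of quizzes, the boolean masks of which quizzes are forward and
# which are backward.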
def indices_forward_and_backward(self, quizzes):
    i_forward = quizzes[:, 0] == self.token_forward
    j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
    i_backward = quizzes[:, 0] == self.token_backward
    j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
    assert torch.logical_or(
        torch.logical_and(i_forward, j_forward),
        torch.logical_and(i_backward, j_backward),
    ).all()
    return i_forward, i_backward
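

# reverse_time swaps the prompt and answer parts of every quiz and
# flips its direction tokens, so that a forward quiz becomes the
# corresponding backward quiz and vice versa.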
def reverse_time(self, quizzes):
    i_forward, i_backward = self.indices_forward_and_backward(quizzes)

    forward_to_backward = torch.cat(
        [
            quizzes[:, 0:1],
            quizzes[:, 2 + self.prompt_len :],
            quizzes[:, 1 + self.prompt_len : 2 + self.prompt_len],
            quizzes[:, 1 : 1 + self.prompt_len],
        ],
        dim=1,
    )

    forward_to_backward[:, 0] = self.token_backward
    forward_to_backward[:, 1 + self.answer_len] = self.token_backward

    backward_to_forward = torch.cat(
        [
            quizzes[:, 0:1],
            quizzes[:, 2 + self.answer_len :],
            quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
            quizzes[:, 1 : 1 + self.answer_len],
        ],
        dim=1,
    )

    backward_to_forward[:, 0] = self.token_forward
    backward_to_forward[:, 1 + self.prompt_len] = self.token_forward

    m = i_forward.long()[:, None]

    return m * forward_to_backward + (1 - m) * backward_to_forward
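

# make_ar_mask builds the 0/1 generation mask for a batch of quizzes:
# with first=True it selects the first segment (the one right after the
# initial direction token), otherwise it selects the second segment
# (everything after the middle direction token).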
def make_ar_mask(self, quizzes, first=False):
    i_forward, i_backward = self.indices_forward_and_backward(quizzes)

    t = torch.arange(quizzes.size(1), device=quizzes.device)

    if first:
        m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
        m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
    else:
        m_forward = (t >= 2 + self.prompt_len).long()
        m_backward = (t >= 2 + self.answer_len).long()

    m = i_forward.long()[:, None]

    return m * m_forward + (1 - m) * m_backward
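

# generate_token_sequences draws nb prompt/answer pairs from the
# underlying problem and assembles each pair into a quiz, picking a
# forward or a backward layout at random with probability 1/2.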
def generate_token_sequences(self, nb):
    prompts, answers = self.problem.generate_prompts_and_answers(nb)

    if self.prompt_len is None:
        self.prompt_len = prompts.size(1)

    if self.answer_len is None:
        self.answer_len = answers.size(1)

    assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len

    result = []

    for prompt, answer in zip(prompts, answers):
        if torch.rand(1) < 0.5:
            a = [
                torch.tensor([self.token_forward]),
                prompt,
                torch.tensor([self.token_forward]),
                answer,
            ]
        else:
            a = [
                torch.tensor([self.token_backward]),
                answer,
                torch.tensor([self.token_backward]),
                prompt,
            ]

        result.append(torch.cat(a, dim=0)[None, :])

    return torch.cat(result, dim=0)
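

# The constructor below wraps a problem instance, reserves two extra
# token values for the forward/backward direction markers, and
# pre-generates the "world" quizzes (w_quizzes) used for training and
# testing.  The c_quizzes lists will later receive the "culture"
# quizzes produced by the models themselves.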
def __init__(
    self,
    problem,
    nb_train_samples,
    nb_test_samples,
    back_accuracy,
    batch_size,
    result_dir=None,
    logger=None,
    device=torch.device("cpu"),
):
    v = problem.nb_token_values()
    self.token_forward = v
    self.token_backward = v + 1
    self.nb_token_values = v + 2

    self.problem = problem
    self.back_accuracy = back_accuracy
    self.batch_size = batch_size
    self.device = device
    self.logger = logger

    self.prompt_len = None
    self.answer_len = None

    self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
        device
    )

    self.test_w_quizzes = self.generate_token_sequences(nb_test_samples).to(device)

    self.train_c_quizzes = []
    self.test_c_quizzes = []

    if result_dir is not None:
        self.save_quizzes(
            result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
        )

        # toto = self.reverse_time(self.train_w_quizzes[:72])
        # self.save_quizzes(result_dir, "toto", toto)
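

# save_quizzes renders a batch of quizzes through the problem's own
# saving routine.  Backward quizzes are first flipped back to the
# forward layout so that prompts and answers are always displayed in
# the same order; when prediction=True, the flags indicate which part
# of each quiz was generated by a model.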
def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
    forward = quizzes[quizzes[:, 0] == self.token_forward]
    ib = quizzes[:, 0] == self.token_backward
    backward = quizzes[ib]
    assert forward.size(0) + backward.size(0) == quizzes.size(0)
    quizzes[ib] = self.reverse_time(quizzes[ib])

    if prediction:
        predicted_prompts = ib
        predicted_answers = torch.logical_not(ib)
    else:
        predicted_prompts = None
        predicted_answers = None

    self.problem.save_quizzes(
        result_dir,
        filename_prefix,
        quizzes[:, 1 : 1 + self.prompt_len],
        quizzes[:, 2 + self.prompt_len :],
        predicted_prompts,
        predicted_answers,
    )
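

# batches yields training or test mini-batches.  It mixes the stored
# "culture" quizzes (c_quizzes) with the pre-generated "world" quizzes
# (w_quizzes), capping the c_quizzes at half of the total, shuffles the
# result, and splits it into chunks of batch_size.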
def batches(self, split="train", desc=None):
    assert split in {"train", "test"}

    if split == "train":
        w_quizzes = self.train_w_quizzes
        c_quizzes = self.train_c_quizzes
    else:
        w_quizzes = self.test_w_quizzes
        c_quizzes = self.test_c_quizzes

    if len(c_quizzes) > 0:
        c_quizzes = torch.cat(c_quizzes, dim=0)
        if c_quizzes.size(0) > w_quizzes.size(0) // 2:
            i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
            c_quizzes = c_quizzes[i]

        i = torch.randperm(w_quizzes.size(0))[
            : w_quizzes.size(0) - c_quizzes.size(0)
        ]
        w_quizzes = w_quizzes[i]

        self.nb_batch_w_quizzes = w_quizzes.size(0)
        self.nb_batch_c_quizzes = c_quizzes.size(0)

        input = torch.cat([w_quizzes, c_quizzes], dim=0)
    else:
        input = w_quizzes
        self.nb_batch_w_quizzes = w_quizzes.size(0)
        self.nb_batch_c_quizzes = 0

    input = input[torch.randperm(input.size(0))]

    if desc is None:
        desc = f"epoch-{split}"
    for batch in tqdm.tqdm(
        input.split(self.batch_size), dynamic_ncols=True, desc=desc
    ):
        yield batch


def vocabulary_size(self):
    return self.nb_token_values
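

# produce_results evaluates a model at the end of an epoch: it measures
# the prediction accuracy on (at most nmax) train and test world
# quizzes, logs the counts, saves an image of predicted test quizzes,
# and returns the test accuracy.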
def produce_results(
    self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
):
    def compute_accuracy(input):
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        if self.back_accuracy:
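            # For the backward quizzes, accuracy is not measured by
            # whether the true prompt was synthesized, but by whether
            # the model recovers the correct answer when it is given
            # the synthesized prompt.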
            n_forward = input[:, 0] == self.token_forward
            nb_total = input[n_forward].size(0)
            nb_correct = (
                (input[n_forward] == result[n_forward])
                .long()
                .min(dim=1)
                .values.sum()
            )

            n_backward = input[:, 0] == self.token_backward
            back_input = self.reverse_time(result[n_backward])
            if back_input.size(0) > 0:
                back_input[:, 2 + self.prompt_len :] = input[
                    n_backward, 2 + self.prompt_len :
                ]
                back_nb_total, back_nb_correct = compute_accuracy(back_input)
                nb_total += back_nb_total
                nb_correct += back_nb_correct
        else:
            nb_total = input.size(0)
            nb_correct = (input == result).long().min(dim=1).values.sum()

        return nb_total, nb_correct

    train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes[:nmax])

    self.logger(
        f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
    )

    test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes[:nmax])

    self.logger(
        f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
    )

    main_test_accuracy = test_nb_correct / test_nb_total
    self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

    ##############################

    input = self.test_w_quizzes[:96]
    ar_mask = self.make_ar_mask(input)
    result = input.clone() * (1 - ar_mask)
    seq_logproba = torch.empty(input.size(0), device=self.device)

    masked_inplace_autoregression(
        model=model,
        batch_size=self.batch_size,
        input=result,
        ar_mask=ar_mask,
        seq_logproba=seq_logproba,
        temperature=1.0,
        deterministic_synthesis=deterministic_synthesis,
        progress_bar_desc=None,
        device=self.device,
    )

    self.save_quizzes(
        result_dir,
        f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
        quizzes=result[:72],
        prediction=True,
    )

    return main_test_accuracy
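

# renew_w_quizzes refreshes the pool of world quizzes: it shifts the
# existing ones toward the beginning and overwrites the last nb entries
# with freshly generated sequences.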
def renew_w_quizzes(self, nb, for_train=True):
    input = self.train_w_quizzes if for_train else self.test_w_quizzes
    nb = min(nb, input.size(0))
    input[:-nb] = input[nb:].clone()
    input[-nb:] = self.generate_token_sequences(nb).to(self.device)

def store_c_quizzes(self, new_c_quizzes, for_train=True):
    if for_train:
        self.train_c_quizzes.append(new_c_quizzes)
    else:
        self.test_c_quizzes.append(new_c_quizzes)
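

# compute_correctness has every validation model solve the candidate
# c_quizzes.  A quiz is solved by a model when all of its generated
# tokens match the original; nb_correct accumulates, for each quiz, the
# number of models that solved it, and seq_logproba keeps one
# log-probability column per model.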
def compute_correctness(
    self,
    c_quizzes,
    models_for_validation,
    bidirectional_validation=False,
    deterministic_validation=True,
):
    if bidirectional_validation:
        backward_c_quizzes = self.forward_to_backward(c_quizzes)

    seq_logproba = torch.zeros(
        c_quizzes.size(0),
        max([m.id for m in models_for_validation]) + 1,
        device=self.device,
    )

    nb_correct = 0

    for model in models_for_validation:
        result = c_quizzes.clone()

        seq_logproba[...] = 0.0

        ar_mask = self.make_ar_mask(result)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba[:, model.id],
            temperature=1.0,
            deterministic_synthesis=deterministic_validation,
            # progress_bar_desc="solving c_quizzes",
            device=self.device,
        )

        correct = (c_quizzes == result).long().min(dim=-1).values

        if bidirectional_validation:
            backward_result = backward_c_quizzes.clone()

            ar_mask = self.make_ar_mask(backward_result)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=backward_result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba[:, model.id],
                temperature=1.0,
                deterministic_synthesis=deterministic_validation,
                # progress_bar_desc="solving backward c_quizzes",
                device=self.device,
            )

            backward_correct = (
                (backward_c_quizzes == backward_result).long().min(dim=-1).values
            )

            correct *= backward_correct

        nb_correct += correct

    return nb_correct, seq_logproba


###############################################################
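

# generate_quizzes creates new candidate c_quizzes with a given model:
# the answer part is sampled first at temperature `temperature`, then
# the prompt at temperature 1/temperature, and finally the quiz is
# time-reversed and the response generated again at the lower
# temperature.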
def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
    c_quizzes = torch.empty(
        nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
    )

    seq_logproba = torch.zeros(nb, device=self.device)

    # First, we generate the answer at high temperature

    c_quizzes[:, 0] = self.token_backward
    c_quizzes[:, 1 + self.answer_len] = self.token_backward

    masked_inplace_autoregression(
        model=model_for_generation,
        batch_size=self.batch_size,
        input=c_quizzes,
        ar_mask=self.make_ar_mask(c_quizzes, first=True),
        seq_logproba=seq_logproba,
        temperature=temperature,
        deterministic_synthesis=False,
        device=self.device,
    )

    # Then, we generate the prompt at low temperature

    masked_inplace_autoregression(
        model=model_for_generation,
        batch_size=self.batch_size,
        input=c_quizzes,
        ar_mask=self.make_ar_mask(c_quizzes),
        seq_logproba=seq_logproba,
        temperature=1 / temperature,
        deterministic_synthesis=False,
        device=self.device,
    )

    # Then we reverse the quiz and re-generate the response, now at
    # low temperature

    c_quizzes = self.reverse_time(c_quizzes)

    masked_inplace_autoregression(
        model=model_for_generation,
        batch_size=self.batch_size,
        input=c_quizzes,
        ar_mask=self.make_ar_mask(c_quizzes),
        seq_logproba=seq_logproba,
        temperature=1 / temperature,
        deterministic_synthesis=False,