#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt
from mygpt import BracketedSequence

######################################################################

# ar_mask is a tensor of 0s and 1s, with the same shape as input, with
# 1s at the positions where tokens should be generated; the other
# positions are kept unchanged.


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature=1.0,
    deterministic_synthesis=False,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache
    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        # logits[all_n, t_next] already holds one value per sequence, the
        # log-proba of the token just sampled for that sequence; summing it
        # would collapse the batch to a single scalar
        all_n = torch.arange(t_next.size(0))
        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


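# A minimal sketch (not called anywhere) of the ar_mask convention used
# above, with hypothetical values: positions flagged 1 are overwritten
# with the sampled token, positions flagged 0 keep their original value.
def _example_ar_mask_update():
    input = torch.tensor([[7, 7, 7, 7]])
    ar_mask = torch.tensor([[0, 0, 1, 1]])
    t_next = torch.tensor([3])  # token sampled at step s
    s = 2
    input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
    assert input.tolist() == [[7, 7, 3, 7]]  # position 3 is filled at s=3

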
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
            )

        model.train(t)


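# A hedged usage sketch (hypothetical model and sizes, not called
# anywhere): fill the masked positions of a batch of quizzes in place
# with a trained model, collecting per-sequence log-probabilities.
def _example_masked_inplace_autoregression(model, quizzes, ar_mask, device):
    seq_logproba = torch.zeros(quizzes.size(0), device=device)
    masked_inplace_autoregression(
        model=model,
        batch_size=25,
        input=quizzes,  # modified in place
        ar_mask=ar_mask,
        seq_logproba=seq_logproba,
        temperature=1.0,
        deterministic_synthesis=False,
        progress_bar_desc="sampling",
        device=device,
    )
    return quizzes, seq_logproba

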
######################################################################


class QuizzMachine:
    def indices_forward_and_backward(self, quizzes):
        i_forward = quizzes[:, 0] == self.token_forward
        j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
        i_backward = quizzes[:, 0] == self.token_backward
        j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
        assert torch.logical_or(
            torch.logical_and(i_forward, j_forward),
            torch.logical_and(i_backward, j_backward),
        ).all()
        return i_forward, i_backward

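    # Quizzes are flat token sequences with one of two layouts (a sketch,
    # with hypothetical prompt_len=3 and answer_len=2):
    #
    #   forward:  [ token_forward,  p0, p1, p2, token_forward,  a0, a1 ]
    #   backward: [ token_backward, a0, a1, token_backward, p0, p1, p2 ]
    #
    # The direction token appears at position 0 and again between the two
    # parts, which is what the assertion above checks.
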
    def reverse_time(self, quizzes):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        forward_to_backward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.prompt_len :],
                quizzes[:, 1 + self.prompt_len : 2 + self.prompt_len],
                quizzes[:, 1 : 1 + self.prompt_len],
            ],
            dim=1,
        )
        forward_to_backward[:, 0] = self.token_backward
        forward_to_backward[:, 1 + self.answer_len] = self.token_backward

        backward_to_forward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.answer_len :],
                quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
                quizzes[:, 1 : 1 + self.answer_len],
            ],
            dim=1,
        )

        backward_to_forward[:, 0] = self.token_forward
        backward_to_forward[:, 1 + self.prompt_len] = self.token_forward

        m = i_forward.long()[:, None]

        return m * forward_to_backward + (1 - m) * backward_to_forward

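    # Both rewrites above are computed for every row; m then selects, per
    # row, forward_to_backward for forward quizzes and backward_to_forward
    # for backward ones, so a batch mixing both directions is handled in a
    # single pass.
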
    def make_ar_mask(self, quizzes, first=False):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        t = torch.arange(quizzes.size(1), device=quizzes.device)

        if first:
            m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
            m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
        else:
            m_forward = (t >= 2 + self.prompt_len).long()
            m_backward = (t >= 2 + self.answer_len).long()

        m = i_forward.long()[:, None]

        return m * m_forward + (1 - m) * m_backward

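    # A sketch of the resulting mask on a forward quiz (same hypothetical
    # prompt_len=3, answer_len=2 as above):
    #
    #   first=True :  [0, 1, 1, 1, 0, 0, 0]  -> generate the first part
    #   first=False:  [0, 0, 0, 0, 0, 1, 1]  -> generate the final part
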
    def generate_token_sequences(self, nb):
        prompts, answers = self.problem.generate_prompts_and_answers(nb)

        print(f"{prompts.size()=} {answers.size()=}")

        if self.prompt_len is None:
            self.prompt_len = prompts.size(1)

        if self.answer_len is None:
            self.answer_len = answers.size(1)

        assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len

        result = []

        for prompt, answer in zip(prompts, answers):
            if torch.rand(1) < 0.5:
                a = [
                    torch.tensor([self.token_forward]),
                    prompt,
                    torch.tensor([self.token_forward]),
                    answer,
                ]
            else:
                a = [
                    torch.tensor([self.token_backward]),
                    answer,
                    torch.tensor([self.token_backward]),
                    prompt,
                ]

            result.append(torch.cat(a, dim=0)[None, :])

        return torch.cat(result, dim=0)

    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir,
        logger,
        device=torch.device("cpu"),
    ):
        super().__init__()

        v = problem.nb_token_values()
        self.token_forward = v
        self.token_backward = v + 1
        self.nb_token_values = v + 2

        self.problem = problem
        self.batch_size = batch_size
        self.device = device
        self.logger = logger
        self.prompt_len = None
        self.answer_len = None

        self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
            device
        )

        self.test_w_quizzes = self.generate_token_sequences(nb_test_samples).to(device)

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.save_quizzes(
                result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
            )

    def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
        forward = quizzes[quizzes[:, 0] == self.token_forward]
        ib = quizzes[:, 0] == self.token_backward
        backward = quizzes[ib]
        assert forward.size(0) + backward.size(0) == quizzes.size(0)

        # Put every quiz in forward order, so that the prompt/answer slices
        # below are valid for all of them
        quizzes[ib] = self.reverse_time(quizzes[ib])

        if prediction:
            predicted_prompts = ib
            predicted_answers = torch.logical_not(ib)
        else:
            predicted_prompts = None
            predicted_answers = None

        self.problem.save_quizzes(
            result_dir,
            filename_prefix,
            quizzes[:, 1 : 1 + self.prompt_len],
            quizzes[:, 2 + self.prompt_len :],
            predicted_prompts,
            predicted_answers,
        )

    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

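    # E.g. with 1000 w_quizzes and 800 stored c_quizzes (hypothetical
    # sizes), the c_quizzes are subsampled to 500 and 500 w_quizzes are
    # kept, so generated quizzes never make up more than half of an epoch
    # and the epoch size stays constant.
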
    def vocabulary_size(self):
        return self.nb_token_values

    def produce_results(
        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total = input.size(0)
            # A quiz counts as correct only if all its tokens match
            nb_correct = (input == result).long().min(dim=1).values.sum()

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        self.logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)

        self.logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.save_quizzes(
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            quizzes=result[:72],
            prediction=True,
        )

        return main_test_accuracy

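    # A hedged usage sketch (hypothetical arguments, not executed):
    #
    #   accuracy = quizz_machine.produce_results(
    #       n_epoch=0,
    #       model=model,  # assumed to expose .id and the mygpt interface
    #       result_dir="results",
    #       deterministic_synthesis=True,
    #   )
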
    def renew_w_quizzes(self, nb, for_train=True):
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = self.generate_token_sequences(nb).to(self.device)

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def compute_correctness(
        self,
        c_quizzes,
        models_for_validation,
        bidirectional_validation=False,
        deterministic_validation=True,
    ):
        if bidirectional_validation:
            # Map the (forward) c_quizzes to backward time, so that they
            # can also be validated in the reverse direction
            backward_c_quizzes = self.reverse_time(c_quizzes)

        seq_logproba = torch.zeros(
            c_quizzes.size(0),
            max([m.id for m in models_for_validation]) + 1,
            device=self.device,
        )

        nb_correct = 0

        for model in models_for_validation:
            result = c_quizzes.clone()

            seq_logproba[...] = 0.0

            ar_mask = self.make_ar_mask(result)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba[:, model.id],
                temperature=1.0,
                deterministic_synthesis=deterministic_validation,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            if bidirectional_validation:
                backward_result = backward_c_quizzes.clone()

                ar_mask = self.make_ar_mask(backward_result)

                masked_inplace_autoregression(
                    model=model,
                    batch_size=self.batch_size,
                    input=backward_result,
                    ar_mask=ar_mask,
                    seq_logproba=seq_logproba[:, model.id],
                    temperature=1.0,
                    deterministic_synthesis=deterministic_validation,
                    # progress_bar_desc="solving backward c_quizzes",
                    device=self.device,
                )

                backward_correct = (
                    (backward_c_quizzes == backward_result).long().min(dim=-1).values
                )

                correct *= backward_correct

            # endif

            nb_correct += correct

        return nb_correct, seq_logproba

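    # nb_correct ends up in {0, ..., len(models_for_validation)}: for each
    # quiz it counts how many validators reproduced it exactly (in both
    # time directions when bidirectional_validation is set), so a caller
    # can threshold it as a vote.
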
    ###############################################################

    def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
        c_quizzes = torch.empty(
            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
        )

        seq_logproba = torch.zeros(nb, device=self.device)

        # First, we generate the answer at high temperature

        c_quizzes[:, 0] = self.token_backward
        c_quizzes[:, 1 + self.answer_len] = self.token_backward

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes, first=True),
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Then, we generate the prompt at low temperature

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Finally, we reverse the quizz in time and re-generate the
        # answer, now at low temperature

        c_quizzes = self.reverse_time(c_quizzes)

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        return c_quizzes
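

######################################################################

# A hedged end-to-end sketch (hypothetical problem, model and validator
# objects, not called anywhere): generate candidate quizzes with one
# model and keep the ones that every validator solves.
def _example_culture_loop(quizz_machine, model_for_generation, validators):
    c_quizzes = quizz_machine.generate_quizzes(
        nb=100, model_for_generation=model_for_generation, temperature=2.0
    )
    nb_correct, seq_logproba = quizz_machine.compute_correctness(
        c_quizzes, models_for_validation=validators
    )
    # Unanimity is an assumed selection criterion, not one prescribed by
    # the code above
    to_keep = c_quizzes[nb_correct == len(validators)]
    quizz_machine.store_c_quizzes(to_keep, for_train=True)
    return to_keep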