#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings, sys

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt
from mygpt import BracketedSequence

import threading

######################################################################
# If output contains the log-probabilities log P(X=y) and target is
# Y, returns -log P(X=Y) + H(X | X != Y)


# output is NxCxT and target is NxT
def confusion(output, target, reduction="mean"):
    N, C, T = output.shape
    output = output.permute(0, 2, 1).reshape(-1, C)
    target = target.flatten()
    all_t = torch.arange(N * T, device=output.device)
    output = output.log_softmax(dim=-1)
    result = -output[all_t, target]

    # Renormalize over the non-target classes and add their entropy
    output[all_t, target] = float("-inf")
    output = output.log_softmax(dim=-1)
    e = output.exp()
    output[all_t, target] = 0  # avoid -inf * 0 = nan in the sum below
    result = result - (output * e).sum(-1)

    if reduction == "none":
        return result.reshape(N, T)
    elif reduction == "mean":
        return result.reshape(N, T).mean()
    elif reduction == "sum":
        return result.reshape(N, T).sum()
    else:
        raise ValueError(f"unknown reduction '{reduction}'.")
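
# A minimal usage sketch (shapes are illustrative assumptions; note
# that output may be unnormalized logits, since confusion()
# re-applies log_softmax internally):
#
#   output = torch.randn(4, 10, 7)      # N=4, C=10 classes, T=7 tokens
#   target = torch.randint(10, (4, 7))  # N x T ground-truth tokens
#   loss = confusion(output, target)    # scalar, with reduction="mean"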


######################################################################

# ar_mask is a tensor of 0s and 1s, of the same shape as input, with
# 1s where tokens should be generated; the other positions are kept
# unchanged.
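#
# Generation proceeds position by position over the span covered by
# ar_mask, feeding the model one new token at a time and relying on
# the model's internal cache for the preceding positions.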


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache

    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        all_n = torch.arange(t_next.size(0), device=input.device)

        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
            )

        model.train(t)


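# A minimal usage sketch, mirroring how produce_results() calls this
# function below (quiz_machine, model, and device are illustrative
# assumptions):
#
#   result = quizzes.clone().to(device)
#   ar_mask = quiz_machine.make_ar_mask(result)
#   result *= 1 - ar_mask  # blank out the positions to be generated
#   seq_logproba = torch.zeros(result.size(0), device=device)
#   masked_inplace_autoregression(
#       model=model, batch_size=25, input=result, ar_mask=ar_mask,
#       seq_logproba=seq_logproba, temperature=1.0,
#       deterministic_synthesis=False, device=device,
#   )
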
######################################################################


class QuizMachine:
    def indices_forward_and_backward(self, quizzes):
        i_forward = quizzes[:, 0] == self.token_forward
        j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
        i_backward = quizzes[:, 0] == self.token_backward
        j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
        assert torch.logical_or(
            torch.logical_and(i_forward, j_forward),
            torch.logical_and(i_backward, j_backward),
        ).all()
        return i_forward, i_backward

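    # Quiz layout assumed by the slicing throughout this class
    # (inferred from the indices checked above and from reverse_time):
    #
    #   forward:  [token_forward,  prompt (prompt_len), token_forward,  answer (answer_len)]
    #   backward: [token_backward, answer (answer_len), token_backward, prompt (prompt_len)]
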
    def non_trivial(self, quizzes):
        quizzes = quizzes.clone()
        n_backward = quizzes[:, 0] == self.token_backward
        quizzes[n_backward] = self.reverse_time(quizzes[n_backward])
        return torch.logical_not(
            self.problem.trivial_prompts_and_answers(
                quizzes[:, 1 : 1 + self.prompt_len],
                quizzes[:, 2 + self.prompt_len :],
            )
        )

    def reverse_time(self, quizzes):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        forward_to_backward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.prompt_len : 2 + self.prompt_len + self.answer_len],
                quizzes[:, 1 + self.prompt_len : 1 + self.prompt_len + 1],
                quizzes[:, 1 : 1 + self.prompt_len],
            ],
            dim=1,
        )

        forward_to_backward[:, 0] = self.token_backward
        forward_to_backward[:, 1 + self.answer_len] = self.token_backward

        backward_to_forward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.answer_len :],
                quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
                quizzes[:, 1 : 1 + self.answer_len],
            ],
            dim=1,
        )

        backward_to_forward[:, 0] = self.token_forward
        backward_to_forward[:, 1 + self.prompt_len] = self.token_forward

        m = i_forward.long()[:, None]

        return m * forward_to_backward + (1 - m) * backward_to_forward

    def reverse_random_half_in_place(self, quizzes):
        i = torch.rand(quizzes.size(0)) < 0.5
        if i.any():
            quizzes[i] = self.reverse_time(quizzes[i])

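    # Returns a 0/1 mask, of the same shape as quizzes, of the
    # positions to generate: with first=False (the default), the
    # segment after the second direction token (the answer of a
    # forward quiz, the prompt of a backward one); with first=True,
    # the segment right after the first direction token.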
    def make_ar_mask(self, quizzes, first=False):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        t = torch.arange(quizzes.size(1), device=quizzes.device)

        if first:
            m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
            m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
        else:
            m_forward = (t >= 2 + self.prompt_len).long()
            m_backward = (t >= 2 + self.answer_len).long()

        m = i_forward.long()[:, None]

        return m * m_forward + (1 - m) * m_backward

    def generate_token_sequences(self, nb):
        prompts, answers = self.problem.generate_prompts_and_answers(nb)

        if self.prompt_len is None:
            self.prompt_len = prompts.size(1)

        if self.answer_len is None:
            self.answer_len = answers.size(1)

        assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len

        result = []

        for prompt, answer in zip(prompts, answers):
            a = [
                torch.tensor([self.token_forward]),
                prompt,
                torch.tensor([self.token_forward]),
                answer,
            ]

            result.append(torch.cat(a, dim=0)[None, :])

        return torch.cat(result, dim=0)
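
    # Freshly generated sequences are always in forward order;
    # renew_w_quizzes() below flips a random half of them with
    # reverse_random_half_in_place().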

    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        back_accuracy,
        batch_size,
        result_dir,
        logger,
        device=torch.device("cpu"),
    ):
        super().__init__()

        v = problem.nb_token_values()
        self.token_forward = v
        self.token_backward = v + 1
        self.nb_token_values = v + 2

        self.problem = problem
        self.back_accuracy = back_accuracy
        self.batch_size = batch_size
        self.device = device
        self.logger = logger
        self.prompt_len = None
        self.answer_len = None

        self.LOCK_C_QUIZZES = threading.Lock()
        self.train_c_quizzes = []
        self.test_c_quizzes = []


    def save_quiz_illustrations(
        self,
        result_dir,
        filename_prefix,
        quizzes,
        mistakes=None,
    ):
        quizzes = quizzes.clone().to("cpu")
        n_forward = quizzes[:, 0] == self.token_forward
        n_backward = quizzes[:, 0] == self.token_backward
        assert (n_forward.long() + n_backward.long() == 1).all()
        quizzes[n_backward] = self.reverse_time(quizzes[n_backward])

        predicted_prompts = n_backward.long()
        predicted_answers = 1 - predicted_prompts
        if mistakes is not None:
            # 0/-1/+1 ~ not-to-predict / predicted wrong / predicted correct
            predicted_prompts *= mistakes.to("cpu")
            predicted_answers *= mistakes.to("cpu")
        else:
            # 0/2 ~ not-to-predict / to predict
            predicted_prompts *= 2
            predicted_answers *= 2

        self.problem.save_quiz_illustrations(
            result_dir,
            filename_prefix,
            quizzes[:, 1 : 1 + self.prompt_len],
            quizzes[:, 2 + self.prompt_len :],
            predicted_prompts,
            predicted_answers,
        )

    def vocabulary_size(self):
        return self.nb_token_values

    ######################################################################

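    # Yields training (or test) batches mixing the model's private
    # "world" quizzes with the shared "culture" quizzes. Culture
    # quizzes are capped at half of the total, and world quizzes are
    # subsampled so that the overall number of samples stays the same.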
    def batches(self, model, split="train", desc=None):
        assert split in {"train", "test"}

        with self.LOCK_C_QUIZZES:
            if split == "train":
                w_quizzes = model.train_w_quizzes
                c_quizzes = self.train_c_quizzes
            else:
                w_quizzes = model.test_w_quizzes
                c_quizzes = self.test_c_quizzes

            if len(c_quizzes) > 0:
                c_quizzes = torch.cat(c_quizzes, dim=0)
                if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                    i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                    c_quizzes = c_quizzes[i]

                i = torch.randperm(w_quizzes.size(0))[
                    : w_quizzes.size(0) - c_quizzes.size(0)
                ]
                w_quizzes = w_quizzes[i]

                self.nb_batch_w_quizzes = w_quizzes.size(0)
                self.nb_batch_c_quizzes = c_quizzes.size(0)

                input = torch.cat([w_quizzes, c_quizzes], dim=0)
            else:
                input = w_quizzes
                self.nb_batch_w_quizzes = w_quizzes.size(0)
                self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, log_prefix=None):
            input = input.to(self.device)
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            # zero-initialized, since masked_inplace_autoregression
            # accumulates into it
            seq_logproba = torch.zeros(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            correct = torch.empty(input.size(0), dtype=torch.int64, device=input.device)

            n_forward = input[:, 0] == self.token_forward
            n_backward = input[:, 0] == self.token_backward

            correct[n_forward] = (
                (input[n_forward] == result[n_forward]).long().min(dim=1).values
            )

            # Without back_accuracy, backward quizzes are judged by
            # exact reconstruction too
            correct[n_backward] = (
                (input[n_backward] == result[n_backward]).long().min(dim=1).values
            )

            if self.back_accuracy and n_backward.any():
                # A backward quiz B->A* is judged by re-running the
                # predicted prompt A* forward and checking that it
                # regenerates the original answer (B->A*->B*=B),
                # instead of requiring A*=A
                back_input = self.reverse_time(result[n_backward])
                back_input[:, 2 + self.prompt_len :] = input[
                    n_backward, 1 : 1 + self.answer_len
                ]
                _, correct[n_backward] = compute_accuracy(back_input)

            if log_prefix is not None:
                forward_nb_correct = correct[n_forward].sum()
                forward_nb_total = correct[n_forward].size(0)
                backward_nb_correct = correct[n_backward].sum()
                backward_nb_total = correct[n_backward].size(0)

                self.logger(
                    f"{log_prefix}_accuracy {n_epoch} model {model.id} forward {forward_nb_correct} / {forward_nb_total} backward {backward_nb_correct} / {backward_nb_total}"
                )

            return result, correct

        # compute_accuracy(model.train_w_quizzes[:nmax], log_prefix="train")

        test_result, test_correct = compute_accuracy(
            model.test_w_quizzes[:nmax], log_prefix="test"
        )

        main_test_accuracy = test_correct.sum() / test_correct.size(0)
        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        self.save_quiz_illustrations(
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            quizzes=test_result[:72],
            mistakes=test_correct[:72] * 2 - 1,
        )

        return main_test_accuracy

    ######################################################################

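    # Replaces the nb first (oldest) world quizzes of the model with
    # freshly generated ones, a random half of them reversed in time.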
    def renew_w_quizzes(self, model, nb, for_train=True):
        input = model.train_w_quizzes if for_train else model.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        fresh_w_quizzes = self.generate_token_sequences(nb)
        self.reverse_random_half_in_place(fresh_w_quizzes)
        input[-nb:] = fresh_w_quizzes.to("cpu")

    ######################################################################

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        with self.LOCK_C_QUIZZES:
            if for_train:
                self.train_c_quizzes.append(new_c_quizzes.to("cpu"))
            else:
                self.test_c_quizzes.append(new_c_quizzes.to("cpu"))

    def save_c_quizzes(self, filename):
        torch.save((self.train_c_quizzes, self.test_c_quizzes), filename)

    def load_c_quizzes(self, filename):
        self.train_c_quizzes, self.test_c_quizzes = torch.load(filename)

    ######################################################################

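    # Returns, for every quiz and every model, the per-token
    # log-probabilities the model assigns to the tokens to be
    # generated (zero outside the autoregression mask).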
    def solution_token_logprobas(self, models, c_quizzes):
        logproba = c_quizzes.new_zeros(
            c_quizzes.size(0),
            len(models),
            c_quizzes.size(1),
            device=self.device,
            dtype=torch.float32,
        )

        for model in models:
            with torch.autograd.no_grad():
                t = model.training
                model.eval()

                for input, l in zip(
                    c_quizzes.split(self.batch_size), logproba.split(self.batch_size)
                ):
                    input = input.to(self.device)
                    ar_mask = self.make_ar_mask(input)
                    output = model(mygpt.BracketedSequence(input)).x
                    l[:, model.id] = (
                        -F.cross_entropy(
                            output.transpose(1, 2), input, reduction="none"
                        )
                        * ar_mask
                    )

                model.train(t)

        return logproba.to("cpu")

    ###############################################################

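    # Returns, for each c_quiz, the number of validation models that
    # reconstruct the masked part exactly (in both time directions if
    # bidirectional_validation is set), together with the per-model
    # sequence log-probabilities.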
    def compute_correctness(
        self,
        c_quizzes,
        models_for_validation,
        bidirectional_validation=False,
        deterministic_validation=True,
    ):
        if bidirectional_validation:
            # c_quizzes are in forward order here, so reverse_time
            # gives their backward version
            backward_c_quizzes = self.reverse_time(c_quizzes)

        seq_logproba = torch.zeros(
            c_quizzes.size(0),
            max([m.id for m in models_for_validation]) + 1,
            device=self.device,
        )

        nb_correct = 0

        for model in models_for_validation:
            result = c_quizzes.clone()

            ar_mask = self.make_ar_mask(result)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba[:, model.id],
                temperature=1.0,
                deterministic_synthesis=deterministic_validation,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            if bidirectional_validation:
                backward_result = backward_c_quizzes.clone()

                ar_mask = self.make_ar_mask(backward_result)

                masked_inplace_autoregression(
                    model=model,
                    batch_size=self.batch_size,
                    input=backward_result,
                    ar_mask=ar_mask,
                    seq_logproba=seq_logproba[:, model.id],
                    temperature=1.0,
                    deterministic_synthesis=deterministic_validation,
                    # progress_bar_desc="solving backward c_quizzes",
                    device=self.device,
                )

                backward_correct = (
                    (backward_c_quizzes == backward_result).long().min(dim=-1).values
                )

                correct *= backward_correct

            nb_correct += correct

        return nb_correct, seq_logproba

    ###############################################################

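    # Generates nb new c_quizzes with a three-pass scheme: the answer
    # is sampled first at temperature `temperature`, the matching
    # prompt then at temperature 1/temperature, and, after reversing
    # time, the answer is re-generated at 1/temperature.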
    def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
        c_quizzes = torch.empty(
            nb,
            self.prompt_len + self.answer_len + 2,
            device=self.device,
            dtype=torch.int64,
        )

        seq_logproba = torch.zeros(nb, device=self.device)

        # First, we generate the answer at high temperature

        c_quizzes[:, 0] = self.token_backward
        c_quizzes[:, 1 + self.answer_len] = self.token_backward

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes, first=True),
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Then, we generate the prompt at low temperature

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Then, we reverse the quiz in time and re-generate the
        # answer, still at low temperature

        c_quizzes = self.reverse_time(c_quizzes)

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        return c_quizzes.to("cpu")
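
######################################################################

# A minimal end-to-end sketch of how this class may be driven (the
# problem object, model, and hyper-parameter values are illustrative
# assumptions, not part of this file; it also assumes prompt_len and
# answer_len have been set by a prior call to
# generate_token_sequences):
#
#   problem = ...  # an object providing nb_token_values(),
#                  # generate_prompts_and_answers(), and the
#                  # trivial/illustration hooks used above
#   quiz_machine = QuizMachine(
#       problem=problem,
#       nb_train_samples=25000,
#       nb_test_samples=1000,
#       back_accuracy=False,
#       batch_size=25,
#       result_dir="results",
#       logger=print,
#       device=torch.device("cuda"),
#   )
#   c_quizzes = quiz_machine.generate_quizzes(100, model_for_generation=model)
#   nb_correct, seq_logproba = quiz_machine.compute_correctness(
#       c_quizzes.to(quiz_machine.device), models_for_validation=[model]
#   )
#   quiz_machine.store_c_quizzes(c_quizzes[nb_correct == 1])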