[culture.git] / quiz_machine.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings, sys

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt
from mygpt import BracketedSequence

import threading
######################################################################
# If output is log P(X=y) and target is Y, returns
# -log P(X=Y) + H(X | X != Y)


# output is NxCxT and target is NxT
def confusion(output, target, reduction="mean"):
    N, C, T = output.shape
    output = output.permute(0, 2, 1).reshape(-1, C)
    target = target.flatten()
    all_t = torch.arange(N * T, device=output.device)
    output = output.log_softmax(dim=-1)
    result = -output[all_t, target]

    # Renormalize over the non-target classes to get log P(X | X != Y),
    # then add that distribution's entropy
    output[all_t, target] = float("-inf")
    output = output.log_softmax(dim=-1)
    e = output.exp()
    # Zero the -inf entries so that 0 * (-inf) does not produce NaNs
    output[all_t, target] = 0
    result = result - (output * e).sum(-1)

    if reduction == "none":
        return result.reshape(N, T)
    elif reduction == "mean":
        return result.reshape(N, T).mean()
    elif reduction == "sum":
        return result.reshape(N, T).sum()
    else:
        raise ValueError(f"unknown reduction '{reduction}'.")

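# Sanity check sketch (illustrative, not from the original file): with
# logits uniform over C = 5 classes, -log P(X=Y) = log 5 and
# H(X | X != Y) = log 4, so the mean should be close to
# log(5) + log(4) ~ 2.996, e.g.
#
#   output = torch.zeros(2, 5, 3).log_softmax(dim=1)  # NxCxT log-probs
#   target = torch.zeros(2, 3, dtype=torch.int64)     # NxT
#   confusion(output, target)                         # ~ tensor(2.9957)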

######################################################################

# ar_mask is a tensor with 0s and 1s, of the same shape as input, with
# 1s where tokens should be generated. The others are kept unchanged.


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache

    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        # Keep the index tensor on the same device as the logits
        all_n = torch.arange(t_next.size(0), device=t_next.device)

        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]

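# Note: this assumes ar_mask has at least one non-zero column; with an
# all-zero mask, to_generate is empty and to_generate.min() raises a
# RuntimeError.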

def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,  # currently unused
    logit_biases=None,  # currently unused
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
            )

        model.train(t)

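# Typical call (sketch; `model` is assumed to be a mygpt-style model
# that consumes BracketedSequence):
#
#   seqs = torch.zeros(100, 50, dtype=torch.int64)
#   mask = torch.zeros_like(seqs)
#   mask[:, 25:] = 1  # re-generate the second half, keep the first
#   logp = torch.zeros(seqs.size(0))
#   masked_inplace_autoregression(
#       model=model, batch_size=25, input=seqs, ar_mask=mask,
#       seq_logproba=logp, temperature=1.0, deterministic_synthesis=True,
#   )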

######################################################################


class QuizMachine:
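    # A quiz is a flat token sequence. In the forward orientation it is
    #
    #   [token_forward, prompt (prompt_len tokens), token_forward, answer (answer_len tokens)]
    #
    # and in the backward orientation
    #
    #   [token_backward, answer, token_backward, prompt]
    #
    # The two direction tokens are extra values added on top of the
    # problem's own vocabulary (see __init__).
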
    def indices_forward_and_backward(self, quizzes):
        i_forward = quizzes[:, 0] == self.token_forward
        j_forward = quizzes[:, 1 + self.prompt_len] == self.token_forward
        i_backward = quizzes[:, 0] == self.token_backward
        j_backward = quizzes[:, 1 + self.answer_len] == self.token_backward
        assert torch.logical_or(
            torch.logical_and(i_forward, j_forward),
            torch.logical_and(i_backward, j_backward),
        ).all()
        return i_forward, i_backward

    def non_trivial(self, quizzes):
        quizzes = quizzes.clone()
        n_backward = quizzes[:, 0] == self.token_backward
        quizzes[n_backward] = self.reverse_time(quizzes[n_backward])
        return torch.logical_not(
            self.problem.trivial_prompts_and_answers(
                quizzes[:, 1 : 1 + self.prompt_len],
                quizzes[:, 2 + self.prompt_len :],
            )
        )

    def reverse_time(self, quizzes):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        forward_to_backward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.prompt_len : 2 + self.prompt_len + self.answer_len],
                quizzes[:, 1 + self.prompt_len : 1 + self.prompt_len + 1],
                quizzes[:, 1 : 1 + self.prompt_len],
            ],
            dim=1,
        )

        forward_to_backward[:, 0] = self.token_backward
        forward_to_backward[:, 1 + self.answer_len] = self.token_backward

        backward_to_forward = torch.cat(
            [
                quizzes[:, 0:1],
                quizzes[:, 2 + self.answer_len :],
                quizzes[:, 1 + self.answer_len : 2 + self.answer_len],
                quizzes[:, 1 : 1 + self.answer_len],
            ],
            dim=1,
        )

        backward_to_forward[:, 0] = self.token_forward
        backward_to_forward[:, 1 + self.prompt_len] = self.token_forward

        m = i_forward.long()[:, None]

        return m * forward_to_backward + (1 - m) * backward_to_forward
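
    # Example (sketch): with prompt_len=3 and answer_len=2, the forward
    # quiz [F, p1, p2, p3, F, a1, a2] maps to [B, a1, a2, B, p1, p2, p3],
    # and applying reverse_time twice restores the original.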

    def reverse_random_half_in_place(self, quizzes):
        i = torch.rand(quizzes.size(0)) < 0.5
        if i.any():
            quizzes[i] = self.reverse_time(quizzes[i])

    def make_ar_mask(self, quizzes, first=False):
        i_forward, i_backward = self.indices_forward_and_backward(quizzes)

        t = torch.arange(quizzes.size(1), device=quizzes.device)

        if first:
            m_forward = (t >= 1).long() * (t < 1 + self.prompt_len).long()
            m_backward = (t >= 1).long() * (t < 1 + self.answer_len).long()
        else:
            m_forward = (t >= 2 + self.prompt_len).long()
            m_backward = (t >= 2 + self.answer_len).long()

        m = i_forward.long()[:, None]

        return m * m_forward + (1 - m) * m_backward
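
    # For a forward quiz the default mask selects the answer positions
    # (t >= 2 + prompt_len); with first=True it selects the prompt
    # positions instead. Backward quizzes are handled symmetrically,
    # with the roles of prompt and answer swapped.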

    def generate_token_sequences(self, nb):
        prompts, answers = self.problem.generate_prompts_and_answers(nb)

        if self.prompt_len is None:
            self.prompt_len = prompts.size(1)

        if self.answer_len is None:
            self.answer_len = answers.size(1)

        assert prompts.size(1) == self.prompt_len and answers.size(1) == self.answer_len

        result = []

        for prompt, answer in zip(prompts, answers):
            a = [
                torch.tensor([self.token_forward]),
                prompt,
                torch.tensor([self.token_forward]),
                answer,
            ]

            result.append(torch.cat(a, dim=0)[None, :])

        return torch.cat(result, dim=0)
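
    # Note: the sequences are always built in the forward orientation
    # here; reverse_random_half_in_place is what flips a random half of
    # them to the backward orientation.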

    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        back_accuracy,
        batch_size,
        result_dir,
        logger,
        device=torch.device("cpu"),
    ):
        super().__init__()

        v = problem.nb_token_values()
        self.token_forward = v
        self.token_backward = v + 1
        self.nb_token_values = v + 2

        self.problem = problem
        self.back_accuracy = back_accuracy
        self.batch_size = batch_size
        self.device = device
        self.logger = logger
        self.prompt_len = None
        self.answer_len = None

        self.LOCK_C_QUIZZES = threading.Lock()
        self.train_c_quizzes = []
        self.test_c_quizzes = []

    def save_quiz_illustrations(
        self,
        result_dir,
        filename_prefix,
        quizzes,
        mistakes=None,
    ):
        quizzes = quizzes.clone().to("cpu")
        n_forward = quizzes[quizzes[:, 0] == self.token_forward]
        n_backward = quizzes[:, 0] == self.token_backward
        backward = quizzes[n_backward]
        assert n_forward.size(0) + backward.size(0) == quizzes.size(0)
        quizzes[n_backward] = self.reverse_time(quizzes[n_backward])

        predicted_prompts = n_backward.long()
        predicted_answers = 1 - predicted_prompts
        if mistakes is not None:
            # 0/-1/+1 ~ not-to-predict / predicted wrong / predicted correct
            predicted_prompts *= mistakes.to("cpu")
            predicted_answers *= mistakes.to("cpu")
        else:
            # 0/2 ~ not-to-predict / to predict
            predicted_prompts *= 2
            predicted_answers *= 2

        self.problem.save_quiz_illustrations(
            result_dir,
            filename_prefix,
            quizzes[:, 1 : 1 + self.prompt_len],
            quizzes[:, 2 + self.prompt_len :],
            predicted_prompts,
            predicted_answers,
        )

    def vocabulary_size(self):
        return self.nb_token_values

    ######################################################################

    def batches(self, model, split="train", desc=None):
        assert split in {"train", "test"}

        with self.LOCK_C_QUIZZES:
            if split == "train":
                w_quizzes = model.train_w_quizzes
                c_quizzes = self.train_c_quizzes
            else:
                w_quizzes = model.test_w_quizzes
                c_quizzes = self.test_c_quizzes

            if len(c_quizzes) > 0:
                c_quizzes = torch.cat(c_quizzes, dim=0)
                if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                    i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                    c_quizzes = c_quizzes[i]

                i = torch.randperm(w_quizzes.size(0))[
                    : w_quizzes.size(0) - c_quizzes.size(0)
                ]
                w_quizzes = w_quizzes[i]

                self.nb_batch_w_quizzes = w_quizzes.size(0)
                self.nb_batch_c_quizzes = c_quizzes.size(0)

                input = torch.cat([w_quizzes, c_quizzes], dim=0)
            else:
                input = w_quizzes
                self.nb_batch_w_quizzes = w_quizzes.size(0)
                self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch
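
    # Mixing policy: the c_quizzes are capped at half of the pool and
    # the w_quizzes fill up the remainder, so generated quizzes never
    # make up more than half of what a model trains on.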

    ######################################################################

    def produce_results(
        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, log_prefix=None):
            input = input.to(self.device)
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            correct = torch.empty(input.size(0), dtype=torch.int64, device=input.device)

            n_forward = input[:, 0] == self.token_forward
            n_backward = input[:, 0] == self.token_backward

            correct[n_forward] = (
                (input[n_forward] == result[n_forward]).long().min(dim=1).values
            )

            if self.back_accuracy and n_backward.any():
                # accuracy of B->A*->B*=B instead of B->A*=A
                back_input = self.reverse_time(result[n_backward])
                back_input[:, 2 + self.prompt_len :] = input[
                    n_backward, 1 : 1 + self.answer_len
                ]
                _, correct[n_backward] = compute_accuracy(back_input)

            if log_prefix is not None:
                forward_nb_correct = correct[n_forward].sum()
                forward_nb_total = correct[n_forward].size(0)
                backward_nb_correct = correct[n_backward].sum()
                backward_nb_total = correct[n_backward].size(0)

                self.logger(
                    f"{log_prefix}_accuracy {n_epoch} model {model.id} forward {forward_nb_correct} / {forward_nb_total} backward {backward_nb_correct} / {backward_nb_total}"
                )

            return result, correct

        # compute_accuracy(model.train_w_quizzes[:nmax], log_prefix="train")

        test_result, test_correct = compute_accuracy(
            model.test_w_quizzes[:nmax], log_prefix="test"
        )

        main_test_accuracy = test_correct.sum() / test_correct.size(0)
        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        self.save_quiz_illustrations(
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            quizzes=test_result[:72],
            mistakes=test_correct[:72] * 2 - 1,
        )

        return main_test_accuracy

    ######################################################################

    def renew_w_quizzes(self, model, nb, for_train=True):
        input = model.train_w_quizzes if for_train else model.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        fresh_w_quizzes = self.generate_token_sequences(nb)
        self.reverse_random_half_in_place(fresh_w_quizzes)
        input[-nb:] = fresh_w_quizzes.to("cpu")

    ######################################################################

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        with self.LOCK_C_QUIZZES:
            if for_train:
                self.train_c_quizzes.append(new_c_quizzes.to("cpu"))
            else:
                self.test_c_quizzes.append(new_c_quizzes.to("cpu"))

    def save_c_quizzes(self, filename):
        torch.save((self.train_c_quizzes, self.test_c_quizzes), filename)

    def load_c_quizzes(self, filename):
        self.train_c_quizzes, self.test_c_quizzes = torch.load(filename)

    ######################################################################

    def logproba_of_solutions(self, models, c_quizzes):
        # Assumes model ids are 0, 1, ..., len(models) - 1
        logproba = c_quizzes.new_zeros(
            c_quizzes.size(0), len(models), device=self.device, dtype=torch.float32
        )

        for model in models:
            with torch.autograd.no_grad():
                t = model.training
                model.eval()

                for input, l in zip(
                    c_quizzes.split(self.batch_size), logproba.split(self.batch_size)
                ):
                    input = input.to(self.device)
                    ar_mask = self.make_ar_mask(input)
                    output = model(mygpt.BracketedSequence(input)).x
                    ce = (
                        F.cross_entropy(output.transpose(1, 2), input, reduction="none")
                        * ar_mask
                    )
                    l[:, model.id] = -ce.sum(dim=-1)

                model.train(t)

        return logproba.to("cpu")

    ###############################################################

    def compute_correctness(
        self,
        c_quizzes,
        models_for_validation,
        bidirectional_validation=False,
        deterministic_validation=True,
    ):
        if bidirectional_validation:
            backward_c_quizzes = self.reverse_time(c_quizzes)

        seq_logproba = torch.zeros(
            c_quizzes.size(0),
            max([m.id for m in models_for_validation]) + 1,
            device=self.device,
        )

        nb_correct = 0

        for model in models_for_validation:
            result = c_quizzes.clone()

            ar_mask = self.make_ar_mask(result)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba[:, model.id],
                temperature=1.0,
                deterministic_synthesis=deterministic_validation,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            if bidirectional_validation:
                backward_result = backward_c_quizzes.clone()

                ar_mask = self.make_ar_mask(backward_result)

                masked_inplace_autoregression(
                    model=model,
                    batch_size=self.batch_size,
                    input=backward_result,
                    ar_mask=ar_mask,
                    seq_logproba=seq_logproba[:, model.id],
                    temperature=1.0,
                    deterministic_synthesis=deterministic_validation,
                    # progress_bar_desc="solving backward c_quizzes",
                    device=self.device,
                )

                backward_correct = (
                    (backward_c_quizzes == backward_result).long().min(dim=-1).values
                )

                correct *= backward_correct

            nb_correct += correct

        return nb_correct, seq_logproba
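
    # After the loop, nb_correct holds, for every c_quiz, how many of
    # the validation models reproduced it exactly, and seq_logproba has
    # one column of per-sequence log-probabilities per model id.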

    ###############################################################

    def generate_quizzes(self, nb, model_for_generation, temperature=1.0):
        c_quizzes = torch.empty(
            nb,
            self.prompt_len + self.answer_len + 2,
            device=self.device,
            dtype=torch.int64,
        )

        seq_logproba = torch.zeros(nb, device=self.device)

        # First, we generate the answer at high temperature

        c_quizzes[:, 0] = self.token_backward
        c_quizzes[:, 1 + self.answer_len] = self.token_backward

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes, first=True),
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Then, we generate the prompt at low temperature

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        # Then, we reverse the quiz and re-generate the answer, now at
        # low temperature

        c_quizzes = self.reverse_time(c_quizzes)

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=self.make_ar_mask(c_quizzes),
            seq_logproba=seq_logproba,
            temperature=1 / temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        return c_quizzes.to("cpu")
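
######################################################################

# End-to-end sketch (illustrative; `problem` must implement the
# interface used above, and each model needs .id, .training, and the
# train_w_quizzes / test_w_quizzes buffers):
#
#   quiz_machine = QuizMachine(
#       problem=problem,
#       nb_train_samples=10000,
#       nb_test_samples=1000,
#       back_accuracy=False,
#       batch_size=25,
#       result_dir="results",
#       logger=print,
#       device=torch.device("cuda"),
#   )
#   for input in quiz_machine.batches(model, split="train"):
#       ...  # one optimization step on `input`
#   main_test_accuracy = quiz_machine.produce_results(
#       n_epoch=0, model=model, result_dir="results",
#       deterministic_synthesis=False,
#   )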