[culture.git] / quizz_machine.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt
from mygpt import BracketedSequence

######################################################################

# ar_mask is a tensor of 0s and 1s, of the same shape as input, with
# 1s at the positions where tokens should be generated. The other
# positions are kept unchanged.
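#
# For example (illustrative values only): with input = [[3, 7, 5, 5]]
# and ar_mask = [[0, 0, 1, 1]], the first two tokens are left as they
# are and the last two are re-sampled autoregressively by the model.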


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature=1.0,
    deterministic_synthesis=False,
    forbidden_tokens=None,
    forced_biases=None,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache

    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if forbidden_tokens is not None:
            logits = logits.masked_fill(forbidden_tokens, float("-inf"))

        if forced_biases is not None:
            logits = logits + forced_biases[None, :]

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        # Accumulate, for each sequence, the log-probability of the
        # token chosen at this position; logits[all_n, t_next] is
        # already of shape (N,), so no reduction over the batch
        all_n = torch.arange(t_next.size(0), device=t_next.device)
        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)

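# A minimal usage sketch (sizes and the model are illustrative
# assumptions, not part of this file):
#
#   input = torch.zeros(100, 25, dtype=torch.int64)
#   ar_mask = torch.ones(input.size(), dtype=torch.int64)
#   seq_logproba = torch.zeros(input.size(0))
#
#   masked_inplace_autoregression(
#       model=model, batch_size=25, input=input, ar_mask=ar_mask,
#       seq_logproba=seq_logproba, temperature=1.0,
#       deterministic_synthesis=False, progress_bar_desc="sampling",
#   )
#
#   # input now holds the sampled sequences, seq_logproba their
#   # per-sequence log-probabilities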

######################################################################


class QuizzMachine:
    def make_ar_mask(self, input):
        # 1s on the second half of each sequence: the direction token
        # and the first half are kept, the second half is generated
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

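    # For instance (illustrative length): for sequences of size 7, that
    # is one direction token followed by two 3-token halves, the mask of
    # every row is [0, 0, 0, 0, 1, 1, 1]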
    def generate_token_sequences(self, nb):
        prompts, answers = self.problem.generate_prompts_and_answers(nb)
        result = []

        for prompt, answer in zip(prompts, answers):
            if torch.rand(1) < 0.5:
                a = [torch.tensor([self.token_forward]), prompt, answer]
            else:
                a = [torch.tensor([self.token_backward]), answer, prompt]

            result.append(torch.cat(a, dim=0)[None, :])

        return torch.cat(result, dim=0)

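    # Each returned row is hence either [token_forward, prompt, answer]
    # or [token_backward, answer, prompt], the direction being chosen
    # uniformly at random so that models learn to complete quizzes in
    # both time directions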
    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir,
        logger,
        device=torch.device("cpu"),
    ):
        super().__init__()

        v = problem.nb_token_values()
        self.token_forward = v
        self.token_backward = v + 1
        self.nb_token_values = v + 2

        self.problem = problem
        self.batch_size = batch_size
        self.device = device
        self.logger = logger

        self.train_w_quizzes = self.generate_token_sequences(nb_train_samples).to(
            device
        )

        self.test_w_quizzes = self.generate_token_sequences(nb_test_samples).to(device)

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.save_quizzes(
                result_dir, "culture_w_quizzes", self.train_w_quizzes[:72]
            )

    def save_quizzes(self, result_dir, filename_prefix, quizzes, prediction=False):
        # A quiz is [direction token, first half, second half]; the
        # direction token tells which half is the prompt
        l = (quizzes.size(1) - 1) // 2
        forward = (quizzes[:, 0] == self.token_forward).long()
        backward = (quizzes[:, 0] == self.token_backward).long()
        assert forward.equal(1 - backward)
        first = quizzes[:, 1 : 1 + l]
        second = quizzes[:, 1 + l : 1 + 2 * l]
        prompts = forward[:, None] * first + backward[:, None] * second
        answers = forward[:, None] * second + backward[:, None] * first

        if prediction:
            predicted_prompts = backward
            predicted_answers = forward
        else:
            predicted_prompts = None
            predicted_answers = None

        self.problem.save_quizzes(
            result_dir,
            filename_prefix,
            prompts,
            answers,
            predicted_prompts,
            predicted_answers,
        )

    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

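    # Batches hence mix the world quizzes (w_quizzes) from the problem
    # generator with the stored culture quizzes (c_quizzes), the latter
    # capped at half of the total so that generated quizzes never crowd
    # out the original problem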
    def vocabulary_size(self):
        return self.nb_token_values

    def produce_results(
        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            # Must be zero-initialized since it is accumulated into
            seq_logproba = torch.zeros(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            # A sequence counts as correct only if all its tokens match
            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        self.logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.2f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)

        self.logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.2f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.zeros(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.save_quizzes(
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            quizzes=result[:72],
            prediction=True,
        )

        return main_test_accuracy

    def renew_w_quizzes(self, nb, for_train=True):
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        if nb == 0:
            return
        # Shift the quizzes left by nb, dropping the nb oldest ones, and
        # refill the freed tail slots with freshly generated sequences
        input[:-nb] = input[nb:].clone()
        input[-nb:] = self.generate_token_sequences(nb).to(self.device)

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def reverse_time(self, c_quizzes):
        l = (c_quizzes.size(1) - 1) // 2

        # Swap the two direction tokens...
        direction = c_quizzes[:, 0:1]
        direction = self.token_forward * (
            direction == self.token_backward
        ) + self.token_backward * (direction == self.token_forward)

        # ...and swap the two halves of each sequence
        return torch.cat(
            [direction, c_quizzes[:, l + 1 :], c_quizzes[:, 1 : l + 1]], dim=1
        )

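    # For instance (schematic tokens): applied to a row
    # [token_backward, a1, a2, a3, p1, p2, p3], reverse_time returns
    # [token_forward, p1, p2, p3, a1, a2, a3]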
    def compute_correctness(
        self, c_quizzes, models_for_validation, both_directions=True
    ):
        reversed_c_quizzes = self.reverse_time(c_quizzes)

        ar_mask = self.make_ar_mask(c_quizzes)
        seq_logproba = torch.zeros(ar_mask.size(0), device=self.device)

        # Check how many of the models can solve the quizzes in both directions

        nb_correct = 0

        for model in models_for_validation:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            if both_directions:
                reversed_result = reversed_c_quizzes.clone()

                masked_inplace_autoregression(
                    model=model,
                    batch_size=self.batch_size,
                    input=reversed_result,
                    ar_mask=ar_mask,
                    seq_logproba=seq_logproba,
                    temperature=1.0,
                    deterministic_synthesis=True,
                    # progress_bar_desc="solving reversed c_quizzes",
                    device=self.device,
                )

                reversed_correct = (
                    (reversed_c_quizzes == reversed_result).long().min(dim=-1).values
                )

                # A quiz counts as solved by a model only if the model
                # also solves its time-reversed version
                correct *= reversed_correct

            nb_correct += correct

        return nb_correct

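    # nb_correct is hence a per-quiz count of how many validation models
    # reproduced the quiz exactly. A hypothetical use (names
    # illustrative, not from this file): keep the quizzes that every
    # validation model solves,
    #
    #   nb_correct = quizz_machine.compute_correctness(c_quizzes, models)
    #   to_keep = c_quizzes[nb_correct == len(models)]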
    ###############################################################

    def generate_quizzes(self, nb, model_for_generation, reverse_cleanup=False):
        c_quizzes = torch.empty(
            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
        )

        # ar_mask_first selects the first half of each sequence,
        # ar_mask_second the second half; neither selects the direction
        # token at position 0
        ar_mask_first = torch.zeros(
            c_quizzes.size(), device=self.device, dtype=torch.int64
        )
        ar_mask_first[:, : ar_mask_first.size(1) // 2 + 1] = 1
        ar_mask_second = 1 - ar_mask_first
        ar_mask_first[:, 0] = 0
        ar_mask_second[:, 0] = 0

        # Must be zero-initialized since it is accumulated into
        seq_logproba = torch.zeros(ar_mask_first.size(0), device=self.device)

        if reverse_cleanup:
            temperature = 10.0
        else:
            temperature = 1.0

        # First, we generate the answer, at high temperature when
        # reverse_cleanup is set

        c_quizzes[:, 0] = self.token_backward

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=ar_mask_first,
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=False,
            device=self.device,
        )

        ave_seq_logproba = seq_logproba.mean()

        # Then, we generate the prompt deterministically

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=ar_mask_second,
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=True,
            device=self.device,
        )

        # Finally, we flip the quiz in time and re-generate the answer,
        # now deterministically

        c_quizzes = self.reverse_time(c_quizzes)

        masked_inplace_autoregression(
            model=model_for_generation,
            batch_size=self.batch_size,
            input=c_quizzes,
            ar_mask=ar_mask_second,
            seq_logproba=seq_logproba,
            temperature=temperature,
            deterministic_synthesis=True,
            device=self.device,
        )

        return c_quizzes, seq_logproba.mean()
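
######################################################################

# A minimal sketch of the intended loop (hypothetical names, not part of
# this file): one model generates candidate quizzes, the other models
# validate them, and the validated ones are stored for training:
#
#   c_quizzes, _ = quizz_machine.generate_quizzes(100, model_for_generation=model)
#   nb_correct = quizz_machine.compute_correctness(c_quizzes, other_models)
#   quizz_machine.store_c_quizzes(c_quizzes[nb_correct == len(other_models)])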