[culture.git] / quizz_machine.py
#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################


class Gang(nn.Module):
    def __init__(self, models, nb_models_for_generation, mode="groupthink"):
        super().__init__()
        self.models = models
        self.nb_models_for_generation = nb_models_for_generation
        self.mode = mode

    def forward(self, bs):
        # If first = 0, we are re-starting an auto-regressive process,
        # which is the right moment to re-pick at random the models to use
        if bs.first == 0:
            self.models_to_use = [
                self.models[k]
                for k in torch.randperm(len(self.models))[
                    : self.nb_models_for_generation
                ]
            ]

        all_the_logits = torch.cat(
            [model(bs).x[None] for model in self.models_to_use], dim=0
        )

        if self.mode == "groupthink":
            # Average the logits of the selected models
            y = all_the_logits.mean(dim=0)
        elif self.mode == "groupwork":
            # For every entry, pick the value of one of the selected
            # models uniformly at random
            m = torch.rand(all_the_logits.size(), device=all_the_logits.device)
            m = (m.sort(dim=0).indices == 0).long()
            y = (all_the_logits * m).sum(dim=0)
        else:
            raise ValueError(f"Invalid mode {self.mode}")

        return BracketedSequence(y, bs.first, bs.nb)

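# A minimal, hypothetical sketch of the "groupwork" selection trick used
# in Gang.forward: drawing i.i.d. uniform noise over the stacked logits
# and keeping, for every entry, the model whose noise ranks first picks
# one of the models uniformly at random.


def _demo_groupwork_mask(nb_models=3, shape=(2, 5)):
    all_the_logits = torch.randn(nb_models, *shape)
    m = torch.rand(all_the_logits.size())
    m = (m.sort(dim=0).indices == 0).long()
    assert (m.sum(dim=0) == 1).all()  # exactly one model selected per entry
    return (all_the_logits * m).sum(dim=0)
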
######################################################################

# ar_mask is a tensor of 0s and 1s, of the same shape as input, with 1s
# where tokens should be generated. The other tokens are kept unchanged.


def one_batch_masked_inplace_autoregression(
    model,
    input,
    ar_mask,
    seq_logproba,
    temperature=1.0,
    deterministic_synthesis=False,
    forbidden_tokens=None,
    forced_biases=None,
):
    to_generate = (ar_mask.sum(0) > 0).nonzero()

    if to_generate.min() > 0:
        model(
            BracketedSequence(input, 0, to_generate.min())
        )  # Needed to initialize the model's cache
    for s in range(to_generate.min(), to_generate.max() + 1):
        output = model(BracketedSequence(input, s, 1)).x

        logits = output[:, s]

        logits = (logits / temperature).log_softmax(dim=-1)

        if forbidden_tokens is not None:
            logits = logits.masked_fill(forbidden_tokens, float("-inf"))

        if forced_biases is not None:
            logits = logits + forced_biases[None, :]

        if deterministic_synthesis:
            t_next = logits.argmax(-1)
        else:
            dist = torch.distributions.categorical.Categorical(logits=logits)
            t_next = dist.sample()

        all_n = torch.arange(t_next.size(0))

        # Accumulate, for each sequence, the log-proba of its sampled token
        seq_logproba += logits[all_n, t_next]

        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]


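# A tiny, hypothetical example of the masked in-place update performed at
# each step above: positions where ar_mask is 1 receive the sampled token,
# the others keep their original value.


def _demo_masked_update():
    input = torch.tensor([[10, 11, 12, 13]])
    ar_mask = torch.tensor([[0, 0, 1, 1]])
    t_next = torch.tensor([99])
    s = 2
    input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
    return input  # tensor([[10, 11, 99, 13]])
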
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.no_grad():
        # Save the training state and switch to eval mode for generation
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            one_batch_masked_inplace_autoregression(
                model=model,
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)


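# A minimal usage sketch (hypothetical): any nn.Module mapping a
# BracketedSequence of token indices to a BracketedSequence of logits can
# be driven by masked_inplace_autoregression. The toy model below, which
# returns uniform logits, stands in for a trained mygpt model.


class _ToyUniformModel(nn.Module):
    def __init__(self, vocabulary_size=4):
        super().__init__()
        self.vocabulary_size = vocabulary_size

    def forward(self, bs):
        logits = torch.zeros(bs.x.size(0), bs.x.size(1), self.vocabulary_size)
        return BracketedSequence(logits, bs.first, bs.nb)


def _demo_masked_inplace_autoregression():
    input = torch.zeros(6, 8, dtype=torch.int64)
    ar_mask = torch.ones(6, 8, dtype=torch.int64)
    ar_mask[:, :4] = 0  # keep the first half, generate the second
    seq_logproba = torch.zeros(6)
    masked_inplace_autoregression(
        model=_ToyUniformModel(),
        batch_size=2,
        input=input,
        ar_mask=ar_mask,
        seq_logproba=seq_logproba,
        temperature=1.0,
        deterministic_synthesis=False,
    )
    return input  # second half filled with sampled tokens
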
######################################################################


class QuizzMachine:
    def make_ar_mask(self, input):
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

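    # For example (hypothetical values): on sequences of length 7,
    # make_ar_mask keeps the first half and marks the second half for
    # generation, so a (1, 7) input yields [[0, 0, 0, 0, 1, 1, 1]].
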
158     def __init__(
159         self,
160         problem,
161         nb_train_samples,
162         nb_test_samples,
163         batch_size,
164         result_dir,
165         logger,
166         device=torch.device("cpu"),
167     ):
168         super().__init__()
169
170         self.problem = problem
171         self.batch_size = batch_size
172         self.device = device
173         self.logger = logger
174
175         self.train_w_quizzes = self.problem.generate_token_sequences(
176             nb_train_samples
177         ).to(device)
178         self.test_w_quizzes = self.problem.generate_token_sequences(nb_test_samples).to(
179             device
180         )
181
182         self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1
183
184         self.train_c_quizzes = []
185         self.test_c_quizzes = []
186
187         if result_dir is not None:
188             self.problem.save_quizzes(
189                 self.train_w_quizzes[:72], result_dir, "culture_w_quizzes"
190             )
191
    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)

            # Cap the c_quizzes at half of the batch pool, and fill the
            # rest with randomly picked w_quizzes
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        self.logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)

        self.logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        self.logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.problem.save_quizzes(
            result[:72], result_dir, f"culture_prediction_{n_epoch:04d}_{model.id:02d}"
        )

        return main_test_accuracy

    def renew_w_quizzes(self, nb, for_train=True):
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = self.problem.generate_token_sequences(nb).to(self.device)

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def compute_correctness(self, c_quizzes, models_for_validation):
        # Create the reverse quizzes

        token_forward, token_backward = self.problem.direction_tokens()

        l = (c_quizzes.size(1) - 1) // 2
        direction = c_quizzes[:, l : l + 1]
        # Flip the direction token
        direction = token_forward * (direction == token_backward) + token_backward * (
            direction == token_forward
        )
        reverse_c_quizzes = torch.cat(
            [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
        )

        ar_mask = self.make_ar_mask(c_quizzes)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        # Check how many of the models can solve the quizzes in both directions

        nb_correct = 0

        for model in models_for_validation:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_c_quizzes.clone()

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                # progress_bar_desc="solving reversed c_quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
            )

            nb_correct += correct * reverse_correct

        return nb_correct

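    # For instance (hypothetical tokens), with l = 2 and a quiz
    # [a1, a2, d, b1, b2], compute_correctness builds the reverse quiz
    # [b1, b2, d', a1, a2], where d' is the direction token d flipped.
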
    ###############################################################

    def generate_quizzes(self, nb, model_for_generation, min_ave_seq_logproba):
        c_quizzes = torch.empty(
            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
        )

        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        # Bracketing of the temperature to get the target logproba

        temperature = 1
        d_temperature = 1 / 3

        while True:
            seq_logproba[...] = 0

            masked_inplace_autoregression(
                model=model_for_generation,
                batch_size=self.batch_size,
                input=c_quizzes,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=False,
                # progress_bar_desc="sampling c_quizzes",
                device=self.device,
            )

            ave_seq_logproba = seq_logproba.mean()

            # If we do not have a target logproba, get out now
            if min_ave_seq_logproba is None:
                break

            # Oh man that's ugly
            if ave_seq_logproba < min_ave_seq_logproba:
                # Too random: reverse and shrink the step if needed, then
                # decrease the temperature
                if d_temperature > 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
                # Too deterministic: reverse and shrink the step if needed,
                # then increase the temperature
                if d_temperature < 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            else:
                break

            self.logger(f"changing temperature to {temperature}")

        return c_quizzes, seq_logproba.mean()

    ######################################################################

    def create_c_quizzes(
        self,
        nb,
        model_for_generation,
        models_for_validation,
        min_ave_seq_logproba,
        n_epoch,
        result_dir,
    ):
        c_quizzes, ave_seq_logproba = self.generate_quizzes(
            nb, model_for_generation, min_ave_seq_logproba
        )

        nb_correct = self.compute_correctness(c_quizzes, models_for_validation)

        return c_quizzes, nb_correct, ave_seq_logproba

    ######################################################################

    def gang_create_c_quizzes(
        self,
        nb,
        nb_models_for_generation,
        models,
        mode,
        min_ave_seq_logproba,
        n_epoch,
        result_dir,
    ):
        model_for_generation = Gang(models, nb_models_for_generation, mode)
        models_for_validation = models
        return self.create_c_quizzes(
            nb,
            model_for_generation,
            models_for_validation,
            min_ave_seq_logproba,
            n_epoch,
            result_dir,
        )
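

######################################################################

# A toy, hypothetical illustration of the temperature bracketing used in
# QuizzMachine.generate_quizzes: the same update rule drives a scalar
# stand-in for "average sequence logproba as a function of temperature"
# into the target band [target, target * 0.99]. The function _logproba
# below is made up for the demo; in the real code the value comes from
# sampling with the model.


def _demo_temperature_bracketing(target=-10.0):
    temperature = 1
    d_temperature = 1 / 3

    def _logproba(t):
        return -12.0 * t  # hypothetical: logproba decreases with temperature

    while True:
        v = _logproba(temperature)
        if v < target:
            if d_temperature > 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        elif v > target * 0.99:
            if d_temperature < 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        else:
            return temperature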