#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
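    """Regenerate in place the masked tokens of `input`, batch by batch.

    Positions where `ar_mask` is 1 are resampled autoregressively by
    `model`; positions where it is 0 are left untouched. Sampling runs
    without gradients, with the model temporarily switched to eval mode;
    the per-sequence log-probabilities are expected to be written into
    `seq_logproba` by the model's own `masked_inplace_autoregression`.
    """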
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            model.masked_inplace_autoregression(
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)


######################################################################


class QuizzMachine:
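    """Holds a pool of "world" quizzes (w_quizzes) generated by the
    problem, plus a growing pool of "culture" quizzes (c_quizzes)
    created by the models themselves, with utilities to batch them,
    evaluate a model, and create new c_quizzes."""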
    def make_ar_mask(self, input):
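        """Return a mask that is 1 on the second half of each sequence
        (the part to generate) and 0 on the first half (the prompt)."""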
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir=None,
        logger=None,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.problem = problem
        self.batch_size = batch_size
        self.device = device

        self.train_w_quizzes = self.problem.generate_seq(nb_train_samples).to(device)
        self.test_w_quizzes = self.problem.generate_seq(nb_test_samples).to(device)

        self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.problem.save_quizzes(
                self.train_w_quizzes[:72], result_dir, "culture_w_quizzes"
            )

    def batches(self, split="train", desc=None):
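        """Yield shuffled batches mixing w_quizzes with the stored
        c_quizzes. The c_quizzes are capped at half of the pool, and the
        w_quizzes are subsampled so that the total keeps the size of the
        original w_quizz set."""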
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                # Keep at most half the pool as c_quizzes; the permutation
                # must be over the c_quizz indices, not the w_quizz ones
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
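        """Compute and log the prediction accuracy of `model` on up to
        `nmax` train and test w_quizzes, save a sample of the test
        predictions through `problem.save_quizzes`, and return the test
        accuracy."""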
        def compute_accuracy(input, logger=None):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
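            # Erase the part to be predicted, keeping only the prompt half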
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.problem.save_quizzes(
            result[:72], result_dir, f"culture_prediction_{n_epoch:04d}_{model.id:02d}"
        )

        return main_test_accuracy

    def renew_w_quizzes(self, nb, for_train=True):
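        """Drop the `nb` oldest w_quizzes, shift the remaining ones
        toward the front of the buffer, and generate fresh ones to fill
        the tail."""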
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = self.problem.generate_seq(nb).to(self.device)

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
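        """Append a batch of freshly created c_quizzes to the train or
        test pool."""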
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def create_c_quizzes(
        self,
        n_epoch,
        result_dir,
        logger,
        nb,
        model,
        other_models,
        min_ave_seq_logproba,
    ):
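        """Sample `nb` c_quizzes from `model`, adjusting the sampling
        temperature until the average sequence log-probability reaches
        `min_ave_seq_logproba` (if it is not None), then count how many
        of the `other_models` solve each quiz in both directions.

        Returns the quizzes, the per-quiz number of other models that
        solved them both ways, and the average log-probability of the
        sampled sequences."""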
        ###############################################################
        # Generate quizzes with model

        c_quizzes = torch.empty(
            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
        )

        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        temperature = 1
        d_temperature = 1 / 3

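        # Crude adaptive search on the temperature: the step flips sign
        # and shrinks by a factor 3 whenever the average log-probability
        # crosses the target band, until it settles inside it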
        while True:
            seq_logproba[...] = 0

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=c_quizzes,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=False,
                progress_bar_desc="sampling c_quizzes",
                device=self.device,
            )

            ave_seq_logproba = seq_logproba.mean()

            if min_ave_seq_logproba is None:
                break

            # Oh man that's ugly
            if ave_seq_logproba < min_ave_seq_logproba:
                if d_temperature > 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
                if d_temperature < 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            else:
                break

            logger(f"changing temperature to {temperature}")

        ###############################################################
        # Create the reverse quizzes

        token_forward, token_backward = self.problem.direction_tokens()

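        # A quiz is [first half | direction token | second half]; the
        # reverse quiz swaps the two halves and flips the direction token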
        l = (c_quizzes.size(1) - 1) // 2
        direction = c_quizzes[:, l : l + 1]
        direction = self.problem.token_forward * (
            direction == self.problem.token_backward
        ) + self.problem.token_backward * (direction == self.problem.token_forward)
        reverse_c_quizzes = torch.cat(
            [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
        )

        ar_mask = self.make_ar_mask(c_quizzes)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        ###############################################################
        # Check how many of the other models can solve them in both
        # directions

        nb_correct = []

        for m in other_models:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving reversed c_quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
            )

            nb_correct.append((correct * reverse_correct)[None, :])

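        # nb_correct[i] is the number of other models that solved quiz i
        # in both directions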
        nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)

        # Return the generation-time average log-probability, not the
        # content of seq_logproba, which was overwritten by the solving
        # passes above
        return c_quizzes, nb_correct, ave_seq_logproba