#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
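    """Fill in place the positions of `input` where `ar_mask` is 1 by
    autoregressive sampling with `model`, processing the sequences in
    mini-batches of `batch_size` and collecting per-sequence
    log-probabilities in `seq_logproba`. The model is switched to eval
    mode under no_grad and restored to its previous mode afterward."""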
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            model.masked_inplace_autoregression(
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)


######################################################################


class Task:
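    """Minimal task interface: subclasses provide training/test batches,
    the vocabulary size, and an evaluation routine."""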
    def batches(self, split="train", nb_to_use=-1, desc=None):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################

import sky


class QuizzMachine(Task):
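    """Task built around the `sky` quiz generator. It maintains "world
    quizzes" (w_quizzes) sampled with sky.generate_seq and "culture
    quizzes" (c_quizzes) produced by the models themselves. Each quiz is
    a sequence of 2 * height * width + 1 tokens: two grids separated by
    a direction token (sky.token_forward / sky.token_backward)."""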
    def save_image(self, input, result_dir, filename, logger):
        img = sky.seq2img(input.to("cpu"), self.height, self.width)
        image_name = os.path.join(result_dir, filename)
        torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=6, padding=4)
        logger(f"wrote {image_name}")

    def save_quizzes(self, input, result_dir, filename_prefix, logger):
        self.save_image(input, result_dir, filename_prefix + ".png", logger)

    def make_ar_mask(self, input):
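        """Return a mask of `input`'s shape with 1 at the positions to be
        generated (strictly after the middle token, i.e. the second grid)
        and 0 elsewhere."""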
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir=None,
        logger=None,
        device=torch.device("cpu"),
    ):
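        """Generate the train and test world quizzes with sky.generate_seq
        on a fixed 6x8 grid, move them to `device`, and derive the
        vocabulary size from the largest token value."""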
        super().__init__()

        self.batch_size = batch_size
        self.device = device
        self.height = 6
        self.width = 8

        self.train_w_quizzes = sky.generate_seq(
            nb_train_samples, height=self.height, width=self.width
        ).to(device)

        self.test_w_quizzes = sky.generate_seq(
            nb_test_samples, height=self.height, width=self.width
        ).to(device)

        self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.save_quizzes(
                self.train_w_quizzes[:72], result_dir, "culture_w_quizzes", logger
            )

    def batches(self, split="train", desc=None):
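        """Yield shuffled mini-batches mixing world quizzes and stored
        culture quizzes. Culture quizzes are capped at half of the split
        and world quizzes are subsampled so that the total size of the
        split is preserved."""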
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
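        """Evaluate `model` on at most `nmax` train and test world quizzes:
        the second half of each quiz is regenerated autoregressively and a
        quiz counts as correct only if it is reconstructed exactly. Logs
        the accuracies, saves an image of predicted test quizzes, and
        returns the test accuracy."""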
        def compute_accuracy(input, logger=None):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.save_quizzes(
            result[:72],
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            logger,
        )

        return main_test_accuracy

    def renew_w_quizzes(self, nb, for_train=True):
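        """Refresh `nb` world quizzes in the train (or test) set: the
        oldest `nb` samples are dropped by shifting the tensor and `nb`
        freshly generated sequences are written at the end."""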
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = sky.generate_seq(nb, height=self.height, width=self.width).to(
            self.device
        )

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def create_c_quizzes(
        self,
        n_epoch,
        result_dir,
        logger,
        nb,
        model,
        other_models,
        min_ave_seq_logproba,
    ):
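        """Sample `nb` new culture quizzes from `model`, tuning the sampling
        temperature until the average sequence log-probability falls in the
        band defined by `min_ave_seq_logproba` (skipped if it is None).
        Then build the reversed quizzes, have every model in `other_models`
        solve both directions deterministically, and return the quizzes,
        the per-quiz count of models that solved both directions, and the
        mean sequence log-probability of the last autoregression pass."""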
        ###############################################################
        # Generate quizzes with model

        c_quizzes = torch.empty(
            nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
        )

        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        temperature = 1
        d_temperature = 1 / 3

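        # Crude feedback control on the sampling temperature: the step
        # d_temperature shrinks and flips sign whenever the average
        # sequence log-probability overshoots, until it lands between
        # min_ave_seq_logproba * 1.1 and min_ave_seq_logproba
        # (log-probabilities are negative, so the former is the lower
        # bound of the band)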
        while True:
            seq_logproba[...] = 0

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=c_quizzes,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=False,
                progress_bar_desc="sampling c_quizzes",
                device=self.device,
            )

            ave_seq_logproba = seq_logproba.mean()

            logger(f"{ave_seq_logproba=} {min_ave_seq_logproba=}")

            if min_ave_seq_logproba is None:
                break

            # Oh man that's ugly
            if ave_seq_logproba < min_ave_seq_logproba * 1.1:
                if d_temperature > 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            elif ave_seq_logproba > min_ave_seq_logproba:
                if d_temperature < 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            else:
                break

            logger(f"changing temperature to {temperature}")

        ###############################################################
        # Create the reverse quizzes
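        # A quiz is laid out as [grid A | direction token | grid B]; the
        # reverse quiz swaps the two grids and flips the direction token
        # between sky.token_forward and sky.token_backward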

        l = self.height * self.width
        direction = c_quizzes[:, l : l + 1]
        direction = sky.token_forward * (
            direction == sky.token_backward
        ) + sky.token_backward * (direction == sky.token_forward)
        reverse_c_quizzes = torch.cat(
            [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
        )

        ar_mask = self.make_ar_mask(c_quizzes)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)

        ###############################################################
        # Check how many of the other models can solve them in both
        # directions

        nb_correct = []

        for m in other_models:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving reversed c_quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
            )

            nb_correct.append((correct * reverse_correct)[None, :])

        nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)

        return c_quizzes, nb_correct, seq_logproba.mean()
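
######################################################################

# Minimal usage sketch (illustrative only: the argument values below are
# assumptions, and `model` stands for a trained model exposing the
# interface used above):
#
#   quizz_machine = QuizzMachine(
#       nb_train_samples=10000,
#       nb_test_samples=1000,
#       batch_size=25,
#       result_dir="results",
#       logger=print,
#       device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
#   )
#
#   for input in quizz_machine.batches(split="train"):
#       ...  # one training step on `input`
#
#   quizz_machine.produce_results(
#       n_epoch=0, model=model, result_dir="results", logger=print,
#       deterministic_synthesis=False,
#   )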