#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import itertools, math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    # Regenerate in-place the entries of input selected by ar_mask,
    # processing the samples by mini-batches of size batch_size. The
    # model is put in eval mode for the duration of the sampling and
    # restored to its previous training/eval state afterward.

    assert input.size() == ar_mask.size()

    # Split seq_logproba along with the input so that each mini-batch
    # writes its log-probabilities into its own slice (assuming the
    # model fills, per sample, the tensor it is given).
    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        itertools.repeat(None)
        if seq_logproba is None
        else seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask, seq_logproba in batches:
            model.masked_inplace_autoregression(
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)


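# A minimal usage sketch (hypothetical tensors and model, assuming a
# mygpt-style model exposing masked_inplace_autoregression): regenerate
# the masked half of a batch of sequences at temperature 1.
#
#     ar_mask = task.make_ar_mask(quizzes)
#     result = quizzes.clone() * (1 - ar_mask)
#     masked_inplace_autoregression(
#         model=model,
#         batch_size=25,
#         input=result,
#         ar_mask=ar_mask,
#         seq_logproba=None,
#         temperature=1.0,
#         deterministic_synthesis=False,
#     )
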
######################################################################


class Task:
    # Interface that the concrete tasks below are expected to implement.

    def batches(self, split="train", nb_to_use=-1, desc=None):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


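# A sketch of what a concrete task provides (see World below for the
# real one): batches of token sequences, the vocabulary size, and an
# evaluation routine.
#
#     class MyTask(Task):
#         def batches(self, split="train", nb_to_use=-1, desc=None):
#             ...  # yield LongTensor batches of token sequences
#
#         def vocabulary_size(self):
#             ...  # number of distinct token values
#
#         def produce_results(
#             self, n_epoch, model, result_dir, logger, deterministic_synthesis
#         ):
#             ...  # evaluate the model, write artifacts, log metrics
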
######################################################################

import world


class World(Task):
    def save_image(self, input, result_dir, filename, logger):
        img = world.seq2img(input.to("cpu"), self.height, self.width)
        image_name = os.path.join(result_dir, filename)
        torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=6, padding=4)
        logger(f"wrote {image_name}")

    def save_quizzes(self, input, result_dir, filename_prefix, logger):
        self.save_image(input, result_dir, filename_prefix + ".png", logger)

    def make_ar_mask(self, input):
        # Mask the second half of each sequence, i.e. the second grid
        # of the quiz, which is the part the model has to generate.
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

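    # Illustration: with height=6 and width=8 the sequences have length
    # 2 * 6 * 8 + 1 = 97, and the mask is 0 on positions 0..48 (first
    # grid and direction token) and 1 on positions 49..96 (the second
    # grid, which the model must generate).
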
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir=None,
        logger=None,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device
        self.height = 6
        self.width = 8

        self.train_w_quizzes = world.generate_seq(
            nb_train_samples, height=self.height, width=self.width
        ).to(device)

        self.test_w_quizzes = world.generate_seq(
            nb_test_samples, height=self.height, width=self.width
        ).to(device)

        self.nb_codes = (
            int(max(self.train_w_quizzes.max(), self.test_w_quizzes.max())) + 1
        )

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.save_quizzes(
                self.train_w_quizzes[:72], result_dir, "culture_w_quizzes", logger
            )

    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            # Cap the c_quizzes at half of the epoch, and shrink the
            # w_quizzes so that the total number of sequences streamed
            # per epoch stays constant.
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

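    # Numerical illustration (hypothetical sizes): with 1000 w_quizzes
    # and 700 stored c_quizzes, the c_quizzes are subsampled to 500 and
    # the w_quizzes to 500, so an epoch always streams w_quizzes.size(0)
    # sequences, at most half of them c_quizzes.
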
    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=None,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            # A sequence is counted as correct only if it is predicted
            # token-for-token.
            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.2f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.2f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################
        # Save a sample of the predictions as an image

        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=None,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.save_quizzes(
            result[:72],
            result_dir,
            f"culture_prediction_{n_epoch:04d}_{model.id:02d}",
            logger,
        )

        return main_test_accuracy

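    # A toy check of the exact-match test used in compute_accuracy:
    #
    #     a = torch.tensor([[1, 2, 3], [1, 2, 3]])
    #     b = torch.tensor([[1, 2, 3], [1, 9, 3]])
    #     (a == b).long().min(dim=1).values  # -> tensor([1, 0])
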
    def renew_w_quizzes(self, nb, for_train=True):
        # Replace the nb oldest quizzes with freshly generated ones,
        # shifting the rest toward the beginning of the buffer.
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = world.generate_seq(nb, height=self.height, width=self.width).to(
            self.device
        )

    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)

    def create_c_quizzes(
        self,
        n_epoch,
        result_dir,
        logger,
        nb,
        model,
        other_models,
        min_ave_seq_logproba=None,
    ):
        ###############################################################
        # Generate quizzes with model

        c_quizzes = torch.empty(
            nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
        )

        # The mask covers the full sequence: the whole quiz is sampled.
        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
        seq_logproba = torch.empty(nb, device=self.device)

        # If min_ave_seq_logproba is given, adjust the sampling
        # temperature with a damped bang-bang search until the average
        # sequence log-probability lands in the target band.
        temperature = 1
        d_temperature = 1

        while True:
            seq_logproba[...] = 0

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=c_quizzes,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=False,
                progress_bar_desc="sampling c_quizzes",
                device=self.device,
            )

            ave_seq_logproba = seq_logproba.mean()

            logger(f"{ave_seq_logproba=} {min_ave_seq_logproba=}")

            if min_ave_seq_logproba is None:
                break

            # Oh man that's ugly
            if ave_seq_logproba < min_ave_seq_logproba * 1.1:
                if d_temperature > 0:
                    d_temperature *= -0.5
                temperature += d_temperature
            elif ave_seq_logproba > min_ave_seq_logproba:
                if d_temperature < 0:
                    d_temperature *= -0.5
                temperature += d_temperature
            else:
                break

            logger(f"changing temperature to {temperature}")

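        # Sketch of the adjustment above (hypothetical values, with
        # negative log-probabilities): starting from temperature=1,
        # d_temperature=1, if the samples are too unlikely the step
        # flips sign and halves (d=-0.5, T=0.5); if they then become
        # too likely it flips again (d=0.25, T=0.75), so the temperature
        # oscillates with shrinking steps around the target band.
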
        ###############################################################
        # Create the reverse quizzes
        #
        # A quiz is [grid A | direction token | grid B]; its reverse is
        # [grid B | flipped direction token | grid A].

        hw = self.height * self.width
        direction = c_quizzes[:, hw : hw + 1]
        direction = world.token_forward * (
            direction == world.token_backward
        ) + world.token_backward * (direction == world.token_forward)
        reverse_c_quizzes = torch.cat(
            [c_quizzes[:, hw + 1 :], direction, c_quizzes[:, :hw]], dim=1
        )

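        # Illustration (hypothetical token values): with token_forward=0
        # and token_backward=1, a direction entry 0 maps to
        # 0 * (0 == 1) + 1 * (0 == 0) = 1 and an entry 1 maps to 0,
        # i.e. the direction token is swapped.
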
        # The same mask (second half of the sequence) applies to both
        # the original and the reversed quizzes, since it depends only
        # on the sequence shape.
        ar_mask = self.make_ar_mask(c_quizzes)

        ###############################################################
        # Check how many of the other models can solve them in both
        # directions

        nb_correct = []

        for m in other_models:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=None,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_c_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                seq_logproba=None,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving reversed c_quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
            )

            # A model gets credit for a quiz only if it solves it in
            # both directions.
            nb_correct.append((correct * reverse_correct)[None, :])

        # For each quiz, the number of other models that solved it in
        # both directions.
        nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)

        return c_quizzes, nb_correct, seq_logproba.mean()
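

######################################################################

# A minimal end-to-end sketch (hypothetical names and values, assuming
# mygpt-style models):
#
#     task = World(
#         nb_train_samples=10000,
#         nb_test_samples=1000,
#         batch_size=25,
#         result_dir="results",
#         logger=print,
#         device=torch.device("cuda"),
#     )
#     others = [m for m in models if m is not model]
#     c_quizzes, nb_correct, ave_logproba = task.create_c_quizzes(
#         n_epoch=0,
#         result_dir="results",
#         logger=print,
#         nb=100,
#         model=model,
#         other_models=others,
#     )
#     # keep only the quizzes that every other model solved both ways
#     task.store_c_quizzes(c_quizzes[nb_correct == len(others)])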