#!/usr/bin/env python

# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch import nn
from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################

def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
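    """Regenerate, in place, the tokens of input where ar_mask is 1.

    The actual sampling is delegated to model.masked_inplace_autoregression
    (defined in mygpt), batch by batch, with the model temporarily switched
    to eval mode and gradients disabled. Returns the summed logits reported
    by the model, which create_new_quizzes uses to tune its sampling
    temperature.
    """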
    assert input.size() == ar_mask.size()

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    sum_logits = 0

    with torch.no_grad():
        t = model.training
        model.eval()

        for input, ar_mask in batches:
            sum_logits += model.masked_inplace_autoregression(
                input=input,
                ar_mask=ar_mask,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )

        model.train(t)

    return sum_logits


######################################################################


class Task:
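    """Minimal task interface: subclasses provide train/test batches, the
    vocabulary size, and an evaluation hook (produce_results)."""
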
    def batches(self, split="train", nb_to_use=-1, desc=None):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis
    ):
        pass


######################################################################

import world


class World(Task):
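    """Task over synthetic 6x8 "world" grids generated by the world module.

    Each sample is a token sequence encoding a grid pair separated by a
    direction token; a model must regenerate the masked second half. The
    task also maintains pools of model-generated quizzes that get mixed
    into the training batches.
    """
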
    def save_image(self, input, result_dir, filename, logger):
        img = world.seq2img(input.to("cpu"), self.height, self.width)
        image_name = os.path.join(result_dir, filename)
        torchvision.utils.save_image(img.float() / 255.0, image_name, nrow=6, padding=4)
        logger(f"wrote {image_name}")

    def make_ar_mask(self, input):
        # 1 for the positions to generate (the second half of each
        # sequence, midpoint excluded), 0 for the conditioning prefix
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir=None,
        logger=None,
        device=torch.device("cpu"),
    ):
        super().__init__()

        self.batch_size = batch_size
        self.device = device
        self.height = 6
        self.width = 8

        self.train_input = world.generate_seq(
            nb_train_samples, height=self.height, width=self.width
        ).to(device)

        self.test_input = world.generate_seq(
            nb_test_samples, height=self.height, width=self.width
        ).to(device)

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1

        self.train_quizzes = []
        self.test_quizzes = []

        if result_dir is not None:
            self.save_image(
                self.train_input[:72], result_dir, "world_train.png", logger
            )

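    # Batches mix the original world samples with the stored quizzes:
    # quizzes are capped at half of the pool and world samples fill the
    # rest, so an epoch never consists of generated material only.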
    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            input = self.train_input
            quizzes = self.train_quizzes
        else:
            input = self.test_input
            quizzes = self.test_quizzes

        if len(quizzes) > 0:
            quizzes = torch.cat(quizzes, dim=0)
            if quizzes.size(0) > input.size(0) // 2:
                i = torch.randperm(quizzes.size(0))[: input.size(0) // 2]
                quizzes = quizzes[i]

            i = torch.randperm(input.size(0))[: input.size(0) - quizzes.size(0)]
            input = input[i]

            self.nb_batch_samples_world = input.size(0)
            self.nb_batch_samples_quizzes = quizzes.size(0)

            input = torch.cat([input, quizzes], dim=0)
        else:
            self.nb_batch_samples_world = input.size(0)
            self.nb_batch_samples_quizzes = 0

        # Shuffle
        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
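        # Accuracy = fraction of sequences whose masked second half is
        # regenerated exactly (min over token-wise matches), computed on
        # at most nmax train and test samples; a prediction image is then
        # saved and the test accuracy returned.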
        def compute_accuracy(input, logger=None):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct

        train_nb_total, train_nb_correct = compute_accuracy(self.train_input)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_input, logger)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################

        input = self.test_input[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.save_image(
            result[:72],
            result_dir,
            f"world_prediction_{n_epoch:04d}_{model.id:02d}.png",
            logger,
        )

        return main_test_accuracy

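    # Refresh the sample pool: drop the first nb samples, shift the
    # remainder to the front, and append nb freshly generated world
    # sequences at the end.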
    def renew_samples(self, nb, for_train=True):
        input = self.train_input if for_train else self.test_input
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = world.generate_seq(nb, height=self.height, width=self.width).to(
            self.device
        )

    def store_new_quizzes(self, new_quizzes, for_train=True):
        if for_train:
            self.train_quizzes.append(new_quizzes)
        else:
            self.test_quizzes.append(new_quizzes)

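    # Quiz creation: sample nb full sequences from model (every position
    # is masked), optionally re-sampling with an adjusted temperature
    # until the average logits match desired_average_logits, then count,
    # for each quiz, how many models in other_models solve it
    # deterministically in both the forward and the reversed direction.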
    def create_new_quizzes(
        self,
        n_epoch,
        result_dir,
        logger,
        nb,
        model,
        other_models,
        desired_average_logits=None,
    ):
        ###############################################################
        # Generate quizzes with model

        quizzes = torch.empty(
            nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
        )

        ar_mask = torch.full(quizzes.size(), 1, device=self.device)

        temperature = 1
        d_temperature = 1

        while True:
            sum_logits = masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=quizzes,
                ar_mask=ar_mask,
                temperature=temperature,
                deterministic_synthesis=False,
                progress_bar_desc="creating quizzes",
                device=self.device,
            )

            average_logits = sum_logits / quizzes.size(0)

            logger(f"{average_logits=} {desired_average_logits=}")

            if desired_average_logits is None:
                break

            # Stopping tolerance (assumed): 10% relative error is an
            # arbitrary guard so that the search terminates
            if abs(average_logits - desired_average_logits) <= 0.1 * abs(
                desired_average_logits
            ):
                break

            # Oh man that's ugly: crude bisection on the temperature,
            # halving and flipping the step whenever we overshoot
            if average_logits > desired_average_logits:
                if d_temperature < 0:
                    d_temperature *= -0.5
                temperature += d_temperature
            else:
                if d_temperature > 0:
                    d_temperature *= -0.5
                temperature += d_temperature

            logger(f"changing temperature to {temperature}")

        ###############################################################
        # Create the reverse quizzes

        l = self.height * self.width
        # Flip the direction token sitting between the two grids
        direction = quizzes[:, l : l + 1]
        direction = world.token_forward * (
            direction == world.token_backward
        ) + world.token_backward * (direction == world.token_forward)
        # Reversed quiz = second grid, flipped direction token, first grid
        reverse_quizzes = torch.cat(
            [quizzes[:, l + 1 :], direction, quizzes[:, :l]], dim=1
        )

        ar_mask = self.make_ar_mask(quizzes)

        ###############################################################
        # Check how many of the other models can solve them in both
        # directions

        nb_correct = []

        for m in other_models:
            result = quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving quizzes",
                device=self.device,
            )

            correct = (quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_quizzes.clone()

            masked_inplace_autoregression(
                model=m,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                temperature=1.0,
                deterministic_synthesis=True,
                progress_bar_desc="solving reversed quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_quizzes == reverse_result).long().min(dim=-1).values
            )

            nb_correct.append((correct * reverse_correct)[None, :])

        nb_correct = torch.cat(nb_correct, dim=0)

        # filename = os.path.join(result_dir, f"correct_{n_epoch:04d}.dat")
        # with open(filename, "w") as f:
        #     for k in nb_correct:
        #         f.write(f"{k}\n")

        return quizzes, nb_correct.sum(dim=0), sum_logits