3 # Any copyright is dedicated to the Public Domain.
4 # https://creativecommons.org/publicdomain/zero/1.0/
6 # Written by Francois Fleuret <francois@fleuret.org>
8 import math, os, tqdm, warnings
10 import torch, torchvision
13 from torch.nn import functional as F
15 from mygpt import BracketedSequence
17 ######################################################################
# Run masked in-place autoregressive generation over `input` in mini-batches.
# Positions flagged by `ar_mask` are (re)generated by the model; the rest of
# each sequence is left untouched. Operates in place on `input`.
#
# NOTE(review): this extract is missing source lines (the embedded original
# line numbers jump) — the leading parameters (model, batch_size, input,
# ar_mask, summed_logits, temperature, logit_biases, ...), the tqdm wrapper
# construction, and parts of the loop body are not visible; the comments
# below describe only what the visible lines establish.
20 def masked_inplace_autoregression(
27 deterministic_synthesis,
28 forbidden_tokens=None,
30 progress_bar_desc="autoregression",
31 device=torch.device("cpu"),
# The mask must align position-for-position with the input, since it selects
# which tokens get regenerated.
33 assert input.size() == ar_mask.size()
35 batches = zip(input.split(batch_size), ar_mask.split(batch_size))
# Optionally wrap the batch iterator in a tqdm progress bar; passing
# progress_bar_desc=None disables it.
37 if progress_bar_desc is not None:
41 desc=progress_bar_desc,
# Ceiling division: total number of mini-batches.
42 total=(input.size(0) + batch_size - 1) // batch_size,
# Generation only — no gradients tracked.
45 with torch.autograd.no_grad():
49 for input, ar_mask in batches:
# Delegate per-token sampling to the model; each batch tensor is a view into
# `input`, so the modification happens in place.
50 model.masked_inplace_autoregression(
53 summed_logits=summed_logits,
54 temperature=temperature,
55 deterministic_synthesis=deterministic_synthesis,
56 forbidden_tokens=forbidden_tokens,
57 forced_biases=logit_biases,
63 ######################################################################
# Fragment of an abstract task interface: batch iteration, vocabulary size,
# and result production.
# NOTE(review): the enclosing class statement and the method bodies are
# missing from this extract — these are presumably abstract/stub methods;
# confirm against the full file.
67 def batches(self, split="train", nb_to_use=-1, desc=None):
70 def vocabulary_size(self):
# Signature fragment of a produce_results-style method (its `def` line is
# missing from this extract).
74 self, n_epoch, model, result_dir, logger, deterministic_synthesis
def save_image(self, input, result_dir, filename, logger):
    """Render a batch of token sequences as a tiled image and save it.

    The sequences are decoded to pixels via world.seq2img (using the task's
    height/width), tiled six per row, written under result_dir/filename,
    and the resulting path is reported through `logger`.
    """
    image_name = os.path.join(result_dir, filename)
    grid = world.seq2img(input.to("cpu"), self.height, self.width)
    # save_image expects floats in [0, 1]; the decoded grid is uint8 pixels.
    torchvision.utils.save_image(grid.float() / 255.0, image_name, nrow=6, padding=4)
    logger(f"wrote {image_name}")
def make_ar_mask(self, input):
    """Return a long mask, shaped like `input`, selecting the second half.

    Positions strictly past the sequence midpoint are 1 (to be generated
    autoregressively); the first half is 0 (kept as the prompt).
    """
    positions = torch.arange(input.size(1), device=input.device)
    second_half = (positions > input.size(1) // 2).long()
    # Broadcast the per-position row across the whole batch.
    return second_half[None, :].expand_as(input)
# Fragment of the task constructor: generates the train/test world sequences,
# derives the vocabulary size, initializes empty quiz pools, and optionally
# saves a preview image of the first training samples.
# NOTE(review): the `def __init__(...)` line, the height/width assignments,
# and the `.to(device)` transfers are missing from this extract — confirm
# against the full file.
102 device=torch.device("cpu"),
106 self.batch_size = batch_size
# Synthesize the raw training/test sequences from the world generator.
111 self.train_input = world.generate_seq(
112 nb_train_samples, height=self.height, width=self.width
115 self.test_input = world.generate_seq(
116 nb_test_samples, height=self.height, width=self.width
# The token vocabulary must cover every value seen in either split.
119 self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
# Model-generated quizzes accumulated over training (lists of tensors,
# concatenated lazily in batches()).
121 self.train_quizzes = []
122 self.test_quizzes = []
# Save a preview grid of the first 72 training samples when a result
# directory is provided.
124 if result_dir is not None:
126 self.train_input[:72], result_dir, f"world_train.png", logger
# Yield shuffled mini-batches for the chosen split, mixing stored quizzes
# into the world samples: quizzes are capped at half of the pool, and the
# world samples are subsampled so the total pool size stays constant.
# NOTE(review): several lines are missing from this extract (the train/test
# dispatch `if`/`else`, the guard around the quiz branch, the subsampling
# assignments `input = input[i]`, and the final `yield`); the comments
# describe only the visible logic.
129 def batches(self, split="train", desc=None):
130 assert split in {"train", "test"}
132 input = self.train_input
133 quizzes = self.train_quizzes
135 input = self.test_input
136 quizzes = self.test_quizzes
# Collapse the accumulated list of quiz tensors into one tensor.
139 quizzes = torch.cat(quizzes, dim=0)
# If quizzes would exceed half the pool, subsample world input down to
# half; otherwise shrink world input so the combined size is preserved.
140 if quizzes.size(0) > input.size(0) // 2:
141 i = torch.randperm(input.size(0))[: input.size(0) // 2]
144 i = torch.randperm(input.size(0))[: input.size(0) - quizzes.size(0)]
# Bookkeeping: how many samples of each kind feed this epoch.
147 self.nb_batch_samples_world = input.size(0)
148 self.nb_batch_samples_quizzes = quizzes.size(0)
150 input = torch.cat([input, quizzes], dim=0)
152 self.nb_batch_samples_world = input.size(0)
153 self.nb_batch_samples_quizzes = 0
# Shuffle so world samples and quizzes are interleaved across batches.
156 input = input[torch.randperm(input.size(0))]
159 desc = f"epoch-{split}"
160 for batch in tqdm.tqdm(
161 input.split(self.batch_size), dynamic_ncols=True, desc=desc
# Size of the token vocabulary. NOTE(review): the body is missing from this
# extract — presumably `return self.nb_codes`; confirm against the full file.
165 def vocabulary_size(self):
# Evaluate the model's prediction accuracy on the train/test splits, save a
# visualization of test predictions, and return the main test accuracy.
# NOTE(review): lines are missing throughout this extract (the method's
# `def` line, the model/input kwargs of the autoregression calls, and the
# image-saving call header); the comments describe only the visible logic.
169 self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
# Regenerate the masked second half of each sequence and count how many
# sequences are reproduced exactly.
171 def compute_accuracy(input, logger=None):
173 ar_mask = self.make_ar_mask(input)
# Zero out the positions to be predicted; keep the prompt half.
174 result = input.clone() * (1 - ar_mask)
176 masked_inplace_autoregression(
178 batch_size=self.batch_size,
183 deterministic_synthesis=deterministic_synthesis,
184 progress_bar_desc=None,
# A sequence counts as correct only if every token matches (min over the
# position dimension is 1 iff all positions are equal).
188 nb_total, nb_correct = (
190 (input == result).long().min(dim=1).values.sum(),
193 return nb_total, nb_correct
195 train_nb_total, train_nb_correct = compute_accuracy(self.train_input)
198 f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
201 test_nb_total, test_nb_correct = compute_accuracy(self.test_input, logger)
204 f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
207 main_test_accuracy = test_nb_correct / test_nb_total
208 logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")
210 ##############################
# Visualization: regenerate the first 96 test sequences and save them as an
# image named after the epoch and model id.
212 input = self.test_input[:96]
213 ar_mask = self.make_ar_mask(input)
214 result = input.clone() * (1 - ar_mask)
216 masked_inplace_autoregression(
218 batch_size=self.batch_size,
223 deterministic_synthesis=deterministic_synthesis,
224 progress_bar_desc=None,
231 f"world_prediction_{n_epoch:04d}_{model.id:02d}.png",
235 return main_test_accuracy
# Replace the `nb` oldest samples of the chosen split with freshly generated
# world sequences: shift the kept samples toward the front, append the new
# ones at the end (mutates the split tensor in place).
# NOTE(review): the trailing argument/closing lines of the `.to(` call are
# missing from this extract — presumably `.to(self.device)`; confirm.
# NOTE(review): if nb ends up 0, `input[:-0]` is an empty slice while
# `input[nb:]` is the full tensor — a shape mismatch; confirm callers never
# pass nb == 0.
237 def renew_samples(self, nb, for_train=True):
238 input = self.train_input if for_train else self.test_input
# Never replace more samples than the split contains.
239 nb = min(nb, input.size(0))
# .clone() avoids reading from a region that the in-place copy is
# simultaneously overwriting.
240 input[:-nb] = input[nb:].clone()
241 input[-nb:] = world.generate_seq(nb, height=self.height, width=self.width).to(
# Append a tensor of freshly created quizzes to the train or test quiz pool.
# NOTE(review): the `if for_train:` / `else:` lines are missing from this
# extract; the two appends are presumably the respective branches.
245 def store_new_quizzes(self, new_quizzes, for_train=True):
247 self.train_quizzes.append(new_quizzes)
249 self.test_quizzes.append(new_quizzes)
# Generate candidate quizzes with `model`, adapting the sampling temperature
# toward a desired mean summed logit, build direction-reversed variants of
# each quiz, and count how many of the other models solve each quiz in BOTH
# directions. Returns (quizzes, per-quiz solver counts, mean summed logits).
# NOTE(review): many lines are missing from this extract (the parameter
# list, the temperature-search loop header, the temperature/d_temperature
# initialization, and several call kwargs); the comments describe only what
# the visible lines establish.
251 def create_new_quizzes(
259 desired_average_logits=None,
261 ###############################################################
262 # Generate quizzes with model
# One quiz = forward grid + one direction token + backward grid.
264 quizzes = torch.empty(
265 nb, self.height * self.width * 2 + 1, device=self.device, dtype=torch.int64
# Every position is generated from scratch (mask of all ones).
268 ar_mask = torch.full(quizzes.size(), 1, device=self.device)
269 summed_logits = torch.empty(nb, device=self.device)
# Reset the accumulator before each generation attempt.
275 summed_logits[...] = 0
277 masked_inplace_autoregression(
279 batch_size=self.batch_size,
282 summed_logits=summed_logits,
283 temperature=temperature,
284 deterministic_synthesis=False,
285 progress_bar_desc="creating quizzes",
289 average_logits = summed_logits.mean()
291 logger(f"{average_logits=} {desired_average_logits=}")
# No target given: accept the first generated batch as-is.
293 if desired_average_logits is None:
# Bisection-style temperature search: reverse and halve the step whenever
# the mean logit crosses the (slightly relaxed) target from either side.
297 if average_logits < desired_average_logits * 1.1:
298 if d_temperature > 0:
299 d_temperature *= -0.5
300 temperature += d_temperature
301 elif average_logits > desired_average_logits:
302 if d_temperature < 0:
303 d_temperature *= -0.5
304 temperature += d_temperature
308 logger(f"changing temperature to {temperature}")
310 ###############################################################
311 # Create the reverse quizzes
# Flip the direction token and swap the two grid halves.
313 l = self.height * self.width
314 direction = quizzes[:, l : l + 1]
315 direction = world.token_forward * (
316 direction == world.token_backward
317 ) + world.token_backward * (direction == world.token_forward)
318 reverse_quizzes = torch.cat(
319 [quizzes[:, l + 1 :], direction, quizzes[:, :l]], dim=1
322 ar_mask = self.make_ar_mask(quizzes)
324 ###############################################################
325 # Check how many of the other models can solve them in both
# A quiz counts as solved by a model only if the model reproduces it
# exactly in the forward AND in the reversed direction.
330 for m in other_models:
331 result = quizzes.clone()
333 masked_inplace_autoregression(
335 batch_size=self.batch_size,
340 deterministic_synthesis=True,
341 progress_bar_desc="solving quizzes",
# 1 iff every token of the regenerated quiz matches the original.
345 correct = (quizzes == result).long().min(dim=-1).values
347 reverse_result = reverse_quizzes.clone()
349 masked_inplace_autoregression(
351 batch_size=self.batch_size,
352 input=reverse_result,
356 deterministic_synthesis=True,
357 progress_bar_desc="solving reversed quizzes",
362 (reverse_quizzes == reverse_result).long().min(dim=-1).values
# Elementwise product: solved in both directions.
365 nb_correct.append((correct * reverse_correct)[None, :])
367 nb_correct = torch.cat(nb_correct, dim=0)
369 # filename = os.path.join(result_dir, "correct_{n_epoch:04d}.dat")
370 # with open(filename, "w") as f:
371 # for k in nb_correct:
# Per-quiz count of other models that solved it, plus the mean logits.
374 return quizzes, nb_correct.sum(dim=0), summed_logits.mean()