# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, os, tqdm, warnings

import torch, torchvision

from torch.nn import functional as F

from mygpt import BracketedSequence

######################################################################

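# Complete, in place and batch by batch, the entries of `input` whose
# positions are flagged in `ar_mask`, delegating the actual sampling to
# the model's own masked_inplace_autoregression and accumulating the
# per-sequence log-probabilities in `seq_logproba`.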
def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    seq_logproba,
    temperature,
    deterministic_synthesis,
    forbidden_tokens=None,
    logit_biases=None,
    progress_bar_desc=None,
    device=torch.device("cpu"),
):
    assert input.size() == ar_mask.size()

    batches = zip(
        input.split(batch_size),
        ar_mask.split(batch_size),
        seq_logproba.split(batch_size),
    )

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=(input.size(0) + batch_size - 1) // batch_size,
        )

    with torch.autograd.no_grad():
        for input, ar_mask, seq_logproba in batches:
            model.masked_inplace_autoregression(
                input=input,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=deterministic_synthesis,
                forbidden_tokens=forbidden_tokens,
                forced_biases=logit_biases,
            )


######################################################################

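# This class maintains two pools of quizzes: the w_quizzes, sampled
# directly from the problem, and the c_quizzes, generated by the models
# themselves. make_ar_mask returns a mask that is 1 on the second half
# of each sequence, i.e. the part to be predicted autoregressively.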
class QuizzMachine:
    def make_ar_mask(self, input):
        b = torch.arange(input.size(1), device=input.device) > input.size(1) // 2
        return b.long()[None, :].expand_as(input)
    def __init__(
        self,
        problem,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        result_dir=None,
        logger=None,
        device=torch.device("cpu"),
    ):
        self.problem = problem
        self.batch_size = batch_size
        self.device = device

        self.train_w_quizzes = self.problem.generate_seq(nb_train_samples).to(device)
        self.test_w_quizzes = self.problem.generate_seq(nb_test_samples).to(device)

        self.nb_codes = max(self.train_w_quizzes.max(), self.test_w_quizzes.max()) + 1

        self.train_c_quizzes = []
        self.test_c_quizzes = []

        if result_dir is not None:
            self.problem.save_quizzes(
                self.train_w_quizzes[:72], result_dir, "culture_w_quizzes"
            )
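    # Yield shuffled batches that mix the problem-generated w_quizzes
    # with the model-generated c_quizzes, capping the latter at half of
    # the data and sub-sampling the former so that the total number of
    # sequences stays constant.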
    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        if split == "train":
            w_quizzes = self.train_w_quizzes
            c_quizzes = self.train_c_quizzes
        else:
            w_quizzes = self.test_w_quizzes
            c_quizzes = self.test_c_quizzes

        if len(c_quizzes) > 0:
            c_quizzes = torch.cat(c_quizzes, dim=0)
            if c_quizzes.size(0) > w_quizzes.size(0) // 2:
                i = torch.randperm(c_quizzes.size(0))[: w_quizzes.size(0) // 2]
                c_quizzes = c_quizzes[i]

            i = torch.randperm(w_quizzes.size(0))[
                : w_quizzes.size(0) - c_quizzes.size(0)
            ]
            w_quizzes = w_quizzes[i]

            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = c_quizzes.size(0)

            input = torch.cat([w_quizzes, c_quizzes], dim=0)
        else:
            input = w_quizzes
            self.nb_batch_w_quizzes = w_quizzes.size(0)
            self.nb_batch_c_quizzes = 0

        input = input[torch.randperm(input.size(0))]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch
    def vocabulary_size(self):
        return self.nb_codes
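    # Evaluate the model: compute the prediction accuracy on (at most
    # nmax) train and test w_quizzes, log the results, and save a small
    # sample of test predictions through problem.save_quizzes.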
    def produce_results(
        self, n_epoch, model, result_dir, logger, deterministic_synthesis, nmax=1000
    ):
        def compute_accuracy(input, logger=None):
            input = input[:nmax]
            ar_mask = self.make_ar_mask(input)
            result = input.clone() * (1 - ar_mask)
            seq_logproba = torch.empty(input.size(0), device=self.device)

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=deterministic_synthesis,
                progress_bar_desc=None,
                device=self.device,
            )

            nb_total, nb_correct = (
                input.size(0),
                (input == result).long().min(dim=1).values.sum(),
            )

            return nb_total, nb_correct
        train_nb_total, train_nb_correct = compute_accuracy(self.train_w_quizzes)

        logger(
            f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = compute_accuracy(self.test_w_quizzes, logger)

        logger(
            f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        main_test_accuracy = test_nb_correct / test_nb_total
        logger(f"main_test_accuracy {n_epoch} {main_test_accuracy}")

        ##############################
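        # Complete a sample of test quizzes and save the results for
        # visual inspection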
        input = self.test_w_quizzes[:96]
        ar_mask = self.make_ar_mask(input)
        result = input.clone() * (1 - ar_mask)
        seq_logproba = torch.empty(input.size(0), device=self.device)

        masked_inplace_autoregression(
            model=model,
            batch_size=self.batch_size,
            input=result,
            ar_mask=ar_mask,
            seq_logproba=seq_logproba,
            temperature=1.0,
            deterministic_synthesis=deterministic_synthesis,
            progress_bar_desc=None,
            device=self.device,
        )

        self.problem.save_quizzes(
            result[:72], result_dir, f"culture_prediction_{n_epoch:04d}_{model.id:02d}"
        )

        return main_test_accuracy
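    # Replace the nb oldest w_quizzes with freshly generated ones, the
    # remaining ones being shifted toward the beginning of the tensor.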
    def renew_w_quizzes(self, nb, for_train=True):
        input = self.train_w_quizzes if for_train else self.test_w_quizzes
        nb = min(nb, input.size(0))
        input[:-nb] = input[nb:].clone()
        input[-nb:] = self.problem.generate_seq(nb).to(self.device)
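    # Add a batch of model-generated quizzes to the train or test pool.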
    def store_c_quizzes(self, new_c_quizzes, for_train=True):
        if for_train:
            self.train_c_quizzes.append(new_c_quizzes)
        else:
            self.test_c_quizzes.append(new_c_quizzes)
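    # Generate nb candidate c_quizzes with model_for_generation, build
    # their reversed versions, and count for each candidate how many of
    # the validation models solve it in both directions.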
    def create_c_quizzes(
        self,
        nb,
        model_for_generation,
        models_for_validation,
        min_ave_seq_logproba,
        n_epoch,
        result_dir,
        logger,
    ):
        ###############################################################
        # Generate quizzes with the model

        c_quizzes = torch.empty(
            nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
        )

        ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
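        # Sample complete sequences, adjusting the temperature until the
        # average sequence log-probability falls between
        # min_ave_seq_logproba and 0.99 * min_ave_seq_logproba (no
        # adjustment if no target is given). The step d_temperature
        # flips sign and shrinks by a factor 3 whenever the search
        # overshoots.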
        temperature = 1
        d_temperature = 1 / 3

        while True:
            seq_logproba[...] = 0

            masked_inplace_autoregression(
                model=model_for_generation,
                batch_size=self.batch_size,
                input=c_quizzes,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=temperature,
                deterministic_synthesis=False,
                # progress_bar_desc="sampling c_quizzes",
                device=self.device,
            )

            ave_seq_logproba = seq_logproba.mean()

            if min_ave_seq_logproba is None:
                break

            if ave_seq_logproba < min_ave_seq_logproba:
                if d_temperature > 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            elif ave_seq_logproba > min_ave_seq_logproba * 0.99:
                if d_temperature < 0:
                    d_temperature *= -1 / 3
                temperature += d_temperature
            else:
                break

            logger(f"changing temperature to {temperature}")
        ###############################################################
        # Create the reverse quizzes

        token_forward, token_backward = self.problem.direction_tokens()

        l = (c_quizzes.size(1) - 1) // 2
        direction = c_quizzes[:, l : l + 1]
        direction = self.problem.token_forward * (
            direction == self.problem.token_backward
        ) + self.problem.token_backward * (direction == self.problem.token_forward)

        reverse_c_quizzes = torch.cat(
            [c_quizzes[:, l + 1 :], direction, c_quizzes[:, :l]], dim=1
        )

        ar_mask = self.make_ar_mask(c_quizzes)
        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
        ###############################################################
        # Check how many of the other models can solve them in both
        # directions

        nb_correct = []

        for model in models_for_validation:
            result = c_quizzes.clone()

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                # progress_bar_desc="solving c_quizzes",
                device=self.device,
            )

            correct = (c_quizzes == result).long().min(dim=-1).values

            reverse_result = reverse_c_quizzes.clone()

            masked_inplace_autoregression(
                model=model,
                batch_size=self.batch_size,
                input=reverse_result,
                ar_mask=ar_mask,
                seq_logproba=seq_logproba,
                temperature=1.0,
                deterministic_synthesis=True,
                # progress_bar_desc="solving reversed c_quizzes",
                device=self.device,
            )

            reverse_correct = (
                (reverse_c_quizzes == reverse_result).long().min(dim=-1).values
            )

            nb_correct.append((correct * reverse_correct)[None, :])

        nb_correct = torch.cat(nb_correct, dim=0).sum(dim=0)

        return c_quizzes, nb_correct, seq_logproba.mean()