# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt, quizz_machine

# world quizzes vs. culture quizzes
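#
# "World quizzes" (w_quizzes) come from the hard-coded problem
# generator, while "culture quizzes" (c_quizzes) are generated by the
# GPTs themselves once all the models pass an accuracy threshold, and
# are kept only when the other models mostly agree on the answer (see
# create_c_quizzes below).
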
######################################################################

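# Test accuracy the weakest model must reach before the group starts
# generating c_quizzes, and how many validated c_quizzes are then added
# to the train and test sets at each round.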
accuracy_to_make_c_quizzes = 0.975
nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################

105 "nb_train_samples": 250000,
106 "nb_test_samples": 10000,
109 for k, v in default_args.items():
110 if getattr(args, k) is None:
113 ######################################################################
# Hyper-parameter presets keyed by the value of --model

default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################

def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0

quizz_machine = quizz_machine.QuizzMachine(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
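# exp(entropy) is the perplexity of the best unigram model of the
# training tokens; it gives a reference point for the model
# perplexities logged below.
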
######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:
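    # Quizzes are compared as tuples of token values, in chunks of
    # 25000, to bound the memory used by the set intersections.
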
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

270 f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
274 nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
275 ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
##############################

def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

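        # Gradient accumulation: each batch holds args.physical_batch_size
        # samples, the gradient is zeroed and the optimizer stepped only
        # every args.batch_size samples.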
        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")

######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=deterministic_synthesis,
        )

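        # Perplexity is exp of the average cross-entropy in nats; the
        # min(100, .) caps the argument so that math.exp cannot overflow
        # on a diverging model.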
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy

######################################################################

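# Make one model generate candidate quizzes, four times as many as
# needed, and keep those that exactly len(other_models) - 1 of the
# other models answer correctly.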
def create_c_quizzes(
    model,
    other_models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
    min_ave_seq_logproba=None,
):
    kept = []

    sum_logits, sum_nb_c_quizzes = 0, 0

    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
        nb_to_generate = 4 * (nb_for_train + nb_for_test)

        new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
            n_epoch=n_epoch,
            result_dir=args.result_dir,
            logger=log_string,
            nb=nb_to_generate,
            model=model,
            other_models=other_models,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

        to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]

        if args.dirty_debug:
            to_keep = new_c_quizzes

        log_string(
            f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
        )

        kept.append(to_keep)

    new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    quizz_machine.save_quizzes(
        new_c_quizzes[:72],
        args.result_dir,
        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
    )

    return sum_logits / sum_nb_c_quizzes

######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)

nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

min_ave_seq_logproba = None

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    log_string(f"current accuracies {a}")

    # select the model with the lowest accuracy and train it
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    one_epoch(model, quizz_machine)

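    # Replace part of the w_quizzes of the train set with freshly
    # generated ones, so the models keep seeing new world quizzes.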
    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
        other_models = models.copy()
        other_models.remove(model)

        ave_seq_logproba = create_c_quizzes(
            model,
            other_models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average sequence log-probability as a reference
        if min_ave_seq_logproba is None:
            min_ave_seq_logproba = ave_seq_logproba
        else:
            log_string(
                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
            )

        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)

######################################################################