# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt
import sky, quizz_machine

# world quizzes vs. culture quizzes
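#
# w_quizzes are produced directly by the Sky problem, while c_quizzes
# are generated by the GPTs themselves and kept only when the number
# of models that answer them correctly falls in a chosen band (see
# create_c_quizzes below).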
######################################################################

nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################
parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-3)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--nb_models_for_generation", type=int, default=1)

parser.add_argument("--generation_mode", type=str, default="groupthink")

parser.add_argument("--min_to_validate", type=int, default=4)

parser.add_argument("--max_to_validate", type=int, default=4)
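# A c_quiz is validated when the number of models that solve it lies
# in [min_to_validate, max_to_validate]; with the defaults, exactly
# four of the five GPTs must agree on the answer.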
parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################

args = parser.parse_args()
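# Hypothetical invocation, assuming this script is saved as main.py:
#
#   python main.py --model=37M --nb_gpts=5 --result_dir=results_culture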
if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10
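# --dirty_debug disables the accuracy gate and shrinks the c_quiz
# budget (and, below, the dataset sizes) so that a full generation
# loop can be exercised quickly.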
######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)
######################################################################

default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################
def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################
if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
# note: this rebinds the quizz_machine module name to the instance
quizz_machine = quizz_machine.QuizzMachine(
    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=2),
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)
######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################
# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
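# train_set_perplexity is the perplexity of a unigram model of the
# training tokens, a natural reference point for the per-token
# perplexities of the GPTs logged during training and testing.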
######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
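# subsets_as_tuples yields the data as sets of at most cs distinct
# sequences, so the test-vs-train comparison above runs in bounded
# memory instead of materializing both splits as single sets.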
##############################


def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")
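# The batches above hold args.physical_batch_size samples each, while
# zero_grad/step fire every args.batch_size samples, so gradients are
# accumulated whenever physical_batch_size < batch_size.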
######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy
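# The accuracy stored here drives the scheduling below: the weakest
# model is trained next, and new c_quizzes are only generated once
# every model clears args.accuracy_to_make_c_quizzes.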
######################################################################


def create_c_quizzes(
    models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
    min_ave_seq_logproba=None,
):
    # We will store the generated quizzes for each number of
    # models that answer them correctly
    recorded = dict([(n, []) for n in range(len(models) + 1)])

    sum_logits, sum_nb_c_quizzes = 0, 0

    def nb_generated():
        return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])

    def nb_validated():
        return sum(
            [
                sum([x.size(0) for x in recorded[n]])
                for n in range(args.min_to_validate, args.max_to_validate + 1)
            ]
        )

    nb_to_create = nb_for_train + nb_for_test

    while nb_validated() < nb_to_create:
        (
            new_c_quizzes,
            nb_correct,
            ave_seq_logproba,
        ) = quizz_machine.gang_create_c_quizzes(
            nb=nb_to_create,
            nb_models_for_generation=args.nb_models_for_generation,
            models=models,
            mode=args.generation_mode,
            min_ave_seq_logproba=min_ave_seq_logproba,
            n_epoch=n_epoch,
            result_dir=args.result_dir,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

        if args.dirty_debug:
            nb_correct = torch.randint(
                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
            )

        for n in range(nb_correct.max() + 1):
            recorded[n].append(new_c_quizzes[nb_correct == n].clone())

        log_string(
            f"keep c_quizzes {nb_validated()*100/nb_generated():.02f}% kept total {nb_validated()} / {nb_to_create}"
        )

    # concatenate and shuffle
    for n in recorded.keys():
        if len(recorded[n]) > 0:
            q = torch.cat(recorded[n], dim=0)
            q = q[torch.randperm(q.size(0), device=q.device)]
            recorded[n] = q

    new_c_quizzes = torch.cat(
        [recorded[n] for n in range(args.min_to_validate, args.max_to_validate + 1)],
        dim=0,
    )

    new_c_quizzes = new_c_quizzes[
        torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
            : nb_for_train + nb_for_test
        ]
    ]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    for n in recorded.keys():
        s = (
            "_validated"
            if n >= args.min_to_validate and n <= args.max_to_validate
            else ""
        )
        quizz_machine.problem.save_quizzes(
            recorded[n][:72],
            args.result_dir,
            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
        )

    return sum_logits / sum_nb_c_quizzes
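# The returned value is the average sequence logproba over all the
# generated c_quizzes; the main loop can keep the first such value as
# a reference threshold (see the commented-out block below).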
######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)

nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################

min_ave_seq_logproba = None

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
    log_string(f"current accuracies {s}")

    # select the model with the lowest accuracy
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    one_epoch(model, quizz_machine)

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        ave_seq_logproba = create_c_quizzes(
            models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average logits as a reference
        # if min_ave_seq_logproba is None:
        #     min_ave_seq_logproba = ave_seq_logproba
        # else:
        #     log_string(
        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
        #     )

        # re-test every model on the refreshed quiz sets
        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)

######################################################################