# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>
import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt
import sky, quizz_machine
# world quizzes vs. culture quizzes

######################################################################
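
# Global knobs of the culture loop: every model must reach this test
# accuracy before new culture (c-) quizzes are generated, and the two
# quotas set how many validated c-quizzes go to the train and test sets.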
accuracy_to_make_c_quizzes = 0.975
nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################
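
# Use CUDA when available; allowing TF32 matmuls trades a little precision
# for a substantial speed-up on recent NVIDIA hardware.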
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-3)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################
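
# Typical invocation (illustrative values, not prescriptive):
#
#   python main.py --model 37M --nb_gpts 5 --result_dir results_culture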

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################
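
# --dirty_debug shrinks every threshold and quota so the whole pipeline,
# including c-quiz creation, can be exercised in a quick run.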
if args.dirty_debug:
    accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################
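
# Defaults applied to any option left at None. The "model" and "batch_size"
# values below are reconstructed assumptions, chosen to be consistent with
# the rest of the script.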
106 "nb_train_samples": 100000,
107 "nb_test_samples": 10000,
110 for k, v in default_args.items():
111 if getattr(args, k) is None:
114 ######################################################################
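
# Transformer hyper-parameters keyed by the --model name. Only the "37M"
# configuration is spelled out here, and its values are a plausible
# reconstruction rather than authoritative.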
default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
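
# batch_size is the logical optimization batch; physical_batch_size is what
# actually goes through the model in one forward/backward pass, gradients
# being accumulated in between (see one_epoch below).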

quizz_machine = quizz_machine.QuizzMachine(
    problem=sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2),
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################
225 log_string(f"device {device}")
227 vocabulary_size = quizz_machine.vocabulary_size()
229 log_string(f"vocabulary_size {vocabulary_size}")
231 ######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
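
# train_set_perplexity is exp of the empirical token-distribution entropy,
# i.e. the perplexity a model would reach by learning only per-token
# frequencies.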

######################################################################
# A bit of paranoia never hurts
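# Estimate, chunk by chunk, how many test quizzes also occur verbatim in the
# train set, and abort if the overlap exceeds the allowed percentage.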

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################
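
# One pass over the train split. The optimizer is zeroed at the start of each
# logical batch and stepped at its end, so gradients are accumulated over the
# physical batches in between.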
def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################
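
# Evaluate a model on the test split: average next-token cross-entropy
# (reported as perplexity), plus the task accuracy computed by the quizz
# machine.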
def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy


######################################################################
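
# Create new culture quizzes: a randomly chosen model generates candidates,
# all models try to solve them, and a candidate is kept when all the models
# but one answer it correctly.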
def create_c_quizzes(
    models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
    min_ave_seq_logproba=None,
):
    kept = []
    model_indexes = []

    sum_logits, sum_nb_c_quizzes = 0, 0

    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
        nb_to_generate = nb_for_train + nb_for_test

        if len(model_indexes) == 0:
            model_indexes = [i.item() for i in torch.randperm(len(models))]

        model = models[model_indexes.pop()]

        new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
            nb=nb_to_generate,
            model_for_generation=model,
            models_for_validation=models,
            min_ave_seq_logproba=min_ave_seq_logproba,
            n_epoch=n_epoch,
            result_dir=args.result_dir,
            logger=log_string,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

        to_keep = new_c_quizzes[nb_correct == len(models) - 1]

        if args.dirty_debug:
            to_keep = new_c_quizzes[
                torch.randint(3, (new_c_quizzes.size(0),), device=new_c_quizzes.device)
                == 0
            ]

        kept.append(to_keep)

        log_string(
            f"keep c_quizzes {to_keep.size(0)}/{new_c_quizzes.size(0)} ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%) total {sum([x.size(0) for x in kept])}/{nb_to_generate}"
        )

    new_c_quizzes = torch.cat(kept, dim=0)
    new_c_quizzes = new_c_quizzes[
        torch.randperm(new_c_quizzes.size(0))[: nb_for_train + nb_for_test]
    ]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    quizz_machine.problem.save_quizzes(
        new_c_quizzes[:72],
        args.result_dir,
        f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
    )

    return sum_logits / sum_nb_c_quizzes


######################################################################
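
# Instantiate the population of GPTs. Each model carries an id and its latest
# main_test_accuracy, which the main loop uses to pick the weakest model.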
models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

min_ave_seq_logproba = None
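
# Main loop: train only the currently weakest model, renew part of the world
# quizzes, re-test, and once every model clears accuracy_to_make_c_quizzes,
# generate and store a new batch of culture quizzes.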
for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    log_string(f"current accuracies {a}")

    # select the model with lowest accuracy
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    one_epoch(model, quizz_machine)

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
        ave_seq_logproba = create_c_quizzes(
            models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average logits as a reference
        # if min_ave_seq_logproba is None:
        #     min_ave_seq_logproba = ave_seq_logproba
        # else:
        #     log_string(
        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
        #     )

        # We update everyone
        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)

######################################################################