# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt  # needed below for mygpt.MyGPT and mygpt.BracketedSequence
import sky, wireworld, quizz_machine

# world quizzes vs. culture quizzes
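#
# "World quizzes" (w_quizzes) are produced procedurally by the chosen
# problem (sky or wireworld). "Culture quizzes" (c_quizzes) are
# generated by the GPTs themselves, and kept only when the right
# number of models solve them (see create_c_quizzes below).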

######################################################################

nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    # TF32 matmuls trade a little precision for speed on recent GPUs
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-3)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="sky")

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--nb_models_for_generation", type=int, default=1)

parser.add_argument("--generation_mode", type=str, default="groupthink")

parser.add_argument("--min_to_validate", type=int, default=4)

parser.add_argument("--max_to_validate", type=int, default=4)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--dirty_debug", action="store_true", default=False)

######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################
117 "nb_train_samples": 100000,
118 "nb_test_samples": 10000,
121 for k, v in default_args.items():
122 if getattr(args, k) is None:

######################################################################

# Named model sizes mapping to transformer hyper-parameters; the "37M"
# preset below is an assumed example, the other presets are omitted.
default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
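
# physical_batch_size is the number of samples that fits in memory at
# once; optimizer updates still happen every batch_size samples, with
# gradients accumulated over the intermediate physical batches (see
# one_epoch below).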

if args.problem == "sky":
    problem = sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=2)
elif args.problem == "wireworld":
    problem = wireworld.Wireworld(height=10, width=15, nb_iterations=4)
else:
    raise ValueError(f"Unknown problem {args.problem}")

quizz_machine = quizz_machine.QuizzMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    # the two keyword arguments below are assumed
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )

token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
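
# exp of the empirical unigram entropy is the perplexity of a model
# that only matches token frequencies; it is a natural reference point
# for the train/test perplexities logged below.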

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    # Folds batches of quizzes into sets of at most cs tuples, so the
    # test-against-train intersection below runs with bounded memory
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################


def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)
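
        # Gradient accumulation: batches arrive in physical_batch_size
        # chunks, but gradients are zeroed and applied only at
        # batch_size boundaries, so the effective batch size is
        # args.batch_size.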
        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")


######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,  # assumed keyword
            model=model,  # assumed keyword
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

    model.main_test_accuracy = main_test_accuracy


######################################################################
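
# A c-quiz is "validated" when the number of models that solve it lies
# in [min_to_validate, max_to_validate]. With the defaults (4..4 out
# of nb_gpts=5), a quiz is kept exactly when all models but one get it
# right: solvable by the culture, yet informative for the one model
# that failed.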


def create_c_quizzes(
    models,
    quizz_machine,
    nb_for_train,
    nb_for_test,
    min_ave_seq_logproba=None,
):
    # We will store the generated quizzes for each number of models
    # that predict them correctly
    recorded = dict([(n, []) for n in range(len(models) + 1)])

    sum_logits, sum_nb_c_quizzes = 0, 0

    def nb_generated():
        return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])

    def nb_validated():
        return sum(
            [
                sum([x.size(0) for x in recorded[n]])
                for n in range(args.min_to_validate, args.max_to_validate + 1)
            ]
        )

    nb_to_create = nb_for_train + nb_for_test

    while nb_validated() < nb_to_create:
        (
            new_c_quizzes,
            nb_correct,
            ave_seq_logproba,
        ) = quizz_machine.gang_create_c_quizzes(
            nb=nb_to_create,  # assumed keyword
            nb_models_for_generation=args.nb_models_for_generation,
            models=models,  # assumed keyword
            mode=args.generation_mode,
            min_ave_seq_logproba=min_ave_seq_logproba,
            n_epoch=n_epoch,  # assumed keyword
            result_dir=args.result_dir,
        )

        sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
        sum_nb_c_quizzes += new_c_quizzes.size(0)

        if args.dirty_debug:
            # fake the validation counts to exercise the code path
            nb_correct = torch.randint(
                len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
            )

        for n in range(nb_correct.max() + 1):
            recorded[n].append(new_c_quizzes[nb_correct == n].clone())

        log_string(
            f"keep c_quizzes {nb_validated()*100/nb_generated():.02f}% kept total {nb_validated()} / {nb_to_create}"
        )

    # concatenate and shuffle
    for n in recorded.keys():
        if len(recorded[n]) > 0:
            q = torch.cat(recorded[n], dim=0)
            q = q[torch.randperm(q.size(0), device=q.device)]
            recorded[n] = q

    new_c_quizzes = torch.cat(
        [recorded[n] for n in range(args.min_to_validate, args.max_to_validate + 1)],
        dim=0,
    )

    new_c_quizzes = new_c_quizzes[
        torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
            : nb_for_train + nb_for_test
        ]
    ]

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    for n in recorded.keys():
        s = "_validated" if args.min_to_validate <= n <= args.max_to_validate else ""
        quizz_machine.problem.save_quizzes(
            recorded[n][:72],  # assumed: save at most the first 72 quizzes
            args.result_dir,
            f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
        )

    return sum_logits / sum_nb_c_quizzes


######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,  # assumed keyword
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)

nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################
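
# Main loop: at every epoch, train only the weakest model on the
# current quiz mix, refresh part of the w_quizzes, re-test it, and
# once every model clears accuracy_to_make_c_quizzes, have the gang
# generate and validate a fresh batch of c_quizzes.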

min_ave_seq_logproba = None

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    a = [(model.id, float(model.main_test_accuracy)) for model in models]
    a.sort(key=lambda p: p[0])
    s = " ".join([f"{p[1]*100:.02f}%" for p in a])
    log_string(f"current accuracies {s}")

    # select the model with the lowest accuracy and train it
    models.sort(key=lambda model: model.main_test_accuracy)
    model = models[0]

    log_string(
        f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
    )

    one_epoch(model, quizz_machine)

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    run_tests(model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        ave_seq_logproba = create_c_quizzes(
            models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
            min_ave_seq_logproba=min_ave_seq_logproba,
        )

        # We keep the first average logits as a reference
        # if min_ave_seq_logproba is None:
        #     min_ave_seq_logproba = ave_seq_logproba
        # else:
        #     log_string(
        #         f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
        #     )

        # the new c_quizzes went into the test set too, so re-evaluate
        # every model
        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)

######################################################################