# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt
import sky, wireworld, quizz_machine

# world quizzes vs. culture quizzes

######################################################################

nb_new_c_quizzes_for_train = 1000
nb_new_c_quizzes_for_test = 100
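
# Each time the whole population of models is accurate enough, the main
# loop calls create_c_quizzes() with these counts of validated culture
# quizzes to add to the train and test sets.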

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=1e-3)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--both_directions", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="sky")

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--min_to_validate", type=int, default=4)

parser.add_argument("--max_to_validate", type=int, default=4)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--dirty_debug", action="store_true", default=False)

parser.add_argument("--generation_temperature", type=float, default=1.0)

parser.add_argument("--stochastic_validation", action="store_true", default=False)

######################################################################

parser.add_argument("--sky_height", type=int, default=6)

parser.add_argument("--sky_width", type=int, default=8)

parser.add_argument("--sky_nb_birds", type=int, default=3)

parser.add_argument("--sky_nb_iterations", type=int, default=2)

parser.add_argument("--sky_speed", type=int, default=3)

######################################################################

args = parser.parse_args()

if args.result_dir is None:
    args.result_dir = "results_culture"

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

######################################################################

default_args = {
    "model": "37M",
    "batch_size": 100,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################

default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
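
# In both tables above a command-line value always wins: a field is
# filled from the defaults only when its argument was left at None.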

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s, log_file=log_file):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100

if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
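
# args.batch_size is the logical batch size used for optimizer updates,
# args.physical_batch_size is how many sequences go through the model at
# once; one_epoch() below accumulates gradients over physical batches
# until a full logical batch has been seen.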

if args.problem == "sky":
    problem = sky.Sky(
        height=args.sky_height,
        width=args.sky_width,
        nb_birds=args.sky_nb_birds,
        nb_iterations=args.sky_nb_iterations,
        speed=args.sky_speed,
    )
elif args.problem == "wireworld":
    problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
else:
    raise ValueError

quizz_machine = quizz_machine.QuizzMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=device,
)

######################################################################

log_string(f"device {device}")

vocabulary_size = quizz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in quizz_machine.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
        (0, 1)
    )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
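
# entropy is that of the empirical marginal token distribution (in
# nats), so train_set_perplexity is the perplexity a model would reach
# by always predicting the global token frequencies, a reference point
# for the train/test perplexities logged below.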

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    # Yield the quizzes of a split as sets of tuples, at most cs at a
    # time, to keep the memory footprint bounded
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quizz_machine.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quizz_machine.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################


def one_epoch(model, quizz_machine):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quizz_machine.batches(split="train"):
        input = input.to(device)

        # Gradients are accumulated over physical batches; the
        # optimizer is reset and stepped once per logical batch
        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    # min(100, ...) only guards math.exp against overflow if the loss
    # diverges
    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} {train_perplexity}")
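
# n_epoch is read from module scope by one_epoch(), run_tests() and
# create_c_quizzes(); it is set by the main loop at the bottom of the
# file.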

######################################################################


def run_tests(model, quizz_machine, deterministic_synthesis):
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quizz_machine.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} {test_perplexity}")

        model.main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )


######################################################################


def valid_c_quizzes(recorded, criteria):
    # recorded is a list of (c_quizzes, nb_correct) pairs; criteria maps
    # a nb_correct vector to a boolean mask of the quizzes to keep
    result = [q[criteria(c)] for q, c in recorded]
    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])


######################################################################


def create_c_quizzes(
    models,
    quizz_machine,
    nb_for_train=1000,
    nb_for_test=100,
):
    recorded = []

    nb_to_create = nb_for_train + nb_for_test

    # ------------------------------------------------------------

    standard_validity = lambda nb_correct: torch.logical_and(
        nb_correct >= args.min_to_validate, nb_correct <= args.max_to_validate
    )
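
    # With the default settings this keeps a candidate quiz when exactly
    # 4 of the 5 models answer it correctly: solvable by most of the
    # population, but not by all of it yet.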

    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")

    with open(file_name, "w") as logp_file:
        while valid_c_quizzes(recorded, standard_validity).size(0) < nb_to_create:
            # Select a model at random to generate the new quizzes

            model_for_generation = models[torch.randint(len(models), (1,))]

            c_quizzes = quizz_machine.generate_quizzes(
                nb_to_create,
                model_for_generation=model_for_generation,
                temperature=args.generation_temperature,
            )

            nb_correct, seq_logproba = quizz_machine.compute_correctness(
                c_quizzes,
                models,
                both_directions=args.both_directions,
                deterministic_validation=not args.stochastic_validation,
            )

            for n, l in zip(nb_correct, seq_logproba):
                s = " ".join([str(x.item()) for x in l])
                logp_file.write(f"{n} {s}\n")

            if args.dirty_debug:
                nb_correct = torch.randint(
                    len(models) + 1, nb_correct.size(), device=c_quizzes.device
                )

            recorded.append((c_quizzes, nb_correct))

            nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
            nv = " ".join([str(x.item()) for x in nv])

            nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)

            log_string(
                f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
            )

    # store the new c_quizzes which have been validated

    new_c_quizzes = valid_c_quizzes(recorded, standard_validity)

    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    # save a bunch of images to investigate what quizzes with a
    # certain nb of correct predictions look like

    for n in range(len(models) + 1):
        s = (
            "_validated"
            if n >= args.min_to_validate and n <= args.max_to_validate
            else ""
        )

        q = valid_c_quizzes(recorded, criteria=lambda nb_correct: nb_correct == n)[:72]

        if q.size(0) > 0:
            quizz_machine.save_quizzes(
                args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q
            )


######################################################################

models = []

for k in range(args.nb_gpts):
    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        causal=True,
        dropout=args.dropout,
    ).to(device)

    model.main_test_accuracy = 0.0
    model.id = k

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    # Select, improve, and eval the worst model

    weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))

    log_string(
        f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}"
    )

    one_epoch(weakest_model, quizz_machine)

    log_string(
        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    run_tests(weakest_model, quizz_machine, deterministic_synthesis=False)

    log_string(
        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
    )

    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
    log_string(f"current_test_accuracies {cta}")

    # Replace a fraction of the w_quizzes with fresh ones

    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)

    # If all the models are good enough, generate new quizzes and
    # re-compute the test errors

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        create_c_quizzes(
            models,
            quizz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
        )

        for model in models:
            run_tests(model, quizz_machine, deterministic_synthesis=False)

######################################################################