# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import threading

import mygpt
import sky, grids, quiz_machine

import torch.multiprocessing as mp
# Two kinds of quizzes: "world quizzes" (w_quizzes) are generated directly by
# the problem (sky or grids), while "culture quizzes" (c_quizzes) are generated
# by the GPT models themselves once they are all accurate enough.

######################################################################

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1)
########################################

parser.add_argument("--nb_epochs", type=int, default=10000)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--learning_rate", type=float, default=5e-4)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--problem", type=str, default="grids")

parser.add_argument("--nb_threads", type=int, default=1)

parser.add_argument("--gpus", type=str, default="all")

parser.add_argument("--nb_gpts", type=int, default=5)

parser.add_argument("--min_to_validate", type=int, default=None)

parser.add_argument("--max_to_validate", type=int, default=None)

parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)

parser.add_argument("--proba_understands", type=float, default=0.99)

parser.add_argument("--proba_not_understands", type=float, default=0.5)

parser.add_argument("--generation_temperature", type=float, default=2.0)

parser.add_argument("--dirty_debug", action="store_true", default=False)
######################################################################

grids_tasks = ", ".join(
    [x.__name__.removeprefix("task_") for x in grids.Grids().all_tasks]
)

parser.add_argument(
    "--grids_tasks",
    type=str,
    default=None,
    help="A comma-separated subset of: " + grids_tasks + ", or None for all.",
)
######################################################################

parser.add_argument("--sky_height", type=int, default=6)

parser.add_argument("--sky_width", type=int, default=8)

parser.add_argument("--sky_nb_birds", type=int, default=3)

parser.add_argument("--sky_nb_iterations", type=int, default=2)

parser.add_argument("--sky_speed", type=int, default=3)

######################################################################
args = parser.parse_args()

if args.min_to_validate is None:
    args.min_to_validate = args.nb_gpts - 1

if args.max_to_validate is None:
    args.max_to_validate = args.nb_gpts - 1

if args.result_dir is None:
    args.result_dir = "results_culture"
######################################################################

default_args = {
    "model": "37M",
    "batch_size": 25,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
}

for k, v in default_args.items():
    if getattr(args, k) is None:
        setattr(args, k, v)

######################################################################
# Per-model hyper-parameter presets, keyed by model name

default_model_args = {
    "37M": {
        "dim_model": 512,
        "dim_keys": 64,
        "dim_hidden": 2048,
        "nb_heads": 8,
        "nb_blocks": 12,
    },
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")
    exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")
######################################################################

if args.gpus == "all":
    gpus_idx = range(torch.cuda.device_count())
else:
    gpus_idx = [int(k) for k in args.gpus.split(",")]

gpus = [torch.device(f"cuda:{n}") for n in gpus_idx]

if torch.cuda.is_available():
    main_device = gpus[0]
else:
    assert len(gpus) == 0
    main_device = torch.device("cpu")

if args.dirty_debug:
    args.nb_train_samples = 2500
    args.nb_test_samples = 100
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
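
# The models see physical_batch_size samples at a time; gradients are
# accumulated in one_epoch and the optimizer steps once every batch_size
# samples, hence batch_size must be a multiple of physical_batch_size.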

if args.problem == "sky":
    problem = sky.Sky(
        height=args.sky_height,
        width=args.sky_width,
        nb_birds=args.sky_nb_birds,
        nb_iterations=args.sky_nb_iterations,
        speed=args.sky_speed,
        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
        nb_threads=args.nb_threads,
    )
    back_accuracy = False
elif args.problem == "grids":
    problem = grids.Grids(
        max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
        nb_threads=args.nb_threads,
        tasks=args.grids_tasks,
    )
    back_accuracy = True
else:
    raise ValueError(f"Unknown problem {args.problem}")

problem.save_some_examples(args.result_dir)
quiz_machine = quiz_machine.QuizMachine(
    problem=problem,
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    back_accuracy=back_accuracy,
    batch_size=args.physical_batch_size,
    result_dir=args.result_dir,
    logger=log_string,
    device=main_device,
)

######################################################################

log_string(f"main_device {main_device} gpus {[str(g) for g in gpus]}")

vocabulary_size = quiz_machine.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")
######################################################################


def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device):
    with torch.autograd.no_grad():
        model.eval().to(local_device)

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in quiz_machine.batches(model, split="test"):
            input = input.to(local_device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}")

        model.main_test_accuracy = quiz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            deterministic_synthesis=deterministic_synthesis,
        )

def one_epoch(model, quiz_machine, local_device=main_device):
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    model.to(local_device).train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in quiz_machine.batches(model, split="train"):
        input = input.to(local_device)

        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))

    log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}")

    run_tests(model, quiz_machine, deterministic_synthesis=False)

    model.to(main_device)


######################################################################
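
# A batch of c_quizzes is validated from the log-probabilities the models
# assign to their solutions, sorted in increasing order: a quiz is kept when
# the lowest one is below log(proba_not_understands) and the second lowest is
# above log(proba_understands), i.e. exactly one model fails it and all the
# others solve it confidently.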

def standard_validity(logproba):
    l = logproba.sort(dim=-1).values
    return (l[:, 0] < math.log(args.proba_not_understands)) & (
        l[:, 1] > math.log(args.proba_understands)
    )


def valid_c_quizzes(recorded, criteria):
    result = [q[criteria(lp)] for q, lp in recorded]
    return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([])


######################################################################
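
# Generate new c_quizzes until enough of them pass standard_validity: at each
# iteration a model picked at random samples candidates at
# args.generation_temperature, trivial ones are discarded, and every model's
# log-probability of the remaining solutions is recorded.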

def create_c_quizzes(
    models,
    quiz_machine,
    nb_for_train,
    nb_for_test,
):
    quizzes_and_logproba_records = []

    nb_to_create = nb_for_train + nb_for_test

    # ------------------------------------------------------------

    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")

    with open(file_name, "w") as logp_file:
        while (
            valid_c_quizzes(quizzes_and_logproba_records, standard_validity).size(0)
            < nb_to_create
        ):
            # Select a model at random to generate the new quizzes

            model_for_generation = models[torch.randint(len(models), (1,))]

            c_quizzes = quiz_machine.generate_quizzes(
                nb_to_create,
                model_for_generation=model_for_generation,
                temperature=args.generation_temperature,
            )

            c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]

            if c_quizzes.size(0) > 0:
                logproba = quiz_machine.logproba_of_solutions(models, c_quizzes)
                for l in logproba:
                    s = " ".join([str(x.item()) for x in l])
                    logp_file.write(s + "\n")
                quizzes_and_logproba_records.append((c_quizzes, logproba))

            nb_validated = valid_c_quizzes(
                quizzes_and_logproba_records, standard_validity
            ).size(0)

            log_string(
                f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
            )

    # store the new c_quizzes which have been validated

    new_c_quizzes = valid_c_quizzes(quizzes_and_logproba_records, standard_validity)

    quiz_machine.reverse_random_half_in_place(new_c_quizzes)

    quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
    quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)

    # save a bunch of images to investigate what quizzes with a
    # certain nb of correct predictions look like

    q = new_c_quizzes[:72]

    if q.size(0) > 0:
        quiz_machine.save_quizzes(args.result_dir, f"culture_c_quiz_{n_epoch:04d}", q)


######################################################################
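
# Instantiate the args.nb_gpts GPT models; each gets its own training and test
# w_quizzes, with a random half of each set reversed in place.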

models = []

for k in range(args.nb_gpts):
    log_string(f"creating model {k} and its w_quizzes")

    model = mygpt.MyGPT(
        vocabulary_size=vocabulary_size,
        dim_model=args.dim_model,
        dim_keys=args.dim_keys,
        dim_hidden=args.dim_hidden,
        nb_heads=args.nb_heads,
        nb_blocks=args.nb_blocks,
        dropout=args.dropout,
    ).to(main_device)

    model.main_test_accuracy = 0.0
    model.id = k

    model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples)
    quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)

    model.test_w_quizzes = quiz_machine.generate_token_sequences(args.nb_test_samples)
    quiz_machine.reverse_random_half_in_place(model.test_w_quizzes)

    models.append(model)


nb_parameters = sum(p.numel() for p in models[0].parameters())

log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

# Compute the entropy of the training tokens

token_count = 0

for input in quiz_machine.batches(models[0], split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
        dim=(0, 1)
    )

token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
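
# exp(entropy) is the perplexity of a memoryless model that draws tokens from
# the empirical distribution of the training set; the GPTs should reach a
# lower train_perplexity than this baseline.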

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        quiz_machine.batches(models[0], split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            quiz_machine.batches(models[0], split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

######################################################################

nb_new_c_quizzes_for_train = args.nb_train_samples // 50
nb_new_c_quizzes_for_test = args.nb_test_samples // 50

log_string(
    f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}"
)

######################################################################

if args.dirty_debug:
    args.accuracy_to_make_c_quizzes = 0.0
    nb_new_c_quizzes_for_train = 100
    nb_new_c_quizzes_for_test = 10

    def standard_validity(logproba):
        l = logproba.sort(dim=-1).values
        return l[:, 0] < math.log(0.5)


######################################################################
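
# Main loop: at each epoch the weakest models (one per GPU) are trained for one
# pass over their w_quizzes in parallel threads, their w_quizzes are then
# renewed, and once every model reaches args.accuracy_to_make_c_quizzes a new
# batch of c_quizzes is generated and stored.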

for n_epoch in range(args.nb_epochs):
    log_string(f"--- epoch {n_epoch} ----------------------------------------")

    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
    log_string(f"current_test_accuracies {cta}")

    ##################################################
    # Select, improve, and eval the worst models

    ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))

    weakest_models = ranked_models[: len(gpus)]

    threads = []

    for gpu, model in zip(gpus, weakest_models):
        log_string(f"training model {model.id}")

        t = threading.Thread(
            target=one_epoch, daemon=True, args=(model, quiz_machine, gpu)
        )

        threads.append(t)

        t.start()

    for t in threads:
        t.join()

    ##################################################
    # Replace a fraction of the w_quizzes with fresh ones

    log_string(
        f"cache_w_quizzes contains {quiz_machine.problem.nb_cached_quizzes()} quizzes"
    )

    # Renew entirely the train set

    for model in weakest_models:
        quiz_machine.renew_w_quizzes(model, args.nb_train_samples)

    ##################################################
    # If all the models are good enough, generate new quizzes and
    # re-compute the test errors

    if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
        create_c_quizzes(
            models,
            quiz_machine,
            nb_for_train=nb_new_c_quizzes_for_train,
            nb_for_test=nb_new_c_quizzes_for_test,
        )

######################################################################