# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks, problems

######################################################################
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
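
# TF32 matmuls (enabled above when a GPU is present) trade a few mantissa
# bits of float32 precision for a substantial speed-up on Ampere-class and
# newer hardware.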
######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",  # assumed default; any task from the list below works
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-4)

parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################

parser.add_argument("--grid_size", type=int, default=6)

##############################

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
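
# NB: "picocvlr" (sic) matches the identifier used everywhere below;
# renaming it would break existing command lines.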

##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################

args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

if args.result_dir is None:
    args.result_dir = f"results_{args.task}"

######################################################################

default_task_args = {
    # The per-task "model" and "batch_size" presets were missing here; the
    # values below are assumptions following the upstream defaults.
    "addition": {"model": "352M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "byheart": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "expr": {"model": "352M", "batch_size": 25, "nb_train_samples": 2500000, "nb_test_samples": 10000},
    "grid": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "qmlp": {"model": "37M", "batch_size": 10, "nb_train_samples": 100000, "nb_test_samples": 1000},
    "guessop": {"model": "352M", "batch_size": 25, "nb_train_samples": 1000000, "nb_test_samples": 10000},
    "learnop": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "maze": {"model": "37M", "batch_size": 5, "nb_train_samples": 100000, "nb_test_samples": 10000},
    "picoclvr": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "rpl": {"model": "352M", "batch_size": 5, "nb_train_samples": 2500000, "nb_test_samples": 10000},
    "snake": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "stack": {"model": "37M", "batch_size": 25, "nb_train_samples": 100000, "nb_test_samples": 1000},
    "twotargets": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "memory": {"model": "37M", "batch_size": 25, "nb_train_samples": 5000, "nb_test_samples": 1000},
    "mixing": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "mnist": {"model": "37M", "batch_size": 10, "nb_train_samples": 60000, "nb_test_samples": 10000},
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################

# Geometry presets per model size; the entries were missing here and are
# assumptions following the upstream defaults.
default_model_args = {
    "17K": {"dim_model": 32, "dim_keys": 32, "dim_hidden": 32, "nb_heads": 2, "nb_blocks": 2},
    "4M": {"dim_model": 256, "dim_keys": 32, "dim_hidden": 1024, "nb_heads": 4, "nb_blocks": 6},
    "37M": {"dim_model": 512, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 12},
    "122M": {"dim_model": 768, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 24},
    "352M": {"dim_model": 1024, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 48},
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)
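
# With "train+eval" the properties placing "green" to the left or right of
# something are removed from the training data while the evaluation keeps
# only those, so it measures generalization to a held-out relation; with
# "eval" the training data is untouched (this assumes a pruner keeps a
# property when it returns True).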

######################################################################

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )
    args.max_percents_of_test_in_train = -1  # overlap check disabled: sequences are learned by heart

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device,
    )

elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device,
    )

elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        size=args.grid_size,
        logger=log_string,
        device=device,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

nb_epochs = args.nb_epochs if args.nb_epochs > 0 else 25  # 25 = the argparse default

######################################################################
# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
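
# exp(entropy) is the perplexity of the best memoryless model of the
# training tokens; the per-epoch prediction perplexities logged below
# should drop well under it once the model exploits context.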

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")

##############################

nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

for n_epoch in range(nb_epochs_finished, nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        # min(100, .) caps the exponent so exp() cannot overflow
        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )
780 "nb_epochs_finished": n_epoch + 1,
781 "model_state": model.state_dict(),
782 "rng_state": torch.get_rng_state(),
785 if torch.cuda.is_available():
786 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
788 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
789 torch.save(checkpoint, checkpoint_name)
790 log_string(f"saved checkpoint {checkpoint_name}")
792 ######################################################################
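
# Illustrative invocation (assuming this file is saved as main.py):
#
#   python main.py --task=byheart --nb_epochs=5
#
# --model, --batch_size, and the sample counts then fall back to the
# per-task defaults defined above.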