# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks, problems

######################################################################
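
# argparse's type= callback receives the raw option string, so boolean
# flags such as --force_cpu and --legacy_lr_schedule go through
# str2bool below, which accepts 1/true/yes and 0/false/no.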

def str2bool(x):
    x = str(x).lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError(f"cannot interpret {x} as a boolean")


parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",  # assumed default
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

parser.add_argument("--force_cpu", type=str2bool, default=False)

########################################

parser.add_argument("--nb_epochs", type=int, default=50)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

########################################

parser.add_argument("--nb_warmup_iter", type=int, default=100)

parser.add_argument("--nb_decay_iter", type=int, default=5000)

parser.add_argument("--learning_rate", type=float, default=6e-4)

parser.add_argument("--min_learning_rate", type=float, default=6e-5)

parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

parser.add_argument("--legacy_large_lr", type=float, default=1e-4)

parser.add_argument("--legacy_small_lr", type=float, default=2e-5)

parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--attention", type=str, default=None)

parser.add_argument("--memex_proba", type=float, default=0)

parser.add_argument("--memex_nb_epochs", type=float, default=1)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_lines", type=int, default=None)

parser.add_argument("--caterpillar_height", type=int, default=None)

parser.add_argument("--gate_dropout_proba", type=float, default=0.0)

parser.add_argument("--gate_dropout_sync", type=str2bool, default=False)

parser.add_argument("--gate_dropout_replace", type=str2bool, default=False)

parser.add_argument("--rho_inner_loss", type=float, default=0.0)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--continue_training", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# rpl options

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################
# grid options

parser.add_argument("--grid_size", type=int, default=6)

parser.add_argument("--grid_nb_colors", type=int, default=6)

parser.add_argument("--grid_nb_shapes", type=int, default=6)

##############################
# picoclvr options

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")

##############################
# maze options

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################
# snake options

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################
# stack options

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################
# expr options

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################
# memory options

parser.add_argument("--memory_len_total", type=int, default=32)

##############################
# mixing options

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################
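
# Unrecognized --key=value options are kept by parse_known_args and
# turned into the sup_args dict below; they act as ad-hoc switches
# (e.g. "calibrate", consulted further down) without growing the parser.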

# args = parser.parse_args()

args, sup_args = parser.parse_known_args()

sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])

if args.result_dir is None:
    args.result_dir = f"results_{args.task}_{args.model}"

######################################################################

if not args.force_cpu and torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

# Per-task defaults, filled in below for any option still at None
# (the "model" and "batch_size" presets are assumptions; the sample
# counts are from the source).

default_task_args = {
    "addition": {"model": "352M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "byheart": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "expr": {"model": "352M", "batch_size": 25, "nb_train_samples": 2500000, "nb_test_samples": 10000},
    "grid": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "qmlp": {"model": "37M", "batch_size": 10, "nb_train_samples": 100000, "nb_test_samples": 1000},
    "guessop": {"model": "352M", "batch_size": 25, "nb_train_samples": 1000000, "nb_test_samples": 10000},
    "learnop": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "maze": {"model": "37M", "batch_size": 5, "nb_train_samples": 100000, "nb_test_samples": 10000},
    "picoclvr": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "rpl": {"model": "352M", "batch_size": 5, "nb_train_samples": 2500000, "nb_test_samples": 10000},
    "snake": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "stack": {"model": "37M", "batch_size": 25, "nb_train_samples": 100000, "nb_test_samples": 1000},
    "twotargets": {"model": "37M", "batch_size": 25, "nb_train_samples": 50000, "nb_test_samples": 10000},
    "memory": {"model": "37M", "batch_size": 100, "nb_train_samples": 25000, "nb_test_samples": 10000},
    "mixing": {"model": "37M", "batch_size": 25, "nb_train_samples": 250000, "nb_test_samples": 10000},
    "mnist": {"model": "37M", "batch_size": 10, "nb_train_samples": 60000, "nb_test_samples": 10000},
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################

# Per-size model presets; the "-C" entries use the caterpillar
# attention (the attention types and the caterpillar heights 4, 4, 32
# are from the source; entry names and dimensions are assumptions).

default_model_args = {
    "17K": {"attention": "mha", "dim_model": 32, "dim_keys": 32, "dim_hidden": 32, "nb_heads": 2, "nb_blocks": 2},
    "17K-C": {"attention": "caterpillar", "dim_model": 32, "dim_keys": 32, "dim_hidden": 32, "nb_heads": 2, "nb_lines": 16, "caterpillar_height": 4, "nb_blocks": 2},
    "4M": {"attention": "mha", "dim_model": 256, "dim_keys": 32, "dim_hidden": 1024, "nb_heads": 4, "nb_blocks": 6},
    "4M-C": {"attention": "caterpillar", "dim_model": 256, "dim_keys": 32, "dim_hidden": 1024, "nb_heads": 4, "nb_lines": 32, "caterpillar_height": 4, "nb_blocks": 6},
    "37M": {"attention": "mha", "dim_model": 512, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 12},
    "37M-C": {"attention": "caterpillar", "dim_model": 512, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_lines": 256, "caterpillar_height": 32, "nb_blocks": 12},
    "122M": {"attention": "mha", "dim_model": 768, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 24},
    "122M-C": {"attention": "caterpillar", "dim_model": 768, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_lines": 128, "caterpillar_height": 16, "nb_blocks": 24},
    "352M": {"attention": "mha", "dim_model": 1024, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_blocks": 48},
    "352M-C": {"attention": "caterpillar", "dim_model": 1024, "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, "nb_lines": 128, "caterpillar_height": 16, "nb_blocks": 48},
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
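
# A hypothetical invocation using these presets (illustration only,
# the script name is an assumption):
#
#   python main.py --task=byheart --model=4M-C --nb_epochs=25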

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.continue_training:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()
    print(t + s)
    sys.stdout.flush()


with os.popen("sha256sum *.py") as f:
    for l in f:
        log_string(f"sha256sum {l.strip()}")

now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

for k, v in sup_args.items():
    log_string(f'sup_args["{k}"] "{v}"')

######################################################################


def get_lr(n_epoch, it):
    if args.legacy_lr_schedule:
        # my crude scheduling to compare to previous baseline, added
        # warmup
        if it < args.nb_warmup_iter:
            return args.legacy_large_lr * it / args.nb_warmup_iter
        elif n_epoch < args.legacy_nb_epoch_large_lr:
            return args.legacy_large_lr
        else:
            return args.legacy_small_lr

    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
    # 2) if it > nb_decay_iter, return min learning rate
    if it > args.nb_decay_iter:
        return args.min_learning_rate
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - args.nb_warmup_iter) / (
        args.nb_decay_iter - args.nb_warmup_iter
    )
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return args.min_learning_rate + coeff * (
        args.learning_rate - args.min_learning_rate
    )
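
# With the defaults (nb_warmup_iter=100, nb_decay_iter=5000,
# learning_rate=6e-4, min_learning_rate=6e-5), the non-legacy schedule
# gives at the decay midpoint it=2550: coeff=0.5, hence
# lr = 6e-5 + 0.5 * (6e-4 - 6e-5) = 3.3e-4.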

######################################################################

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)
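
# With "train+eval", descriptors placing something left or right of a
# green object are removed from training and are the only ones kept at
# evaluation, probing generalization to held-out property combinations.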

######################################################################

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )
    args.max_percents_of_test_in_train = -1  # deactivate the data check

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(len_total=args.memory_len_total),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device,
    )

elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device,
    )

elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        size=args.grid_size,
        nb_shapes=args.grid_nb_shapes,
        nb_colors=args.grid_nb_colors,
        logger=log_string,
        device=device,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

if args.memex_proba > 0:
    vocabulary_size += 1  # reserve a marker token for memex batches (assumed)

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_lines=args.nb_lines,
    caterpillar_height=args.caterpillar_height,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
    logger=log_string,  # these two trailing kwargs are assumptions
    args=args,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

nb_epochs_default = 25  # assumed fallback; only used if --nb_epochs <= 0
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
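
# exp(entropy of the token marginals) is the perplexity of the best
# memoryless (unigram) model of the training tokens; it gives a
# reference point for the train/test perplexities logged each epoch.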

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
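
# The check streams both splits in chunks of at most 25000 sequences,
# so only one test chunk and one train chunk are held in memory at any
# time.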

##############################

if "calibrate" in sup_args:
    for input in task.batches(split="train", desc="calibrate"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x

    for n, m in model.named_modules():
        for a in dir(m):
            x = getattr(m, a)
            if isinstance(x, mygpt.Calibrator):
                print(f"####### ${n} | ${a} ########################")
                mean, std = x.moments()
                print("mean\n", mean, "\n")
                print("std\n", std, "\n")
                print("############################################\n\n")

    exit(0)

##############################

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = datetime.datetime.now()

it, n_batch, nb_samples_seen = 0, 0, 0  # global counters (assumed initialization)

for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0

    def add_memex(batches, memex_proba):
        for input in batches:
            if torch.rand(1).item() < memex_proba:
                # Assumed reconstruction: emit a "memex" batch, i.e. the
                # input replayed after a column of the dedicated marker
                # token (the last entry of the extended vocabulary).
                memex_marker = torch.full(
                    (input.size(0), 1), vocabulary_size - 1, device=input.device
                )
                yield torch.cat([input, memex_marker, input], dim=1)
            yield input

    train_batches = add_memex(
        task.batches(split="train"),
        args.memex_proba if n_epoch < args.memex_nb_epochs else 0.0,
    )

    for input in train_batches:
        model.reset_inner_loss()
        input = input.to(device)

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        inner_loss = model.get_inner_loss()

        acc_train_loss += loss.item() * input.size(0)
        acc_train_inner_loss += inner_loss.item() * input.size(0)

        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        total_loss = loss + (
            args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0
        )

        it += 1
        lr = get_lr(n_epoch, it)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

        # log_string(f"learning_rate {lr}")

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()

        loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")

        n_batch += 1

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        time_current_result = datetime.datetime.now()
        log_string(
            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
        )
        time_pred_result = time_current_result
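
    # Saving the RNG states alongside the weights lets a run resumed
    # with --continue_training replay the same batch sequence as an
    # uninterrupted run.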
1003 "nb_epochs_finished": n_epoch + 1,
1004 "model_state": model.state_dict(),
1005 "rng_state": torch.get_rng_state(),
1008 if torch.cuda.is_available():
1009 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
1011 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
1012 torch.save(checkpoint, checkpoint_name)
1013 log_string(f"saved checkpoint {checkpoint_name}")

######################################################################