# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks, problems

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
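
# Note: allow_tf32 above lets CUDA matmuls use the TensorFloat-32 format on
# Ampere-class and newer GPUs, which is markedly faster at a small cost in
# mantissa precision; it has no effect on CPU runs.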

######################################################################


def str2bool(x):
    x = str(x).lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError(f"cannot interpret {x} as a boolean")
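
# Usage sketch (illustrative): flags declared with type=str2bool below, such
# as --legacy_lr_schedule, accept spellings like "--legacy_lr_schedule no" or
# "--legacy_lr_schedule 1" on the command line.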

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)

parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=50)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

########################################

parser.add_argument("--nb_warmup_iter", type=int, default=100)

parser.add_argument("--nb_decay_iter", type=int, default=5000)

parser.add_argument("--learning_rate", type=float, default=6e-4)

parser.add_argument("--min_learning_rate", type=float, default=6e-5)

# legacy learning-rate schedule

parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

parser.add_argument("--legacy_large_lr", type=float, default=1e-4)

parser.add_argument("--legacy_small_lr", type=float, default=2e-5)

parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--attention", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_lines", type=int, default=None)

parser.add_argument("--caterpillar_height", type=int, default=None)

parser.add_argument("--rho", type=float, default=0.0)

parser.add_argument("--dim_rec_v", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
# rpl

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################
# grid

parser.add_argument("--grid_size", type=int, default=6)

##############################
# picoclvr

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")

##############################
# maze

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################
# snake

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################
# stack

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################
# expr

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################
# memory

parser.add_argument("--memory_len_total", type=int, default=32)

##############################
# mixing

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################

args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

if args.result_dir is None:
    args.result_dir = f"results_{args.task}_{args.model}"
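
# Illustrative invocation (assuming this file is saved as main.py; the flag
# values are examples, not recommendations):
#
#   python main.py --task mnist --result_dir /tmp/results_mnist
#
# Any option left at its default None is filled in below from the per-task
# and per-model default tables.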

######################################################################

default_task_args = {
    "addition": {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "byheart": {
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "expr": {
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "grid": {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "qmlp": {
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "guessop": {
        "nb_train_samples": 1000000,
        "nb_test_samples": 10000,
    },
    "learnop": {
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "maze": {
        "nb_train_samples": 100000,
        "nb_test_samples": 10000,
    },
    "picoclvr": {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "rpl": {
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
    },
    "snake": {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "stack": {
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
    },
    "twotargets": {
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
    },
    "memory": {
        "nb_train_samples": 25000,
        "nb_test_samples": 10000,
    },
    "mixing": {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
    },
    "mnist": {
        "nb_train_samples": 60000,
        "nb_test_samples": 10000,
    },
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
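
# For instance, --task mnist with no explicit --nb_train_samples picks up
# nb_train_samples=60000 from the table above; values given on the command
# line are no longer None and are therefore left untouched.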

######################################################################

default_model_args = {
        "attention": "caterpillar",
        "caterpillar_height": 4,
        "attention": "caterpillar",
        "caterpillar_height": 4,
        "dim_rec_v": 64,  # dim_model / nb_heads
        "attention": "caterpillar",
        "caterpillar_height": 32,
        "attention": "caterpillar",
        "attention": "caterpillar",

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


with os.popen("sha256sum *.py") as f:
    for l in f:
        log_string(f"sha256sum {l.strip()}")

now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


def get_lr(n_epoch, it):
    if args.legacy_lr_schedule:
        # my crude scheduling to compare to previous baseline, added
        # a linear warmup on top of it
        if it < args.nb_warmup_iter:
            return args.legacy_large_lr * it / args.nb_warmup_iter
        elif n_epoch < args.legacy_nb_epoch_large_lr:
            return args.legacy_large_lr
        else:
            return args.legacy_small_lr

    # otherwise, nanoGPT-style cosine schedule:
    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
    # 2) if it > nb_decay_iter, return min learning rate
    if it > args.nb_decay_iter:
        return args.min_learning_rate
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - args.nb_warmup_iter) / (
        args.nb_decay_iter - args.nb_warmup_iter
    )
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return args.min_learning_rate + coeff * (
        args.learning_rate - args.min_learning_rate
    )
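
# Sanity check of the cosine branch (illustrative arithmetic, assuming the
# defaults above and --legacy_lr_schedule no, since the legacy schedule is
# enabled by default):
#   get_lr(0, 50)    -> 3e-4    halfway through the linear warmup
#   get_lr(0, 2550)  -> 3.3e-4  mid-decay, min_lr + 0.5 * (lr - min_lr)
#   get_lr(0, 10000) -> 6e-5    clamped at min_learning_rate past nb_decay_iter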

######################################################################


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)
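
# With --picocvlr_prune_properties train+eval, training never sees a property
# that mentions "green" together with "left" or "right", while evaluation
# keeps only such properties, probing generalization to held-out
# combinations; with eval, training sees everything but evaluation is still
# restricted to those properties.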

######################################################################

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )
    args.max_percents_of_test_in_train = -1  # -1 disables the test-in-train check below

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(len_total=args.memory_len_total),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device,
    )

elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device,
    )

elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        size=args.grid_size,
        logger=log_string,
        device=device,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_lines=args.nb_lines,
    caterpillar_height=args.caterpillar_height,
    dim_rec_v=args.dim_rec_v,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)
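
# Resuming a run therefore amounts to re-issuing the same command: the
# checkpoint written at the end of every epoch (see the bottom of this file)
# is reloaded here together with the RNG states, unless --no_checkpoint is
# given.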

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
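
# The entropy is in nats, so exp(entropy) is the perplexity of the best
# memoryless (unigram) model of the training tokens; a uniform distribution
# over vocabulary_size tokens would give exactly vocabulary_size. The
# per-token prediction losses below can be read against this baseline.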

######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = None

it = 0
nb_samples_seen = 0

for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0

    for input in task.batches(split="train"):
        model.reset_inner_loss()
        input = input.to(device)

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        inner_loss = model.get_inner_loss()
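
        # Shape note on the two lines above: output is (batch, length,
        # vocabulary_size) and F.cross_entropy wants the class dimension
        # second, hence the transpose to (batch, vocabulary_size, length).
        # The one-position shift that makes position t a prediction of
        # input[t] is assumed to be handled inside mygpt.MyGPT.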

        acc_train_loss += loss.item() * input.size(0)
        acc_train_inner_loss += inner_loss.item() * input.size(0)

        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        total_loss = loss + (args.rho * inner_loss if args.rho > 0 else 0.0)
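
        # --rho weighs the model's auxiliary "inner" loss (whatever
        # get_inner_loss() reports) into the objective; with the default
        # --rho 0.0 only the cross-entropy above is optimized.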

        it += 1
        lr = get_lr(n_epoch, it)
        for param_group in optimizer.param_groups:
            param_group["lr"] = lr

        # log_string(f"learning_rate {lr}")

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)
907 f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
910 task.produce_results(
913 result_dir=args.result_dir,
915 deterministic_synthesis=args.deterministic_synthesis,
918 train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
919 test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
922 f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"

        time_current_result = datetime.datetime.now()
        if time_pred_result is not None:
            log_string(
                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
            )
        time_pred_result = time_current_result
933 "nb_epochs_finished": n_epoch + 1,
934 "model_state": model.state_dict(),
935 "rng_state": torch.get_rng_state(),
938 if torch.cuda.is_available():
939 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
941 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
942 torch.save(checkpoint, checkpoint_name)
943 log_string(f"saved checkpoint {checkpoint_name}")
945 ######################################################################