# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks, problems

######################################################################
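
# Run on the GPU when one is available; enabling TF32 matmuls trades a little
# precision for speed on recent NVIDIA GPUs.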
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
36 help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
39 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
41 parser.add_argument("--result_dir", type=str, default=None)
43 parser.add_argument("--seed", type=int, default=0)
45 parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
47 ########################################
49 parser.add_argument("--nb_epochs", type=int, default=25)
51 parser.add_argument("--batch_size", type=int, default=None)
53 parser.add_argument("--nb_train_samples", type=int, default=None)
55 parser.add_argument("--nb_test_samples", type=int, default=None)
57 parser.add_argument("--optim", type=str, default="adam")
59 parser.add_argument("--learning_rate", type=float, default=1e-4)
61 parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
63 ########################################
65 parser.add_argument("--model", type=str, default=None)
67 parser.add_argument("--dim_model", type=int, default=None)
69 parser.add_argument("--dim_keys", type=int, default=None)
71 parser.add_argument("--dim_hidden", type=int, default=None)
73 parser.add_argument("--nb_heads", type=int, default=None)
75 parser.add_argument("--nb_blocks", type=int, default=None)
77 parser.add_argument("--dropout", type=float, default=0.1)
79 ########################################
81 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
83 parser.add_argument("--no_checkpoint", action="store_true", default=False)
85 parser.add_argument("--overwrite_results", action="store_true", default=False)
87 parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
89 ##############################
92 parser.add_argument("--filetask_train_file", type=str, default=None)
94 parser.add_argument("--filetask_test_file", type=str, default=None)
96 ##############################
99 parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
101 parser.add_argument("--rpl_max_input", type=int, default=9)
103 parser.add_argument("--rpl_prog_len", type=int, default=8)
105 parser.add_argument("--rpl_nb_runs", type=int, default=5)
107 parser.add_argument("--rpl_no_prog", action="store_true", default=False)
109 ##############################
112 parser.add_argument("--grid_size", type=int, default=6)
114 parser.add_argument("--grid_fraction_play", type=float, default=0)
116 ##############################
119 parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
121 parser.add_argument("--picoclvr_height", type=int, default=12)
123 parser.add_argument("--picoclvr_width", type=int, default=16)
125 parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
127 ##############################
130 parser.add_argument("--maze_height", type=int, default=13)
132 parser.add_argument("--maze_width", type=int, default=21)
134 parser.add_argument("--maze_nb_walls", type=int, default=15)
136 ##############################
139 parser.add_argument("--snake_height", type=int, default=9)
141 parser.add_argument("--snake_width", type=int, default=12)
143 parser.add_argument("--snake_nb_colors", type=int, default=5)
145 parser.add_argument("--snake_length", type=int, default=200)
147 ##############################
150 parser.add_argument("--stack_nb_steps", type=int, default=100)
152 parser.add_argument("--stack_nb_stacks", type=int, default=3)
154 parser.add_argument("--stack_nb_digits", type=int, default=3)
156 parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
158 ##############################
161 parser.add_argument("--expr_nb_variables", type=int, default=5)
163 parser.add_argument("--expr_sequence_length", type=int, default=40)
165 parser.add_argument("--expr_operand_max", type=int, default=9)
167 parser.add_argument("--expr_result_max", type=int, default=99)
169 parser.add_argument("--expr_input_file", type=str, default=None)
171 ##############################
174 parser.add_argument("--mixing_hard", action="store_true", default=False)
176 parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
178 ##############################
181 parser.add_argument("--greed_height", type=int, default=5)
183 parser.add_argument("--greed_width", type=int, default=7)
185 parser.add_argument("--greed_T", type=int, default=25)
187 parser.add_argument("--greed_nb_walls", type=int, default=5)
189 parser.add_argument("--greed_nb_coins", type=int, default=2)
191 ######################################################################

args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

if args.result_dir is None:
    args.result_dir = f"results_{args.task}"
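
# Illustrative invocation (assuming this script is saved as main.py):
#   python main.py --task=maze --nb_epochs=5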

######################################################################
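
# Per-task defaults; each entry only fills in the arguments that were left at
# None on the command line.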
default_task_args = {
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
        "nb_train_samples": 1000000,
        "nb_test_samples": 10000,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
        "nb_train_samples": 100000,
        "nb_test_samples": 10000,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,
        "nb_train_samples": 25000,
        "nb_test_samples": 1000,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,
        "nb_train_samples": 60000,
        "nb_test_samples": 10000,
        "nb_train_samples": 25000,
        "nb_test_samples": 10000,
}

if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################
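
# Named model-size presets, filling in whichever architecture arguments
# (--dim_model, --dim_keys, --dim_hidden, --nb_heads, --nb_blocks) were left
# at None.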
default_model_args = {
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)

######################################################################
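
# log_string prepends a time stamp and writes the line both to the log file
# and to stdout.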
def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()
    print(t + s)
398 log_string(f"argv {' '.join(sys.argv)}")
401 log_string(f"args.{n} {getattr(args, n)}")

######################################################################
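
# picoclvr_pruner_horizontal_green rejects scene descriptions that place a
# green object to the left or right of another; depending on
# --picocvlr_prune_properties, such descriptions are removed from training
# and/or are the only ones used at evaluation.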
def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################
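
# Instantiate the task selected by --task; it provides the train/test batches,
# the vocabulary, and produce_results() for evaluation.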
425 if args.task == "file":
427 args.filetask_train_file is not None and args.filetask_test_file is not None
428 ), "You have to specify the task train and test files"
429 task = tasks.TaskFromFile(
430 args.filetask_train_file,
431 args.filetask_test_file,
432 nb_train_samples=args.nb_train_samples,
433 nb_test_samples=args.nb_test_samples,
434 batch_size=args.batch_size,
438 args.max_percents_of_test_in_train = 0
440 elif args.task == "byheart":
441 task = tasks.SandBox(
442 problem=problems.ProblemByHeart(),
443 nb_train_samples=args.nb_train_samples,
444 nb_test_samples=args.nb_test_samples,
445 batch_size=args.batch_size,
449 args.max_percents_of_test_in_train = -1
451 elif args.task == "learnop":
452 task = tasks.SandBox(
453 problem=problems.ProblemLearnOperator(),
454 nb_train_samples=args.nb_train_samples,
455 nb_test_samples=args.nb_test_samples,
456 batch_size=args.batch_size,
462 elif args.task == "guessop":
463 task = tasks.SandBox(
464 problem=problems.ProblemGuessOperator(),
465 nb_train_samples=args.nb_train_samples,
466 nb_test_samples=args.nb_test_samples,
467 batch_size=args.batch_size,
473 elif args.task == "twotargets":
474 task = tasks.SandBox(
475 problem=problems.ProblemTwoTargets(),
476 nb_train_samples=args.nb_train_samples,
477 nb_test_samples=args.nb_test_samples,
478 batch_size=args.batch_size,
483 elif args.task == "memory":
484 task = tasks.SandBox(
485 problem=problems.ProblemMemory(),
486 nb_train_samples=args.nb_train_samples,
487 nb_test_samples=args.nb_test_samples,
488 batch_size=args.batch_size,
493 elif args.task == "mixing":
494 task = tasks.SandBox(
495 problem=problems.ProblemMixing(
496 hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
498 nb_train_samples=args.nb_train_samples,
499 nb_test_samples=args.nb_test_samples,
500 batch_size=args.batch_size,
505 elif args.task == "addition":
506 task = tasks.SandBox(
507 problem=problems.ProblemAddition(),
508 nb_train_samples=args.nb_train_samples,
509 nb_test_samples=args.nb_test_samples,
510 batch_size=args.batch_size,
515 elif args.task == "picoclvr":
516 task = tasks.PicoCLVR(
517 nb_train_samples=args.nb_train_samples,
518 nb_test_samples=args.nb_test_samples,
519 batch_size=args.batch_size,
520 height=args.picoclvr_height,
521 width=args.picoclvr_width,
522 nb_colors=args.picoclvr_nb_colors,
525 pruner_train=picoclvr_pruner_train,
526 pruner_eval=picoclvr_pruner_eval,
529 elif args.task == "mnist":
531 nb_train_samples=args.nb_train_samples,
532 nb_test_samples=args.nb_test_samples,
533 batch_size=args.batch_size,
537 elif args.task == "maze":
539 nb_train_samples=args.nb_train_samples,
540 nb_test_samples=args.nb_test_samples,
541 batch_size=args.batch_size,
542 height=args.maze_height,
543 width=args.maze_width,
544 nb_walls=args.maze_nb_walls,
548 elif args.task == "snake":
550 nb_train_samples=args.nb_train_samples,
551 nb_test_samples=args.nb_test_samples,
552 batch_size=args.batch_size,
553 height=args.snake_height,
554 width=args.snake_width,
555 nb_colors=args.snake_nb_colors,
556 length=args.snake_length,
557 prompt_length=args.snake_length // 2,
561 elif args.task == "stack":
563 nb_train_samples=args.nb_train_samples,
564 nb_test_samples=args.nb_test_samples,
565 batch_size=args.batch_size,
567 nb_steps=args.stack_nb_steps,
568 nb_stacks=args.stack_nb_stacks,
569 nb_digits=args.stack_nb_digits,
570 fraction_values_for_train=args.stack_fraction_values_for_train,
574 elif args.task == "expr":
576 nb_train_samples=args.nb_train_samples,
577 nb_test_samples=args.nb_test_samples,
578 nb_variables=args.expr_nb_variables,
579 sequence_length=args.expr_sequence_length,
580 operand_max=args.expr_operand_max,
581 result_max=args.expr_result_max,
582 batch_size=args.batch_size,
586 elif args.task == "rpl":
588 nb_train_samples=args.nb_train_samples,
589 nb_test_samples=args.nb_test_samples,
590 batch_size=args.batch_size,
591 nb_starting_values=args.rpl_nb_starting_values,
592 max_input=args.rpl_max_input,
593 prog_len=args.rpl_prog_len,
594 nb_runs=args.rpl_nb_runs,
595 no_prog=args.rpl_no_prog,
600 elif args.task == "grid":
602 nb_train_samples=args.nb_train_samples,
603 nb_test_samples=args.nb_test_samples,
604 batch_size=args.batch_size,
606 fraction_play=args.grid_fraction_play,
611 elif args.task == "qmlp":
613 nb_train_samples=args.nb_train_samples,
614 nb_test_samples=args.nb_test_samples,
615 batch_size=args.batch_size,
616 result_dir=args.result_dir,
621 elif args.task == "greed":
623 nb_train_samples=args.nb_train_samples,
624 nb_test_samples=args.nb_test_samples,
625 batch_size=args.batch_size,
626 height=args.greed_height,
627 width=args.greed_width,
629 nb_walls=args.greed_nb_walls,
630 nb_coins=args.greed_nb_coins,
636 raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################
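
# Build the GPT from mygpt with the hyper-parameters resolved above.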
model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################
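
# Unless --no_checkpoint is set, try to resume from a saved checkpoint
# (weights, RNG states, number of finished epochs).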
nb_epochs_finished = 0

if args.no_checkpoint:
    log_string(f"not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################
692 if args.task == "expr" and args.expr_input_file is not None:
693 task.produce_results(
694 n_epoch=nb_epochs_finished,
696 result_dir=args.result_dir,
698 deterministic_synthesis=args.deterministic_synthesis,
699 input_file=args.expr_input_file,

######################################################################

# Compute the entropy of the training tokens
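# Its exponential is the perplexity of a memoryless model that predicts each
# token with its empirical frequency, and serves as a reference point for the
# train/test perplexities logged below.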
token_count = 0
for input in task.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)

######################################################################
# A bit of paranoia never hurts
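# Count how many test sequences appear verbatim in the train set, and abort if
# they exceed --max_percents_of_test_in_train percent of the test set.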

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        task.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            task.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################
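
# The learning rate schedule is either "cos" (cosine decay over --nb_epochs)
# or a comma-separated list of "epoch: lr" steps; e.g. the default
# "10: 2e-5,30: 4e-6" keeps --learning_rate until epoch 10, then 2e-5, then
# 4e-6 from epoch 30 on.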
752 if args.learning_rate_schedule == "cos":
753 learning_rate_schedule = {}
754 for n_epoch in range(args.nb_epochs):
755 u = n_epoch / args.nb_epochs * math.pi
756 learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
761 tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
765 learning_rate_schedule = {}
766 learning_rate = args.learning_rate
767 for n_epoch in range(args.nb_epochs):
769 learning_rate = u[n_epoch]
770 learning_rate_schedule[n_epoch] = learning_rate
772 log_string(f"learning_rate_schedule {learning_rate_schedule}")
774 ##############################
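
# If training is already complete, just regenerate the final results;
# otherwise run one epoch at a time: an optimizer pass over the train set, a
# test loss, the task-specific evaluation, and a checkpoint.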
if nb_epochs_finished >= args.nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        result_dir=args.result_dir,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = None

nb_samples_seen = 0

for n_epoch in range(nb_epochs_finished, args.nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")
794 if args.optim == "sgd":
795 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
796 elif args.optim == "adam":
797 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
798 elif args.optim == "adamw":
799 optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
801 raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            result_dir=args.result_dir,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        time_current_result = datetime.datetime.now()
        if time_pred_result is not None:
            log_string(
                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
            )
        time_pred_result = time_current_result
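
    # Save everything needed to resume: number of finished epochs, model
    # weights, and RNG states.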
855 "nb_epochs_finished": n_epoch + 1,
856 "model_state": model.state_dict(),
857 "rng_state": torch.get_rng_state(),
860 if torch.cuda.is_available():
861 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################