# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks, problems
######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################
parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

########################################

parser.add_argument("--nb_epochs", type=int, default=50)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-4)

parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--resume", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################
parser.add_argument("--filetask_train_file", type=str, default=None)

parser.add_argument("--filetask_test_file", type=str, default=None)

##############################

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################

parser.add_argument("--grid_size", type=int, default=6)

parser.add_argument("--grid_fraction_play", type=float, default=0)

##############################

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")

##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################

parser.add_argument("--byheart_separation", type=int, default=1)

##############################

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)

##############################

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

##############################

parser.add_argument("--greed_height", type=int, default=5)

parser.add_argument("--greed_width", type=int, default=7)

parser.add_argument("--greed_T", type=int, default=25)

parser.add_argument("--greed_nb_walls", type=int, default=5)

parser.add_argument("--greed_nb_coins", type=int, default=2)

######################################################################
args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

if args.result_dir is None:
    args.result_dir = f"results_{args.task}"

######################################################################
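
# Per-task default values, looked up by task name and applied below to the
# arguments (e.g. --nb_train_samples, --nb_test_samples) that were left
# unspecified on the command line.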
default_task_args = {
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 50000,
    "nb_test_samples": 10000,
    "nb_train_samples": 2500000,
    "nb_test_samples": 10000,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 100000,
    "nb_test_samples": 1000,
    "nb_train_samples": 1000000,
    "nb_test_samples": 10000,
    "nb_train_samples": 50000,
    "nb_test_samples": 10000,
    "nb_train_samples": 100000,
    "nb_test_samples": 10000,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 2500000,
    "nb_test_samples": 10000,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 100000,
    "nb_test_samples": 1000,
    "nb_train_samples": 50000,
    "nb_test_samples": 10000,
    "nb_train_samples": 25000,
    "nb_test_samples": 1000,
    "nb_train_samples": 250000,
    "nb_test_samples": 10000,
    "nb_train_samples": 60000,
    "nb_test_samples": 10000,
    "nb_train_samples": 25000,
    "nb_test_samples": 10000,
}
if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
######################################################################
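
# Named model configurations: the --model name selects default values for
# the architecture arguments (--dim_model, --dim_keys, --dim_hidden,
# --nb_heads, --nb_blocks) that were left unspecified on the command line.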
default_model_args = {
}

if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    print(f"result directory {args.result_dir} already exists")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)
######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()
    print(t + s)
    sys.stdout.flush()


log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")
######################################################################


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)
######################################################################
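
# --batch_size is the number of samples per optimizer step;
# --physical_batch_size is the number of samples per forward/backward pass.
# When the latter is smaller, gradients are accumulated over several
# physical batches, hence the divisibility requirements below.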
if args.physical_batch_size is None:
    args.physical_batch_size = args.batch_size
else:
    assert args.batch_size % args.physical_batch_size == 0

assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
if args.task == "file":
    assert (
        args.filetask_train_file is not None and args.filetask_test_file is not None
    ), "You have to specify the task train and test files"
    task = tasks.TaskFromFile(
        args.filetask_train_file,
        args.filetask_test_file,
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )
    args.max_percents_of_test_in_train = 0
elif args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(separation=args.byheart_separation),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )
    args.max_percents_of_test_in_train = -1
elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )
elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )
elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
    )
elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.physical_batch_size,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
    )
elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        size=args.grid_size,
        fraction_play=args.grid_fraction_play,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        result_dir=args.result_dir,
    )

elif args.task == "greed":
    task = tasks.Greed(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.greed_height,
        width=args.greed_width,
        T=args.greed_T,
        nb_walls=args.greed_nb_walls,
        nb_coins=args.greed_nb_coins,
    )

else:
    raise ValueError(f"Unknown task {args.task}")
######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)
######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)
######################################################################

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train", desc="train-entropy"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
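
# exp(entropy of the marginal token distribution) is the perplexity of a
# model that predicts every token from the train-set frequencies alone; it
# serves as a reference for the prediction perplexities logged during training.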
######################################################################
# A bit of paranoia never hurts

if args.max_percents_of_test_in_train >= 0:

    # Iterate over the batches and yield the samples as tuples, grouped
    # into sets of at most cs elements, so that the train/test overlap
    # can be checked chunk by chunk.
    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(
        task.batches(split="test", desc="test-check"), 25000
    ):
        in_train = set()
        for train_subset in subsets_as_tuples(
            task.batches(split="train", desc="train-check"), 25000
        ):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
##############################
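
# The learning-rate schedule is either "cos" (half-cosine decay from
# --learning_rate towards 0 over --nb_epochs) or a comma-separated list of
# "epoch: lr" pairs, e.g. "10: 2e-5,30: 4e-6", each value taking effect
# from the given epoch onward.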
if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")
##############################
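
# If the requested number of epochs has already been reached (e.g. when
# restarting from the checkpoint of a finished run), only regenerate the
# evaluation results.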
if nb_epochs_finished >= args.nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )
time_pred_result = None

for n_epoch in range(nb_epochs_finished, args.nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)

        # Step the optimizer once every args.batch_size samples, accumulating
        # gradients over the intermediate physical batches.
        if nb_train_samples % args.batch_size == 0:
            optimizer.zero_grad()

        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)

        nb_train_samples += input.size(0)

        loss.backward()

        if nb_train_samples % args.batch_size == 0:
            optimizer.step()
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0
        nb_samples_accumulated = 0

        for input in task.batches(split="test"):
            input = input.to(device)

            bs = model(mygpt.BracketedSequence(input))
            output = bs.x

            loss = F.cross_entropy(output.transpose(1, 2), input)

            acc_test_loss += loss.item() * input.size(0)

            nb_test_samples += input.size(0)

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        time_current_result = datetime.datetime.now()
        if time_pred_result is not None:
            log_string(
                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
            )
        time_pred_result = time_current_result
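
    # Save a checkpoint at the end of each epoch so that an interrupted run
    # can resume: number of finished epochs, model weights, and RNG states.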
    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################