# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

import math, sys, argparse, time, tqdm, os, datetime, warnings

import torch, torchvision

from torch.nn import functional as F

# torch.autograd.set_detect_anomaly(True) #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

import mygpt, tasks, problems

######################################################################
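
# Interpret a command-line string as a boolean; used as the `type=` of the
# boolean flags declared below.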
def str2bool(x):
    x = x.lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError(f"cannot interpret {x!r} as a boolean")

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="twotargets",
    help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default=None)

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)

parser.add_argument("--force_cpu", type=str2bool, default=False)

########################################

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--physical_batch_size", type=int, default=None)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=None)

parser.add_argument("--nb_test_samples", type=int, default=None)

parser.add_argument("--optim", type=str, default="adam")

########################################

parser.add_argument("--nb_warmup_iter", type=int, default=100)

parser.add_argument("--nb_decay_iter", type=int, default=5000)

parser.add_argument("--learning_rate", type=float, default=6e-4)

parser.add_argument("--min_learning_rate", type=float, default=6e-5)

parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

parser.add_argument("--legacy_large_lr", type=float, default=1e-4)

parser.add_argument("--legacy_small_lr", type=float, default=2e-5)

parser.add_argument("--legacy_nb_epoch_large_lr", type=float, default=10)

########################################

parser.add_argument("--model", type=str, default=None)

parser.add_argument("--attention", type=str, default=None)

parser.add_argument("--memex_proba", type=float, default=0)

parser.add_argument("--memex_nb_epochs", type=float, default=None)

parser.add_argument("--dim_model", type=int, default=None)

parser.add_argument("--dim_keys", type=int, default=None)

parser.add_argument("--dim_hidden", type=int, default=None)

parser.add_argument("--nb_heads", type=int, default=None)

parser.add_argument("--nb_lines", type=int, default=None)

parser.add_argument("--caterpillar_height", type=int, default=None)

parser.add_argument("--gate_dropout_proba", type=float, default=0.0)

parser.add_argument("--gate_dropout_sync", type=str2bool, default=False)

parser.add_argument("--gate_dropout_replace", type=str2bool, default=False)

parser.add_argument("--rho_inner_loss", type=float, default=0.0)

parser.add_argument("--nb_blocks", type=int, default=None)

parser.add_argument("--dropout", type=float, default=0.1)

########################################

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--continue_training", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################

parser.add_argument("--rpl_nb_starting_values", type=int, default=3)

parser.add_argument("--rpl_max_input", type=int, default=9)

parser.add_argument("--rpl_prog_len", type=int, default=8)

parser.add_argument("--rpl_nb_runs", type=int, default=5)

parser.add_argument("--rpl_no_prog", action="store_true", default=False)

##############################

parser.add_argument("--grid_size", type=int, default=6)

parser.add_argument("--grid_nb_colors", type=int, default=6)

parser.add_argument("--grid_nb_shapes", type=int, default=6)

##############################

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)
parser.add_argument("--picoclvr_prune_properties", type=str, default="none")
##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################

parser.add_argument("--snake_height", type=int, default=9)

parser.add_argument("--snake_width", type=int, default=12)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################

parser.add_argument("--memory_len_total", type=int, default=32)

##############################

parser.add_argument("--mixing_hard", action="store_true", default=False)

parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)

######################################################################
# args = parser.parse_args()
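
# Flags declared above are parsed normally; anything left over must be of
# the form --key=value and is collected verbatim in the sup_args dictionary.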
args, sup_args = parser.parse_known_args()

sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])

if args.result_dir is None:
    args.result_dir = f"results_{args.task}_{args.model}"
######################################################################

if not args.force_cpu and torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################
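
# Per-task defaults, applied further down only to the arguments that were
# left at None on the command line.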
default_task_args = {

        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "physical_batch_size": 10,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,

        "physical_batch_size": 25,
        "nb_train_samples": 1000000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,

        "physical_batch_size": 5,
        "nb_train_samples": 100000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "physical_batch_size": 5,
        "nb_train_samples": 2500000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 100000,
        "nb_test_samples": 1000,

        "physical_batch_size": 25,
        "nb_train_samples": 50000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 25000,
        "nb_test_samples": 10000,

        "physical_batch_size": 25,
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "physical_batch_size": 5,
        "nb_train_samples": 60000,
        "nb_test_samples": 10000,
}
if args.task in default_task_args:
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
######################################################################

default_model_args = {

        "attention": "caterpillar",

        "caterpillar_height": 4,

        "attention": "caterpillar",

        "caterpillar_height": 4,

        "attention": "caterpillar",

        "caterpillar_height": 32,

        "attention": "caterpillar",

        "attention": "caterpillar",
}
if args.model in default_model_args:
    for k, v in default_model_args[args.model].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
else:
    raise ValueError(f"Unknown model {args.model}")
######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.continue_training:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")
lambda_file = open(os.path.join(args.result_dir, "lambda.dat"), "a")

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)
######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())
    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()
    print(t + s, flush=True)

with os.popen("sha256sum *.py") as f:
    for l in f:
        log_string(f"sha256sum {l.strip()}")

now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py *.sh")

log_string(f"argv {' '.join(sys.argv)}")

for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

for k, v in sup_args.items():
    log_string(f'sup_args["{k}"] "{v}"')

######################################################################
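
# Learning-rate schedule. The legacy variant (enabled by default) ramps up
# linearly to legacy_large_lr over the warmup iterations, keeps it for the
# first legacy_nb_epoch_large_lr epochs, then drops to legacy_small_lr.
# Otherwise, a warmup + cosine-decay schedule is used; e.g. with the default
# settings (nb_warmup_iter=100, nb_decay_iter=5000, learning_rate=6e-4,
# min_learning_rate=6e-5), the rate grows linearly from 0 to 6e-4 over the
# first 100 iterations, follows a cosine from 6e-4 down to 6e-5 until
# iteration 5000, and stays at 6e-5 from then on.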
def get_lr(n_epoch, it):
    if args.legacy_lr_schedule:
        # my crude scheduling to compare to previous baseline, added
        # a warmup phase
        if it < args.nb_warmup_iter:
            return args.legacy_large_lr * it / args.nb_warmup_iter
        elif n_epoch < args.legacy_nb_epoch_large_lr:
            return args.legacy_large_lr
        else:
            return args.legacy_small_lr

    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
    # 2) if it > nb_decay_iter, return min learning rate
    if it > args.nb_decay_iter:
        return args.min_learning_rate
    # 3) in between, use cosine decay down to min learning rate
    decay_ratio = (it - args.nb_warmup_iter) / (
        args.nb_decay_iter - args.nb_warmup_iter
    )
    coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))  # coeff ranges 0..1
    return args.min_learning_rate + coeff * (
        args.learning_rate - args.min_learning_rate
    )

######################################################################
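
# Batch augmentation for the "memex" experiments. Each generator below wraps
# an iterator of training batches and, with probability memex_proba, extends
# a batch with an extra segment built from the batch itself (a replayed or
# randomly shifted slice, with marker_token marking the inserted material in
# v1 and v3). Augmented batches are yielded as (new_input, memex_mask)
# pairs, memex_mask flagging the appended tokens so that the training loop
# can weight their loss separately; other batches are yielded unchanged.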


def add_memex_v1(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            t = (
                torch.arange(1 + 2 * input.size(1), device=input.device)[None, :]
                .expand(input.size(0), -1)
            )
            u0 = torch.randint(input.size(1), (input.size(0), 1), device=input.device)
            caterpillar_length = args.nb_lines // args.caterpillar_height
            u1 = u0 + torch.randint(
                caterpillar_length, (input.size(0), 1), device=input.device
            )
            m0 = (t < u0).long()  # (assumed) positions kept verbatim
            m1 = (t >= u1).long() * (t < u1 + input.size(1)).long()
            t = t * m0 + ((-1) * (1 - m0) * (1 - m1)) + (t - u1) * m1
            m = (t < 0).long()  # (assumed) positions overwritten with the marker
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )
            new_input = input[n, t.clamp(min=0)]
            new_input = (1 - m) * new_input + m * marker_token
            memex_mask = new_input.new_zeros(new_input.size())
            memex_mask[:, input.size(1) :] = 1.0

            yield new_input, memex_mask

        else:
            yield input


# The marker token is not used for this one
def add_memex_v2(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            t = torch.arange(input.size(1) // 4, device=input.device)[None, :].expand(
                input.size(0), -1
            )
            t = t + torch.randint(
                input.size(1) - t.size(1), (t.size(0), 1), device=t.device
            )
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )

            flash = input[n, t]
            new_input = torch.cat([input, flash], dim=1)

            memex_mask = new_input.new_zeros(new_input.size())
            memex_mask[:, input.size(1) :] = 1.0

            yield new_input, memex_mask

        else:
            yield input


def add_memex_v3(batches, memex_proba, marker_token):
    for input in batches:
        if torch.rand(1).item() < memex_proba:
            memex_len = input.size(1) // 4

            t = torch.arange(input.size(1) + memex_len, device=input.device)[
                None, :
            ].expand(input.size(0), -1)
            n = torch.arange(input.size(0), device=input.device)[:, None].expand(
                -1, t.size(1)
            )

            # Call me the tensor-spaghetti master

            trigger = torch.rand(t.size(), device=t.device)
            trigger[:, -memex_len:] = 2.0
            trigger = (trigger == trigger.min(dim=1, keepdim=True).values).long()
            memex_mask = trigger.clone()
            memex_mask[:, memex_len:] -= trigger[:, :-memex_len]
            memex_mask = memex_mask.cumsum(dim=1)

            # (assumed) indices of the original tokens, frozen over the memex span
            u = (1 - memex_mask).cumsum(dim=1) - 1
            assert u.max() == input.size(1) - 1

            # (assumed wrapper) replay indices for the memex span
            v = (
                (trigger.cumsum(dim=1) - trigger).cumsum(dim=1)
                + torch.randint(
                    input.size(1) - memex_len, (input.size(0), 1), device=t.device
                )
            ) * memex_mask
            assert v.max() < input.size(1)
            u = u * (1 - memex_mask) + v * memex_mask

            new_input = input[n, u]
            assert input.max() < vocabulary_size
            assert new_input.max() < vocabulary_size
            limits = trigger.clone()
            limits[:, memex_len - 1 :] += limits[:, : -(memex_len - 1)]
            assert limits.min() == 0
            assert limits.max() == 1
            new_input = new_input * (1 - limits) + marker_token * limits
            assert marker_token < vocabulary_size
            assert new_input.max() < vocabulary_size

            yield new_input, memex_mask

        else:
            yield input


######################################################################

assert args.picoclvr_prune_properties in {"none", "train+eval", "eval"}

assert args.batch_size % args.physical_batch_size == 0


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picoclvr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picoclvr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################
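
# Build the task named by --task. Every task object exposes the same
# interface used below: batches(split=...), vocabulary_size(), and
# produce_results(...).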

if args.task == "byheart":
    task = tasks.SandBox(
        problem=problems.ProblemByHeart(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )
    args.max_percents_of_test_in_train = -1

elif args.task == "learnop":
    task = tasks.SandBox(
        problem=problems.ProblemLearnOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "guessop":
    task = tasks.SandBox(
        problem=problems.ProblemGuessOperator(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "twotargets":
    task = tasks.SandBox(
        problem=problems.ProblemTwoTargets(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "memory":
    task = tasks.SandBox(
        problem=problems.ProblemMemory(len_total=args.memory_len_total),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "mixing":
    task = tasks.SandBox(
        problem=problems.ProblemMixing(
            hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
        ),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )
elif args.task == "addition":
    task = tasks.SandBox(
        problem=problems.ProblemAddition(),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        device=device,
    )

elif args.task == "picoclvr":
    task = tasks.PicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        logger=log_string,
        device=device,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )
elif args.task == "mnist":
    task = tasks.MNIST(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        device=device,
    )

elif args.task == "maze":
    task = tasks.Maze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device,
    )

elif args.task == "snake":
    task = tasks.Snake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device,
    )
elif args.task == "stack":
    task = tasks.Stack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        logger=log_string,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_digits=args.stack_nb_digits,
        fraction_values_for_train=args.stack_fraction_values_for_train,
        device=device,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        nb_variables=args.expr_nb_variables,
        sequence_length=args.expr_sequence_length,
        operand_max=args.expr_operand_max,
        result_max=args.expr_result_max,
        batch_size=args.physical_batch_size,
        device=device,
    )

elif args.task == "rpl":
    task = tasks.RPL(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        nb_starting_values=args.rpl_nb_starting_values,
        max_input=args.rpl_max_input,
        prog_len=args.rpl_prog_len,
        nb_runs=args.rpl_nb_runs,
        no_prog=args.rpl_no_prog,
        logger=log_string,
        device=device,
    )
elif args.task == "grid":
    task = tasks.Grid(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        size=args.grid_size,
        nb_shapes=args.grid_nb_shapes,
        nb_colors=args.grid_nb_colors,
        logger=log_string,
        device=device,
    )

elif args.task == "qmlp":
    task = tasks.QMLP(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.physical_batch_size,
        result_dir=args.result_dir,
        logger=log_string,
        device=device,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()
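
# When memex augmentation is enabled, one extra token id is reserved to act
# as the segment marker, and the vocabulary grows accordingly.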
memex_marker = None  # (assumed init; the marker exists only when memex is active)
if args.memex_proba > 0:
    memex_marker = vocabulary_size
    vocabulary_size += 1

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_lines=args.nb_lines,
    caterpillar_height=args.caterpillar_height,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
    logger=log_string,
    args=args,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
        input_file=args.expr_input_file,
    )

    exit(0)

######################################################################

nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
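
# exp(entropy) is the perplexity of the best memoryless (unigram) model of
# the training tokens; it is logged below as a reference next to the
# model's train and test prediction perplexities.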

######################################################################
# A bit of paranoia never hurts
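#
# Count how many test sequences appear verbatim in the train set, comparing
# them in chunks of 25000 tuples to keep memory bounded, and abort if the
# overlap exceeds --max_percents_of_test_in_train percent.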

if args.max_percents_of_test_in_train >= 0:

    def subsets_as_tuples(batches, cs):
        s = set()
        for batch in batches:
            for x in batch:
                s.add(tuple([v.item() for v in x]))
                if len(s) == cs:
                    yield s
                    s = set()
        yield s

    nb_test, nb_in_train = 0, 0
    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
        in_train = set()
        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
            in_train.update(test_subset.intersection(train_subset))
        nb_in_train += len(in_train)
        nb_test += len(test_subset)

    log_string(
        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
    )

    assert (
        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"

##############################
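
# Optional calibration pass, triggered by passing --calibrate=... among the
# supplementary arguments: one epoch of forward passes populates any
# mygpt.Calibrator modules, whose accumulated moments are then printed.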
987 if "calibrate" in sup_args:
988 for input in task.batches(split="train", desc="calibrate"):
989 input = input.to(device)
990 output = model(mygpt.BracketedSequence(input)).x
992 for n, m in model.named_modules():
995 if isinstance(x, mygpt.Calibrator):
996 print(f"####### ${n} | ${a} ########################")
997 mean, std = x.moments()
998 print("mean\n", mean, "\n")
999 print("std\n", std, "\n")
1000 print(f"############################################\n\n")

##############################

if nb_epochs_finished >= nb_epochs:
    task.produce_results(
        n_epoch=nb_epochs_finished,
        model=model,
        result_dir=args.result_dir,
        logger=log_string,
        deterministic_synthesis=args.deterministic_synthesis,
    )

time_pred_result = datetime.datetime.now()
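
# Helpers for the memex gradient statistics: the_dot_products computes the
# squared norms and the dot product of the gradients of two losses,
# update_ave_grad maintains per-parameter exponential moving averages of
# gradients (stored as attributes called `name`), and norm sums the squared
# entries of those averages.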


def the_dot_products(value1, value2, params):
    g1g1, g1g2, g2g2 = 0, 0, 0
    for p in params:
        g1 = torch.autograd.grad(value1, p, retain_graph=True)[0]
        g2 = torch.autograd.grad(value2, p, retain_graph=True)[0]
        g1g1 += g1.pow(2).sum()[None]
        g2g2 += g2.pow(2).sum()[None]
        g1g2 += (g1 * g2).sum()[None]
    return torch.cat([g1g1, g1g2, g2g2])


def update_ave_grad(value, params, name, eps=1e-3):
    for p in params:
        g = torch.autograd.grad(value, p, retain_graph=True)[0]
        ag = getattr(p, name) if hasattr(p, name) else 0
        setattr(p, name, (1 - eps) * ag + eps * g)


def norm(params, name):
    s = 0
    for p in params:
        s += getattr(p, name).pow(2).sum()
    return s


# Assumed initializations for the gradient statistics logged to lambda.dat
# and for the global step counters.
l_memex, norm_regular, norm_memex = 0.0, 0.0, 0.0
it, n_batch, nb_samples_seen = 0, 0, 0
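
# Note that the optimizer is re-created, and its internal state thus reset,
# at the beginning of every epoch.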

for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()  # (assumed; the test pass below switches to eval mode)

    nb_train_samples, acc_train_loss, acc_train_inner_loss = 0, 0.0, 0.0
    nb_acc_samples = 0

    memex_proba = (
        args.memex_proba
        if args.memex_nb_epochs is None or n_epoch < args.memex_nb_epochs
        else 0.0
    )

    log_string(f"memex_proba {memex_proba}")

    warnings.warn("memex v3", RuntimeWarning)
    train_batches = add_memex_v3(
        batches=task.batches(split="train"),
        memex_proba=memex_proba,
        marker_token=memex_marker,
    )

    # Append a final None so that the last, possibly partial, accumulated
    # batch below gets flushed.
    def add_none(it):
        for x in it:
            yield x
        yield None

    for input in add_none(train_batches):
        if input is not None:
            if type(input) is tuple:
                input, memex_mask = input
                memex_mask = memex_mask.to(device)
            else:
                memex_mask = None

            model.reset_inner_loss()
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x

            if memex_mask is None:
                loss = F.cross_entropy(output.transpose(1, 2), input)
            else:
                loss = F.cross_entropy(output.transpose(1, 2), input, reduction="none")
                loss_regular = (loss * (1 - memex_mask)).mean()
                loss_memex = (loss * memex_mask).mean()

                if it < 100 or torch.rand(1) < 0.01:
                    update_ave_grad(loss_regular, model.parameters(), "grad_regular")
                    update_ave_grad(loss_memex, model.parameters(), "grad_memex")
                    norm_regular = norm(model.parameters(), "grad_regular")
                    norm_memex = norm(model.parameters(), "grad_memex")
                    l_memex = (
                        max(norm_regular, norm_memex) - norm_regular
                    ) / norm_regular  # (assumed normalization)

                loss = loss_regular + l_memex * loss_memex

            inner_loss = model.get_inner_loss()

            acc_train_loss += loss.item() * input.size(0)
            acc_train_inner_loss += inner_loss.item() * input.size(0)

            nb_train_samples += input.size(0)
            nb_samples_seen += input.size(0)

            total_loss = loss + (
                args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0
            )

            it += 1  # (assumed placement of the global iteration counter)
            lr = get_lr(n_epoch, it)
            for param_group in optimizer.param_groups:
                param_group["lr"] = lr

            # log_string(f"learning_rate {lr}")

            total_loss.backward()
            nb_acc_samples += input.size(0)
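
        # Gradients accumulate over physical batches; an optimizer step is
        # taken once a full logical batch of args.batch_size samples has been
        # seen, or when the trailing None flushes a last partial batch.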
        if (input is None and nb_acc_samples > 0) or nb_acc_samples == args.batch_size:
            assert nb_acc_samples <= args.batch_size
            optimizer.step()  # (assumed position of the elided step)

            grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()
            loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")
            lambda_file.write(
                f"{n_epoch} {n_batch} {l_memex} {norm_regular} {norm_memex}\n"
            )
            optimizer.zero_grad()
            nb_acc_samples = 0
            n_batch += 1

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"loss {n_epoch} train_loss {acc_train_loss/nb_train_samples} train_inner_loss {acc_train_inner_loss/nb_train_samples} test_prediction {acc_test_loss/nb_test_samples}"
        )

        task.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
            logger=log_string,
            deterministic_synthesis=args.deterministic_synthesis,
        )

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        time_current_result = datetime.datetime.now()
        log_string(
            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
        )
        time_pred_result = time_current_result
1189 "nb_epochs_finished": n_epoch + 1,
1190 "model_state": model.state_dict(),
1191 "rng_state": torch.get_rng_state(),
1194 if torch.cuda.is_available():
1195 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
1197 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
1198 torch.save(checkpoint, checkpoint_name)
1199 log_string(f"saved checkpoint {checkpoint_name}")
1201 ######################################################################