# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, os

import torch, torchvision

from torch.nn import functional as F

import mygpt, tensorstack
######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task", type=str, default="picoclvr", help="picoclvr, mnist, maze, snake, stack"
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=None)

parser.add_argument("--batch_size", type=int, default=None)

parser.add_argument("--nb_train_samples", type=int, default=250000)

parser.add_argument("--nb_test_samples", type=int, default=10000)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-4)

parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")

parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)
##############################

parser.add_argument("--snake_height", type=int, default=6)

parser.add_argument("--snake_width", type=int, default=8)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)
##############################

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=1)

parser.add_argument("--stack_nb_values", type=int, default=10)
######################################################################

args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)

if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)

######################################################################
        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "nb_train_samples": 250000,
        "nb_test_samples": 10000,

        "nb_train_samples": 100000,
        "nb_test_samples": 1000,

if args.task in default_args:
    for k, v in default_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################
def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################
# ar_mask is boolean, with 1s on the values to generate


def masked_inplace_autoregression(
    model,
    batch_size,
    input,
    ar_mask,
    forbidden_tokens=None,
    progress_bar_desc="autoregression",
    device=torch.device("cpu"),
):
    # p = logits.softmax(1)
    # entropy[:,s]= p.xlogy(p).sum(1) / math.log(2)

    batches = zip(input.split(batch_size), ar_mask.split(batch_size))

    if progress_bar_desc is not None:
        batches = tqdm.tqdm(
            batches,
            dynamic_ncols=True,
            desc=progress_bar_desc,
            total=input.size(0) // batch_size,
        )

    for input, ar_mask in batches:
        i = (ar_mask.sum(0) > 0).nonzero()

        model(
            mygpt.BracketedSequence(input, 0, i.min())
        )  # Needed to initialize the model's cache

        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if forbidden_tokens is not None:
                logits = logits.masked_fill(forbidden_tokens, float("-inf"))
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
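

# Typical call, as used by the tasks below: `input` is modified in place,
# and only the positions where `ar_mask` is 1 are re-generated, token by
# token, from the model's predictions; every other position is kept as given.
#
#     masked_inplace_autoregression(
#         model, batch_size, results, ar_mask, device=device
#     )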
######################################################################

class Task:
    def batches(self, split="train"):
        pass
    def vocabulary_size(self):
        pass
    def produce_results(self, n_epoch, model):
        pass
######################################################################

import picoclvr


class TaskPicoCLVR(Task):
    # Make a tensor from a list of strings
    def tensorize(self, descr):
        token_descr = [s.strip().split(" ") for s in descr]
        l = max([len(s) for s in token_descr])
        token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
        id_descr = [[self.token2id[u] for u in s] for s in token_descr]
        return torch.tensor(id_descr, device=self.device)

    # Make a list of strings from a tensor
    def detensorize(self, x):
        return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
    # Trim the tensors in z, removing the left and right columns that
    # contain only the given token in the first tensor. If z is a tuple,
    # all its elements are trimmed according to the trimming of the first.
    def trim(self, z, token="<nul>"):
        n = self.token2id[token]
        if type(z) == tuple:
            x = z[0]
            i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return tuple([t[:, a:b] for t in z])
        else:
            i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
            a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
            return z[:, a:b]
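
    # For instance (hypothetical values, assuming "<nul>" maps to 0):
    #   z = torch.tensor([[0, 0, 5, 3, 0], [0, 7, 2, 0, 0]])
    #   self.trim(z) -> tensor([[0, 5, 3], [7, 2, 0]])
    # Only the leading/trailing columns that are "<nul>" in every row of
    # the batch are removed.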
    ######################
    # Not the cleanest part of the code

    # Extract the last image of each sequence, from the last <img>
    # included, and set to <nul> all the tokens from the beginning of
    # that image to the end
    def excise_last_image(self, input):
        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1

        input = input.clone()
        t = (input == t_img).long()
        tail_masks = (t.cumsum(dim=1) == t.sum(dim=1, keepdim=True)).long()
        i = (t * tail_masks).nonzero(as_tuple=True)
        j = (
            i[0][:, None],
            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
        )
        images = self.trim(input[j])
        input[j] = t_nul
        loss_masks = 1 - tail_masks
        input, loss_masks = self.trim((input, loss_masks))
        return input, loss_masks, images
    def add_true_image(self, input, images, loss_masks):
        t_nul = self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1
        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
        t = (input == t_nul).long()
        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
        j = (
            i[0][:, None],
            i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
        )
        input[j] = images
        loss_masks[j] = 1
        input, loss_masks = self.trim((input, loss_masks))
        return input, loss_masks
    def add_generated_image(self, input, loss_masks, model):
        t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
        nb_img_tokens = self.height * self.width + 1

        input = F.pad(input, (0, nb_img_tokens), value=t_nul)
        loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
        t = (input == t_nul).long()
        i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
        input[i] = t_img

        j = (
            i[0][:, None],
            i[1][:, None]
            + 1
            + torch.arange(nb_img_tokens - 1, device=input.device)[None, :],
        )
        ar_masks = input.new_zeros(input.size(), dtype=torch.int64)
        ar_masks[j] = 1
        forbidden_tokens = (
            torch.arange(self.vocabulary_size(), device=input.device) == t_nul
        )
        with torch.autograd.no_grad():
            masked_inplace_autoregression(
                model, self.batch_size, input, ar_masks, forbidden_tokens,
                progress_bar_desc=None, device=self.device,
            )

        input, loss_masks = self.trim((input, loss_masks))

        return input, loss_masks
    ######################

    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        nb_colors, device=torch.device("cpu"), pruner_train=None, pruner_eval=None,
    ):
        def generate_descr(nb, cache_suffix, pruner):
            return picoclvr.generate(
                nb, height=height, width=width, nb_colors=nb_colors, pruner=pruner
            )

        self.height = height
        self.width = width
        self.batch_size = batch_size
        self.device = device
        self.pruner_train = pruner_train
        self.pruner_eval = pruner_eval

        # record of the generation parameters
        param = {
            "nb_train_samples": nb_train_samples,
            "nb_test_samples": nb_test_samples,
            "nb_colors": nb_colors,
            "batch_size": batch_size,
            "rng_state": list(torch.get_rng_state()),
        }

        log_string(
            f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
        )
        self.train_descr = generate_descr(
            nb_train_samples, "train", pruner=self.pruner_train
        )
        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)

        # Build the tokenizer
        tokens = {"<nul>", "<img>"}
        for d in [self.train_descr, self.test_descr]:
            for s in d:
                for t in s.strip().split(" "):
                    tokens.add(t)
        # make this set a sorted list to get the same tensors given
        # the same descriptions
        tokens = list(tokens)
        tokens.sort()
        self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
        self.id2token = dict([(n, t) for n, t in enumerate(tokens)])

        # Tokenize the train and test sets
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield self.trim(batch)

    def vocabulary_size(self):
        return len(self.token2id)
    def compute_missing_properties(self, n_epoch, model, pruner=None):
        acc_nb_requested_properties = []
        acc_nb_missing_properties = []
        acc_nb_results = 0

        for input in tqdm.tqdm(
            self.test_input.split(self.batch_size),
            dynamic_ncols=True,
            desc=f"test-properties",
        ):
            tape, loss_masks, _ = self.excise_last_image(input)
            tape, loss_masks = self.add_generated_image(tape, loss_masks, model)
            result_descr = self.detensorize(tape)
            np = picoclvr.nb_properties(
                result_descr, height=self.height, width=self.width, pruner=pruner
            )
            nb_requested_properties, _, nb_missing_properties = zip(*np)
            acc_nb_requested_properties += nb_requested_properties
            acc_nb_missing_properties += nb_missing_properties
            acc_nb_results += len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        prefix = "" if pruner is None else "pruned_"
        log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        log_string(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        log_string(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )

    ######################################################################
    def produce_results(self, n_epoch, model):
        self.compute_missing_properties(n_epoch, model)

        if self.pruner_eval is not None:
            self.compute_missing_properties(n_epoch, model, self.pruner_eval)

        nb_tokens_to_generate = self.height * self.width + 3

        primer = []

        for primer_descr in [
            "red above green <sep> green top <sep> blue right of red",
            "there is red <sep> there is yellow <sep> there is blue",
            "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
            "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
        ]:
            primer += [primer_descr] * nb_per_primer

        tape = self.tensorize(primer)
        loss_masks = 1 - (tape == self.token2id["<nul>"]).long()
        tape, loss_masks = self.add_generated_image(tape, loss_masks, model)
        result_descr = self.detensorize(tape)

        np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)

        acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
        acc_nb_results = len(result_descr)

        nb_requested_properties = sum(acc_nb_requested_properties)
        nb_missing_properties = sum(acc_nb_missing_properties)

        log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
        log_string(
            f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
        )
        log_string(
            f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
        )
        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)

        if img.dim() == 5:
            if img.size(1) == 1:
                img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
            else:
                img = torch.cat(
                    [torchvision.utils.make_grid(x, padding=1, pad_value=64)[None] for x in img], 0
                )

        image_name = os.path.join(args.result_dir, f"picoclvr_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
        )
        log_string(f"wrote {image_name}")

######################################################################
class TaskMNIST(Task):
    def __init__(self, batch_size, device=torch.device("cpu")):
        self.batch_size = batch_size
        self.device = device

    def batches(self, split="train"):
        assert split in {"train", "test"}
        data_set = torchvision.datasets.MNIST(
            root="./data", train=(split == "train"), download=True
        )
        data_input = data_set.data.view(-1, 28 * 28).long()
        if args.nb_train_samples is not None:
            data_input = data_input[: args.nb_train_samples]
        for batch in tqdm.tqdm(
            data_input.split(self.batch_size), desc=f"epoch-{split}"
        ):
            yield batch

    def vocabulary_size(self):
        return 256

    def produce_results(self, n_epoch, model):
        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
        ar_mask = torch.full_like(results, 1)
        masked_inplace_autoregression(
            model, self.batch_size, results, ar_mask, device=self.device
        )
        image_name = os.path.join(args.result_dir, f"mnist_result_{n_epoch:04d}.png")
        torchvision.utils.save_image(
            1 - results.reshape(-1, 1, 28, 28) / 255.0,
            image_name,
        )
        log_string(f"wrote {image_name}")


######################################################################

import maze

class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))

    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width, nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, _ = maze.create_maze_data(
            nb_train_samples, height=height, width=width, nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))

        test_mazes, test_paths, _ = maze.create_maze_data(
            nb_test_samples, height=height, width=width, nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        count = torch.zeros(
            self.width * self.height,
            self.width * self.height,
            device=self.device,
            dtype=torch.int64,
        )
        for input in tqdm.tqdm(
            task.batches(split, nb_to_use),
            dynamic_ncols=True,
        ):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model, self.batch_size, result, ar_mask,
                progress_bar_desc=None, device=self.device,
            )
            mazes, paths = self.seq2map(result)
            path_correctness = maze.path_correctness(mazes, paths)
            nb_correct += path_correctness.long().sum()
            nb_total += mazes.size(0)

            optimal_path_lengths = (
                (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            predicted_path_lengths = (
                (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
            )
            optimal_path_lengths = optimal_path_lengths[path_correctness]
            predicted_path_lengths = predicted_path_lengths[path_correctness]
            count[optimal_path_lengths, predicted_path_lengths] += 1

        count = count[
            : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
        ]

        return nb_total, nb_correct, count
    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            train_nb_total, train_nb_correct, count = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct, count = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            if count is not None:
                proportion_optimal = count.diagonal().sum().float() / count.sum()
                log_string(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
                with open(
                    os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
                ) as f:
                    for i in range(count.size(0)):
                        for j in range(count.size(1)):
                            eol = " " if j < count.size(1) - 1 else "\n"
                            f.write(f"{count[i,j]}{eol}")

            input = self.test_input[:48]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(
                model, self.batch_size, result, ar_mask, device=self.device
            )

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)

            filename = os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.png")
            maze.save_image(
                filename,
                mazes=mazes,
                target_paths=paths,
                predicted_paths=predicted_paths,
                path_correct=maze.path_correctness(mazes, predicted_paths),
                path_optimal=maze.path_optimality(paths, predicted_paths),
            )
            log_string(f"wrote {filename}")
######################################################################

import snake


class TaskSnake(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size, height, width,
        nb_colors, length, prompt_length, device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device
        self.prompt_length = prompt_length

        self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
            nb_train_samples, height, width, nb_colors, length, prompt_length, self.device
        )
        self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
            nb_test_samples, height, width, nb_colors, length, prompt_length, self.device
        )

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():

            def compute_nb_correct(input, prior_visits):
                result = input.clone()
                i = torch.arange(result.size(1), device=result.device)[None, :]
                ar_mask = (
                    torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
                    .long()
                    .expand_as(result)
                )
                result *= 1 - ar_mask

                # snake.solver(result,ar_mask)

                masked_inplace_autoregression(
                    model, self.batch_size, result, ar_mask, device=self.device
                )

                nb_total = ((prior_visits > 0) * ar_mask).sum()
                nb_correct = (
                    (result == input).long() * (prior_visits > 0) * ar_mask
                ).sum()

                # nb_total = result.size(0)
                # nb_correct = ((result - input).abs().sum(1) == 0).sum()

                return nb_total, nb_correct

            # train_nb_total, train_nb_correct = compute_nb_correct(
            #     self.train_input, self.train_prior_visits
            # )
            # log_string(
            #     f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            # )

            test_nb_total, test_nb_correct = compute_nb_correct(
                self.test_input[:1000], self.test_prior_visits[:1000]
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )
######################################################################

import stack


class TaskStack(Task):
    def __init__(
        self, nb_train_samples, nb_test_samples, batch_size,
        nb_steps, nb_stacks, nb_values, device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.nb_stacks = nb_stacks
        self.nb_values = nb_values
        self.device = device

        self.train_input, self.train_stack_counts = stack.generate_sequences(
            nb_train_samples, nb_steps, nb_stacks, nb_values, self.device
        )

        self.test_input, self.test_stack_counts = stack.generate_sequences(
            nb_test_samples, nb_steps, nb_stacks, nb_values, self.device
        )

        mask = self.test_input.clone()
        stack.remove_poped_values(mask, self.nb_stacks)
        mask = mask != self.test_input
        counts = self.test_stack_counts.flatten()[mask.flatten()]
        counts = F.one_hot(counts).sum(0)
        log_string(f"stack_count {counts}")

        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():

            def compute_nb_correct(input):
                result = input.clone()
                stack.remove_poped_values(result, self.nb_stacks)
                ar_mask = (result != input).long()
                result *= 1 - ar_mask

                masked_inplace_autoregression(
                    model, self.batch_size, result, ar_mask, device=self.device
                )

                nb_total = ar_mask.sum()
                nb_correct = (
                    (result == input).long() * ar_mask
                ).sum()

                return nb_total, nb_correct

            test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])

            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )


######################################################################
def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))


picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################
if args.task == "picoclvr":
    task = TaskPicoCLVR(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.picoclvr_height,
        width=args.picoclvr_width,
        nb_colors=args.picoclvr_nb_colors,
        device=device,
        pruner_train=picoclvr_pruner_train,
        pruner_eval=picoclvr_pruner_eval,
    )

elif args.task == "mnist":
    task = TaskMNIST(
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "maze":
    task = TaskMaze(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.maze_height,
        width=args.maze_width,
        nb_walls=args.maze_nb_walls,
        device=device,
    )

elif args.task == "snake":
    task = TaskSnake(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        height=args.snake_height,
        width=args.snake_width,
        nb_colors=args.snake_nb_colors,
        length=args.snake_length,
        prompt_length=args.snake_length // 2,
        device=device,
    )

elif args.task == "stack":
    task = TaskStack(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        nb_steps=args.stack_nb_steps,
        nb_stacks=args.stack_nb_stacks,
        nb_values=args.stack_nb_values,
        device=device,
    )

else:
    raise ValueError(f"Unknown task {args.task}")

######################################################################
log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string(f"not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
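
# The perplexity of the training-set token distribution: the exponential of
# the Shannon entropy (in nats) of the empirical token frequencies. It is
# logged below as a reference alongside the model's prediction perplexities.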
##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")
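
# For instance, with the default "10: 2e-5,30: 4e-6" and --learning_rate 1e-4,
# epochs 0-9 use 1e-4, epochs 10-29 use 2e-5, and epochs 30 onward use 4e-6;
# passing "cos" instead selects the cosine-decay schedule computed above.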
##############################

nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(nb_epochs_finished, model)

for n_epoch in range(nb_epochs_finished, nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")
    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )
        task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }
    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################