# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, itertools, os

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt, tensorstack

######################################################################

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="Solving a maze shortest-path problem with a GPT."
)

parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=200000)

parser.add_argument("--nb_test_samples", type=int, default=50000)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-3)

parser.add_argument(
    "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
)

parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--random_regression_order", action="store_true", default=False)

parser.add_argument("--noncausal_prompt", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

##############################

parser.add_argument("--oneshot", action="store_true", default=False)

parser.add_argument("--oneshot_input", type=str, default="head")

parser.add_argument("--oneshot_output", type=str, default="trace")

######################################################################

args = parser.parse_args()

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

if args.seed >= 0:
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # torch.use_deterministic_algorithms(True)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


def generation_order(x, fixed_len=0):
    if args.random_regression_order:
        order = torch.rand(x.size(), device=x.device)
        order[:, :fixed_len] = torch.arange(-fixed_len, 0, device=x.device)
        order = order.sort(1).indices
    else:
        order = (
            torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
        )
    return order
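
# A minimal sketch of the two orders (hypothetical sizes): for x of
# size (1, 5) and fixed_len=2, the default order is [[0, 1, 2, 3, 4]];
# with --random_regression_order the first two positions remain [0, 1]
# (the arange(-fixed_len, 0) values sort before any random draw) and
# the last three become a random permutation, e.g. [[0, 1, 4, 2, 3]].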


def reorder(x, order, reverse=False):  # x is NxTxD1x...xDk, order is NxT'
    u = x.reshape(x.size()[:2] + (-1,))
    order = order.unsqueeze(-1).expand(-1, -1, u.size(-1))
    if reverse:
        v = u.new(u.size()).scatter_(1, order, u)
    else:
        v = u.gather(1, order)
    v = v.reshape(v.size()[:2] + x.size()[2:])
    return v
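
# A minimal sketch of the round trip (hypothetical values): gather
# applies the permutation, scatter_ undoes it.
#
#   x = torch.tensor([[10, 11, 12]])
#   order = torch.tensor([[2, 0, 1]])
#   reorder(x, order)                                # [[12, 10, 11]]
#   reorder(reorder(x, order), order, reverse=True)  # [[10, 11, 12]]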


def shuffle(x, fixed_len):
    order = generation_order(x, fixed_len)
    return reorder(x, order), order


def eval_mygpt(model, input, mode="standard", fixed_len=0):
    x, order = shuffle(input, fixed_len)
    x = model(mygpt.BracketedSequence(x), mode=mode, order=order).x
    return reorder(x, order, reverse=True)


######################################################################

# ar_mask is a Boolean matrix of the same shape as input, with 1s on
# the tokens that should be generated


def masked_inplace_autoregression(model, batch_size, input, ar_mask, order=None):
    for input, ar_mask, order in zip(
        input.split(batch_size), ar_mask.split(batch_size), order.split(batch_size)
    ):
        i = (ar_mask.sum(0) > 0).nonzero()
        if i.min() > 0:
            # Needed to initialize the model's cache
            model(mygpt.BracketedSequence(input, 0, i.min()), order=order)
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1), order=order).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
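
# A minimal sketch of the expected arguments (hypothetical values):
#
#   input   = torch.tensor([[7, 7, 0, 0]])
#   ar_mask = torch.tensor([[0, 0, 1, 1]])
#
# keeps the two prompt tokens (used once to prime the model's cache),
# then generates positions 2 and 3 one token at a time, writing each
# sampled token back into input so later steps are conditioned on it.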


######################################################################


def compute_perplexity(model, task, fixed_len, split="train"):
    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        nb_samples, acc_loss = 0, 0.0

        for input in task.batches(split=split):
            input = input.to(device)
            output = eval_mygpt(model, input, fixed_len=fixed_len)
            if args.noncausal_prompt:
                d = input.size(1) // 2
                loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
            else:
                loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_loss += loss.item() * input.size(0)
            nb_samples += input.size(0)

        model.train(t)

        return math.exp(min(100, acc_loss / nb_samples))
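
# The returned value is exp of the per-token cross-entropy, i.e. the
# usual perplexity: a model that picks uniformly among 4 tokens has a
# loss of log(4) ~ 1.386 and hence a perplexity of 4. The min(100, ...)
# clamp only keeps math.exp from overflowing if the model diverges.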


######################################################################


def oneshot_policy_loss(mazes, output, policies, height, width):
    masks = (mazes == maze.v_empty).unsqueeze(-1)
    targets = policies.permute(0, 2, 1) * masks
    output = output * masks
    return -(output.log_softmax(-1) * targets).sum() / masks.sum()


def oneshot_trace_loss(mazes, output, policies, height, width):
    masks = mazes == maze.v_empty
    targets = maze.stationary_densities(
        mazes.view(-1, height, width), policies.view(-1, 4, height, width)
    )
    targets = targets * masks
    output = output.squeeze(-1) * masks
    return (output - targets).abs().sum() / masks.sum()
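
# Both losses are normalized over the empty cells only: the policy
# loss is a masked cross-entropy between the 4-way move logits and the
# target policy, the trace loss an L1 distance between the predicted
# map and the stationary visit densities that maze.stationary_densities
# derives from the policy.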


def oneshot(gpt, task):
    t = gpt.training
    gpt.eval()

    if args.oneshot_input == "head":
        dim_in = args.dim_model
    elif args.oneshot_input == "deep":
        dim_in = args.dim_model * args.nb_blocks * 2
    else:
        raise ValueError(f"{args.oneshot_input=}")

    if args.oneshot_output == "policy":
        dim_out = 4
        compute_loss = oneshot_policy_loss
    elif args.oneshot_output == "trace":
        dim_out = 1
        compute_loss = oneshot_trace_loss
    else:
        raise ValueError(f"{args.oneshot_output=}")

    model = nn.Sequential(
        nn.Linear(dim_in, args.dim_model),
        nn.ReLU(),
        nn.Linear(args.dim_model, args.dim_model),
        nn.ReLU(),
        nn.Linear(args.dim_model, dim_out),
    ).to(device)

    for n_epoch in range(args.nb_epochs):
        learning_rate = learning_rate_schedule[n_epoch]
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        acc_train_loss, nb_train_samples = 0, 0
        for mazes, policies in task.policy_batches(split="train"):
            output_gpt = eval_mygpt(
                gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
            )
            output = model(output_gpt)

            loss = compute_loss(mazes, output, policies, task.height, task.width)
            acc_train_loss += loss.item() * mazes.size(0)
            nb_train_samples += mazes.size(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        acc_test_loss, nb_test_samples = 0, 0
        for mazes, policies in task.policy_batches(split="test"):
            output_gpt = eval_mygpt(
                gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
            )
            output = model(output_gpt)
            loss = compute_loss(mazes, output, policies, task.height, task.width)
            acc_test_loss += loss.item() * mazes.size(0)
            nb_test_samples += mazes.size(0)

        log_string(
            f"diff_ce {n_epoch} train {acc_train_loss/nb_train_samples} test {acc_test_loss/nb_test_samples}"
        )

        # -------------------
        mazes = task.test_input[:32, : task.height * task.width]
        policies = task.test_policies[:32]
        output_gpt = eval_mygpt(
            gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
        )
        output = model(output_gpt)
        if args.oneshot_output == "policy":
            targets = policies.permute(0, 2, 1)
            scores = (
                (F.one_hot(output.argmax(-1), num_classes=4) * targets).sum(-1) == 0
            ).float()
        elif args.oneshot_output == "trace":
            targets = maze.stationary_densities(
                mazes.view(-1, task.height, task.width),
                policies.view(-1, 4, task.height, task.width),
            )
            scores = output
        else:
            raise ValueError(f"{args.oneshot_output=}")

        scores = scores.reshape(-1, task.height, task.width)
        mazes = mazes.reshape(-1, task.height, task.width)
        targets = targets.reshape(-1, task.height, task.width)
        filename = (
            f"oneshot_{args.oneshot_input}_{args.oneshot_output}_{n_epoch:04d}.png"
        )
        maze.save_image(
            os.path.join(args.result_dir, filename),
            mazes=mazes,
            score_paths=scores,
            score_truth=targets,
        )
        log_string(f"wrote {filename}")

        # -------------------

    gpt.train(t)


######################################################################


class Task:
    def batches(self, split="train", nb_to_use=-1, desc=None):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass


######################################################################

import maze


class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
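
    # A minimal sketch (hypothetical sizes): with height=2 and width=3,
    # map2seq flattens a (N, 2, 3) maze grid and a (N, 2, 3) path grid
    # into a single (N, 12) sequence, and seq2map cuts a (N, 12)
    # sequence back into two (N, 2, 3) grids, so
    # seq2map(map2seq(m, p)) recovers m and p.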

    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, train_policies = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
        self.train_policies = train_policies.flatten(-2).to(device)

        test_mazes, test_paths, test_policies = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
        self.test_policies = test_policies.flatten(-2).to(device)

        self.nb_codes = self.train_input.max() + 1

    def batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch

    def policy_batches(self, split="train", nb_to_use=-1, desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        policies = self.train_policies if split == "train" else self.test_policies
        input = input[:, : self.height * self.width]
        policies = policies * (input != maze.v_wall)[:, None]

        if nb_to_use > 0:
            input = input[:nb_to_use]
            policies = policies[:nb_to_use]

        if desc is None:
            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
            zip(input.split(self.batch_size), policies.split(self.batch_size)),
            dynamic_ncols=True,
            desc=desc,
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes

    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            x, order = shuffle(result, self.height * self.width)
            masked_inplace_autoregression(
                model, self.batch_size, x, ar_mask, order=order
            )
            result = reorder(x, order, reverse=True)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct

    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            train_nb_total, train_nb_correct = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            input = self.test_input[:32]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            x, order = shuffle(result, self.height * self.width)
            masked_inplace_autoregression(
                model, self.batch_size, x, ar_mask, order=order
            )
            result = reorder(x, order, reverse=True)

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)
            filename = f"result_{n_epoch:04d}.png"
            maze.save_image(
                os.path.join(args.result_dir, filename),
                mazes=mazes,
                target_paths=paths,
                predicted_paths=predicted_paths,
                path_correct=maze.path_correctness(mazes, predicted_paths),
            )
            log_string(f"wrote {filename}")

            model.train(t)


######################################################################

log_string(f"device {device}")


task = TaskMaze(
    nb_train_samples=args.nb_train_samples,
    nb_test_samples=args.nb_test_samples,
    batch_size=args.batch_size,
    height=args.maze_height,
    width=args.maze_width,
    nb_walls=args.maze_nb_walls,
    device=device,
)


vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################


def noncausal_prompt_amm_generator(d):
    q = torch.arange(d)[:, None]
    k = torch.arange(d)[None, :]
    s = args.maze_height * args.maze_width
    # return torch.logical_and(q < k, torch.logical_or(q >= s, k >= s))
    return q < k
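
# A minimal sketch of the mask (hypothetical size, and assuming True
# marks a forbidden query/key pair, as the causal "q < k" case
# suggests): with d = 4 the active variant masks the strict upper
# triangle,
#
#   [[False,  True,  True,  True],
#    [False, False,  True,  True],
#    [False, False, False,  True],
#    [False, False, False, False]]
#
# i.e. standard causal attention, while the commented-out variant
# would additionally leave attention unmasked within the length-s
# prompt.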


amm_generator = None

if args.noncausal_prompt:
    amm_generator = noncausal_prompt_amm_generator

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
    amm_generator=amm_generator,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
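
# train_set_perplexity is exp(H), with H the entropy of the empirical
# token distribution: the best perplexity a context-free (unigram)
# predictor could reach, and a baseline for the prediction
# perplexities logged below.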

##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")
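
# A minimal sketch of the schedule string (hypothetical values):
# "10: 2e-4,20: 4e-5" keeps args.learning_rate for epochs 0-9, then
# uses 2e-4 from epoch 10 and 4e-5 from epoch 20 onwards, each rate
# persisting until the next threshold.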

##############################

if nb_epochs_finished >= args.nb_epochs:
    n_epoch = nb_epochs_finished
    train_perplexity = compute_perplexity(
        model, task, fixed_len=task.height * task.width, split="train"
    )
    test_perplexity = compute_perplexity(
        model, task, fixed_len=task.height * task.width, split="test"
    )

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

##############################

for n_epoch in range(nb_epochs_finished, args.nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"{args.optim=}")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = eval_mygpt(model, input, fixed_len=task.height * task.width)
        if args.noncausal_prompt:
            d = input.size(1) // 2
            loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
        else:
            loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
    test_perplexity = compute_perplexity(
        model, task, fixed_len=task.height * task.width, split="test"
    )

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################

if args.oneshot:
    oneshot(model, task)

######################################################################