# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)
import math, sys, argparse, time, tqdm, itertools, os

import torch, torchvision

from torch import nn
from torch.nn import functional as F

import mygpt, maze, tensorstack
######################################################################
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")
######################################################################
parser = argparse.ArgumentParser(description="Maze shortest-path solving with a GPT.")
parser.add_argument("--log_filename", type=str, default="train.log")

parser.add_argument("--result_dir", type=str, default="results_default")

parser.add_argument("--seed", type=int, default=0)

parser.add_argument("--nb_epochs", type=int, default=25)

parser.add_argument("--nb_train_samples", type=int, default=200000)

parser.add_argument("--nb_test_samples", type=int, default=50000)

parser.add_argument("--batch_size", type=int, default=25)

parser.add_argument("--optim", type=str, default="adam")

parser.add_argument("--learning_rate", type=float, default=1e-3)
50 "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
parser.add_argument("--dim_model", type=int, default=512)

parser.add_argument("--dim_keys", type=int, default=64)

parser.add_argument("--dim_hidden", type=int, default=2048)

parser.add_argument("--nb_heads", type=int, default=8)

parser.add_argument("--nb_blocks", type=int, default=12)

parser.add_argument("--dropout", type=float, default=0.1)

parser.add_argument("--deterministic_synthesis", action="store_true", default=False)

parser.add_argument("--no_checkpoint", action="store_true", default=False)

parser.add_argument("--overwrite_results", action="store_true", default=False)

parser.add_argument("--one_shot", action="store_true", default=False)

parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################

parser.add_argument("--maze_height", type=int, default=13)

parser.add_argument("--maze_width", type=int, default=21)

parser.add_argument("--maze_nb_walls", type=int, default=15)

######################################################################

args = parser.parse_args()
try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)
######################################################################
def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")
######################################################################
# ar_mask is a Boolean matrix of the same shape as input, with 1s on
# the tokens that should be generated
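#
# For the maze task below, for instance, the mask is 0 on the first
# height * width tokens (the maze, which is given) and 1 on the
# remaining ones (the path to generate).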

def masked_inplace_autoregression(model, batch_size, input, ar_mask):
    for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
        i = (ar_mask.sum(0) > 0).nonzero()

        # Needed to initialize the model's cache
        model(mygpt.BracketedSequence(input, 0, i.min()))
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            # Only overwrite the positions flagged in ar_mask
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]

######################################################################
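# The perplexity is exp of the average per-token cross-entropy; the
# exponent is clamped at 100 to avoid an overflow while the model is
# still poor.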

def compute_perplexity(model, split="train"):
    with torch.autograd.no_grad():
        t = model.training
        model.eval()

        nb_samples, acc_loss = 0, 0.0

        for input in task.batches(split=split):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_loss += loss.item() * input.size(0)
            nb_samples += input.size(0)

        model.train(t)

        return math.exp(min(100, acc_loss / nb_samples))

######################################################################
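# "One-shot" probing: keep the GPT frozen and train a small MLP
# readout on its hidden activations (with_readout=False makes the GPT
# return them instead of logits) to predict, at every maze cell, the
# optimal policy, a distribution over the four moves. The loss is the
# cross-entropy minus the target entropy (the xlogy term), i.e. the
# KL divergence KL(targets || softmax(output)), which vanishes when
# the prediction matches the target exactly.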

def one_shot(gpt, task):
    t = gpt.training
    gpt.eval()

    model = nn.Sequential(
        nn.Linear(args.dim_model, args.dim_model),
        nn.ReLU(),
        nn.Linear(args.dim_model, 4),
    ).to(device)

    print(f"{args.nb_epochs=}")

    for n_epoch in range(args.nb_epochs):
        learning_rate = learning_rate_schedule[n_epoch]
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

        acc_train_loss, nb_train_samples = 0, 0
        for input, targets in task.policy_batches(split="train"):
            output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
            output = model(output_gpt)
            loss = (
                -(output.log_softmax(-1) * targets).sum(-1).mean()
                + targets.xlogy(targets).sum(-1).mean()
            )
            acc_train_loss += loss.item() * input.size(0)
            nb_train_samples += input.size(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        acc_test_loss, nb_test_samples = 0, 0
        for input, targets in task.policy_batches(split="test"):
            output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
            output = model(output_gpt)
            loss = (
                -(output.log_softmax(-1) * targets).sum(-1).mean()
                + targets.xlogy(targets).sum(-1).mean()
            )
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)

        log_string(
            f"diff_ce {n_epoch} train {acc_train_loss/nb_train_samples} test {acc_test_loss/nb_test_samples}"
        )

        # -------------------
        # Save a visualization of the per-cell loss on one test batch
        input, targets = next(task.policy_batches(split="test"))
        output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
        output = model(output_gpt)
        losses = (-output.log_softmax(-1) * targets + targets.xlogy(targets)).sum(-1)
        losses = losses / losses.max()
        print(f"{input.size()=} {losses.size()=} {losses.min()=} {losses.max()=}")
        losses = losses * (input == 0)
        losses = losses.reshape(-1, args.maze_height, args.maze_width)
        input = input.reshape(-1, args.maze_height, args.maze_width)
        # NB: the keyword names of maze.save_image are reconstructed
        # from context
        maze.save_image(
            os.path.join(args.result_dir, f"oneshot_{n_epoch:04d}.png"),
            mazes=input,
            score_paths=losses,
        )
        # -------------------

    gpt.train(t)

######################################################################

class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass

######################################################################


class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
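
    # A sequence is the flattened maze followed by the flattened path,
    # i.e. 2 * height * width tokens; map2seq and seq2map convert
    # between the map and sequence representations.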

    # NB: the signature and the keyword names of maze.create_maze_data
    # are reconstructed from the call sites
    def __init__(
        self,
        nb_train_samples,
        nb_test_samples,
        batch_size,
        height,
        width,
        nb_walls,
        device=torch.device("cpu"),
    ):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        train_mazes, train_paths, train_policies = maze.create_maze_data(
            nb_train_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
        self.train_policies = train_policies.flatten(-2).permute(0, 2, 1).to(device)

        test_mazes, test_paths, test_policies = maze.create_maze_data(
            nb_test_samples,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
        self.test_policies = test_policies.flatten(-2).permute(0, 2, 1).to(device)

        self.nb_codes = self.train_input.max() + 1

    def batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        if nb_to_use > 0:
            input = input[:nb_to_use]
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

    def policy_batches(self, split="train", nb_to_use=-1):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        targets = self.train_policies if split == "train" else self.test_policies
        # Keep only the maze part of the sequences, and zero the target
        # policy on wall cells
        input = input[:, : self.height * self.width]
        targets = targets * (input != maze.v_wall)[:, :, None]

        if nb_to_use > 0:
            input = input[:nb_to_use]
            targets = targets[:nb_to_use]

        for batch in tqdm.tqdm(
            zip(input.split(self.batch_size), targets.split(self.batch_size)),
            dynamic_ncols=True,
            desc=f"epoch-{split}",
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
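
    # To measure the error: freeze the maze part of each sequence,
    # re-generate the path part autoregressively, and count the
    # sequences whose generated path is correct according to
    # maze.path_correctness.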
    def compute_error(self, model, split="train", nb_to_use=-1):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split, nb_to_use):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct

    def produce_results(self, n_epoch, model):
        with torch.autograd.no_grad():
            t = model.training
            model.eval()

            train_nb_total, train_nb_correct = self.compute_error(
                model, "train", nb_to_use=1000
            )
            log_string(
                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
            )

            test_nb_total, test_nb_correct = self.compute_error(
                model, "test", nb_to_use=1000
            )
            log_string(
                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
            )

            input = self.test_input[:32]
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            result *= 1 - ar_mask
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)

            mazes, paths = self.seq2map(input)
            _, predicted_paths = self.seq2map(result)
            # NB: the keyword names of maze.save_image are reconstructed
            # from context
            maze.save_image(
                os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
                mazes=mazes,
                target_paths=paths,
                predicted_paths=predicted_paths,
                path_correct=maze.path_correctness(mazes, predicted_paths),
            )

            model.train(t)

######################################################################
388 log_string(f"device {device}")
392 nb_train_samples=args.nb_train_samples,
393 nb_test_samples=args.nb_test_samples,
394 batch_size=args.batch_size,
395 height=args.maze_height,
396 width=args.maze_width,
397 nb_walls=args.maze_nb_walls,
402 vocabulary_size = task.vocabulary_size()
404 log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,  # autoregressive generation requires causal attention
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################
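# Load a checkpoint if there is one: this restores the model weights
# and the RNG states, so a resumed run continues like an uninterrupted
# one.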

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except Exception:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
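
# exp of the entropy of the empirical token distribution is the
# perplexity of the best memoryless predictor of the training set;
# it is logged below as train_set, next to the model's perplexity.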
##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in (
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        )
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate

log_string(f"learning_rate_schedule {learning_rate_schedule}")

##############################

if args.one_shot:
    one_shot(model, task)
    sys.exit(0)

##############################

if nb_epochs_finished >= args.nb_epochs:
    n_epoch = nb_epochs_finished
    train_perplexity = compute_perplexity(model, split="train")
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

##############################
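# Main training loop: one optimizer per epoch (so that the learning
# rate follows the schedule computed above), standard next-token
# cross-entropy, and a checkpoint saved at the end of every epoch.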

for n_epoch in range(nb_epochs_finished, args.nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")

    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
    test_perplexity = compute_perplexity(model, split="test")

    log_string(
        f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
    )

    task.produce_results(n_epoch, model)

    checkpoint = {
        "nb_epochs_finished": n_epoch + 1,
        "model_state": model.state_dict(),
        "rng_state": torch.get_rng_state(),
    }

    if torch.cuda.is_available():
        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()

    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
    torch.save(checkpoint, checkpoint_name)
    log_string(f"saved checkpoint {checkpoint_name}")

######################################################################