# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)
import math, sys, argparse, time, tqdm, itertools, os

import torch, torchvision

from torch.nn import functional as F

import mygpt, maze, tensorstack

######################################################################
if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################
parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache to solve a toy geometric reasoning task."
)
33 parser.add_argument("--log_filename", type=str, default="train.log")
35 parser.add_argument("--result_dir", type=str, default="results_default")
37 parser.add_argument("--seed", type=int, default=0)
39 parser.add_argument("--nb_epochs", type=int, default=25)
41 parser.add_argument("--batch_size", type=int, default=100)
43 parser.add_argument("--data_size", type=int, default=-1)
45 parser.add_argument("--optim", type=str, default="adam")
47 parser.add_argument("--learning_rate", type=float, default=1e-3)
50 "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
53 parser.add_argument("--dim_model", type=int, default=512)
55 parser.add_argument("--dim_keys", type=int, default=64)
57 parser.add_argument("--dim_hidden", type=int, default=2048)
59 parser.add_argument("--nb_heads", type=int, default=8)
61 parser.add_argument("--nb_blocks", type=int, default=12)
63 parser.add_argument("--dropout", type=float, default=0.1)
65 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
67 parser.add_argument("--no_checkpoint", action="store_true", default=False)
69 parser.add_argument("--overwrite_results", action="store_true", default=False)
71 parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
73 ##############################
76 parser.add_argument("--world_height", type=int, default=13)
78 parser.add_argument("--world_width", type=int, default=21)
80 parser.add_argument("--world_nb_walls", type=int, default=15)
82 ######################################################################
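# Example invocation (a sketch; the script's filename is an assumption,
# the flags are the ones declared above):
#
#   python main.py --result_dir results_maze --overwrite_results \
#       --learning_rate_schedule cos --world_nb_walls 15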
args = parser.parse_args()

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)

######################################################################
def log_string(s):
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s)
    sys.stdout.flush()


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################
# ar_mask is a Boolean matrix of the same shape as input, with 1s on
# the tokens that should be generated


def masked_inplace_autoregression(model, batch_size, input, ar_mask):
    for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
        i = (ar_mask.sum(0) > 0).nonzero()
        if i.min() > 0:
            model(
                mygpt.BracketedSequence(input, 0, i.min())
            )  # Needed to initialize the model's cache
        for s in range(i.min(), i.max() + 1):
            output = model(mygpt.BracketedSequence(input, s, 1)).x
            logits = output[:, s]
            if args.deterministic_synthesis:
                t_next = logits.argmax(1)
            else:
                dist = torch.distributions.categorical.Categorical(logits=logits)
                t_next = dist.sample()
            input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
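# A usage sketch (not executed here): to let the model fill in
# everything after a ten-token prompt in a batch of sequences x, build
# the mask so that only the suffix is generated:
#
#   ar_mask = torch.zeros_like(x)
#   ar_mask[:, 10:] = 1
#   masked_inplace_autoregression(model, args.batch_size, x, ar_mask)
#
# The prompt tokens x[:, :10] are left untouched, and every masked
# position is filled in turn with a sampled (or argmax) next token.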
######################################################################


class Task:
    def batches(self, split="train"):
        pass

    def vocabulary_size(self):
        pass

    def produce_results(self, n_epoch, model):
        pass


######################################################################
class TaskMaze(Task):
    def map2seq(self, *m):
        return torch.cat([x.flatten(1) for x in m], 1)

    def seq2map(self, s):
        s = s.reshape(s.size(0), -1, self.height, self.width)
        return (s[:, k] for k in range(s.size(1)))
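    # map2seq flattens each (N, height, width) map into (N, height*width)
    # and concatenates them along the token dimension; seq2map inverts
    # this. For a maze and its path the sequence length is therefore
    # 2*height*width: the first half encodes the maze, the second the path.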
    def __init__(self, batch_size, height, width, nb_walls, device=torch.device("cpu")):
        self.batch_size = batch_size
        self.height = height
        self.width = width
        self.device = device

        nb = args.data_size if args.data_size > 0 else 250000

        mazes_train, paths_train = maze.create_maze_data(
            nb,
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
        )
        mazes_train, paths_train = mazes_train.to(device), paths_train.to(device)
        self.train_input = self.map2seq(mazes_train, paths_train)
        self.nb_codes = self.train_input.max() + 1

        mazes_test, paths_test = maze.create_maze_data(
            nb // 25,  # assumption: test-set size, the original value was elided
            height=height,
            width=width,
            nb_walls=nb_walls,
            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
        )
        mazes_test, paths_test = mazes_test.to(device), paths_test.to(device)
        self.test_input = self.map2seq(mazes_test, paths_test)
    def batches(self, split="train"):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(
            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
        ):
            yield batch

    def vocabulary_size(self):
        return self.nb_codes
    def compute_error(self, model, split="train"):
        nb_total, nb_correct = 0, 0
        for input in self.batches(split):
            result = input.clone()
            ar_mask = result.new_zeros(result.size())
            ar_mask[:, self.height * self.width :] = 1
            masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
            mazes, paths = self.seq2map(result)
            nb_correct += maze.path_correctness(mazes, paths).long().sum()
            nb_total += mazes.size(0)

        return nb_total, nb_correct
    def produce_results(self, n_epoch, model):
        train_nb_total, train_nb_correct = self.compute_error(model, "train")
        log_string(
            f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
        )

        test_nb_total, test_nb_correct = self.compute_error(model, "test")
        log_string(
            f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
        )

        input = self.test_input[:32]
        result = input.clone()
        ar_mask = result.new_zeros(result.size())
        ar_mask[:, self.height * self.width :] = 1
        masked_inplace_autoregression(model, self.batch_size, result, ar_mask)

        mazes, paths = self.seq2map(input)
        _, predicted_paths = self.seq2map(result)
        maze.save_image(f"result_{n_epoch:04d}.png", mazes, paths, predicted_paths)
######################################################################

log_string(f"device {device}")


task = TaskMaze(
    batch_size=args.batch_size,
    height=args.world_height,
    width=args.world_width,
    nb_walls=args.world_nb_walls,
    device=device,
)


vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")
##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,  # assumption: autoregressive masking, elided in the original
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)
######################################################################

nb_epochs_default = 25  # assumption: fallback matching the --nb_epochs default

nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
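# Sanity check for the formula above: with K equally likely token
# values the entropy is log(K), so the perplexity is exp(log(K)) = K;
# a heavily skewed distribution gives a value close to 1. This is the
# irreducible perplexity of the training data itself, logged later as
# a baseline for the model's predictive perplexity.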
##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
else:
    u = {
        int(k): float(v)
        for k, v in [
            tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
        ]
    }

    learning_rate_schedule = {}
    learning_rate = args.learning_rate
    for n_epoch in range(args.nb_epochs):
        if n_epoch in u:
            learning_rate = u[n_epoch]
        learning_rate_schedule[n_epoch] = learning_rate
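# With the defaults --learning_rate 1e-3 and "10: 2e-4,20: 4e-5,30: 8e-6",
# epochs 0-9 run at 1e-3, epochs 10-19 at 2e-4, and epochs 20-24 at 4e-5;
# the epoch-30 entry is never reached with the default 25 epochs.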
344 log_string(f"learning_rate_schedule {learning_rate_schedule}")
346 ##############################
nb_samples_seen = 0

if nb_epochs_finished >= nb_epochs:
    task.produce_results(nb_epochs_finished, model)

for n_epoch in range(nb_epochs_finished, nb_epochs):
    learning_rate = learning_rate_schedule[n_epoch]

    log_string(f"learning_rate {learning_rate}")
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown optimizer {args.optim}.")
    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        # cross_entropy expects (N, C, T) logits, hence the transpose
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            # input, loss_masks, true_images = task.excise_last_image(input)
            # input, loss_masks = task.add_true_image(input, true_images, loss_masks)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)
        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
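        # Since loss is the mean cross-entropy per token, the division
        # above yields the average negative log-likelihood per token,
        # and exp of it is the usual perplexity. The min(100, ...)
        # clamp only keeps math.exp from overflowing on a divergent run.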
405 f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
408 task.produce_results(n_epoch, model)
411 "nb_epochs_finished": n_epoch + 1,
412 "model_state": model.state_dict(),
413 "rng_state": torch.get_rng_state(),
416 if torch.cuda.is_available():
417 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
419 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
420 torch.save(checkpoint, checkpoint_name)
421 log_string(f"saved checkpoint {checkpoint_name}")
423 ######################################################################