# Any copyright is dedicated to the Public Domain.
# https://creativecommons.org/publicdomain/zero/1.0/

# Written by Francois Fleuret <francois@fleuret.org>

# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)

import math, sys, argparse, time, tqdm, os

import torch, torchvision

from torch.nn import functional as F

import mygpt, tasks

######################################################################
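
# Use the GPU when available; TF32 matmuls trade a few low-order mantissa
# bits for a substantial speed-up on recent NVIDIA hardware.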

if torch.cuda.is_available():
    device = torch.device("cuda")
    torch.backends.cuda.matmul.allow_tf32 = True
else:
    device = torch.device("cpu")

######################################################################

parser = argparse.ArgumentParser(
    description="An implementation of GPT with cache.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)

parser.add_argument(
    "--task",
    type=str,
    default="sandbox",
    help="sandbox, picoclvr, mnist, maze, snake, stack, expr, world",
)
42 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
44 parser.add_argument("--result_dir", type=str, default=None)
46 parser.add_argument("--seed", type=int, default=0)
48 parser.add_argument("--nb_epochs", type=int, default=None)
50 parser.add_argument("--batch_size", type=int, default=None)
52 parser.add_argument("--nb_train_samples", type=int, default=None)
54 parser.add_argument("--nb_test_samples", type=int, default=None)
56 parser.add_argument("--optim", type=str, default="adam")
58 parser.add_argument("--learning_rate", type=float, default=1e-4)
60 parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
62 parser.add_argument("--dim_model", type=int, default=512)
64 parser.add_argument("--dim_keys", type=int, default=64)
66 parser.add_argument("--dim_hidden", type=int, default=2048)
68 parser.add_argument("--nb_heads", type=int, default=8)
70 parser.add_argument("--nb_blocks", type=int, default=12)
72 parser.add_argument("--dropout", type=float, default=0.1)
74 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
76 parser.add_argument("--no_checkpoint", action="store_true", default=False)
78 parser.add_argument("--overwrite_results", action="store_true", default=False)
80 parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")

##############################

parser.add_argument("--picoclvr_nb_colors", type=int, default=5)

parser.add_argument("--picoclvr_height", type=int, default=12)

parser.add_argument("--picoclvr_width", type=int, default=16)

parser.add_argument("--picocvlr_prune_properties", type=str, default="none")

##############################

parser.add_argument("--maze_height", type=int, default=23)

parser.add_argument("--maze_width", type=int, default=39)

parser.add_argument("--maze_nb_walls", type=int, default=45)

##############################

parser.add_argument("--snake_height", type=int, default=6)

parser.add_argument("--snake_width", type=int, default=8)

parser.add_argument("--snake_nb_colors", type=int, default=5)

parser.add_argument("--snake_length", type=int, default=200)

##############################

parser.add_argument("--stack_nb_steps", type=int, default=100)

parser.add_argument("--stack_nb_stacks", type=int, default=3)

parser.add_argument("--stack_nb_digits", type=int, default=3)

parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)

##############################

parser.add_argument("--expr_nb_variables", type=int, default=5)

parser.add_argument("--expr_sequence_length", type=int, default=40)

parser.add_argument("--expr_operand_max", type=int, default=9)

parser.add_argument("--expr_result_max", type=int, default=99)

parser.add_argument("--expr_input_file", type=str, default=None)

##############################

parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)

######################################################################

args = parser.parse_args()

assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}

if args.result_dir is None:
    args.result_dir = f"results_{args.task}"
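
# Illustrative invocation (flags as defined above):
#
#   python main.py --task=maze --nb_epochs=5 --overwrite_results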

######################################################################
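
# Per-task defaults, applied below to any option still at None after parsing.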
157 "nb_train_samples": 25000,
158 "nb_test_samples": 10000,
163 "nb_train_samples": 250000,
164 "nb_test_samples": 10000,
169 "nb_train_samples": 250000,
170 "nb_test_samples": 10000,
175 "nb_train_samples": 250000,
176 "nb_test_samples": 10000,
181 "nb_train_samples": 250000,
182 "nb_test_samples": 10000,
187 "nb_train_samples": 100000,
188 "nb_test_samples": 1000,
193 "nb_train_samples": 1000000,
194 "nb_test_samples": 10000,
199 "nb_train_samples": 25000,
200 "nb_test_samples": 1000,

if args.task in default_args:
    for k, v in default_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)

######################################################################

try:
    os.mkdir(args.result_dir)
except FileExistsError:
    if not args.overwrite_results:
        print(f"result directory {args.result_dir} already exists")
        exit(1)

log_file = open(os.path.join(args.result_dir, args.log_filename), "a")

# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# torch.use_deterministic_algorithms(True)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(args.seed)

######################################################################


def log_string(s):
    """Prepend a time stamp, write to the log file, and echo to stdout."""
    t = time.strftime("%Y%m%d-%H:%M:%S ", time.localtime())

    if log_file is not None:
        log_file.write(t + s + "\n")
        log_file.flush()

    print(t + s, flush=True)


for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")

######################################################################


def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))
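
# A pruner presumably keeps the properties for which it returns True: with
# "train+eval", training drops horizontal relations on green objects and
# evaluation keeps only those, testing generalization to held-out properties.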

picoclvr_pruner_train = (
    picoclvr_pruner_horizontal_green
    if args.picocvlr_prune_properties in {"train+eval"}
    else None
)

picoclvr_pruner_eval = (
    (lambda p: not picoclvr_pruner_horizontal_green(p))
    if args.picocvlr_prune_properties in {"train+eval", "eval"}
    else None
)

######################################################################

if args.task == "sandbox":
    task = tasks.SandBox(
        tasks.ProblemLevel1(),
        # tasks.ProblemAddition(zero_padded=False, inverted_result=False),
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        logger=log_string,
        device=device,
    )
278 elif args.task == "picoclvr":
279 task = tasks.PicoCLVR(
280 nb_train_samples=args.nb_train_samples,
281 nb_test_samples=args.nb_test_samples,
282 batch_size=args.batch_size,
283 height=args.picoclvr_height,
284 width=args.picoclvr_width,
285 nb_colors=args.picoclvr_nb_colors,
288 pruner_train=picoclvr_pruner_train,
289 pruner_eval=picoclvr_pruner_eval,
292 elif args.task == "mnist":
294 nb_train_samples=args.nb_train_samples,
295 nb_test_samples=args.nb_test_samples,
296 batch_size=args.batch_size,
300 elif args.task == "maze":
302 nb_train_samples=args.nb_train_samples,
303 nb_test_samples=args.nb_test_samples,
304 batch_size=args.batch_size,
305 height=args.maze_height,
306 width=args.maze_width,
307 nb_walls=args.maze_nb_walls,
311 elif args.task == "snake":
313 nb_train_samples=args.nb_train_samples,
314 nb_test_samples=args.nb_test_samples,
315 batch_size=args.batch_size,
316 height=args.snake_height,
317 width=args.snake_width,
318 nb_colors=args.snake_nb_colors,
319 length=args.snake_length,
320 prompt_length=args.snake_length // 2,
324 elif args.task == "stack":
326 nb_train_samples=args.nb_train_samples,
327 nb_test_samples=args.nb_test_samples,
328 batch_size=args.batch_size,
330 nb_steps=args.stack_nb_steps,
331 nb_stacks=args.stack_nb_stacks,
332 nb_digits=args.stack_nb_digits,
333 fraction_values_for_train=args.stack_fraction_values_for_train,
337 elif args.task == "expr":
339 nb_train_samples=args.nb_train_samples,
340 nb_test_samples=args.nb_test_samples,
341 nb_variables=args.expr_nb_variables,
342 sequence_length=args.expr_sequence_length,
343 operand_max=args.expr_operand_max,
344 result_max=args.expr_result_max,
345 batch_size=args.batch_size,
349 elif args.task == "world":
351 nb_train_samples=args.nb_train_samples,
352 nb_test_samples=args.nb_test_samples,
353 batch_size=args.batch_size,
354 vqae_nb_epochs=args.world_vqae_nb_epochs,
360 raise ValueError(f"Unknown task {args.task}")

######################################################################

log_string(f"device {device}")

vocabulary_size = task.vocabulary_size()

log_string(f"vocabulary_size {vocabulary_size}")

##############################

model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    dim_keys=args.dim_keys,
    dim_hidden=args.dim_hidden,
    nb_heads=args.nb_heads,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
)

model.to(device)

nb_parameters = sum(p.numel() for p in model.parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")

######################################################################

nb_epochs_finished = 0

if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])

        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")

    except FileNotFoundError:
        log_string("starting from scratch.")

    except:
        log_string("error when loading the checkpoint.")
        exit(1)

######################################################################

if args.task == "expr" and args.expr_input_file is not None:
    task.produce_results(
        nb_epochs_finished,
        model,
        args.result_dir,
        log_string,
        args.deterministic_synthesis,
        args.expr_input_file,
    )

    exit(0)

######################################################################

nb_epochs_default = 25  # assumed fallback, used only if --nb_epochs is non-positive
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default

# Compute the entropy of the training tokens

token_count = 0
for input in task.batches(split="train"):
    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
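
# exp(-sum_t p(t) log p(t)) is the perplexity of the best memoryless model
# of the training tokens, a useful reference point for the prediction
# perplexities logged below.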

##############################

# A bit of paranoia never hurts

train_examples = {}

for input in task.batches(split="train"):
    assert input.dim() == 2 and input.dtype == torch.int64
    for x in input:
        train_examples[x.sum().item()] = x

nb_total, nb_collisions = 0, 0
for input in task.batches(split="test"):
    assert input.dim() == 2 and input.dtype == torch.int64
    for x in input:
        nb_total += 1
        y = train_examples.get(x.sum().item())
        if y is not None:
            if x.size() == y.size() and (x - y).abs().sum() == 0:
                nb_collisions += 1

del train_examples

log_string(
    f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set"
)

##############################

if args.learning_rate_schedule == "cos":
    learning_rate_schedule = {}
    for n_epoch in range(args.nb_epochs):
        u = n_epoch / args.nb_epochs * math.pi
        learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
480 tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
484 learning_rate_schedule = {}
485 learning_rate = args.learning_rate
486 for n_epoch in range(args.nb_epochs):
488 learning_rate = u[n_epoch]
489 learning_rate_schedule[n_epoch] = learning_rate
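
# With the default "10: 2e-5,30: 4e-6" and --learning_rate=1e-4: epochs 0-9
# run at 1e-4, epochs 10-29 at 2e-5, and epochs 30 and later at 4e-6.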
491 log_string(f"learning_rate_schedule {learning_rate_schedule}")
493 ##############################
497 if nb_epochs_finished >= nb_epochs:
498 task.produce_results(
503 args.deterministic_synthesis,
506 for n_epoch in range(nb_epochs_finished, nb_epochs):
507 learning_rate = learning_rate_schedule[n_epoch]
509 log_string(f"learning_rate {learning_rate}")
511 if args.optim == "sgd":
512 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
513 elif args.optim == "adam":
514 optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
515 elif args.optim == "adamw":
516 optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
518 raise ValueError(f"Unknown optimizer {args.optim}.")

    model.train()

    nb_train_samples, acc_train_loss = 0, 0.0

    for input in task.batches(split="train"):
        input = input.to(device)
        output = model(mygpt.BracketedSequence(input)).x
        # next-token cross-entropy; output is (N, T, V) and F.cross_entropy
        # expects classes in dim 1, hence the transpose
        loss = F.cross_entropy(output.transpose(1, 2), input)
        acc_train_loss += loss.item() * input.size(0)
        nb_train_samples += input.size(0)
        nb_samples_seen += input.size(0)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    with torch.autograd.no_grad():
        model.eval()

        nb_test_samples, acc_test_loss = 0, 0.0

        for input in task.batches(split="test"):
            input = input.to(device)

            output = model(mygpt.BracketedSequence(input)).x
            loss = F.cross_entropy(output.transpose(1, 2), input)
            acc_test_loss += loss.item() * input.size(0)
            nb_test_samples += input.size(0)
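
        # exp of the mean per-token cross-entropy; the min(100, .) clamp
        # keeps math.exp from overflowing on divergent runs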
        train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))

        log_string(
            f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
        )

        task.produce_results(
            n_epoch, model, args.result_dir, log_string, args.deterministic_synthesis
        )
561 "nb_epochs_finished": n_epoch + 1,
562 "model_state": model.state_dict(),
563 "rng_state": torch.get_rng_state(),
566 if torch.cuda.is_available():
567 checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
569 checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
570 torch.save(checkpoint, checkpoint_name)
571 log_string(f"saved checkpoint {checkpoint_name}")

######################################################################