X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=24c21f54fa1113424af6a5638eb1a49a8c231735;hb=f04e158f0679195821a7288bbfe08f775b894096;hp=6d9f69d65120dc5152c4a38ed2cf72a7a7682b72;hpb=943a440a83b98de60bad767a9ad09f63b5088514;p=picoclvr.git

diff --git a/main.py b/main.py
index 6d9f69d..24c21f5 100755
--- a/main.py
+++ b/main.py
@@ -5,6 +5,9 @@
 # Written by Francois Fleuret <francois@fleuret.org>
 
+# torch.backends.cuda.matmul.allow_tf32
+# torch.autocast(torch.bfloat16)
+
 import math, sys, argparse, time, tqdm, itertools, os
 
 import torch, torchvision
 
@@ -15,14 +18,20 @@ import mygpt, tensorstack
 
 ######################################################################
 
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+if torch.cuda.is_available():
+    device = torch.device("cuda")
+    torch.backends.cuda.matmul.allow_tf32 = True
+else:
+    device = torch.device("cpu")
 
 ######################################################################
 
 parser = argparse.ArgumentParser(
-    description="An implementation of GPT with cache to solve a toy geometric reasonning task."
+    description="An implementation of GPT with cache to solve a toy geometric reasoning task."
 )
 
+parser.add_argument("--task", type=str, default="picoclvr")
+
 parser.add_argument("--log_filename", type=str, default="train.log")
 
 parser.add_argument("--result_dir", type=str, default="results_default")
@@ -31,17 +40,17 @@ parser.add_argument("--seed", type=int, default=0)
 
 parser.add_argument("--nb_epochs", type=int, default=25)
 
-parser.add_argument("--batch_size", type=int, default=100)
+parser.add_argument("--batch_size", type=int, default=25)
+
+parser.add_argument("--nb_train_samples", type=int, default=250000)
 
-parser.add_argument("--data_size", type=int, default=-1)
+parser.add_argument("--nb_test_samples", type=int, default=10000)
 
 parser.add_argument("--optim", type=str, default="adam")
 
-parser.add_argument("--learning_rate", type=float, default=1e-3)
+parser.add_argument("--learning_rate", type=float, default=1e-4)
 
-parser.add_argument(
-    "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
-)
+parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
 
 parser.add_argument("--dim_model", type=int, default=512)
 
@@ -55,8 +64,6 @@ parser.add_argument("--nb_blocks", type=int, default=12)
 
 parser.add_argument("--dropout", type=float, default=0.1)
 
-parser.add_argument("--nb_oneshot_blocks", type=int, default=-1)
-
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
 parser.add_argument("--no_checkpoint", action="store_true", default=False)
@@ -68,19 +75,28 @@ parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
 ##############################
 # picoclvr options
 
-parser.add_argument("--nb_colors", type=int, default=5)
+parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
+
+parser.add_argument("--picoclvr_height", type=int, default=12)
 
-parser.add_argument("--height", type=int, default=12)
+parser.add_argument("--picoclvr_width", type=int, default=16)
 
-parser.add_argument("--width", type=int, default=16)
+parser.add_argument("--picoclvr_prune_properties", type=str, default="none")
 
-parser.add_argument("--prune_properties", type=str, default="none")
+##############################
+# Maze options
+
+parser.add_argument("--maze_height", type=int, default=13)
+
+parser.add_argument("--maze_width", type=int, default=21)
+
+parser.add_argument("--maze_nb_walls", type=int, default=15)
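Note: the default learning-rate schedule above changes from the three-step "10: 2e-4,20: 4e-5,30: 8e-6" to the two-step "10: 2e-5,30: 4e-6". As a reading aid, here is a minimal sketch of how such an "epoch: rate" string can be expanded into one rate per epoch, assuming --learning_rate applies before the first threshold; the helper name and this parsing logic are illustrative, not code from the commit.

    # Hypothetical helper (not part of main.py): expand a schedule string such
    # as "10: 2e-5,30: 4e-6" into one learning rate per epoch, assuming the
    # base --learning_rate is used until the first threshold epoch.
    def expand_schedule(schedule, base_lr, nb_epochs):
        steps = {}
        for pair in schedule.split(","):
            epoch, rate = pair.split(":")
            steps[int(epoch)] = float(rate)
        rates, lr = [], base_lr
        for e in range(nb_epochs):
            lr = steps.get(e, lr)  # switch to the new rate at each threshold
            rates.append(lr)
        return rates

    # expand_schedule("10: 2e-5,30: 4e-6", 1e-4, 40)
    # -> 1e-4 for epochs 0-9, 2e-5 for epochs 10-29, 4e-6 for epochs 30-39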
 
 ######################################################################
 
 args = parser.parse_args()
 
-assert args.prune_properties in {"none", "train+eval", "eval"}
+assert args.picoclvr_prune_properties in {"none", "train+eval", "eval"}
 
 try:
     os.mkdir(args.result_dir)
@@ -89,7 +105,7 @@ except FileExistsError:
     print(f"result directory {args.result_dir} already exists")
     exit(1)
 
-log_file = open(os.path.join(args.result_dir, args.log_filename), "w")
+log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
 
 if args.seed >= 0:
     # torch.backends.cudnn.deterministic = True
@@ -122,7 +138,6 @@ for n in vars(args):
 def masked_inplace_autoregression(
     model, batch_size, input, ar_mask, forbidden_tokens=None, device=torch.device("cpu")
 ):
-
     for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
         i = (ar_mask.sum(0) > 0).nonzero()
         if i.min() > 0:
@@ -162,7 +177,6 @@ import picoclvr
 
 
 class TaskPicoCLVR(Task):
-
     # Make a tensor from a list of strings
     def tensorize(self, descr):
         token_descr = [s.strip().split(" ") for s in descr]
@@ -272,6 +286,8 @@ class TaskPicoCLVR(Task):
 
     def __init__(
         self,
+        nb_train_samples,
+        nb_test_samples,
         batch_size,
         height,
         width,
@@ -293,12 +309,12 @@ class TaskPicoCLVR(Task):
         self.width = width
         self.batch_size = batch_size
         self.device = device
-        nb = args.data_size if args.data_size > 0 else 250000
         self.pruner_train = pruner_train
         self.pruner_eval = pruner_eval
 
         param = {
-            "nb": nb,
+            "nb_train_samples": nb_train_samples,
+            "nb_test_samples": nb_test_samples,
             "height": height,
             "width": width,
             "nb_colors": nb_colors,
@@ -306,11 +322,13 @@ class TaskPicoCLVR(Task):
             "rng_state": list(torch.get_rng_state()),
         }
 
-        log_string(f"generating {nb} samples (can take some time)")
+        log_string(
+            f"generating {nb_train_samples+nb_test_samples} samples (can take some time)"
+        )
         self.train_descr = generate_descr(
-            (nb * 4) // 5, "train", pruner=self.pruner_train
+            nb_train_samples, "train", pruner=self.pruner_train
         )
-        self.test_descr = generate_descr((nb * 1) // 5, "test", pruner=None)
+        self.test_descr = generate_descr(nb_test_samples, "test", pruner=None)
 
         # Build the tokenizer
         tokens = {"", ""}
@@ -341,7 +359,6 @@ class TaskPicoCLVR(Task):
         return len(self.token2id)
 
     def compute_missing_properties(self, n_epoch, model, pruner=None):
-
         acc_nb_requested_properties = []
         acc_nb_missing_properties = []
         acc_nb_results = 0
@@ -380,7 +397,6 @@ class TaskPicoCLVR(Task):
     ######################################################################
 
     def produce_results(self, n_epoch, model):
-
         self.compute_missing_properties(n_epoch, model)
 
         if self.pruner_eval is not None:
@@ -421,9 +437,7 @@ class TaskPicoCLVR(Task):
             f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
         )
 
-        img = picoclvr.descr2img(
-            result_descr, [0], height=self.height, width=self.width
-        )
+        img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
 
         if img.dim() == 5:
             if img.size(1) == 1:
@@ -437,7 +451,7 @@ class TaskPicoCLVR(Task):
                 0,
             )
 
-        image_name = os.path.join(args.result_dir, f"result_{n_epoch:04d}.png")
+        image_name = os.path.join(args.result_dir, f"picoclvr_result_{n_epoch:04d}.png")
         torchvision.utils.save_image(
             img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=1.0
         )
@@ -446,27 +460,247 @@ class TaskPicoCLVR(Task):
 
 ######################################################################
 
-log_string(f"device {device}")
+
+class TaskMNIST(Task):
+    def __init__(self, batch_size, device=torch.device("cpu")):
+        self.device = device
+        self.batch_size = batch_size
+
+    def batches(self, split="train"):
+        assert split in {"train", "test"}
+        data_set = torchvision.datasets.MNIST(
+            root="./data", train=(split == "train"), download=True
+        )
+        data_input = data_set.data.view(-1, 28 * 28).long()
+        nb = args.nb_train_samples if split == "train" else args.nb_test_samples
+        data_input = data_input[:nb]  # cap each split to its own sample count
+        for batch in tqdm.tqdm(
+            data_input.split(self.batch_size), desc=f"epoch-{split}"
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return 256
+
+    def produce_results(self, n_epoch, model):
+        results = torch.empty(64, 28 * 28, device=self.device, dtype=torch.int64)
+        ar_mask = torch.full_like(results, 1)
+        masked_inplace_autoregression(
+            model, self.batch_size, results, ar_mask, device=self.device
+        )
+        image_name = os.path.join(args.result_dir, f"result_mnist_{n_epoch:04d}.png")
+        torchvision.utils.save_image(
+            1 - results.reshape(-1, 1, 28, 28) / 255.0,
+            image_name,
+            nrow=16,
+            pad_value=0.8,
+        )
+        log_string(f"wrote {image_name}")
+
+
+######################################################################
+
+import maze
+
+
+class TaskMaze(Task):
+    def map2seq(self, *m):
+        return torch.cat([x.flatten(1) for x in m], 1)
+
+    def seq2map(self, s):
+        s = s.reshape(s.size(0), -1, self.height, self.width)
+        return (s[:, k] for k in range(s.size(1)))
+
+    def __init__(
+        self,
+        nb_train_samples,
+        nb_test_samples,
+        batch_size,
+        height,
+        width,
+        nb_walls,
+        device=torch.device("cpu"),
+    ):
+        self.batch_size = batch_size
+        self.height = height
+        self.width = width
+        self.device = device
+
+        train_mazes, train_paths, train_policies = maze.create_maze_data(
+            nb_train_samples,
+            height=height,
+            width=width,
+            nb_walls=nb_walls,
+            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-train"),
+        )
+        self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
+        self.train_policies = train_policies.flatten(-2).to(device)
+
+        test_mazes, test_paths, test_policies = maze.create_maze_data(
+            nb_test_samples,
+            height=height,
+            width=width,
+            nb_walls=nb_walls,
+            progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc="data-test"),
+        )
+        self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
+        self.test_policies = test_policies.flatten(-2).to(device)
+
+        self.nb_codes = self.train_input.max() + 1
+
+    def batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
+        ):
+            yield batch
+
+    def policy_batches(self, split="train", nb_to_use=-1, desc=None):
+        assert split in {"train", "test"}
+        input = self.train_input if split == "train" else self.test_input
+        policies = self.train_policies if split == "train" else self.test_policies
+        input = input[:, : self.height * self.width]
+        policies = policies * (input != maze.v_wall)[:, None]
+
+        if nb_to_use > 0:
+            input = input[:nb_to_use]
+            policies = policies[:nb_to_use]
+
+        if desc is None:
+            desc = f"epoch-{split}"
+        for batch in tqdm.tqdm(
+            zip(input.split(self.batch_size), policies.split(self.batch_size)),
+            dynamic_ncols=True,
+            desc=desc,
+        ):
+            yield batch
+
+    def vocabulary_size(self):
+        return self.nb_codes
+
+    def compute_error(self, model, split="train", nb_to_use=-1):
+        nb_total, nb_correct = 0, 0
+        for input in self.batches(split, nb_to_use):
+            result = input.clone()
+            ar_mask = result.new_zeros(result.size())
+            ar_mask[:, self.height * self.width :] = 1
+            result *= 1 - ar_mask
+            masked_inplace_autoregression(
+                model, self.batch_size, result, ar_mask, device=self.device
+            )
+            mazes, paths = self.seq2map(result)
+            nb_correct += maze.path_correctness(mazes, paths).long().sum()
+            nb_total += mazes.size(0)
+
+        return nb_total, nb_correct
+
+    def produce_results(self, n_epoch, model):
+        with torch.autograd.no_grad():
+            t = model.training
+            model.eval()
+
+            train_nb_total, train_nb_correct = self.compute_error(
+                model, "train", nb_to_use=1000
+            )
+            log_string(
+                f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+            )
+
+            test_nb_total, test_nb_correct = self.compute_error(
+                model, "test", nb_to_use=1000
+            )
+            log_string(
+                f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+            )
+
+            input = self.test_input[:48]
+            result = input.clone()
+            ar_mask = result.new_zeros(result.size())
+            ar_mask[:, self.height * self.width :] = 1
+            result *= 1 - ar_mask
+            masked_inplace_autoregression(
+                model, self.batch_size, result, ar_mask, device=self.device
+            )
+
+            mazes, paths = self.seq2map(input)
+            _, predicted_paths = self.seq2map(result)
+
+            filename = os.path.join(args.result_dir, f"result_{n_epoch:04d}.png")
+            maze.save_image(
+                filename,
+                mazes=mazes,
+                target_paths=paths,
+                predicted_paths=predicted_paths,
+                path_correct=maze.path_correctness(mazes, predicted_paths),
+            )
+            log_string(f"wrote {filename}")
+
+            model.train(t)
+
+
+######################################################################
 
-def pruner_horizontal_green(p):
+def picoclvr_pruner_horizontal_green(p):
     return not ("green" in p and ("left" in p or "right" in p))
 
-task = TaskPicoCLVR(
-    batch_size=args.batch_size,
-    height=args.height,
-    width=args.width,
-    nb_colors=args.nb_colors,
-    device=device,
-    pruner_train=pruner_horizontal_green
-    if args.prune_properties in {"train+eval"}
-    else None,
-    pruner_eval=(lambda p: not pruner_horizontal_green(p))
-    if args.prune_properties in {"train+eval", "eval"}
-    else None,
+picoclvr_pruner_train = (
+    picoclvr_pruner_horizontal_green
+    if args.picoclvr_prune_properties in {"train+eval"}
+    else None
+)
+
+picoclvr_pruner_eval = (
+    (lambda p: not picoclvr_pruner_horizontal_green(p))
+    if args.picoclvr_prune_properties in {"train+eval", "eval"}
+    else None
 )
 
+######################################################################
+
+if args.task == "picoclvr":
+    task = TaskPicoCLVR(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        height=args.picoclvr_height,
+        width=args.picoclvr_width,
+        nb_colors=args.picoclvr_nb_colors,
+        device=device,
+        pruner_train=picoclvr_pruner_train,
+        pruner_eval=picoclvr_pruner_eval,
+    )
+
+elif args.task == "mnist":
+    task = TaskMNIST(
+        batch_size=args.batch_size,
+        device=device,
+    )
+
+elif args.task == "maze":
+    task = TaskMaze(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        height=args.maze_height,
+        width=args.maze_width,
+        nb_walls=args.maze_nb_walls,
+        device=device,
+    )
+
+else:
+    raise ValueError(f"Unknown task {args.task}")
+
+######################################################################
+
+log_string(f"device {device}")
+
 vocabulary_size = task.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
@@ -558,7 +792,6 @@ if nb_epochs_finished >= nb_epochs:
     task.produce_results(nb_epochs_finished, model)
 
 for n_epoch in range(nb_epochs_finished, nb_epochs):
-
     learning_rate = learning_rate_schedule[n_epoch]
 
     log_string(f"learning_rate {learning_rate}")
@@ -589,7 +822,6 @@ for n_epoch in range(nb_epochs_finished, nb_epochs):
     optimizer.step()
 
     with torch.autograd.no_grad():
-
         model.eval()
 
         nb_test_samples, acc_test_loss = 0, 0.0
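Note: all three tasks drive generation through masked_inplace_autoregression above: positions where ar_mask is 1 are resampled token by token, left to right, while the remaining positions are kept as conditioning (the maze layout, the picoclvr primer, or nothing at all for MNIST, whose mask is all ones). The sketch below restates that logic in isolation. It assumes a stand-in model mapping an (N, T) tensor of token ids to (N, T, vocab) logits, which is simpler than the interface mygpt actually exposes, so treat it as an illustration rather than the function from the commit.

    import torch

    # Illustrative restatement of mask-driven in-filling. `model` is assumed
    # to map an (N, T) LongTensor of token ids to (N, T, vocab) logits.
    def fill_masked(model, input, ar_mask):
        positions = (ar_mask.sum(0) > 0).nonzero()
        for t in range(positions.min().item(), positions.max().item() + 1):
            logits = model(input)
            dist = torch.distributions.categorical.Categorical(logits=logits[:, t])
            sampled = dist.sample()
            # overwrite only masked positions; keep conditioning tokens intact
            input[:, t] = ar_mask[:, t] * sampled + (1 - ar_mask[:, t]) * input[:, t]
        return input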