# torch.backends.cuda.matmul.allow_tf32
# torch.autocast(torch.bfloat16)
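+# (the two lines above note possible speedups: TF32 matmuls and bf16 autocast
+# trade a little precision for throughput on recent GPUs; left disabled here)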
-import math, sys, argparse, time, tqdm, itertools, os
+import math, sys, argparse, time, tqdm, os
import torch, torchvision
from torch import nn
######################################################################
parser = argparse.ArgumentParser(
- description="An implementation of GPT with cache to solve a toy geometric reasoning task."
+ description="An implementation of GPT with cache.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
-parser.add_argument("--task", type=str, default="picoclvr")
+parser.add_argument(
+ "--task",
+ type=str,
+ default="picoclvr",
+ help="picoclvr, mnist, maze, snake, stack, expr",
+)
-parser.add_argument("--log_filename", type=str, default="train.log")
+parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
-parser.add_argument("--result_dir", type=str, default="results_default")
+parser.add_argument("--result_dir", type=str, default=None)
parser.add_argument("--seed", type=int, default=0)
-parser.add_argument("--nb_epochs", type=int, default=25)
+parser.add_argument("--nb_epochs", type=int, default=None)
-parser.add_argument("--batch_size", type=int, default=25)
+parser.add_argument("--batch_size", type=int, default=None)
-parser.add_argument("--nb_train_samples", type=int, default=250000)
+parser.add_argument("--nb_train_samples", type=int, default=None)
-parser.add_argument("--nb_test_samples", type=int, default=10000)
+parser.add_argument("--nb_test_samples", type=int, default=None)
parser.add_argument("--optim", type=str, default="adam")
parser.add_argument("--maze_nb_walls", type=int, default=15)
+##############################
+# Snake options
+
+parser.add_argument("--snake_height", type=int, default=6)
+
+parser.add_argument("--snake_width", type=int, default=8)
+
+parser.add_argument("--snake_nb_colors", type=int, default=5)
+
+parser.add_argument("--snake_length", type=int, default=200)
+
+##############################
+# Stack options
+
+parser.add_argument("--stack_nb_steps", type=int, default=100)
+
+parser.add_argument("--stack_nb_stacks", type=int, default=1)
+
+parser.add_argument("--stack_nb_digits", type=int, default=3)
+
+parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
+
######################################################################
args = parser.parse_args()
assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
+if args.result_dir is None:
+ args.result_dir = f"results_{args.task}"
+
+######################################################################
+
+default_args = {
+ "picoclvr": {
+ "nb_epochs": 25,
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
+ "mnist": {
+ "nb_epochs": 25,
+ "batch_size": 10,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
+ "maze": {
+ "nb_epochs": 25,
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
+ "snake": {
+ "nb_epochs": 5,
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
+ "stack": {
+ "nb_epochs": 5,
+ "batch_size": 25,
+ "nb_train_samples": 100000,
+ "nb_test_samples": 1000,
+ },
+ "expr": {
+ "nb_epochs": 5,
+ "batch_size": 25,
+ "nb_train_samples": 100000,
+ "nb_test_samples": 1000,
+ },
+}
+
+if args.task in default_args:
+ for k, v in default_args[args.task].items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
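+
+# note: only arguments still at their None default are overwritten, so values
+# given explicitly on the command line take precedence over these defaults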
+
+######################################################################
+
try:
os.mkdir(args.result_dir)
except FileExistsError:
######################################################################
+# ar_mask is a 0/1 mask with 1s on the positions to generate
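+#
+# e.g. with a three-token prompt followed by two tokens to generate:
+#   input   = [p0, p1, p2, x3, x4]
+#   ar_mask = [ 0,  0,  0,  1,  1]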
+
+
def masked_inplace_autoregression(
- model, batch_size, input, ar_mask, forbidden_tokens=None, device=torch.device("cpu")
+ model,
+ batch_size,
+ input,
+ ar_mask,
+ forbidden_tokens=None,
+ progress_bar_desc="autoregression",
+ device=torch.device("cpu"),
):
- for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
+ batches = zip(input.split(batch_size), ar_mask.split(batch_size))
+
+ if progress_bar_desc is not None:
+ batches = tqdm.tqdm(
+ batches,
+ dynamic_ncols=True,
+ desc=progress_bar_desc,
+            total=(input.size(0) + batch_size - 1) // batch_size,
+ )
+
+ for input, ar_mask in batches:
i = (ar_mask.sum(0) > 0).nonzero()
if i.min() > 0:
model(
input,
ar_masks,
forbidden_tokens,
+ progress_bar_desc=None,
device=self.device,
)
model.train(t)
image_name = os.path.join(args.result_dir, f"picoclvr_result_{n_epoch:04d}.png")
torchvision.utils.save_image(
- img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=1.0
+ img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=0.0
)
log_string(f"wrote {image_name}")
masked_inplace_autoregression(
model, self.batch_size, results, ar_mask, device=self.device
)
- image_name = os.path.join(args.result_dir, f"result_mnist_{n_epoch:04d}.png")
+ image_name = os.path.join(args.result_dir, f"mnist_result_{n_epoch:04d}.png")
torchvision.utils.save_image(
1 - results.reshape(-1, 1, 28, 28) / 255.0,
image_name,
def compute_error(self, model, split="train", nb_to_use=-1):
nb_total, nb_correct = 0, 0
- for input in task.batches(split, nb_to_use):
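+        # count[i, j] tallies correct paths of optimal length i predicted with
+        # length j (only paths that are actually correct are recorded)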
+ count = torch.zeros(
+ self.width * self.height,
+ self.width * self.height,
+ device=self.device,
+ dtype=torch.int64,
+ )
+        for input in tqdm.tqdm(
+            self.batches(split, nb_to_use),
+            dynamic_ncols=True,
+            desc=f"error-{split}",
+        ):
result = input.clone()
ar_mask = result.new_zeros(result.size())
ar_mask[:, self.height * self.width :] = 1
result *= 1 - ar_mask
masked_inplace_autoregression(
- model, self.batch_size, result, ar_mask, device=self.device
+ model,
+ self.batch_size,
+ result,
+ ar_mask,
+ progress_bar_desc=None,
+ device=self.device,
)
mazes, paths = self.seq2map(result)
- nb_correct += maze.path_correctness(mazes, paths).long().sum()
+ path_correctness = maze.path_correctness(mazes, paths)
+ nb_correct += path_correctness.long().sum()
nb_total += mazes.size(0)
- return nb_total, nb_correct
+ optimal_path_lengths = (
+ (input[:, self.height * self.width :] == maze.v_path).long().sum(1)
+ )
+ predicted_path_lengths = (
+ (result[:, self.height * self.width :] == maze.v_path).long().sum(1)
+ )
+ optimal_path_lengths = optimal_path_lengths[path_correctness]
+ predicted_path_lengths = predicted_path_lengths[path_correctness]
+            # advanced-indexing "+=" collapses duplicate (i, j) pairs within a
+            # batch; index_put_ with accumulate=True counts every path
+            count.index_put_(
+                (optimal_path_lengths, predicted_path_lengths),
+                torch.ones_like(optimal_path_lengths),
+                accumulate=True,
+            )
+
+ if count.max() == 0:
+ count = None
+ else:
+ count = count[
+ : count.sum(1).nonzero().max() + 1, : count.sum(0).nonzero().max() + 1
+ ]
+
+ return nb_total, nb_correct, count
def produce_results(self, n_epoch, model):
with torch.autograd.no_grad():
t = model.training
model.eval()
- train_nb_total, train_nb_correct = self.compute_error(
+ train_nb_total, train_nb_correct, count = self.compute_error(
model, "train", nb_to_use=1000
)
log_string(
- f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ f"accuracy_train {n_epoch} nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
)
- test_nb_total, test_nb_correct = self.compute_error(
+ test_nb_total, test_nb_correct, count = self.compute_error(
model, "test", nb_to_use=1000
)
log_string(
- f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
+ if count is not None:
+ proportion_optimal = count.diagonal().sum().float() / count.sum()
+ log_string(f"proportion_optimal_test {proportion_optimal*100:.02f}%")
+ with open(
+ os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.txt"), "w"
+ ) as f:
+ for i in range(count.size(0)):
+ for j in range(count.size(1)):
+ eol = " " if j < count.size(1) - 1 else "\n"
+ f.write(f"{count[i,j]}{eol}")
+
input = self.test_input[:48]
result = input.clone()
ar_mask = result.new_zeros(result.size())
mazes, paths = self.seq2map(input)
_, predicted_paths = self.seq2map(result)
- filename = os.path.join(args.result_dir, f"result_{n_epoch:04d}.png")
+ filename = os.path.join(args.result_dir, f"maze_result_{n_epoch:04d}.png")
maze.save_image(
filename,
mazes=mazes,
target_paths=paths,
predicted_paths=predicted_paths,
path_correct=maze.path_correctness(mazes, predicted_paths),
+ path_optimal=maze.path_optimality(paths, predicted_paths),
)
log_string(f"wrote {filename}")
######################################################################
+
+import snake
+
+
class TaskSnake(Task):
    def __init__(
        self,
+        nb_train_samples,
+        nb_test_samples,
        batch_size,
height,
width,
- nb_walls,
+ nb_colors,
+ length,
+ prompt_length,
device=torch.device("cpu"),
):
self.batch_size = batch_size
self.height = height
self.width = width
self.device = device
+ self.prompt_length = prompt_length
- # self.train_input =
- # self.test_input =
+ self.train_input, self.train_prior_visits, _, _ = snake.generate_sequences(
+ nb_train_samples,
+ height,
+ width,
+ nb_colors,
+ length,
+ prompt_length,
+ self.device,
+ )
+ self.test_input, self.test_prior_visits, _, _ = snake.generate_sequences(
+ nb_test_samples,
+ height,
+ width,
+ nb_colors,
+ length,
+ prompt_length,
+ self.device,
+ )
- self.nb_codes = max(self.train_input.max(), self.train_input.max()) + 1
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
def batches(self, split="train", nb_to_use=-1, desc=None):
assert split in {"train", "test"}
):
yield batch
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(self, n_epoch, model):
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ def compute_nb_correct(input, prior_visits):
+ result = input.clone()
+ i = torch.arange(result.size(1), device=result.device)[None, :]
+ ar_mask = (
+ torch.logical_and(i >= self.prompt_length * 2, i % 2 == 0)
+ .long()
+ .expand_as(result)
+ )
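+            # 1s on every second position past the prompt (each step spans two
+            # tokens), i.e. exactly the tokens the model must regenerate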
+ result *= 1 - ar_mask
+
+ # snake.solver(result,ar_mask)
+
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+
+ nb_total = ((prior_visits > 0) * ar_mask).sum()
+
+ nb_correct = (
+ (result == input).long() * (prior_visits > 0) * ar_mask
+ ).sum()
+
+ # nb_total = result.size(0)
+ # nb_correct = ((result - input).abs().sum(1) == 0).sum()
+
+ return nb_total, nb_correct
+
+ # train_nb_total, train_nb_correct = compute_nb_correct(
+ # self.train_input, self.train_prior_visits
+ # )
+
+ # log_string(
+ # f"accuracy_train nb_total {train_nb_total} nb_correct {train_nb_correct} accuracy {(100.0*train_nb_correct)/train_nb_total:.02f}%"
+ # )
+
+ test_nb_total, test_nb_correct = compute_nb_correct(
+ self.test_input[:1000], self.test_prior_visits[:1000]
+ )
+
+ log_string(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
+ model.train(t)
+
+
+######################################################################
+
+
+import stack
+
+
+class TaskStack(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ nb_steps,
+ nb_stacks,
+ nb_digits,
+ fraction_values_for_train=None,
+ device=torch.device("cpu"),
+ ):
+ self.batch_size = batch_size
+ self.nb_steps = nb_steps
+ self.nb_stacks = nb_stacks
+ self.nb_digits = nb_digits
+ self.device = device
+
+ if fraction_values_for_train is None:
+ values_for_train = None
+ values_for_test = None
+ else:
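+            # keep the train and test value sets disjoint, so the test split
+            # probes generalization to values never seen during training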
+            all_values = torch.randperm(10**nb_digits)
+            nb_for_train = int(all_values.size(0) * fraction_values_for_train)
+            values_for_train = all_values[:nb_for_train]
+            values_for_test = all_values[nb_for_train:]
+
+ self.train_input, self.train_stack_counts = stack.generate_sequences(
+ nb_train_samples,
+ nb_steps,
+ nb_stacks,
+ nb_digits,
+ values_for_train,
+ self.device,
+ )
+
+ self.test_input, self.test_stack_counts = stack.generate_sequences(
+ nb_test_samples,
+ nb_steps,
+ nb_stacks,
+ nb_digits,
+ values_for_test,
+ self.device,
+ )
+
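+        # odd tokens below 2 * nb_stacks encode the pop operations; this logs
+        # a histogram of the stack counts recorded at those positions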
+ i = torch.logical_and(self.test_input % 2 == 1, self.test_input < 2 * nb_stacks)
+ counts = self.test_stack_counts.flatten()[i.flatten()]
+ counts = F.one_hot(counts).sum(0)
+ log_string(f"test_pop_stack_counts {counts}")
+
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield batch
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(self, n_epoch, model):
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ def compute_nb_correct(input):
+ result = input.clone()
+ stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+ ar_mask = (result != input).long()
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+
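+            # a predicted value spans 1 + nb_digits tokens; group by value so
+            # it only counts as correct when every one of its tokens matches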
+ errors = ((result != input).long() * ar_mask).reshape(
+ -1, 1 + self.nb_digits
+ )
+ ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
+
+ nb_total = ar_mask.max(1).values.sum()
+ nb_correct = nb_total - errors.max(1).values.sum()
+
+ return nb_total, nb_correct
+
+ test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
+
+ log_string(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
+ ##############################################################
+ # Log a few generated sequences
+ input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+ result = input.clone()
+ stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+ ar_mask = (result != input).long()
+ for n in range(result.size(0)):
+ log_string(
+ f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+ )
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+ for n in range(result.size(0)):
+ log_string(
+ f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+ )
+ ##############################################################
+
+ model.train(t)
+
+
+######################################################################
+
+
+import expr
+
+
+class TaskExpr(Task):
+ def __init__(
+ self,
+ nb_train_samples,
+ nb_test_samples,
+ batch_size,
+ device=torch.device("cpu"),
+ ):
+ self.batch_size = batch_size
+ self.device = device
+
+ train_sequences = expr.generate_sequences(nb_train_samples)
+ test_sequences = expr.generate_sequences(nb_test_samples)
+        # sorted for a deterministic mapping; " " is the padding character
+        symbols = sorted(set(" " + "".join(train_sequences + test_sequences)))
+        self.char2id = dict([(c, n) for n, c in enumerate(symbols)])
+        self.id2char = dict([(n, c) for c, n in self.char2id.items()])
+ len_max = max([len(x) for x in train_sequences + test_sequences])
+        self.train_input = torch.cat(
+            [
+                torch.tensor(
+                    [self.char2id[c] for c in s + " " * (len_max - len(s))]
+                )[None, :]
+                for s in train_sequences
+            ],
+            0,
+        )
+        self.test_input = torch.cat(
+            [
+                torch.tensor(
+                    [self.char2id[c] for c in s + " " * (len_max - len(s))]
+                )[None, :]
+                for s in test_sequences
+            ],
+            0,
+        )
+ self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
+
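+    # small decoding helper (illustrative addition, nothing in this excerpt
+    # calls it): map a row of token ids back to its string
+    def seq2str(self, s):
+        return "".join([self.id2char[int(k)] for k in s])
+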
+ def batches(self, split="train", nb_to_use=-1, desc=None):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ if desc is None:
+ desc = f"epoch-{split}"
+ for batch in tqdm.tqdm(
+ input.split(self.batch_size), dynamic_ncols=True, desc=desc
+ ):
+ yield batch
+
+ def vocabulary_size(self):
+ return self.nb_codes
+
+ def produce_results(self, n_epoch, model):
+        # !!! FIXME: body copied from TaskStack; it still calls
+        # !!! stack.remove_popped_values and uses self.nb_stacks /
+        # !!! self.nb_digits, none of which exist on TaskExpr
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ def compute_nb_correct(input):
+ result = input.clone()
+ stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+ ar_mask = (result != input).long()
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+
+ errors = ((result != input).long() * ar_mask).reshape(
+ -1, 1 + self.nb_digits
+ )
+ ar_mask = ar_mask.reshape(-1, 1 + self.nb_digits)
+
+ nb_total = ar_mask.max(1).values.sum()
+ nb_correct = nb_total - errors.max(1).values.sum()
+
+ return nb_total, nb_correct
+
+ test_nb_total, test_nb_correct = compute_nb_correct(self.test_input[:1000])
+
+ log_string(
+ f"accuracy_test {n_epoch} nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
+ )
+
+ ##############################################################
+ # Log a few generated sequences
+ input = self.test_input[:10, : 12 * (1 + self.nb_digits)]
+ result = input.clone()
+ stack.remove_popped_values(result, self.nb_stacks, self.nb_digits)
+ ar_mask = (result != input).long()
+ for n in range(result.size(0)):
+ log_string(
+ f"test_before {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+ )
+ masked_inplace_autoregression(
+ model, self.batch_size, result, ar_mask, device=self.device
+ )
+ for n in range(result.size(0)):
+ log_string(
+ f"test_after {stack.seq_to_str(result[n],nb_stacks=self.nb_stacks,nb_digits=self.nb_digits)}"
+ )
+ ##############################################################
+
+ model.train(t)
+
######################################################################
device=device,
)
+elif args.task == "snake":
+ task = TaskSnake(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ height=args.snake_height,
+ width=args.snake_width,
+ nb_colors=args.snake_nb_colors,
+ length=args.snake_length,
+ prompt_length=args.snake_length // 2,
+ device=device,
+ )
+
+elif args.task == "stack":
+ task = TaskStack(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ nb_steps=args.stack_nb_steps,
+ nb_stacks=args.stack_nb_stacks,
+ nb_digits=args.stack_nb_digits,
+ fraction_values_for_train=args.stack_fraction_values_for_train,
+ device=device,
+ )
+
+elif args.task == "expr":
+ task = TaskExpr(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ device=device,
+ )
+
else:
raise ValueError(f"Unknown task {args.task}")
for input in task.batches(split="test"):
input = input.to(device)
- # input, loss_masks, true_images = task.excise_last_image(input)
- # input, loss_masks = task.add_true_image(input, true_images, loss_masks)
-
output = model(mygpt.BracketedSequence(input)).x
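+        # the model returns logits as (N, T, vocab); cross_entropy expects the
+        # class dimension second, hence the transpose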
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_test_loss += loss.item() * input.size(0)