######################################################################
-parser = argparse.ArgumentParser(
- description="An implementation of GPT with cache to solve a toy geometric reasoning task."
-)
+parser = argparse.ArgumentParser(description="Maze shortest-path solving with a GPT.")
parser.add_argument("--log_filename", type=str, default="train.log")
parser.add_argument("--overwrite_results", action="store_true", default=False)
+parser.add_argument("--one_shot", action="store_true", default=False)
+
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################
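+# Fills in place, batch by batch and position by position, the entries of
+# `input` flagged by `ar_mask`, querying the model autoregressively. A first
+# call on the unmasked prefix primes the model's cache, then each step
+# processes a single position through mygpt.BracketedSequence.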
def masked_inplace_autoregression(model, batch_size, input, ar_mask):
-
for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
i = (ar_mask.sum(0) > 0).nonzero()
if i.min() > 0:
- model(
- mygpt.BracketedSequence(input, 0, i.min())
- ) # Needed to initialize the model's cache
+ # Needed to initialize the model's cache
+ model(mygpt.BracketedSequence(input, 0, i.min()))
for s in range(i.min(), i.max() + 1):
output = model(mygpt.BracketedSequence(input, s, 1)).x
logits = output[:, s]
######################################################################
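+# Per-token cross-entropy of the model over a full split, exponentiated into a
+# perplexity (the exponent is capped at 100 to avoid overflow).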
+def compute_perplexity(model, split="train"):
+ with torch.autograd.no_grad():
+ t = model.training
+ model.eval()
+
+ nb_samples, acc_loss = 0, 0.0
+
+ for input in task.batches(split=split):
+ input = input.to(device)
+
+ output = model(mygpt.BracketedSequence(input)).x
+ loss = F.cross_entropy(output.transpose(1, 2), input)
+ acc_loss += loss.item() * input.size(0)
+ nb_samples += input.size(0)
+
+ model.train(t)
+
+ return math.exp(min(100, acc_loss / nb_samples))
+
+
+######################################################################
+
+
+def nb_rank_error(output, targets):
+    output = output.reshape(-1, output.size(-1))
+    targets = targets.reshape(-1, targets.size(-1))
+    i = output.argmax(1)
+    # torch.gather: out[k][j] = input[k][index[k][j]], hence u[k] = targets[k][i[k]]
+    u = targets.gather(1, i[:, None]).squeeze(1)
+    # Count the positions where the model's top-ranked action does not have the
+    # highest target score.
+    return (u < targets.max(1).values).long().sum()
+
+
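+# Trains a small linear read-out on top of the frozen GPT's hidden states to
+# predict, for every cell of the maze, a distribution over the 4 possible
+# moves (the "policy"). The GPT itself is not updated.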
+def one_shot(gpt, task):
+ t = gpt.training
+ gpt.eval()
+ model = nn.Linear(args.dim_model, 4).to(device)
+
+ for n_epoch in range(args.nb_epochs):
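+        # Note: a fresh Adam optimizer (with reset moment estimates) is
+        # created at every epoch.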
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+ acc_train_loss, nb_train_samples = 0, 0
+ for input, targets in task.policy_batches(split="train"):
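+            # with_readout=False presumably makes the GPT return its last
+            # hidden states (of size dim_model) rather than token logits.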
+ output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+ output = model(output_gpt)
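+            # Cross-entropy between the read-out's prediction and the target
+            # policy (which may be a soft distribution over the 4 moves).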
+ loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+ acc_train_loss += loss.item() * input.size(0)
+ nb_train_samples += input.size(0)
+
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+
+ acc_test_loss, nb_test_samples = 0, 0
+ for input, targets in task.policy_batches(split="test"):
+ output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+ output = model(output_gpt)
+ loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+ acc_test_loss += loss.item() * input.size(0)
+ nb_test_samples += input.size(0)
+
+        log_string(
+            f"one_shot {n_epoch=} train_loss {acc_train_loss/nb_train_samples} test_loss {acc_test_loss/nb_test_samples}"
+        )
+
+ gpt.train(t)
+
+
+######################################################################
+
+
class Task:
def batches(self, split="train"):
pass
self.width = width
self.device = device
- mazes_train, paths_train = maze.create_maze_data(
+ train_mazes, train_paths, train_policies = maze.create_maze_data(
nb_train_samples,
height=height,
width=width,
nb_walls=nb_walls,
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
)
- mazes_train, paths_train = mazes_train.to(device), paths_train.to(device)
- self.train_input = self.map2seq(mazes_train, paths_train)
- self.nb_codes = self.train_input.max() + 1
+ self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
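+        # The policies presumably come as (N, 4, height, width); store them as
+        # (N, height*width, 4) so they align with the flattened maze sequence.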
+ self.train_policies = train_policies.flatten(-2).permute(0, 2, 1).to(device)
- mazes_test, paths_test = maze.create_maze_data(
+ test_mazes, test_paths, test_policies = maze.create_maze_data(
nb_test_samples,
height=height,
width=width,
nb_walls=nb_walls,
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
)
- mazes_test, paths_test = mazes_test.to(device), paths_test.to(device)
- self.test_input = self.map2seq(mazes_test, paths_test)
+ self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
+ self.test_policies = test_policies.flatten(-2).permute(0, 2, 1).to(device)
+
+ self.nb_codes = self.train_input.max() + 1
def batches(self, split="train", nb_to_use=-1):
assert split in {"train", "test"}
):
yield batch
+ def policy_batches(self, split="train", nb_to_use=-1):
+ assert split in {"train", "test"}
+ input = self.train_input if split == "train" else self.test_input
+ targets = self.train_policies if split == "train" else self.test_policies
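+        # Keep only the maze part of the sequence, and zero the targets on
+        # wall cells, where no move is defined.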
+ input = input[:, : self.height * self.width]
+ targets = targets * (input != maze.v_wall)[:, :, None]
+
+ if nb_to_use > 0:
+ input = input[:nb_to_use]
+ targets = targets[:nb_to_use]
+
+ for batch in tqdm.tqdm(
+ zip(input.split(self.batch_size), targets.split(self.batch_size)),
+ dynamic_ncols=True,
+ desc=f"epoch-{split}",
+ ):
+ yield batch
+
def vocabulary_size(self):
return self.nb_codes
result = input.clone()
ar_mask = result.new_zeros(result.size())
ar_mask[:, self.height * self.width :] = 1
+ result *= 1 - ar_mask
masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
mazes, paths = self.seq2map(result)
nb_correct += maze.path_correctness(mazes, paths).long().sum()
input = self.test_input[:32]
result = input.clone()
ar_mask = result.new_zeros(result.size())
-
ar_mask[:, self.height * self.width :] = 1
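+            # Erase the path tokens so that the model has to regenerate them.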
+ result *= 1 - ar_mask
masked_inplace_autoregression(model, self.batch_size, result, ar_mask)
mazes, paths = self.seq2map(input)
_, predicted_paths = self.seq2map(result)
maze.save_image(
- f"result_{n_epoch:04d}.png",
+ os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
mazes,
paths,
predicted_paths,
######################################################################
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
token_count = 0
for input in task.batches(split="train"):
token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
##############################
-nb_samples_seen = 0
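+# With --one_shot, only train the linear policy read-out on top of the current
+# GPT, then exit.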
+if args.one_shot:
+ one_shot(model, task)
+ exit(0)
+
+##############################
+
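+# If the requested number of epochs has already been reached (e.g. when
+# restarting from a checkpoint), just report the perplexities and produce the
+# results, then exit.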
+if nb_epochs_finished >= args.nb_epochs:
+ n_epoch = nb_epochs_finished
+ train_perplexity = compute_perplexity(model, split="train")
+ test_perplexity = compute_perplexity(model, split="test")
+
+ log_string(
+ f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+ )
-if nb_epochs_finished >= nb_epochs:
- task.produce_results(nb_epochs_finished, model)
+ task.produce_results(n_epoch, model)
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+ exit(0)
+##############################
+
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
- with torch.autograd.no_grad():
-
- model.eval()
-
- nb_test_samples, acc_test_loss = 0, 0.0
-
- for input in task.batches(split="test"):
- input = input.to(device)
-
- # input, loss_masks, true_images = task.excise_last_image(input)
- # input, loss_masks = task.add_true_image(input, true_images, loss_masks)
-
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_test_loss += loss.item() * input.size(0)
- nb_test_samples += input.size(0)
-
- train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+ train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+ test_perplexity = compute_perplexity(model, split="test")
- log_string(
- f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
- )
+ log_string(
+ f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+ )
- task.produce_results(n_epoch, model)
+ task.produce_results(n_epoch, model)
checkpoint = {
"nb_epochs_finished": n_epoch + 1,