if args.random_regression_order:
order = torch.rand(x.size(), device=x.device)
order[:, :fixed_len] = torch.linspace(-2, -1, fixed_len, device=order.device)
- return order.sort(1).indices
+ order = order.sort(1).indices
else:
- return (
+ order = (
torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
)
+ return order
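
# An illustrative check of generation_order, not part of the patch:
# with args.random_regression_order set, the prefix values linspaced
# in [-2, -1] always sort before the uniform draws in [0, 1), so the
# first fixed_len positions stay in place and only the suffix is
# permuted; without the flag both assertions hold trivially.
#
#   x = torch.zeros(2, 8)
#   order = generation_order(x, fixed_len=3)
#   assert (order[:, :3] == torch.arange(3)).all()
#   assert (order[:, 3:].sort(1).values == torch.arange(3, 8)).all()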
-def shuffle(x, order, reorder=False):
- if x.dim() == 3:
- order = order.unsqueeze(-1).expand(-1, -1, x.size(-1))
- if reorder:
- y = x.new(x.size())
- y.scatter_(1, order, x)
- return y
+def reorder(x, order, back=False):  # x is NxTxD1x...xDk, order is NxT'
+ u = x.reshape(x.size()[:2] + (-1,))
+ order = order.unsqueeze(-1).expand(-1, -1, u.size(-1))
+ if back:
+ v = u.new(u.size())
+ v.scatter_(1, order, u)
else:
- return x.gather(1, order)
+ v = u.gather(1, order)
+ v = v.reshape(v.size()[:2] + x.size()[2:])
+ return v
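
# A round-trip sketch for reorder, not part of the patch: gather along
# dim 1 applies the permutation, scatter_ along the same dim inverts
# it, and the trailing D1x...xDk dimensions are flattened and restored
# around the permutation.
#
#   x = torch.arange(24).reshape(2, 4, 3)
#   order = torch.stack([torch.randperm(4) for _ in range(2)])
#   y = reorder(x, order)  # y[n, t] == x[n, order[n, t]]
#   assert (reorder(y, order, back=True) == x).all()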
+def shuffle(x, fixed_len):
+ order = generation_order(x, fixed_len)
+ return reorder(x, order), order
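
# A sanity check for shuffle, not part of the patch: the fixed prefix
# is left in place and reorder(..., back=True) restores the original
# sequence.
#
#   x = torch.randint(10, (2, 6))
#   y, order = shuffle(x, fixed_len=2)
#   assert (y[:, :2] == x[:, :2]).all()
#   assert (reorder(y, order, back=True) == x).all()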
+
+
+######################################################################
+
# ar_mask is a Boolean matrix of the same shape as the input, with 1s
# on the tokens that should be generated
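#
# For instance (illustrative, with a hypothetical prompt_len), to
# regenerate everything after the prompt:
#
#   ar_mask = input.new_zeros(input.size())
#   ar_mask[:, prompt_len:] = 1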
######################################################################
-def compute_perplexity(model, split="train"):
+def compute_perplexity(model, fixed_len, split="train"):
with torch.autograd.no_grad():
t = model.training
model.eval()
for input in task.batches(split=split):
input = input.to(device)
- order = generation_order(input, task.height * task.width)
- input = shuffle(input, order)
- output = model(mygpt.BracketedSequence(input), order=order).x
+ x, order = shuffle(input, fixed_len)
+ x = model(mygpt.BracketedSequence(x), order=order).x
+ output = reorder(x, order, back=True)
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_loss += loss.item() * input.size(0)
nb_samples += input.size(0)
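
# Not shown in this hunk: compute_perplexity presumably finishes the
# same way as the training loop below, turning the accumulated
# cross-entropy into a perplexity, with min(100, .) keeping math.exp
# from overflowing when the loss diverges:
#
#   return math.exp(min(100, acc_loss / nb_samples))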
acc_train_loss, nb_train_samples = 0, 0
for mazes, policies in task.policy_batches(split="train"):
- order = generation_order(mazes, task.height * task.width)
- x = shuffle(mazes, order)
+ x, order = shuffle(mazes, task.height * task.width)
x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
- output_gpt = shuffle(x, order, reorder=True)
+ output_gpt = reorder(x, order, back=True)
output = model(output_gpt)
loss = compute_loss(mazes, output, policies, task.height, task.width)
acc_test_loss, nb_test_samples = 0, 0
for mazes, policies in task.policy_batches(split="test"):
- order = generation_order(mazes, task.height * task.width)
- x = shuffle(mazes, order)
+ x, order = shuffle(mazes, task.height * task.width)
x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
- output_gpt = shuffle(x, order, reorder=True)
+ output_gpt = reorder(x, order, back=True)
output = model(output_gpt)
loss = compute_loss(mazes, output, policies, task.height, task.width)
acc_test_loss += loss.item() * mazes.size(0)
# -------------------
mazes = task.test_input[:32, : task.height * task.width]
policies = task.test_policies[:32]
- order = generation_order(mazes, task.height * task.width)
- x = shuffle(mazes, order)
+ x, order = shuffle(mazes, task.height * task.width)
x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
- output_gpt = shuffle(x, order, reorder=True)
+ output_gpt = reorder(x, order, back=True)
output = model(output_gpt)
if args.oneshot_output == "policy":
targets = policies.permute(0, 2, 1)
scores = scores.reshape(-1, task.height, task.width)
mazes = mazes.reshape(-1, task.height, task.width)
targets = targets.reshape(-1, task.height, task.width)
+ filename = (
+ f"oneshot_{args.oneshot_input}_{args.oneshot_output}_{n_epoch:04d}.png"
+ )
maze.save_image(
- os.path.join(
- args.result_dir,
- f"oneshot_{args.oneshot_input}_{args.oneshot_output}_{n_epoch:04d}.png",
- ),
+ os.path.join(args.result_dir, filename),
mazes=mazes,
score_paths=scores,
score_truth=targets,
)
+ log_string(f"wrote {filename}")
+
# -------------------
gpt.train(t)
ar_mask = result.new_zeros(result.size())
ar_mask[:, self.height * self.width :] = 1
result *= 1 - ar_mask
- order = generation_order(result, self.height * self.width)
+ x, order = shuffle(result, self.height * self.width)
masked_inplace_autoregression(
- model, self.batch_size, result, ar_mask, order=order
+ model, self.batch_size, x, ar_mask, order=order
)
- result = shuffle(result, order, reorder=True)
+ result = reorder(x, order, back=True)
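
# Note, not part of the patch: ar_mask needs no shuffling here because
# generation_order keeps the first height*width positions in place and
# only permutes the suffix, and ar_mask is constant on each of those
# two blocks (0 on the prefix, 1 on the suffix), so the same mask is
# valid for the shuffled x passed to masked_inplace_autoregression.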
mazes, paths = self.seq2map(result)
nb_correct += maze.path_correctness(mazes, paths).long().sum()
nb_total += mazes.size(0)
mazes, paths = self.seq2map(input)
_, predicted_paths = self.seq2map(result)
+ filename = f"result_{n_epoch:04d}.png"
maze.save_image(
- os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
+ os.path.join(args.result_dir, filename),
mazes=mazes,
target_paths=paths,
predicted_paths=predicted_paths,
path_correct=maze.path_correctness(mazes, predicted_paths),
)
+ log_string(f"wrote {filename}")
model.train(t)
if nb_epochs_finished >= args.nb_epochs:
n_epoch = nb_epochs_finished
- train_perplexity = compute_perplexity(model, split="train")
- test_perplexity = compute_perplexity(model, split="test")
+ train_perplexity = compute_perplexity(
+ model, fixed_len=task.height * task.width, split="train"
+ )
+ test_perplexity = compute_perplexity(
+ model, fixed_len=task.height * task.width, split="test"
+ )
log_string(
f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
for input in task.batches(split="train"):
input = input.to(device)
- order = generation_order(input, task.height * task.width)
- input = shuffle(input, order)
- output = model(mygpt.BracketedSequence(input), order=order).x
+ x, order = shuffle(input, task.height * task.width)
+ x = model(mygpt.BracketedSequence(x), order=order).x
+ output = reorder(x, order, back=True)
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
optimizer.step()
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- test_perplexity = compute_perplexity(model, split="test")
+ test_perplexity = compute_perplexity(
+ model, fixed_len=task.height * task.width, split="test"
+ )
log_string(
f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"