+def compute_perplexity(model, fixed_len, split="train"):
+    with torch.autograd.no_grad():
+        t = model.training
+        model.eval()
+
+        nb_samples, acc_loss = 0, 0.0
+
+        for input in task.batches(split=split):
+            input = input.to(device)
+            x, order = shuffle(input, fixed_len)
+            x = model(mygpt.BracketedSequence(x), order=order).x
+            output = reorder(x, order, back=True)
+            loss = F.cross_entropy(output.transpose(1, 2), input)
+            acc_loss += loss.item() * input.size(0)
+            nb_samples += input.size(0)
+
+        model.train(t)
+
+    # Perplexity is exp of the mean per-token cross-entropy; the exponent is
+    # capped at 100 to keep math.exp from overflowing.
+    return math.exp(min(100, acc_loss / nb_samples))
+
+
+######################################################################
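+
+
+# A minimal sanity-check sketch, not part of the original code: the perplexity
+# computed above is exp(mean per-token cross-entropy). The helper name and the
+# toy shapes (batch 2, length 5, vocabulary 10) are illustrative only.
+def _check_perplexity_formula():
+    logits = torch.randn(2, 5, 10)  # (batch, length, vocab)
+    tokens = torch.randint(10, (2, 5))  # target token ids
+    ce = F.cross_entropy(logits.transpose(1, 2), tokens)
+    return math.exp(min(100, ce.item()))  # same capping as compute_perplexity
+
+
+######################################################################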
+
+
+def oneshot_policy_loss(mazes, output, policies, height, width):
+    # Cross-entropy against (possibly soft) target policies, averaged over
+    # the empty maze cells only; wall cells are masked out.
+    masks = (mazes == maze.v_empty).unsqueeze(-1)
+    targets = policies.permute(0, 2, 1) * masks
+    output = output * masks
+    return -(output.log_softmax(-1) * targets).sum() / masks.sum()
+
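+
+# Hedged usage sketch, not from the original: oneshot_policy_loss on toy
+# tensors. It assumes maze.v_empty is a plain integer token id; the helper
+# name and the shapes are illustrative only.
+def _demo_policy_loss(height=3, width=3):
+    mazes = torch.full((2, height * width), maze.v_empty)  # all-empty mazes
+    output = torch.randn(2, height * width, 4)  # readout logits per cell
+    policies = torch.randn(2, 4, height * width).softmax(dim=1)  # soft targets
+    return oneshot_policy_loss(mazes, output, policies, height, width)
+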
+
+def oneshot_trace_loss(mazes, output, policies, height, width):
+    # L1 distance between the predicted visit trace and the stationary
+    # densities induced by the target policies, over empty cells only.
+    masks = mazes == maze.v_empty
+    targets = maze.stationary_densities(
+        mazes.view(-1, height, width), policies.view(-1, 4, height, width)
+    ).flatten(-2)
+    targets = targets * masks
+    output = output.squeeze(-1) * masks
+    return (output - targets).abs().sum() / masks.sum()
+
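+
+# Shape note, inferred from the call sites rather than stated in the original:
+# mazes is (batch, height*width) token ids, policies is (batch, 4,
+# height*width) action probabilities, and output is the readout's
+# (batch, height*width, dim_out) tensor, with dim_out 4 or 1.
+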
+
+def oneshot(gpt, task):
+    t = gpt.training
+    gpt.eval()
+
+    if args.oneshot_input == "head":
+        dim_in = args.dim_model
+    elif args.oneshot_input == "deep":
+        dim_in = args.dim_model * args.nb_blocks * 2
+    else:
+        raise ValueError(f"{args.oneshot_input=}")
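+
+    # Inferred from the widths above rather than stated in the original:
+    # "head" reads only the final activations (dim_model features per token),
+    # while "deep" is assumed to concatenate both sub-layer outputs of every
+    # block, hence dim_model * nb_blocks * 2.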
+
+    if args.oneshot_output == "policy":
+        dim_out = 4
+        compute_loss = oneshot_policy_loss
+    elif args.oneshot_output == "trace":
+        dim_out = 1
+        compute_loss = oneshot_trace_loss
+    else:
+        raise ValueError(f"{args.oneshot_output=}")
+
+    # A two-hidden-layer MLP probe mapping the frozen GPT's activations to
+    # either a 4-way policy or a scalar visit trace per cell.
+    model = nn.Sequential(
+        nn.Linear(dim_in, args.dim_model),
+        nn.ReLU(),
+        nn.Linear(args.dim_model, args.dim_model),
+        nn.ReLU(),
+        nn.Linear(args.dim_model, dim_out),
+    ).to(device)
+
+    for n_epoch in range(args.nb_epochs):
+        # A fresh Adam instance per epoch implements the learning-rate
+        # schedule; only the probe's parameters are optimized.
+        learning_rate = learning_rate_schedule[n_epoch]
+        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
+
+        acc_train_loss, nb_train_samples = 0, 0
+        for mazes, policies in task.policy_batches(split="train"):
+            x, order = shuffle(mazes, task.height * task.width)
+            x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
+            output_gpt = reorder(x, order, back=True)
+            output = model(output_gpt)
+
+            loss = compute_loss(mazes, output, policies, task.height, task.width)
+            acc_train_loss += loss.item() * mazes.size(0)
+            nb_train_samples += mazes.size(0)
+
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+        acc_test_loss, nb_test_samples = 0, 0
+        with torch.no_grad():  # the test pass needs no gradients
+            for mazes, policies in task.policy_batches(split="test"):
+                x, order = shuffle(mazes, task.height * task.width)
+                x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
+                output_gpt = reorder(x, order, back=True)
+                output = model(output_gpt)
+                loss = compute_loss(mazes, output, policies, task.height, task.width)
+                acc_test_loss += loss.item() * mazes.size(0)
+                nb_test_samples += mazes.size(0)
+
+        log_string(
+            f"diff_ce {n_epoch} train {acc_train_loss/nb_train_samples} test {acc_test_loss/nb_test_samples}"
+        )
+
+        # -------------------
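+        # Visualize the first 32 test mazes: predicted scores next to the
+        # ground truth, written out as one PNG per epoch.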
+        mazes = task.test_input[:32, : task.height * task.width]
+        policies = task.test_policies[:32]
+        x, order = shuffle(mazes, task.height * task.width)
+        x = gpt(mygpt.BracketedSequence(x), mode=args.oneshot_input, order=order).x
+        output_gpt = reorder(x, order, back=True)
+        output = model(output_gpt)
+        if args.oneshot_output == "policy":
+            targets = policies.permute(0, 2, 1)
+            # score 1.0 where the predicted action has zero probability under
+            # the target policy, i.e. where the prediction is wrong
+            scores = (
+                (F.one_hot(output.argmax(-1), num_classes=4) * targets).sum(-1) == 0
+            ).float()
+        elif args.oneshot_output == "trace":
+            targets = maze.stationary_densities(
+                mazes.view(-1, task.height, task.width),
+                policies.view(-1, 4, task.height, task.width),
+            ).flatten(-2)
+            scores = output
+        else:
+            raise ValueError(f"{args.oneshot_output=}")
+
+        scores = scores.reshape(-1, task.height, task.width)
+        mazes = mazes.reshape(-1, task.height, task.width)
+        targets = targets.reshape(-1, task.height, task.width)
+        filename = (
+            f"oneshot_{args.oneshot_input}_{args.oneshot_output}_{n_epoch:04d}.png"
+        )
+        maze.save_image(
+            os.path.join(args.result_dir, filename),
+            mazes=mazes,
+            score_paths=scores,
+            score_truth=targets,
+        )
+        log_string(f"wrote {filename}")
+
+        # -------------------
+
+    gpt.train(t)
+
+
+######################################################################
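+
+
+# Hedged illustration, not part of the original: shuffle() and reorder(),
+# defined elsewhere in this file, are assumed to permute the sequence
+# dimension and to invert that permutation with back=True. A gather-based
+# round trip of that kind, with a hypothetical helper name:
+def _demo_permute_roundtrip(x):
+    order = torch.rand(x.size(0), x.size(1)).argsort(dim=1)  # random permutation
+    shuffled = x.gather(1, order)  # apply it along the sequence dimension
+    restored = shuffled.gather(1, order.argsort(dim=1))  # invert it
+    return bool((restored == x).all())
+
+
+######################################################################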
+
+