parser.add_argument("--overwrite_results", action="store_true", default=False)
-parser.add_argument("--one_shot", action="store_true", default=False)
-
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
##############################
parser.add_argument("--maze_nb_walls", type=int, default=15)
-parser.add_argument("--oneshot_mode", type=str, default="head")
+parser.add_argument("--oneshot", action="store_true", default=False)
+
+parser.add_argument("--oneshot_input", type=str, default="head")
+
+parser.add_argument("--oneshot_output", type=str, default="policy")
######################################################################
######################################################################
-def one_shot(gpt, task):
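+# Masked cross-entropy between the readout's per-cell distribution over the
+# 4 moves and the target policies; only the cells selected by `mask`
+# contribute, and the loss is normalized by the number of such cells.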
+def oneshot_policy_loss(output, policies, mask):
+ targets = policies.permute(0, 2, 1) * mask.unsqueeze(-1)
+ output = output * mask.unsqueeze(-1)
+ return -(output.log_softmax(-1) * targets).sum() / mask.sum()
+
+
+# loss = (output.softmax(-1) - targets).abs().max(-1).values.mean()
+
+
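+# Hedged sketch of a loss for the "trace" readout (dim_out == 1). It assumes
+# that, when args.oneshot_output == "trace", task.policy_batches() yields
+# per-cell scalar targets of shape (N, L) instead of policies; the actual
+# trace targets are not defined in this patch.
+def oneshot_trace_loss(output, targets, mask):
+    targets = targets * mask
+    output = output.squeeze(-1) * mask
+    return (output - targets).pow(2).sum() / mask.sum()
+
+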
+def oneshot(gpt, task):
t = gpt.training
gpt.eval()
- dim_in = args.dim_model * (args.nb_blocks * 2 if args.oneshot_mode == "deep" else 1)
+
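+    # Width of the readout input: "head" reads the model's final hidden
+    # state, "deep" concatenates the activations collected across all the
+    # blocks (two per block).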
+ if args.oneshot_input == "head":
+ dim_in = args.dim_model
+ elif args.oneshot_input == "deep":
+ dim_in = args.dim_model * args.nb_blocks * 2
+ else:
+ raise ValueError(f"{args.oneshot_input=}")
+
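+    # The readout predicts either a distribution over the 4 moves for every
+    # cell ("policy"), or a single scalar per cell ("trace").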
+ if args.oneshot_output == "policy":
+ dim_out = 4
+ compute_loss = oneshot_policy_loss
+ elif args.oneshot_output == "trace":
+        dim_out = 1
+        compute_loss = oneshot_trace_loss  # hedged sketch defined above
+ else:
+ raise ValueError(f"{args.oneshot_output=}")
+
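+    # Small MLP readout trained on top of the GPT activations (the GPT itself
+    # is kept in eval mode).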
model = nn.Sequential(
nn.Linear(dim_in, args.dim_model),
nn.ReLU(),
# s = maze.stationary_densities(
# exit(0)
####
- mask = input.unsqueeze(-1) == maze.v_empty
- output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
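+        # Only the empty cells of the maze (maze.v_empty) contribute to the loss.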
+ mask = input == maze.v_empty
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_input).x
output = model(output_gpt)
- targets = policies.permute(0, 2, 1) * mask
- output = output * mask
- # loss = (output.softmax(-1) - targets).abs().max(-1).values.mean()
- loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
+
+ loss = compute_loss(output, policies, mask)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
acc_test_loss, nb_test_samples = 0, 0
for input, policies in task.policy_batches(split="test"):
- mask = input.unsqueeze(-1) == maze.v_empty
- output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
+ mask = input == maze.v_empty
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_input).x
output = model(output_gpt)
- targets = policies.permute(0, 2, 1) * mask
- output = output * mask
- # loss = (output.softmax(-1) - targets).abs().max(-1).values.mean()
- loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
+ loss = compute_loss(output, policies, mask)
acc_test_loss += loss.item() * input.size(0)
nb_test_samples += input.size(0)
# -------------------
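+    # Visualize prediction errors on a few test mazes; this assumes the
+    # policy readout (the argmax over the 4 moves is compared to the target
+    # policy).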
input = task.test_input[:32, : task.height * task.width]
targets = task.test_policies[:32].permute(0, 2, 1)
- output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_input).x
output = model(output_gpt)
- # losses = (-output.log_softmax(-1) * targets + targets.xlogy(targets)).sum(-1)
- # losses = losses * mask
- # losses = losses / losses.max()
- # losses = (output.softmax(-1) - targets).abs().max(-1).values
- # losses = (losses >= 0.05).float()
- losses = (
+ scores = (
(F.one_hot(output.argmax(-1), num_classes=4) * targets).sum(-1) == 0
).float()
- losses = losses.reshape(-1, args.maze_height, args.maze_width)
- input = input.reshape(-1, args.maze_height, args.maze_width)
+ scores = scores.reshape(-1, task.height, task.width)
+ input = input.reshape(-1, task.height, task.width)
maze.save_image(
os.path.join(
- args.result_dir, f"oneshot_{args.oneshot_mode}_{n_epoch:04d}.png"
+ args.result_dir,
+ f"oneshot_{args.oneshot_input}_{args.oneshot_output}_{n_epoch:04d}.png",
),
mazes=input,
- score_paths=losses,
+ score_paths=scores,
)
# -------------------
##############################
-if args.one_shot:
- one_shot(model, task)
+if args.oneshot:
+ oneshot(model, task)
exit(0)
##############################
elif args.optim == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
else:
- raise ValueError(f"Unknown optimizer {args.optim}.")
+ raise ValueError(f"{args.optim=}")
model.train()