parser.add_argument("--maze_nb_walls", type=int, default=15)
+parser.add_argument("--oneshot_mode", type=str, default="head")
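+# "head" reads the policy from the final hidden state, "deep" from a
+# concatenation of intermediate hidden states (see dim_in in one_shot)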
+
######################################################################
args = parser.parse_args()
######################################################################
-def nb_rank_error(output, targets):
- output = output.reshape(-1, output.size(-1))
- targets = targets.reshape(-1, targets.size(-1))
- i = outputs.argmax(1)
- # out=input.gather out[i][j]=input[i][index[i][j]]
- # u[k]=targets[k][i[k]]
- return output[targets.argmax(1)]
-
-
def one_shot(gpt, task):
t = gpt.training
gpt.eval()
- model = nn.Linear(args.dim_model, 4).to(device)
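+ # small MLP readout mapping GPT hidden states to the four action logits;
+ # in "deep" mode the input dimension grows with the number of blocks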
+ dim_in = args.dim_model * (args.nb_blocks * 2 if args.oneshot_mode == "deep" else 1)
+ model = nn.Sequential(
+ nn.Linear(dim_in, args.dim_model),
+ nn.ReLU(),
+ nn.Linear(args.dim_model, args.dim_model),
+ nn.ReLU(),
+ nn.Linear(args.dim_model, 4),
+ ).to(device)
for n_epoch in range(args.nb_epochs):
- optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
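+ # fresh optimizer each epoch, with the learning rate from the global schedule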
+ learning_rate = learning_rate_schedule[n_epoch]
+ optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
acc_train_loss, nb_train_samples = 0, 0
- for input, targets in task.policy_batches(split="train"):
- output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+ for input, policies in task.policy_batches(split="train"):
+ # restrict the loss to the empty cells of the maze
+ mask = input.unsqueeze(-1) == maze.v_empty
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
output = model(output_gpt)
- loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+ targets = policies.permute(0, 2, 1) * mask
+ output = output * mask
+ # masked cross-entropy: only the empty cells contribute
+ loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc_test_loss, nb_test_samples = 0, 0
- for input, targets in task.policy_batches(split="test"):
- output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+ for input, policies in task.policy_batches(split="test"):
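+ # same empty-cell mask and masked loss as in training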
+ mask = input.unsqueeze(-1) == maze.v_empty
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
output = model(output_gpt)
- loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+ targets = policies.permute(0, 2, 1) * mask
+ output = output * mask
+ loss = -(output.log_softmax(-1) * targets).sum() / mask.sum()
acc_test_loss += loss.item() * input.size(0)
nb_test_samples += input.size(0)
- print(
- f"{n_epoch=} {acc_train_loss/nb_train_samples=} {acc_test_loss/nb_test_samples=}"
+ log_string(
+ f"diff_ce {n_epoch} train {acc_train_loss/nb_train_samples} test {acc_test_loss/nb_test_samples}"
+ )
+
+ # ------------------- qualitative check on a few held-out mazes
+ input = task.test_input[:32, : task.height * task.width]
+ targets = task.test_policies[:32].permute(0, 2, 1)
+ output_gpt = gpt(mygpt.BracketedSequence(input), mode=args.oneshot_mode).x
+ output = model(output_gpt)
+ # a cell counts as an error when the argmax action has zero probability
+ # under the target policy
+ losses = (
+ (F.one_hot(output.argmax(-1), num_classes=4) * targets).sum(-1) == 0
+ ).float()
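+ # back from flat sequences to 2d mazes for rendering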
+ losses = losses.reshape(-1, args.maze_height, args.maze_width)
+ input = input.reshape(-1, args.maze_height, args.maze_width)
+ maze.save_image(
+ os.path.join(
+ args.result_dir, f"oneshot_{args.oneshot_mode}_{n_epoch:04d}.png"
+ ),
+ mazes=input,
+ score_paths=losses,
)
+ # -------------------
gpt.train(t)
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
)
self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
- self.train_policies = train_policies.flatten(-2).permute(0, 2, 1).to(device)
+ self.train_policies = train_policies.flatten(-2).to(device)
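+ # policies stay in (N, 4, H*W) layout; one_shot permutes to (N, L, 4) where needed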
test_mazes, test_paths, test_policies = maze.create_maze_data(
nb_test_samples,
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
)
self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
- self.test_policies = test_policies.flatten(-2).permute(0, 2, 1).to(device)
+ self.test_policies = test_policies.flatten(-2).to(device)
self.nb_codes = self.train_input.max() + 1
def policy_batches(self, split="train", nb_to_use=-1):
assert split in {"train", "test"}
input = self.train_input if split == "train" else self.test_input
- targets = self.train_policies if split == "train" else self.test_policies
+ policies = self.train_policies if split == "train" else self.test_policies
input = input[:, : self.height * self.width]
- targets = targets * (input != maze.v_wall)[:, :, None]
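+ # zero the target policies on wall cells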
+ policies = policies * (input != maze.v_wall)[:, None]
if nb_to_use > 0:
input = input[:nb_to_use]
- targets = targets[:nb_to_use]
+ policies = policies[:nb_to_use]
for batch in tqdm.tqdm(
- zip(input.split(self.batch_size), targets.split(self.batch_size)),
+ zip(input.split(self.batch_size), policies.split(self.batch_size)),
dynamic_ncols=True,
desc=f"epoch-{split}",
):
_, predicted_paths = self.seq2map(result)
maze.save_image(
os.path.join(args.result_dir, f"result_{n_epoch:04d}.png"),
- mazes,
- paths,
- predicted_paths,
- maze.path_correctness(mazes, predicted_paths),
+ mazes=mazes,
+ target_paths=paths,
+ predicted_paths=predicted_paths,
+ path_correct=maze.path_correctness(mazes, predicted_paths),
)
model.train(t)