######################################################################
+def nb_rank_error(output, targets):
+    output = output.reshape(-1, output.size(-1))
+    targets = targets.reshape(-1, targets.size(-1))
+    i = output.argmax(1)
+    # out=input.gather(1, index) gives out[k][j]=input[k][index[k][j]],
+    # hence u[k]=targets[k][i[k]], the target value of the predicted action
+    u = targets.gather(1, i[:, None])
+    # count the predictions whose action does not reach the best target value
+    return (u < targets.max(1, keepdim=True).values).long().sum().item()
+
def one_shot(gpt, task):
    t = gpt.training
    gpt.eval()
-    for input, targets in task.policy_batches():
-        output = gpt(mygpt.BracketedSequence(input), with_readout = False).x
+    # linear read-out from the GPT hidden states to the 4 actions
+    model = nn.Linear(args.dim_model, 4).to(device)
+    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+    for n_epoch in range(args.nb_epochs):
+        acc_train_loss, nb_train_samples = 0, 0
+        for input, targets in task.policy_batches(split="train"):
+            output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+            output = model(output_gpt)
+            # cross-entropy against the soft target action distributions
+            loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+            acc_train_loss += loss.item() * input.size(0)
+            nb_train_samples += input.size(0)
+
+            optimizer.zero_grad()
+            loss.backward()
+            optimizer.step()
+
+        acc_test_loss, nb_test_samples = 0, 0
+        with torch.no_grad():
+            for input, targets in task.policy_batches(split="test"):
+                output_gpt = gpt(mygpt.BracketedSequence(input), with_readout=False).x
+                output = model(output_gpt)
+                loss = -(output.log_softmax(-1) * targets).sum(-1).mean()
+                acc_test_loss += loss.item() * input.size(0)
+                nb_test_samples += input.size(0)
+
+        print(
+            f"{n_epoch=} {acc_train_loss/nb_train_samples=} {acc_test_loss/nb_test_samples=}"
+        )
+
    gpt.train(t)
+
######################################################################
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-train"),
)
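# turn the generated mazes and their solution paths into flat token sequences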
self.train_input = self.map2seq(train_mazes.to(device), train_paths.to(device))
- self.train_policies = train_policies.to(device)
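+ # policies: (N, 4, H, W) -> (N, H*W, 4), one distribution over the 4 moves per cell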
+ self.train_policies = train_policies.flatten(-2).permute(0, 2, 1).to(device)
test_mazes, test_paths, test_policies = maze.create_maze_data(
nb_test_samples,
progress_bar=lambda x: tqdm.tqdm(x, dynamic_ncols=True, desc=f"data-test"),
)
self.test_input = self.map2seq(test_mazes.to(device), test_paths.to(device))
- self.test_policies = test_policies.to(device)
+ self.test_policies = test_policies.flatten(-2).permute(0, 2, 1).to(device)
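# vocabulary size inferred from the largest token value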
self.nb_codes = self.train_input.max() + 1
input = self.train_input if split == "train" else self.test_input
targets = self.train_policies if split == "train" else self.test_policies
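# keep only the first height*width tokens of each sequence, i.e. the maze itself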
input = input[:, : self.height * self.width]
- targets = targets.flatten(-2) * (input != maze.v_wall)[:,None]
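+ # zero the target distributions on wall cells so they do not contribute to the loss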
+ targets = targets * (input != maze.v_wall)[:, :, None]
if nb_to_use > 0:
input = input[:nb_to_use]
######################################################################
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
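# count how many times each token occurs in the training set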
token_count = 0
for input in task.batches(split="train"):
token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
##############################
-if nb_epochs_finished >= nb_epochs:
+if nb_epochs_finished >= args.nb_epochs:
n_epoch = nb_epochs_finished
train_perplexity = compute_perplexity(model, split="train")
test_perplexity = compute_perplexity(model, split="test")
##############################
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")
):
mazes = torch.empty(nb, height, width, dtype=torch.int64)
paths = torch.empty(nb, height, width, dtype=torch.int64)
- policies = torch.empty(nb, 4, height, width, dtype=torch.int64)
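+ # policies store per-cell action probabilities, hence the default float dtype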
+ policies = torch.empty(nb, 4, height, width)
for n in progress_bar(range(nb)):
maze = create_maze(height, width, nb_walls)
- i = (1 - maze).nonzero()
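+ # candidate start/goal positions are the empty cells of the maze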
+ i = (maze == v_empty).nonzero()
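# draw two distinct empty cells until they are at least dist_min apart (L1 distance)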
while True:
start, goal = i[torch.randperm(i.size(0))[:2]]
if (start - goal).abs().sum() >= dist_min: