######################################################################
-def generation_order(x, fixed_len=0):
+def generation_order(x, prompt_len=0):
if args.random_regression_order:
order = torch.rand(x.size(), device=x.device)
- order[:, :fixed_len] = torch.arange(-fixed_len, 0, device=x.device)
+ order[:, :prompt_len] = torch.arange(-prompt_len, 0, device=x.device)
order = order.sort(1).indices
else:
        order = (
            torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
        )
    return order
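
# The `reorder` helper used below is elided from this hunk. A sketch of the
# contract assumed here (hypothetical 2-D-only variant, not part of this
# patch): `gather` permutes the sequence dimension by `order`, and
# `scatter_` with the same index tensor inverts that permutation.
def reorder_sketch(x, order, reverse=False):
    if reverse:
        return x.new_empty(x.size()).scatter_(1, order, x)
    return x.gather(1, order)
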
-def shuffle(x, fixed_len):
- order = generation_order(x, fixed_len)
+def shuffle(x, prompt_len):
+ order = generation_order(x, prompt_len)
return reorder(x, order), order
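
# Why the negative arange pins the prompt (sketch, not part of this patch):
# ranks in [-prompt_len, 0) always sort before the uniform [0, 1) draws, so
# the first prompt_len entries of the sorted order are 0..prompt_len-1 in
# place, and only the completion tokens get a random generation order.
def _pinned_prompt_demo():
    r = torch.rand(1, 6)
    r[:, :3] = torch.arange(-3, 0)  # prompt_len = 3
    assert r.sort(1).indices[0, :3].tolist() == [0, 1, 2]
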
-def eval_mygpt(model, input, mode="standard", fixed_len=0):
- x, order = shuffle(input, fixed_len)
+def eval_mygpt(model, input, mode="standard", prompt_len=0):
+ x, order = shuffle(input, prompt_len)
x = model(mygpt.BracketedSequence(x), mode=mode, order=order).x
return reorder(x, order, reverse=True)
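
# Round-trip sanity check (hypothetical, not part of this patch): reordering
# with reverse=True must undo shuffle, which is what keeps the logits
# returned by eval_mygpt aligned with the original positions of `input`.
def _shuffle_round_trip_demo():
    x = torch.randint(10, (4, 8))
    y, order = shuffle(x, 3)
    assert (reorder(y, order, reverse=True) == x).all()
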
######################################################################
-def compute_perplexity(model, task, fixed_len, split="train"):
+def compute_perplexity(model, task, prompt_len, split="train"):
with torch.autograd.no_grad():
t = model.training
model.eval()
for input in task.batches(split=split):
input = input.to(device)
- output = eval_mygpt(model, input, fixed_len=fixed_len)
+ output = eval_mygpt(model, input, prompt_len=prompt_len)
if args.noncausal_prompt:
d = input.size(1) // 2
loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
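            # transpose(1, 2) moves the vocabulary axis to dim 1, where
            # F.cross_entropy expects class scores. With a non-causal prompt
            # the first half of the sequence (the flattened maze) is fully
            # visible and not predicted, so only the second half is scored.
            # The elided accumulation presumably averages this per-token
            # loss over the split and exponentiates it, along the lines of
            # (names assumed):
            #
            #     acc_loss += loss.item() * input.size(0)
            #     nb_samples += input.size(0)
            #     perplexity = math.exp(min(100, acc_loss / nb_samples))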
acc_train_loss, nb_train_samples = 0, 0
for mazes, policies in task.policy_batches(split="train"):
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
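    # The one-shot readout `model` consumes the activations that eval_mygpt
    # pulls from gpt (which representation is selected by
    # mode=args.oneshot_input); prompt_len=height*width keeps the flattened
    # maze tokens at the front of the regression order.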
acc_test_loss, nb_test_samples = 0, 0
for mazes, policies in task.policy_batches(split="test"):
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
loss = compute_loss(mazes, output, policies, task.height, task.width)
mazes = task.test_input[:32, : task.height * task.width]
policies = task.test_policies[:32]
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
if args.oneshot_output == "policy":
##############################
+
def noncausal_prompt_amm_generator(d):
q = torch.arange(d)[:, None]
k = torch.arange(d)[None, :]
s = args.maze_height * args.maze_width
-# return torch.logical_and(q < k, torch.logical_or(q >= s, k >= s))
+ # return torch.logical_and(q < k, torch.logical_or(q >= s, k >= s))
return q < k
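
# As written the function returns the plain causal mask (True = cannot
# attend, assuming mygpt's masking convention), and `s` is only used by the
# commented-out variant, which would additionally let the first s prompt
# tokens attend to one another in full. E.g. for d=4, s=2:
#
#     causal:     noncausal prompt:
#     F T T T     F F T T
#     F F T T     F F T T
#     F F F T     F F F T
#     F F F F     F F F F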
+
amm_generator = None
if args.noncausal_prompt:
if nb_epochs_finished >= args.nb_epochs:
n_epoch = nb_epochs_finished
train_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="train"
+ model, task, prompt_len=task.height * task.width, split="train"
)
test_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="test"
+ model, task, prompt_len=task.height * task.width, split="test"
)
log_string(
for input in task.batches(split="train"):
input = input.to(device)
- output = eval_mygpt(
- model, input, fixed_len=task.height * task.width
- )
+ output = eval_mygpt(model, input, prompt_len=task.height * task.width)
if args.noncausal_prompt:
d = input.size(1) // 2
loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
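    # The min(100, ...) clamp keeps math.exp from overflowing when training
    # diverges; e.g. a mean per-token loss of 2.0 gives a train perplexity
    # of math.exp(2.0) ≈ 7.39.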
test_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="test"
+ model, task, prompt_len=task.height * task.width, split="test"
)
log_string(