input = input.to(device)
output = eval_mygpt(model, input, fixed_len=fixed_len)
if args.noncausal_prompt:
-    t = input.size(1) // 2
-    loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
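+    # with a non-causal prompt only the second half of the sequence (the
+    # part to be generated) is scored; the first half is the given prompt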
+    d = input.size(1) // 2
+    loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
else:
    loss = F.cross_entropy(output.transpose(1, 2), input)
acc_loss += loss.item() * input.size(0)
##############################
+def noncausal_prompt_amm_generator(d):
+    # attention-matrix-mask generator: entries that are True are masked,
+    # i.e. the query position cannot attend to the key position
+    q = torch.arange(d)[:, None]
+    k = torch.arange(d)[None, :]
+    # s is the number of prompt tokens (the flattened maze); the
+    # commented-out variant masks the future everywhere except within the
+    # prompt block, which stays fully visible to itself
+    s = args.maze_height * args.maze_width
+    # return torch.logical_and(q < k, torch.logical_or(q >= s, k >= s))
+    return q < k
+
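+# illustration: with d = 4 the active mask is the plain causal mask,
+#     [[F, T, T, T],
+#      [F, F, T, T],
+#      [F, F, F, T],
+#      [F, F, F, F]]
+# while the commented-out variant with s = 2 would also unmask the prompt
+# block, giving
+#     [[F, F, T, T],
+#      [F, F, T, T],
+#      [F, F, F, T],
+#      [F, F, F, F]]
+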
amm_generator = None
if args.noncausal_prompt:
-    amm_generator = lambda d: torch.logical_and(
-        torch.arange(d)[None, None, :, None] < torch.arange(d)[None, None, None, :],
-        torch.logical_or(
-            torch.arange(d)[None, None, :, None] >= d // 2,
-            torch.arange(d)[None, None, None, :] >= d // 2,
-        ),
-    )
+    amm_generator = noncausal_prompt_amm_generator
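+# note: amm_generator is presumably passed to mygpt.MyGPT(...) below so the
+# attention layers use it instead of the default causal mask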
model = mygpt.MyGPT(
vocabulary_size=vocabulary_size,
for input in task.batches(split="train"):
    input = input.to(device)
    output = eval_mygpt(
-        model, input, mode=args.oneshot_input, fixed_len=task.height * task.width
+        model, input, fixed_len=task.height * task.width
    )
    if args.noncausal_prompt:
-        t = input.size(1) // 2
-        loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
+        d = input.size(1) // 2
+        loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
    else:
        loss = F.cross_entropy(output.transpose(1, 2), input)
    acc_train_loss += loss.item() * input.size(0)