parser.add_argument("--random_regression_order", action="store_true", default=False)
+parser.add_argument("--noncausal_prompt", action="store_true", default=False, help="use bidirectional attention within the prompt (first half of the sequence) and compute the loss on the second half only")
+
parser.add_argument("--no_checkpoint", action="store_true", default=False)
parser.add_argument("--overwrite_results", action="store_true", default=False)
for input in task.batches(split=split):
    input = input.to(device)
    output = eval_mygpt(model, input, fixed_len=fixed_len)
-    loss = F.cross_entropy(output.transpose(1, 2), input)
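+    # With a non-causal prompt, tokens in the first half of the sequence can
+    # attend to their own future within the prompt, so predictions there are
+    # trivially leaked; score the loss on the second half only. (The prompt is
+    # assumed to occupy the first half of each sequence, matching the d // 2
+    # boundary used by the mask generator below.)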
+    if args.noncausal_prompt:
+        t = input.size(1) // 2
+        loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
+    else:
+        loss = F.cross_entropy(output.transpose(1, 2), input)
    acc_loss += loss.item() * input.size(0)
    nb_samples += input.size(0)
##############################
+amm_generator = None
+
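+# Mask generator for the per-head attention matrix. Following the convention
+# of the default causal mask (query index < key index is blocked), True
+# entries are masked out: a pair is blocked when the query precedes the key
+# AND at least one of the two positions falls in the second half. Attention is
+# therefore fully bidirectional inside the first half (the prompt) and
+# strictly causal everywhere else. The (1, 1, d, d) shape broadcasts over
+# batch and heads; the mask is built on the CPU, so the consumer is assumed
+# to move it to the model's device.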
+if args.noncausal_prompt:
+    amm_generator = lambda d: torch.logical_and(
+        torch.arange(d)[None, None, :, None] < torch.arange(d)[None, None, None, :],
+        torch.logical_or(
+            torch.arange(d)[None, None, :, None] >= d // 2,
+            torch.arange(d)[None, None, None, :] >= d // 2,
+        ),
+    )
+
model = mygpt.MyGPT(
    vocabulary_size=vocabulary_size,
    dim_model=args.dim_model,
    nb_blocks=args.nb_blocks,
    causal=True,
    dropout=args.dropout,
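+    # None leaves the default causal masking in place; when set, the generator
+    # above is assumed to replace the triangular mask (bidirectional prompt
+    # attention could not occur if it were merely applied on top of it).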
+    amm_generator=amm_generator,
)
model.to(device)
output = eval_mygpt(
    model, input, mode=args.oneshot_input, fixed_len=task.height * task.width
)
-loss = F.cross_entropy(output.transpose(1, 2), input)
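+# Same masking of the loss as at evaluation time: with a non-causal prompt,
+# only the second half of each sequence carries a meaningful prediction loss.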
+if args.noncausal_prompt:
+    t = input.size(1) // 2
+    loss = F.cross_entropy(output[:, t:].transpose(1, 2), input[:, t:])
+else:
+    loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)