sys.stdout.flush()
+log_string(f"cmd {' '.join(sys.argv)}")
+
for n in vars(args):
log_string(f"args.{n} {getattr(args, n)}")
######################################################################
-def generation_order(x, fixed_len=0):
- if args.random_regression_order:
- order = torch.rand(x.size(), device=x.device)
- order[:, :fixed_len] = torch.arange(-fixed_len, 0, device=x.device)
- order = order.sort(1).indices
- else:
- order = (
- torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
- )
- return order
-
-
def reorder(x, order, reverse=False): # x is NxTxD1x...xDk, order is NxT'
u = x.reshape(x.size()[:2] + (-1,))
    order = order.unsqueeze(-1).expand(-1, -1, u.size(-1))
    if reverse:
        # scatter the values back to their original positions
        v = u.new(u.size()).scatter_(1, order, u)
    else:
        # gather the values in the given generation order
        v = u.gather(1, order)
    v = v.reshape(v.size()[:2] + x.size()[2:])
    return v
-def shuffle(x, fixed_len):
- order = generation_order(x, fixed_len)
+def shuffle(x, prompt_len):
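+    # When random_regression_order is set, each token gets a random sort key;
+    # the first prompt_len keys are forced negative so that, after sorting,
+    # the prompt tokens come first in their original order and the remaining
+    # tokens are visited in random order.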
+ if args.random_regression_order:
+ order = torch.rand(x.size(), device=x.device)
+ order[:, :prompt_len] = torch.arange(-prompt_len, 0, device=x.device)
+ order = order.sort(1).indices
+ else:
+ order = (
+ torch.arange(x.size(1), device=x.device).unsqueeze(0).expand(x.size(0), -1)
+ )
return reorder(x, order), order
-def eval_mygpt(model, input, mode="standard", fixed_len=0):
- x, order = shuffle(input, fixed_len)
+def eval_mygpt(model, input, mode="standard", prompt_len=0):
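+    # Shuffle the tokens (identity order unless random_regression_order is
+    # set), run the model with that order, and map the logits back so they
+    # align with the original token positions.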
+ x, order = shuffle(input, prompt_len)
x = model(mygpt.BracketedSequence(x), mode=mode, order=order).x
return reorder(x, order, reverse=True)
######################################################################
-def compute_perplexity(model, task, fixed_len, split="train"):
+def compute_perplexity(model, task, prompt_len, split="train"):
with torch.autograd.no_grad():
t = model.training
model.eval()
        nb_samples, acc_loss = 0, 0.0

        for input in task.batches(split=split):
input = input.to(device)
- output = eval_mygpt(model, input, fixed_len=fixed_len)
- loss = F.cross_entropy(output.transpose(1, 2), input)
+ output = eval_mygpt(model, input, prompt_len=prompt_len)
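+            # With a non-causal prompt, the first half of the sequence can
+            # attend to future positions, so only the second half (assumed
+            # here to be the generated part) is scored.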
+ if args.noncausal_prompt:
+ d = input.size(1) // 2
+ loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
+ else:
+ loss = F.cross_entropy(output.transpose(1, 2), input)
acc_loss += loss.item() * input.size(0)
nb_samples += input.size(0)
        model.train(t)

        return math.exp(min(100, acc_loss / nb_samples))
-def oneshot(gpt, task):
+def oneshot(model, learning_rate_scheduler, task):
+ t = model.training
+ model.eval()
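+    # Keep the maze descriptions but blank out the path part of the test
+    # sequences, so the model only gets the prompt.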
+ mazes = task.test_input[:48].clone()
+ mazes[:, task.height * task.width :] = 0
+ policies = task.test_policies[:48]
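+    # Ground-truth targets: the stationary visit density of each cell under
+    # the corresponding policy, flattened to one value per maze cell.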
+ targets = maze.stationary_densities(
+ mazes[:, : task.height * task.width].view(-1, task.height, task.width),
+ policies.view(-1, 4, task.height, task.width),
+ ).flatten(-2)
+ output = eval_mygpt(model, mazes, prompt_len=task.height * task.width)
+ output = F.softmax(output, dim=2)
+    log_string(f"{output.size()=}")
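+    # Assumption: token 4 marks path cells, so its softmax probability at
+    # each non-prompt position is read off as a predicted path density.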
+ proba_path = output[:, task.height * task.width :, 4].reshape(
+ -1, task.height, task.width
+ )
+ mazes = mazes[:, : task.height * task.width].reshape(-1, task.height, task.width)
+ targets = targets.reshape(-1, task.height, task.width)
+ paths = task.test_input[:48, task.height * task.width :].reshape(
+ -1, task.height, task.width
+ )
+    filename = "oneshot.png"
+ maze.save_image(
+ os.path.join(args.result_dir, filename),
+ mazes=mazes,
+ # target_paths=paths,
+ score_paths=proba_path,
+ score_truth=targets,
+ )
+    log_string(f"wrote {filename}")
+
+    model.train(t)
+
+
+def oneshot_old(gpt, learning_rate_scheduler, task):
t = gpt.training
gpt.eval()
nn.Linear(args.dim_model, dim_out),
).to(device)
+ learning_rate_scheduler.reset()
+
for n_epoch in range(args.nb_epochs):
- learning_rate = learning_rate_schedule[n_epoch]
+ learning_rate = learning_rate_scheduler.get_learning_rate()
+ log_string(f"learning_rate {n_epoch} {learning_rate}")
+
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
acc_train_loss, nb_train_samples = 0, 0
for mazes, policies in task.policy_batches(split="train"):
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
            loss = compute_loss(mazes, output, policies, task.height, task.width)
            acc_train_loss += loss.item() * mazes.size(0)
            nb_train_samples += mazes.size(0)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
+ learning_rate_scheduler.update(n_epoch + 1, acc_train_loss)
+
acc_test_loss, nb_test_samples = 0, 0
for mazes, policies in task.policy_batches(split="test"):
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
            loss = compute_loss(mazes, output, policies, task.height, task.width)
            acc_test_loss += loss.item() * mazes.size(0)
            nb_test_samples += mazes.size(0)
)
# -------------------
- mazes = task.test_input[:32, : task.height * task.width]
- policies = task.test_policies[:32]
+ mazes = task.test_input[:48, : task.height * task.width]
+ policies = task.test_policies[:48]
output_gpt = eval_mygpt(
- gpt, mazes, mode=args.oneshot_input, fixed_len=task.height * task.width
+ gpt, mazes, mode=args.oneshot_input, prompt_len=task.height * task.width
)
output = model(output_gpt)
if args.oneshot_output == "policy":
######################################################################
+class LearningRateScheduler:
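+    # Interface: expose the current learning rate, observe the loss after
+    # each epoch, and provide get_state / set_state so the schedule survives
+    # checkpointing.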
+ def get_learning_rate(self):
+ pass
+
+ def update(self, nb_finished_epochs, loss):
+ pass
+
+ def reset(self):
+ pass
+
+ def get_state(self):
+ return vars(self)
+
+ def set_state(self, state):
+        log_string(f"{state=}")
+ for k, v in state.items():
+ setattr(self, k, v)
+
+
+class StepWiseScheduler(LearningRateScheduler):
+ def __init__(self, schedule):
+ self.nb_finished_epochs = 0
+ self.schedule = schedule
+
+ def get_learning_rate(self):
+ return self.schedule[self.nb_finished_epochs]
+
+ def update(self, nb_finished_epochs, loss):
+ self.nb_finished_epochs = nb_finished_epochs
+
+ def reset(self):
+ self.nb_finished_epochs = 0
+
+ def get_state(self):
+ return {"nb_finished_epochs": self.nb_finished_epochs}
+
+
+class AutoScheduler(LearningRateScheduler):
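+    # Loss-driven multiplicative adaptation: after each epoch the rate is
+    # multiplied by `growth` if the loss improved over the previous epoch,
+    # and by `degrowth` otherwise. E.g. (illustrative values) with the
+    # defaults, AutoScheduler(1e-3) keeps 1e-3 while the loss decreases and
+    # drops to 2e-4 after the first epoch with no improvement.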
+ def __init__(self, learning_rate_init, growth=1.0, degrowth=0.2):
+ self.learning_rate_init = learning_rate_init
+ self.learning_rate = learning_rate_init
+ self.growth = growth
+ self.degrowth = degrowth
+ self.pred_loss = None
+
+ def get_learning_rate(self):
+ return self.learning_rate
+
+ def update(self, nb_finished_epochs, loss):
+ if self.pred_loss is not None:
+ if loss >= self.pred_loss:
+ self.learning_rate *= self.degrowth
+ else:
+ self.learning_rate *= self.growth
+ self.pred_loss = loss
+
+    def reset(self):
+        self.learning_rate = self.learning_rate_init
+        self.pred_loss = None
+    def get_state(self):
+        return {
+            "learning_rate_init": self.learning_rate_init,
+            "learning_rate": self.learning_rate,
+            "pred_loss": self.pred_loss,
+        }
+
+
+######################################################################
+
+
class Task:
def batches(self, split="train", nb_to_use=-1, desc=None):
pass
f"accuracy_test nb_total {test_nb_total} nb_correct {test_nb_correct} accuracy {(100.0*test_nb_correct)/test_nb_total:.02f}%"
)
- input = self.test_input[:32]
+ input = self.test_input[:48]
result = input.clone()
ar_mask = result.new_zeros(result.size())
ar_mask[:, self.height * self.width :] = 1
##############################
-amm_generator = None
-if args.noncausal_prompt:
- amm_generator = lambda d: torch.logical_and(
- torch.arange(d)[None, None, :, None] < torch.arange(d)[None, None, None, :],
- torch.logical_or(
- torch.arange(d)[None, None, :, None] >= d // 2,
- torch.arange(d)[None, None, None, :] >= d // 2,
- ),
- )
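+# Attention-matrix mask generators: following the causal convention q < k,
+# a True entry forbids attention. Here positions inside the prompt (the
+# first maze_height * maze_width tokens) attend to each other freely, and
+# the causal constraint only applies once either end is past the prompt.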
+def noncausal_prompt_amm_generator(d):
+ q = torch.arange(d)[:, None]
+ k = torch.arange(d)[None, :]
+ s = args.maze_height * args.maze_width
+ return torch.logical_and(q < k, torch.logical_or(q >= s, k >= s))
+ # return q < k
+
+
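+# For one-shot probing: attention to keys beyond the prompt is forbidden
+# everywhere, so every position reads the prompt bidirectionally and
+# nothing else.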
+def noncausal_prompt_oneshot_amm_generator(d):
+ q = torch.arange(d)[:, None]
+ k = torch.arange(d)[None, :]
+ s = args.maze_height * args.maze_width
+ return k >= s
+ # return q < k
+
+
+if args.oneshot:
+ amm_generator = noncausal_prompt_oneshot_amm_generator
+elif args.noncausal_prompt:
+ amm_generator = noncausal_prompt_amm_generator
+else:
+ amm_generator = None
model = mygpt.MyGPT(
vocabulary_size=vocabulary_size,
######################################################################
+if args.learning_rate_schedule == "auto":
+ learning_rate_scheduler = AutoScheduler(args.learning_rate)
+
+elif args.learning_rate_schedule == "cos":
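+    # Half-period cosine decay from args.learning_rate down to ~0:
+    # lr(n) = args.learning_rate * 0.5 * (1 + cos(pi * n / nb_epochs))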
+ schedule = {}
+ for n_epoch in range(args.nb_epochs):
+ u = n_epoch / args.nb_epochs * math.pi
+ schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
+ learning_rate_scheduler = StepWiseScheduler(schedule)
+ log_string(f"learning_rate_schedule {schedule}")
+
+else:
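+    # Explicit step-wise schedule given as comma-separated "epoch:lr" pairs,
+    # e.g. "0:1e-3,50:1e-4" (illustrative values); each rate holds until the
+    # next listed epoch.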
+ u = {
+ int(k): float(v)
+ for k, v in [
+ tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
+ ]
+ }
+
+ schedule = {}
+ learning_rate = args.learning_rate
+ for n_epoch in range(args.nb_epochs):
+ if n_epoch in u:
+ learning_rate = u[n_epoch]
+ schedule[n_epoch] = learning_rate
+ learning_rate_scheduler = StepWiseScheduler(schedule)
+ log_string(f"learning_rate_schedule {schedule}")
+
+######################################################################
+
nb_epochs_finished = 0
if args.no_checkpoint:
    log_string("not trying to load checkpoint.")

else:
    try:
        checkpoint = torch.load(checkpoint_name)
        nb_epochs_finished = checkpoint["nb_epochs_finished"]
        model.load_state_dict(checkpoint["model_state"])
+        learning_rate_scheduler.set_state(checkpoint["learning_rate_scheduler_state"])
        torch.set_rng_state(checkpoint["rng_state"])
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
except FileNotFoundError:
log_string("starting from scratch.")
- except:
- log_string("error when loading the checkpoint.")
- exit(1)
+ # except:
+ # log_string("error when loading the checkpoint.")
+ # exit(1)
+
+######################################################################
+
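+# In one-shot mode, only probe the (possibly checkpoint-restored) GPT and
+# exit; no language-model training happens.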
+if args.oneshot:
+ oneshot(model, learning_rate_scheduler, task)
+ exit(0)
######################################################################
##############################
-if args.learning_rate_schedule == "cos":
- learning_rate_schedule = {}
- for n_epoch in range(args.nb_epochs):
- u = n_epoch / args.nb_epochs * math.pi
- learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
- u = {
- int(k): float(v)
- for k, v in [
- tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
- ]
- }
-
- learning_rate_schedule = {}
- learning_rate = args.learning_rate
- for n_epoch in range(args.nb_epochs):
- if n_epoch in u:
- learning_rate = u[n_epoch]
- learning_rate_schedule[n_epoch] = learning_rate
-
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
-
-##############################
-
if nb_epochs_finished >= args.nb_epochs:
n_epoch = nb_epochs_finished
train_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="train"
+ model, task, prompt_len=task.height * task.width, split="train"
)
test_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="test"
+ model, task, prompt_len=task.height * task.width, split="test"
)
log_string(
##############################
-for n_epoch in range(nb_epochs_finished, args.nb_epochs):
-    learning_rate = learning_rate_schedule[n_epoch]
-    log_string(f"learning_rate {learning_rate}")
+if nb_epochs_finished == 0:
+    learning_rate_scheduler.reset()
+
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
+    learning_rate = learning_rate_scheduler.get_learning_rate()
+    log_string(f"learning_rate {n_epoch} {learning_rate}")
if args.optim == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for input in task.batches(split="train"):
input = input.to(device)
- output = eval_mygpt(
- model, input, mode=args.oneshot_input, fixed_len=task.height * task.width
- )
- loss = F.cross_entropy(output.transpose(1, 2), input)
+ output = eval_mygpt(model, input, prompt_len=task.height * task.width)
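+        # Same masking rationale as in compute_perplexity: with a non-causal
+        # prompt the first half sees the future, so it is excluded from the
+        # loss.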
+ if args.noncausal_prompt:
+ d = input.size(1) // 2
+ loss = F.cross_entropy(output[:, d:].transpose(1, 2), input[:, d:])
+ else:
+ loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
nb_train_samples += input.size(0)
loss.backward()
optimizer.step()
+ learning_rate_scheduler.update(n_epoch + 1, acc_train_loss)
+
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
test_perplexity = compute_perplexity(
- model, task, fixed_len=task.height * task.width, split="test"
+ model, task, prompt_len=task.height * task.width, split="test"
)
log_string(
checkpoint = {
"nb_epochs_finished": n_epoch + 1,
"model_state": model.state_dict(),
+ "learning_rate_scheduler_state": learning_rate_scheduler.get_state(),
"rng_state": torch.get_rng_state(),
}
log_string(f"saved checkpoint {checkpoint_name}")
######################################################################
-
-if args.oneshot:
- oneshot(model, task)
-
-######################################################################