parser.add_argument("--min_learning_rate", type=float, default=6e-5)
+# legacy learning rate schedule
+
+parser.add_argument("--legacy_lr_schedule", action="store_true", default=False)
+
+parser.add_argument("--legacy_learning_rate", type=float, default=1e-4)
+
+parser.add_argument("--legacy_min_learning_rate", type=float, default=2e-5)
+
+parser.add_argument("--nb_large_lr_epochs", type=float, default=10)
+
########################################
parser.add_argument("--model", type=str, default=None)
######################################################################
-# from nanoGPT
+def get_lr(n_epoch, it):
+    if args.legacy_lr_schedule:
+        # my crude schedule, kept to compare against the previous
+        # baseline, with warmup added
+
+        if it < args.nb_warmup_iter:
+            return args.legacy_learning_rate * it / args.nb_warmup_iter
+        elif n_epoch < args.nb_large_lr_epochs:
+            return args.legacy_learning_rate
+        else:
+            return args.legacy_min_learning_rate
+
+    # from nanoGPT
-def get_lr(it):
    # 1) linear warmup for warmup_iter steps
    if it < args.nb_warmup_iter:
        return args.learning_rate * it / args.nb_warmup_iter
total_loss = loss + (args.rho * inner_loss if args.rho > 0 else 0.0)
it += 1
- lr = get_lr(it)
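+ # the epoch is passed along so the legacy schedule can switch on it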
+ lr = get_lr(n_epoch, it)
for param_group in optimizer.param_groups:
param_group["lr"] = lr
class Task:
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
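+        # desc: optional progress-bar label (presumably forwarded to tqdm by
+        # subclasses); the base implementation ignores it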
        pass
    def vocabulary_size(self):
        self.train_input = self.tensorize(self.train_descr)
        self.test_input = self.tensorize(self.test_descr)
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
        for batch in tqdm.tqdm(