X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=1a17e51a63741da0fa8c9f2910a319c331667c14;hb=a09ee76c8283b7daf4c914df47f86d1964fc25d4;hp=74e70b2a9780601053e51e0d359d082e7a869e1d;hpb=cb737bdbd2f112826f739e4581fbe6546aeef638;p=mygptrnn.git
diff --git a/main.py b/main.py
index 74e70b2..1a17e51 100755
--- a/main.py
+++ b/main.py
@@ -66,6 +66,16 @@ parser.add_argument("--learning_rate", type=float, default=6e-4)
 
 parser.add_argument("--min_learning_rate", type=float, default=6e-5)
 
+# legacy learning-rate schedule
+
+parser.add_argument("--legacy_lr_schedule", action="store_true", default=False)
+
+parser.add_argument("--legacy_learning_rate", type=float, default=1e-4)
+
+parser.add_argument("--legacy_min_learning_rate", type=float, default=2e-5)
+
+parser.add_argument("--nb_large_lr_epochs", type=float, default=10)
+
 ########################################
 
 parser.add_argument("--model", type=str, default=None)
@@ -460,10 +470,21 @@ for n in vars(args):
 
 ######################################################################
 
-# from nanoGPT
 
+def get_lr(n_epoch, it):
+    if args.legacy_lr_schedule:
+        # crude schedule for comparison against the previous baseline,
+        # with warmup added
+
+        if it < args.nb_warmup_iter:
+            return args.legacy_learning_rate * it / args.nb_warmup_iter
+        elif n_epoch < args.nb_large_lr_epochs:
+            return args.legacy_learning_rate
+        else:
+            return args.legacy_min_learning_rate
+
+    # from nanoGPT
 
-def get_lr(it):
     # 1) linear warmup for warmup_iter steps
     if it < args.nb_warmup_iter:
         return args.learning_rate * it / args.nb_warmup_iter
@@ -848,7 +869,7 @@ for n_epoch in range(nb_epochs_finished, nb_epochs):
         total_loss = loss + (args.rho * inner_loss if args.rho > 0 else 0.0)
 
         it += 1
-        lr = get_lr(it)
+        lr = get_lr(n_epoch, it)
 
         for param_group in optimizer.param_groups:
             param_group["lr"] = lr
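
For quick inspection of what the new --legacy_lr_schedule flag does, the following is a minimal self-contained sketch of the schedule logic, decoupled from the training loop. The Args dataclass stands in for the parsed argparse namespace; nb_warmup_iter is referenced by the diff but defined outside this excerpt, and the cosine-decay tail of the nanoGPT branch is not visible in these hunks, so the nb_decay_iter horizon and the decay shape below are assumptions rather than the file's actual code.

    import math
    from dataclasses import dataclass


    @dataclass
    class Args:
        # defaults taken from the argparse calls in the diff
        learning_rate: float = 6e-4
        min_learning_rate: float = 6e-5
        legacy_lr_schedule: bool = False
        legacy_learning_rate: float = 1e-4
        legacy_min_learning_rate: float = 2e-5
        nb_large_lr_epochs: float = 10
        nb_warmup_iter: int = 100    # assumed; defined elsewhere in main.py
        nb_decay_iter: int = 5000    # hypothetical stand-in for the decay horizon


    def get_lr(args, n_epoch, it):
        if args.legacy_lr_schedule:
            # legacy branch: linear warmup, then a large-LR plateau for the
            # first nb_large_lr_epochs epochs, then a small-LR plateau
            if it < args.nb_warmup_iter:
                return args.legacy_learning_rate * it / args.nb_warmup_iter
            elif n_epoch < args.nb_large_lr_epochs:
                return args.legacy_learning_rate
            else:
                return args.legacy_min_learning_rate

        # nanoGPT-style branch: same linear warmup ...
        if it < args.nb_warmup_iter:
            return args.learning_rate * it / args.nb_warmup_iter
        # ... then cosine decay toward min_learning_rate (assumed shape;
        # this part lies outside the excerpt shown above)
        t = min((it - args.nb_warmup_iter) / args.nb_decay_iter, 1.0)
        coeff = 0.5 * (1.0 + math.cos(math.pi * t))
        return args.min_learning_rate + coeff * (
            args.learning_rate - args.min_learning_rate
        )


    if __name__ == "__main__":
        args = Args(legacy_lr_schedule=True)
        for it in [0, 50, 100, 1000]:
            n_epoch = it // 100  # hypothetical: 100 iterations per epoch
            print(f"it={it:5d} epoch={n_epoch:3d} lr={get_lr(args, n_epoch, it):.2e}")

With the defaults above, the legacy branch yields three regimes: a linear ramp over the first nb_warmup_iter iterations, a 1e-4 plateau until epoch nb_large_lr_epochs, and 2e-5 thereafter, which matches the two-plateau baseline behaviour the commit reintroduces behind a flag while leaving the nanoGPT schedule as the default.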