X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=3e67a73fab1452f0d1f97aff1f890fe3ab70bcab;hb=2434c00a82ebb0b23f45d891cc9f80324e3200bd;hp=c51035c118f6480f2f33560b8aeea020c4cfbf28;hpb=ffe183868ac8563fd82fc8312fda90f6f8a95833;p=mygptrnn.git

diff --git a/main.py b/main.py
index c51035c..3e67a73 100755
--- a/main.py
+++ b/main.py
@@ -16,14 +16,6 @@ import mygpt, tasks, problems
 
 ######################################################################
 
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    torch.backends.cuda.matmul.allow_tf32 = True
-else:
-    device = torch.device("cpu")
-
-######################################################################
-
 
 def str2bool(x):
     x = x.lower()
@@ -55,6 +47,8 @@ parser.add_argument("--seed", type=int, default=0)
 
 parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
 
+parser.add_argument("--force_cpu", type=str2bool, default=False)
+
 ########################################
 
 parser.add_argument("--nb_epochs", type=int, default=50)
@@ -208,15 +202,25 @@ parser.add_argument("--mixing_deterministic_start", action="store_true", default
 
 ######################################################################
 
-args = parser.parse_args()
+# args = parser.parse_args()
 
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
+args, sup_args = parser.parse_known_args()
+
+sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])
 
 if args.result_dir is None:
     args.result_dir = f"results_{args.task}_{args.model}"
 
 ######################################################################
 
+if not args.force_cpu and torch.cuda.is_available():
+    device = torch.device("cuda")
+    torch.backends.cuda.matmul.allow_tf32 = True
+else:
+    device = torch.device("cpu")
+
+######################################################################
+
 default_task_args = {
     "addition": {
         "model": "352M",
@@ -430,6 +434,8 @@ except FileExistsError:
     print(f"result directory {args.result_dir} already exists")
     exit(1)
 
+loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")
+
 log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
 
 if args.seed >= 0:
@@ -466,6 +472,9 @@ log_string(f"argv {' '.join(sys.argv)}")
 for n in vars(args):
     log_string(f"args.{n} {getattr(args, n)}")
 
+for k, v in sup_args.items():
+    log_string(f'sup_args["{k}"] "{v}"')
+
 
 ######################################################################
 
@@ -503,6 +512,9 @@ def get_lr(n_epoch, it):
 ######################################################################
 
 
+assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
+
+
 def picoclvr_pruner_horizontal_green(p):
     return not ("green" in p and ("left" in p or "right" in p))
 
@@ -728,6 +740,8 @@ model = mygpt.MyGPT(
     causal=True,
     dropout=args.dropout,
     attention_layer=args.attention,
+    logger=log_string,
+    **sup_args,
 )
 
 model.to(device)
@@ -832,10 +846,12 @@ if nb_epochs_finished >= nb_epochs:
         deterministic_synthesis=args.deterministic_synthesis,
     )
 
-time_pred_result = None
+time_pred_result = datetime.datetime.now()
 
 it = 0
 
+n_batch = 0
+
 for n_epoch in range(nb_epochs_finished, nb_epochs):
     if args.optim == "sgd":
         optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
@@ -877,6 +893,12 @@ for n_epoch in range(nb_epochs_finished, nb_epochs):
         total_loss.backward()
         optimizer.step()
 
+        grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt()
+
+        loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")
+
+        n_batch += 1
+
     with torch.autograd.no_grad():
         model.eval()
@@ -910,10 +932,9 @@ for n_epoch in range(nb_epochs_finished, nb_epochs):
         )
 
         time_current_result = datetime.datetime.now()
-        if time_pred_result is not None:
-            log_string(
-                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
-            )
+        log_string(
+            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+        )
         time_pred_result = time_current_result
 
     checkpoint = {
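For reference, a minimal sketch of the unknown-option plumbing the patch introduces: parse_known_args() returns the populated namespace plus the argv tokens it did not consume, and the dict comprehension turns each leftover "--key=value" token into an entry of sup_args, which is then forwarded to mygpt.MyGPT as keyword arguments. The --caterpillar_height flag below is made up for illustration; note that, like the patched code, this assumes every extra option is written in --key=value form (a bare --flag would make dict() fail, since split("=") yields a single element), and that str.removeprefix requires Python >= 3.9.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--nb_epochs", type=int, default=50)

# parse_known_args() does not error out on unknown options; it returns
# them verbatim as a list of leftover argv tokens.
args, sup_args = parser.parse_known_args(["--nb_epochs=10", "--caterpillar_height=4"])

# Each leftover "--key=value" token becomes a (key, value) pair.
sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])

print(args.nb_epochs)  # 10
print(sup_args)        # {'caterpillar_height': '4'}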
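Similarly, the per-batch record appended to loss.dat logs the global L2 norm of the gradient: the squared entries of every parameter's gradient are summed, then the square root of the total is taken. A self-contained sketch of that computation, using a hypothetical stand-in model; the "if p.grad is not None" guard is an addition of this sketch (the patched code assumes every parameter received a gradient):

import torch

model = torch.nn.Linear(8, 4)  # toy stand-in model, not the one from the patch
loss = model(torch.randn(2, 8)).pow(2).mean()
loss.backward()

# Global L2 norm of the gradient: sum the squared entries over all
# parameters, then take the square root of the total.
grad_norm = sum(p.grad.pow(2).sum() for p in model.parameters() if p.grad is not None).sqrt()

# Same "loss grad_norm" fields as the tail of each loss.dat record.
print(f"{loss.item()} {grad_norm.item()}")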