X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=ec50722f7f6f8e295563ff6960d45ad9490e4f0a;hb=6d23462ce76c9020dcd7c4bc8a0e7a0fae9b7971;hp=18c0730d7608866041a35874da1c1dc0b641133f;hpb=a3abd0f58cfb2f2448c82db836093d20dc2954f2;p=mygptrnn.git diff --git a/main.py b/main.py index 18c0730..ec50722 100755 --- a/main.py +++ b/main.py @@ -16,13 +16,16 @@ import mygpt, tasks, problems ###################################################################### -if torch.cuda.is_available(): - device = torch.device("cuda") - torch.backends.cuda.matmul.allow_tf32 = True -else: - device = torch.device("cpu") -###################################################################### +def str2bool(x): + x = x.lower() + if x in {"1", "true", "yes"}: + return True + elif x in {"0", "false", "no"}: + return False + else: + raise ValueError + parser = argparse.ArgumentParser( description="An implementation of GPT with cache.", @@ -44,6 +47,8 @@ parser.add_argument("--seed", type=int, default=0) parser.add_argument("--max_percents_of_test_in_train", type=int, default=1) +parser.add_argument("--force_cpu", type=str2bool, default=False) + ######################################## parser.add_argument("--nb_epochs", type=int, default=50) @@ -68,7 +73,7 @@ parser.add_argument("--min_learning_rate", type=float, default=6e-5) # legacy -parser.add_argument("--legacy_lr_schedule", action="store_true", default=False) +parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True) parser.add_argument("--legacy_large_lr", type=float, default=1e-4) @@ -94,9 +99,13 @@ parser.add_argument("--nb_lines", type=int, default=None) parser.add_argument("--caterpillar_height", type=int, default=None) -parser.add_argument("--rho", type=float, default=0.0) +parser.add_argument("--gate_dropout_proba", type=float, default=0.0) + +parser.add_argument("--gate_dropout_sync", type=str2bool, default=True) -parser.add_argument("--dim_rec_v", type=int, default=None) +parser.add_argument("--gate_dropout_replace", type=str2bool, default=True) + +parser.add_argument("--rho_inner_loss", type=float, default=0.0) parser.add_argument("--nb_blocks", type=int, default=None) @@ -108,7 +117,7 @@ parser.add_argument("--deterministic_synthesis", action="store_true", default=Fa parser.add_argument("--no_checkpoint", action="store_true", default=False) -parser.add_argument("--overwrite_results", action="store_true", default=False) +parser.add_argument("--continue_training", action="store_true", default=False) parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth") @@ -130,6 +139,10 @@ parser.add_argument("--rpl_no_prog", action="store_true", default=False) parser.add_argument("--grid_size", type=int, default=6) +parser.add_argument("--grid_nb_colors", type=int, default=6) + +parser.add_argument("--grid_nb_shapes", type=int, default=6) + ############################## # picoclvr options @@ -199,15 +212,25 @@ parser.add_argument("--mixing_deterministic_start", action="store_true", default ###################################################################### -args = parser.parse_args() +# args = parser.parse_args() -assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"} +args, sup_args = parser.parse_known_args() + +sup_args = dict([x.removeprefix("--").split("=") for x in sup_args]) if args.result_dir is None: args.result_dir = f"results_{args.task}_{args.model}" ###################################################################### +if not args.force_cpu and torch.cuda.is_available(): 
+ device = torch.device("cuda") + torch.backends.cuda.matmul.allow_tf32 = True +else: + device = torch.device("cpu") + +###################################################################### + default_task_args = { "addition": { "model": "352M", @@ -321,7 +344,6 @@ default_model_args = { "dim_keys": 32, "dim_hidden": 32, "nb_heads": 2, - "dim_rec_v": 16, "nb_blocks": 2, }, "17K-C": { @@ -332,7 +354,6 @@ default_model_args = { "nb_heads": 2, "nb_lines": 16, "caterpillar_height": 4, - "dim_rec_v": 16, "nb_blocks": 2, }, "4M": { @@ -341,7 +362,6 @@ default_model_args = { "dim_keys": 32, "dim_hidden": 1024, "nb_heads": 4, - "dim_rec_v": 64, "nb_blocks": 6, }, "4M-C": { @@ -352,7 +372,6 @@ default_model_args = { "nb_heads": 4, "nb_lines": 32, "caterpillar_height": 4, - "dim_rec_v": 64, # dim_model / nb_heads "nb_blocks": 6, }, "37M": { @@ -361,7 +380,6 @@ default_model_args = { "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, - "dim_rec_v": 64, "nb_blocks": 12, }, "37M-C": { @@ -372,7 +390,6 @@ default_model_args = { "nb_heads": 8, "nb_lines": 256, "caterpillar_height": 32, - "dim_rec_v": 64, "nb_blocks": 12, }, "122M": { @@ -381,7 +398,6 @@ default_model_args = { "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, - "dim_rec_v": 96, "nb_blocks": 24, }, "122M-C": { @@ -391,7 +407,6 @@ default_model_args = { "dim_hidden": 2048, "nb_heads": 8, "nb_lines": 128, - "dim_rec_v": 96, "nb_blocks": 24, }, "352M": { @@ -400,7 +415,6 @@ default_model_args = { "dim_keys": 64, "dim_hidden": 2048, "nb_heads": 8, - "dim_rec_v": 128, "nb_blocks": 48, }, "352M-C": { @@ -410,7 +424,6 @@ default_model_args = { "dim_hidden": 2048, "nb_heads": 8, "nb_lines": 128, - "dim_rec_v": 128, "nb_blocks": 48, }, } @@ -427,10 +440,12 @@ else: try: os.mkdir(args.result_dir) except FileExistsError: - if not args.overwrite_results: + if not args.continue_training: print(f"result directory {args.result_dir} already exists") exit(1) +loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a") + log_file = open(os.path.join(args.result_dir, args.log_filename), "a") if args.seed >= 0: @@ -467,6 +482,9 @@ log_string(f"argv {' '.join(sys.argv)}") for n in vars(args): log_string(f"args.{n} {getattr(args, n)}") +for k, v in sup_args.items(): + log_string(f'sup_args["{k}"] "{v}"') + ###################################################################### @@ -504,6 +522,9 @@ def get_lr(n_epoch, it): ###################################################################### +assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"} + + def picoclvr_pruner_horizontal_green(p): return not ("green" in p and ("left" in p or "right" in p)) @@ -690,6 +711,8 @@ elif args.task == "grid": nb_test_samples=args.nb_test_samples, batch_size=args.batch_size, size=args.grid_size, + nb_shapes=args.grid_nb_shapes, + nb_colors=args.grid_nb_colors, logger=log_string, device=device_data, ) @@ -725,11 +748,12 @@ model = mygpt.MyGPT( nb_heads=args.nb_heads, nb_lines=args.nb_lines, caterpillar_height=args.caterpillar_height, - dim_rec_v=args.dim_rec_v, nb_blocks=args.nb_blocks, causal=True, dropout=args.dropout, attention_layer=args.attention, + logger=log_string, + args=args, ) model.to(device) @@ -823,6 +847,25 @@ if args.max_percents_of_test_in_train >= 0: ############################## +if "calibrate" in sup_args: + for input in task.batches(split="train", desc="calibrate"): + input = input.to(device) + output = model(mygpt.BracketedSequence(input)).x + + for n, m in model.named_modules(): + for a in dir(m): + x = getattr(m, a) + if 
isinstance(x, mygpt.Calibrator): + print(f"####### ${n} | ${a} ########################") + mean, std = x.moments() + print("mean\n", mean, "\n") + print("std\n", std, "\n") + print(f"############################################\n\n") + + exit(0) + +############################## + nb_samples_seen = 0 if nb_epochs_finished >= nb_epochs: @@ -834,10 +877,12 @@ if nb_epochs_finished >= nb_epochs: deterministic_synthesis=args.deterministic_synthesis, ) -time_pred_result = None +time_pred_result = datetime.datetime.now() it = 0 +n_batch = 0 + for n_epoch in range(nb_epochs_finished, nb_epochs): if args.optim == "sgd": optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate) @@ -866,7 +911,9 @@ for n_epoch in range(nb_epochs_finished, nb_epochs): nb_train_samples += input.size(0) nb_samples_seen += input.size(0) - total_loss = loss + (args.rho * inner_loss if args.rho > 0 else 0.0) + total_loss = loss + ( + args.rho_inner_loss * inner_loss if args.rho_inner_loss > 0 else 0.0 + ) it += 1 lr = get_lr(n_epoch, it) @@ -879,6 +926,12 @@ for n_epoch in range(nb_epochs_finished, nb_epochs): total_loss.backward() optimizer.step() + grad_norm = sum([p.grad.pow(2).sum() for p in model.parameters()]).sqrt() + + loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n") + + n_batch += 1 + with torch.autograd.no_grad(): model.eval() @@ -912,10 +965,9 @@ for n_epoch in range(nb_epochs_finished, nb_epochs): ) time_current_result = datetime.datetime.now() - if time_pred_result is not None: - log_string( - f"next_result {time_current_result + (time_current_result - time_pred_result)}" - ) + log_string( + f"next_result {time_current_result + (time_current_result - time_pred_result)}" + ) time_pred_result = time_current_result checkpoint = {
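######################################################################

A minimal, self-contained sketch of how two of the changes above interact: options converted from action="store_true" to type=str2bool now take an explicit --flag=yes/no value on the command line, and the switch from parse_args() to parse_known_args() collects any unrecognized --key=value options into the sup_args dict. The flags --force_cpu and --legacy_lr_schedule are taken from the diff; the extra --calibrate=... option and its value are purely illustrative (the new code only checks whether a "calibrate" key is present in sup_args).

import argparse


def str2bool(x):
    # copied verbatim from the diff above
    x = x.lower()
    if x in {"1", "true", "yes"}:
        return True
    elif x in {"0", "false", "no"}:
        return False
    else:
        raise ValueError


parser = argparse.ArgumentParser()
parser.add_argument("--force_cpu", type=str2bool, default=False)
parser.add_argument("--legacy_lr_schedule", type=str2bool, default=True)

# parse_known_args() keeps unrecognized options instead of exiting; the diff
# then turns the leftovers into a {name: value} dict by splitting on "=".
args, sup_args = parser.parse_known_args(
    ["--force_cpu=yes", "--legacy_lr_schedule=no", "--calibrate=attention"]
)
sup_args = dict([x.removeprefix("--").split("=") for x in sup_args])

print(args.force_cpu)           # True
print(args.legacy_lr_schedule)  # False
print(sup_args)                 # {'calibrate': 'attention'}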
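The training loop now also appends one line per batch to loss.dat in the result directory, with four whitespace-separated columns: n_epoch, n_batch, loss, grad_norm. A short sketch of reading that file back; the directory name below only illustrates the default results_{task}_{model} naming scheme and is not taken from an actual run.

# loss.dat columns, as written by the new loss_file.write(...) call:
#   n_epoch  n_batch  loss  grad_norm
with open("results_addition_352M/loss.dat") as f:  # illustrative path
    rows = [line.split() for line in f if line.strip()]

losses = [float(r[2]) for r in rows]
grad_norms = [float(r[3]) for r in rows]
print(f"{len(rows)} batches, last loss {losses[-1]:.4f}, last grad norm {grad_norms[-1]:.4f}")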