######################################################################
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    torch.backends.cuda.matmul.allow_tf32 = True
-else:
-    device = torch.device("cpu")
-
-######################################################################
-
def str2bool(x):
    x = x.lower()
parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
+parser.add_argument("--force_cpu", type=str2bool, default=False)
+
########################################
parser.add_argument("--nb_epochs", type=int, default=50)
parser.add_argument("--no_checkpoint", action="store_true", default=False)
-parser.add_argument("--overwrite_results", action="store_true", default=False)
+parser.add_argument("--continue_training", action="store_true", default=False)
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
parser.add_argument("--grid_size", type=int, default=6)
+parser.add_argument("--grid_nb_colors", type=int, default=6)
+
+parser.add_argument("--grid_nb_shapes", type=int, default=6)
+
##############################
# picoclvr options
######################################################################
-args = parser.parse_args()
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
+args, sup_args = parser.parse_known_args()
+
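+# unrecognized "--key=value" options are collected into sup_args and later
+# forwarded to the model constructor via **sup_args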
+sup_args = dict(x.removeprefix("--").split("=", 1) for x in sup_args)
if args.result_dir is None:
args.result_dir = f"results_{args.task}_{args.model}"
######################################################################
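+# the device is chosen only after argument parsing, so that --force_cpu
+# can take effect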
+if not args.force_cpu and torch.cuda.is_available():
+    device = torch.device("cuda")
+    torch.backends.cuda.matmul.allow_tf32 = True
+else:
+    device = torch.device("cpu")
+
+######################################################################
+
default_task_args = {
    "addition": {
        "model": "352M",
try:
    os.mkdir(args.result_dir)
except FileExistsError:
-    if not args.overwrite_results:
+    if not args.continue_training:
        print(f"result directory {args.result_dir} already exists")
        exit(1)
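+# per-batch training trace, one line per batch: "epoch batch loss grad_norm"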
+loss_file = open(os.path.join(args.result_dir, "loss.dat"), "a")
+
log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
if args.seed >= 0:
for n in vars(args):
    log_string(f"args.{n} {getattr(args, n)}")
+for k, v in sup_args.items():
+    log_string(f'sup_args["{k}"] "{v}"')
+
######################################################################
######################################################################
+assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
+
+
def picoclvr_pruner_horizontal_green(p):
    return not ("green" in p and ("left" in p or "right" in p))
    nb_test_samples=args.nb_test_samples,
    batch_size=args.batch_size,
    size=args.grid_size,
+    nb_shapes=args.grid_nb_shapes,
+    nb_colors=args.grid_nb_colors,
    logger=log_string,
    device=device_data,
)
    causal=True,
    dropout=args.dropout,
    attention_layer=args.attention,
+    logger=log_string,
+    **sup_args,
)
model.to(device)
##############################
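+# optional calibration pass, enabled by passing e.g. "--calibrate=1" as a
+# supplementary option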
+if "calibrate" in sup_args:
+ for input in task.batches(split="train", desc="calibrate"):
+ input = input.to(device)
+ output = model(mygpt.BracketedSequence(input)).x
+
+ for n, m in model.named_modules():
+ for a in dir(m):
+ x = getattr(m, a)
+ if isinstance(x, mygpt.Calibrator):
+ print(f"####### ${n} | ${a} ########################")
+ mean, std = x.moments()
+ print("mean\n", mean, "\n")
+ print("std\n", std, "\n")
+ print(f"############################################\n\n")
+
+ exit(0)
+
+##############################
+
nb_samples_seen = 0
if nb_epochs_finished >= nb_epochs:
        deterministic_synthesis=args.deterministic_synthesis,
    )
-time_pred_result = None
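+# start from now() so the first "next_result" estimate can be logged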
+time_pred_result = datetime.datetime.now()
it = 0
+n_batch = 0
+
for n_epoch in range(nb_epochs_finished, nb_epochs):
    if args.optim == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
        total_loss.backward()
        optimizer.step()
+        # gradient norm for the per-batch trace; p.grad is still populated
+        # right after optimizer.step()
+        grad_norm = sum(
+            p.grad.pow(2).sum() for p in model.parameters() if p.grad is not None
+        ).sqrt()
+
+        loss_file.write(f"{n_epoch} {n_batch} {loss.item()} {grad_norm.item()}\n")
+
+        n_batch += 1
+
    with torch.autograd.no_grad():
        model.eval()
    )
    time_current_result = datetime.datetime.now()
-    if time_pred_result is not None:
-        log_string(
-            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
-        )
+    log_string(
+        f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+    )
    time_pred_result = time_current_result
checkpoint = {