######################################################################
-if torch.cuda.is_available():
- device = torch.device("cuda")
- torch.backends.cuda.matmul.allow_tf32 = True
-else:
- device = torch.device("cpu")
-
-######################################################################
-
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--nb_threads", type=int, default=1)
-parser.add_argument("--nb_gpus", type=int, default=1)
+parser.add_argument("--gpus", type=str, default="all")
parser.add_argument("--nb_gpts", type=int, default=5)
parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+parser.add_argument("--proba_understands", type=float, default=0.99)
+
+parser.add_argument("--proba_not_understands", type=float, default=0.5)
+
parser.add_argument("--generation_temperature", type=float, default=2.0)
parser.add_argument("--dirty_debug", action="store_true", default=False)
######################################################################
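+# Select the CUDA devices to use: "all" takes every visible device,
+# otherwise --gpus is a comma-separated list of indices such as "0,2".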
+if args.gpus == "all":
+ gpus_idx = range(torch.cuda.device_count())
+else:
+ gpus_idx = [int(k) for k in args.gpus.split(",")]
+
+gpus = [torch.device(f"cuda:{n}") for n in gpus_idx]
+
+if torch.cuda.is_available():
+ main_device = gpus[0]
+else:
+ assert len(gpus) == 0, "--gpus specified but CUDA is not available"
+ main_device = torch.device("cpu")
+
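+# --dirty_debug shrinks the workload so a full run can be smoke-tested quickly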
if args.dirty_debug:
args.nb_train_samples = 2500
args.nb_test_samples = 100
nb_birds=args.sky_nb_birds,
nb_iterations=args.sky_nb_iterations,
speed=args.sky_speed,
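+ # with chunk_size=100 this caches up to one training set's worth of
+ # quizzes per device (at least one set's worth on a CPU-only run)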
- max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100,
+ max_nb_cached_chunks=max(len(gpus), 1) * args.nb_train_samples // 100,
chunk_size=100,
nb_threads=args.nb_threads,
)
back_accuracy = False
elif args.problem == "grids":
problem = grids.Grids(
- max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100,
+ max_nb_cached_chunks=max(len(gpus), 1) * args.nb_train_samples // 100,
chunk_size=100,
nb_threads=args.nb_threads,
tasks=args.grids_tasks,
batch_size=args.physical_batch_size,
result_dir=args.result_dir,
logger=log_string,
- device=device,
+ device=main_device,
)
######################################################################
-log_string(f"device {device}")
+log_string(f"main_device {main_device} gpus {[ str(g) for g in gpus]}")
vocabulary_size = quiz_machine.vocabulary_size()
######################################################################
-######################################################################
-
-
-def run_tests(model, quiz_machine, deterministic_synthesis, local_device=None):
- if local_device is None:
- local_device = device
-
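+# main_device is bound as the default once, at definition time, which is
+# safe because it is fixed at startup and never reassigned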
+def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device):
with torch.autograd.no_grad():
model.eval().to(local_device)
)
-def one_epoch(model, quiz_machine, local_device=None):
- if local_device is None:
- local_device = device
-
+def one_epoch(model, quiz_machine, local_device=main_device):
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
model.to(local_device).train()
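+# Keep a c-quiz when the sorted per-model log-probas show a clear gap:
+# the weakest model fails it (proba below --proba_not_understands) while
+# all the others solve it (second-lowest proba above --proba_understands).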
def standard_validity(logproba):
l = logproba.sort(dim=-1).values
- return (l[:, 0] < math.log(0.5)) & (l[:, 1] > math.log(0.99))
+ return (l[:, 0] < math.log(args.proba_not_understands)) & (
+ l[:, 1] > math.log(args.proba_understands)
+ )
def valid_c_quizzes(recorded, criteria):
nb_blocks=args.nb_blocks,
causal=True,
dropout=args.dropout,
- ).to(device)
+ ).to(main_device)
model.main_test_accuracy = 0.0
model.id = k
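+# Each epoch, rank the models by test accuracy and retrain only the
+# weakest ones, one per available GPU, each in its own daemon thread.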
ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))
- weakest_models = ranked_models[: args.nb_gpus]
+ weakest_models = ranked_models[: len(gpus)]
threads = []
- for gpu_id, model in enumerate(weakest_models):
+ for gpu, model in zip(gpus, weakest_models):
log_string(f"training model {model.id}")
t = threading.Thread(
- target=one_epoch, daemon=True, args=(model, quiz_machine, f"cuda:{gpu_id}")
+ target=one_epoch, daemon=True, args=(model, quiz_machine, gpu)
)
threads.append(t)