dropout=args.dropout,
).to(main_device)
- # if i < args.nb_models//2:
- # model = TokenCat(model, 10)
-
# model = torch.compile(model)
model.id = i
log_string(f"generate_c_quizz_speed {int(3600 * nb / duration)}/h")
- return torch.cat(record)
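+ # return the concatenated record on the CPU so results produced on different GPUs can be combined on the host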
+ return torch.cat(record).to("cpu")
######################################################################
def multithread_execution(fun, arguments):
+ # Only one set of arguments: call fun directly, no worker thread needed
if len(arguments) == 1:
return fun(*(arguments[0]))
nb_gpus = len(gpus)
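# ceiling division: each GPU gets an equal share, rounded up so the total covers args.nb_c_quizzes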
nb_c_quizzes_to_generate = (args.nb_c_quizzes + nb_gpus - 1) // nb_gpus
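# run generate_c_quizzes once per GPU, in parallel when several GPUs are available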
- c_quizzes, agreements = multithread_execution(
+ c_quizzes = multithread_execution(
generate_c_quizzes,
[(models, nb_c_quizzes_to_generate, gpu) for gpu in gpus],
)