X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=6b00bbfd991178841468b75006f94121668c2b4f;hb=refs%2Fheads%2Fmaster;hp=1ef01e9c510ba9fbb2df269c19809f1f30d639bb;hpb=050976a525fee2d3b824350a3058ab7299a2bd3d;p=culture.git diff --git a/main.py b/main.py index 1ef01e9..6b00bbf 100755 --- a/main.py +++ b/main.py @@ -18,15 +18,7 @@ import sky, grids, quiz_machine import threading -# world quizzes vs. culture quizzes - -###################################################################### - -if torch.cuda.is_available(): - device = torch.device("cuda") - torch.backends.cuda.matmul.allow_tf32 = True -else: - device = torch.device("cpu") +import torch.multiprocessing as mp ###################################################################### @@ -40,6 +32,8 @@ parser.add_argument("--result_dir", type=str, default=None) parser.add_argument("--seed", type=int, default=0) +parser.add_argument("--resume", action="store_true", default=False) + parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1) ######################################## @@ -54,6 +48,10 @@ parser.add_argument("--nb_train_samples", type=int, default=None) parser.add_argument("--nb_test_samples", type=int, default=None) +parser.add_argument("--nb_new_c_quizzes_for_train", type=int, default=None) + +parser.add_argument("--nb_new_c_quizzes_for_test", type=int, default=None) + parser.add_argument("--learning_rate", type=float, default=5e-4) ######################################## @@ -80,23 +78,32 @@ parser.add_argument("--problem", type=str, default="grids") parser.add_argument("--nb_threads", type=int, default=1) -parser.add_argument("--nb_gpus", type=int, default=1) +parser.add_argument("--gpus", type=str, default="all") parser.add_argument("--nb_gpts", type=int, default=5) -parser.add_argument("--min_to_validate", type=int, default=None) +parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.9) -parser.add_argument("--max_to_validate", type=int, default=None) +parser.add_argument("--proba_understands", type=float, default=0.9) -parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975) +parser.add_argument("--proba_not_understands", type=float, default=0.5) -parser.add_argument("--generation_temperature", type=float, default=2.0) +parser.add_argument("--generation_temperature", type=float, default=1.0) -parser.add_argument("--deterministic_validation", action="store_true", default=False) +parser.add_argument("--dirty_debug", action="store_true", default=False) -parser.add_argument("--bidirectional_validation", action="store_true", default=False) +###################################################################### -parser.add_argument("--dirty_debug", action="store_true", default=False) +grids_tasks = ", ".join( + [x.__name__.removeprefix("task_") for x in grids.Grids().all_tasks] +) + +parser.add_argument( + "--grids_tasks", + type=str, + default=None, + help="A comma-separated subset of: " + grids_tasks + ", or None for all.", +) ###################################################################### @@ -114,12 +121,6 @@ parser.add_argument("--sky_speed", type=int, default=3) args = parser.parse_args() -if args.min_to_validate is None: - args.min_to_validate = args.nb_gpts - 1 - -if args.max_to_validate is None: - args.max_to_validate = args.nb_gpts - 1 - if args.result_dir is None: args.result_dir = f"results_culture" @@ -185,11 +186,15 @@ else: ###################################################################### -try: - 
os.mkdir(args.result_dir) -except FileExistsError: - print(f"result directory {args.result_dir} already exists") - exit(1) +if args.resume: + assert os.path.isdir(args.result_dir) + +else: + try: + os.mkdir(args.result_dir) + except FileExistsError: + print(f"result directory {args.result_dir} already exists") + exit(1) log_file = open(os.path.join(args.result_dir, args.log_filename), "a") @@ -215,6 +220,10 @@ def log_string(s): sys.stdout.flush() +now = time.strftime("%Y%m%d-%H%M%S", time.localtime()) + +os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py") + log_string(f"argv {' '.join(sys.argv)}") for n in vars(args): @@ -223,6 +232,19 @@ for n in vars(args): ###################################################################### +if args.gpus == "all": + gpus_idx = range(torch.cuda.device_count()) +else: + gpus_idx = [int(k) for k in args.gpus.split(",")] + +gpus = [torch.device(f"cuda:{n}") for n in gpus_idx] + +if torch.cuda.is_available(): + main_device = gpus[0] +else: + assert len(gpus) == 0 + main_device = torch.device("cpu") + if args.dirty_debug: args.nb_train_samples = 2500 args.nb_test_samples = 100 @@ -242,21 +264,24 @@ if args.problem == "sky": nb_birds=args.sky_nb_birds, nb_iterations=args.sky_nb_iterations, speed=args.sky_speed, - max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100, + max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100, chunk_size=100, nb_threads=args.nb_threads, ) back_accuracy = False elif args.problem == "grids": problem = grids.Grids( - max_nb_cached_chunks=args.nb_gpus * args.nb_train_samples // 100, + max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100, chunk_size=100, nb_threads=args.nb_threads, + tasks=args.grids_tasks, ) back_accuracy = True else: raise ValueError +problem.save_some_examples(args.result_dir) + quiz_machine = quiz_machine.QuizMachine( problem=problem, nb_train_samples=args.nb_train_samples, @@ -265,12 +290,12 @@ quiz_machine = quiz_machine.QuizMachine( batch_size=args.physical_batch_size, result_dir=args.result_dir, logger=log_string, - device=device, + device=main_device, ) ###################################################################### -log_string(f"device {device}") +log_string(f"main_device {main_device} gpus {[ str(g) for g in gpus]}") vocabulary_size = quiz_machine.vocabulary_size() @@ -279,13 +304,7 @@ log_string(f"vocabulary_size {vocabulary_size}") ###################################################################### -###################################################################### - - -def run_tests(model, quiz_machine, deterministic_synthesis, local_device=None): - if local_device is None: - local_device = device - +def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device): with torch.autograd.no_grad(): model.eval().to(local_device) @@ -306,7 +325,7 @@ def run_tests(model, quiz_machine, deterministic_synthesis, local_device=None): test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples)) - log_string(f"test_perplexity {n_epoch} {test_perplexity}") + log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}") model.main_test_accuracy = quiz_machine.produce_results( n_epoch=n_epoch, @@ -316,14 +335,11 @@ def run_tests(model, quiz_machine, deterministic_synthesis, local_device=None): ) -def one_epoch(model, quiz_machine, local_device=None): - if local_device is None: - local_device = device +def one_epoch(model, quiz_machine, local_device=main_device): + model.to(local_device).train() optimizer = 
torch.optim.Adam(model.parameters(), lr=args.learning_rate) - model.to(local_device).train() - nb_train_samples, acc_train_loss = 0, 0.0 for input in quiz_machine.batches(model, split="train"): @@ -345,201 +361,110 @@ def one_epoch(model, quiz_machine, local_device=None): train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples)) - log_string(f"train_perplexity {n_epoch} {train_perplexity}") + log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}") run_tests(model, quiz_machine, deterministic_synthesis=False) - model.TRAINING_LOCK.release() + model.to(main_device) ###################################################################### - -def standard_validity(logproba): - l = logproba.sort(dim=-1).values - return (l[:, 0] < math.log(0.5)) & (l[:, 1] > math.log(0.99)) - # warnings.warn("TEST!!!", RuntimeWarning) - # print(l.exp()) - # return (l[:, 0] < math.log(0.99)) +# This is the key routine that decides what generated quizzes to keep -def valid_c_quizzes(recorded, criteria): - result = [q[criteria(lp)] for q, lp in recorded] - return torch.cat(result, dim=0) if len(result) > 0 else torch.tensor([]) +# token_logprobas are NxMxT where M is the number of models -###################################################################### - +def compute_valid_quizzes_(token_logprobas): + warnings.warn("validation with uniform constraints", RuntimeWarning) + l = token_logprobas.min(dim=-1).values.sort(dim=-1).values + return (l[:, 0] < math.log(0.1)) & (l[:, 1] > math.log(0.5)) -def create_c_quizzes( - models, - quiz_machine, - nb_for_train=1000, - nb_for_test=100, -): - quizzes_and_logproba_records = [] - - nb_to_create = nb_for_train + nb_for_test - # ------------------------------------------------------------ - - file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat") - - with open(file_name, "w") as logp_file: - while ( - valid_c_quizzes(quizzes_and_logproba_records, standard_validity).size(0) - < nb_to_create - ): - # Select a model at random to generate the new quizzes - - model_for_generation = models[torch.randint(len(models), (1,))] - - c_quizzes = quiz_machine.generate_quizzes( - nb_to_create, - model_for_generation=model_for_generation, - temperature=args.generation_temperature, - ) - - c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)] - - if c_quizzes.size(0) > 0: - logproba = quiz_machine.logproba_of_solutions(models, c_quizzes) - for l in logproba: - s = " ".join([str(x.item()) for x in l]) - logp_file.write(s + "\n") - quizzes_and_logproba_records.append((c_quizzes, logproba)) - - nb_validated = valid_c_quizzes( - quizzes_and_logproba_records, standard_validity - ).size(0) - - log_string( - f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}" - ) - - # store the new c_quizzes which have been validated - - new_c_quizzes = valid_c_quizzes(quizzes_and_logproba_records, standard_validity) - - quiz_machine.reverse_random_half_in_place(new_c_quizzes) - - quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True) - quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False) +def compute_valid_quizzes(token_logprobas): + l = token_logprobas.sum(dim=-1).sort(dim=-1).values + return (l[:, 0] < math.log(args.proba_not_understands)) & ( + l[:, 1] > math.log(args.proba_understands) + ) - # save a bunch of images to investigate what quizzes with a - # certain nb of correct predictions look like - q = new_c_quizzes[:72] +def 
extract_valid_quizzes_and_logprobas(recorded): + validated_quizzes, validated_logprobas = [], [] + for quizzes, token_logprobas in recorded: + validated_indices = compute_valid_quizzes(token_logprobas) + validated_quizzes.append(quizzes[validated_indices]) + validated_logprobas.append(token_logprobas[validated_indices]) - if q.size(0) > 0: - quiz_machine.save_quizzes(args.result_dir, f"culture_c_quiz_{n_epoch:04d}", q) + if len(validated_quizzes) > 0: + return torch.cat(validated_quizzes, dim=0), torch.cat( + validated_logprobas, dim=0 + ) + else: + return None, None ###################################################################### -def create_c_quizzes_( - models, - quiz_machine, - nb_for_train=1000, - nb_for_test=100, -): - quizzes_and_nb_correct_records = [] - +def create_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=100): nb_to_create = nb_for_train + nb_for_test - # ------------------------------------------------------------ - - standard_validity = lambda nb_correct: (nb_correct >= args.min_to_validate) & ( - nb_correct <= args.max_to_validate - ) - - file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat") - - with open(file_name, "w") as logp_file: - while ( - valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity).size(0) - < nb_to_create - ): - # Select a model at random to generate the new quizzes - - model_for_generation = models[torch.randint(len(models), (1,))] + recorded_quizzes_logprobas = [] - c_quizzes = quiz_machine.generate_quizzes( - nb_to_create, - model_for_generation=model_for_generation, - temperature=args.generation_temperature, - ) + nb_validated = 0 - # if args.prediction_correctness: + while nb_validated < nb_to_create: + model_for_generation = models[torch.randint(len(models), (1,))] - # else: - # logproba = quiz_machine.new(quiz_machine.size(0), len(models)) - # for q,l in zip(quizzes.split(args.batch_size), logits.split(args.batch_size)): - # for model in models: - # l[...] 
= F.cross_entropy(model(q)) - - c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)] - - if c_quizzes.size(0) > 0: - nb_correct, seq_logproba = quiz_machine.compute_correctness( - c_quizzes, - models, - bidirectional_validation=args.bidirectional_validation, - deterministic_validation=args.deterministic_validation, - ) - - for n, l in zip(nb_correct, seq_logproba): - s = " ".join([str(x.item()) for x in l]) - logp_file.write(f"{n} {s}\n") + c_quizzes = quiz_machine.generate_quizzes( + nb_to_create, + model_for_generation=model_for_generation, + temperature=args.generation_temperature, + ) - if args.dirty_debug: - nb_correct = torch.randint( - len(models) + 1, nb_correct.size(), device=c_quizzes.device - ) + c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)] - quizzes_and_nb_correct_records.append((c_quizzes, nb_correct)) + if c_quizzes.size(0) > 0: + token_logproba = quiz_machine.solution_token_logprobas(models, c_quizzes) + recorded_quizzes_logprobas.append((c_quizzes, token_logproba)) - nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0) - nv = " ".join([str(x.item()) for x in nv]) + ( + validated_quizzes, + validated_logprobas, + ) = extract_valid_quizzes_and_logprobas(recorded_quizzes_logprobas) - nb_validated = valid_c_quizzes( - quizzes_and_nb_correct_records, standard_validity - ).size(0) + if validated_quizzes is not None: + nb_validated = validated_quizzes.size(0) - log_string( - f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}" - ) + log_string( + f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}" + ) # store the new c_quizzes which have been validated - new_c_quizzes = valid_c_quizzes(quizzes_and_nb_correct_records, standard_validity) - - quiz_machine.reverse_random_half_in_place(new_c_quizzes) - - quiz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True) - quiz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False) - - # save a bunch of images to investigate what quizzes with a - # certain nb of correct predictions look like + quiz_machine.reverse_random_half_in_place(validated_quizzes) + quiz_machine.store_c_quizzes(validated_quizzes[:nb_for_train], for_train=True) + quiz_machine.store_c_quizzes( + validated_quizzes[nb_for_train:nb_to_create], for_train=False + ) - for n in range(len(models) + 1): - s = ( - "_validated" - if n >= args.min_to_validate and n <= args.max_to_validate - else "" - ) + ###################################################################### + # save images with their logprobas - q = valid_c_quizzes( - quizzes_and_nb_correct_records, criteria=lambda nb_correct: nb_correct == n - )[:72] + vq = validated_quizzes[:72] + vl = validated_logprobas[:72] - quiz_machine.reverse_random_half_in_place(q) + if vq.size(0) > 0: + prefix = f"culture_c_quiz_{n_epoch:04d}" + filename = os.path.join(args.result_dir, prefix + "_logp.pth") + torch.save(vl, filename) + # with open(file_name, "w") as logp_file: + # for l in vl: + # s = " ".join([str(x.item()) for x in l]) + # logp_file.write(s + "\n") - if q.size(0) > 0: - quiz_machine.save_quizzes( - args.result_dir, f"culture_c_quiz_{n_epoch:04d}_N{n}{s}", q - ) + quiz_machine.save_quiz_illustrations(args.result_dir, prefix, vq) ###################################################################### @@ -557,23 +482,47 @@ for k in range(args.nb_gpts): nb_blocks=args.nb_blocks, causal=True, dropout=args.dropout, - ).to(device) + ).to(main_device) 
model.main_test_accuracy = 0.0 model.id = k - model.TRAINING_LOCK = threading.Lock() - model.train_w_quizzes = quiz_machine.generate_token_sequences( - args.nb_train_samples - ).to(device) + model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples) quiz_machine.reverse_random_half_in_place(model.train_w_quizzes) - model.test_w_quizzes = quiz_machine.generate_token_sequences( - args.nb_test_samples - ).to(device) + model.test_w_quizzes = quiz_machine.generate_token_sequences(args.nb_test_samples) quiz_machine.reverse_random_half_in_place(model.test_w_quizzes) models.append(model) +###################################################################### + +if args.resume: + try: + for model in models: + filename = f"gpt_{model.id:03d}.pth" + + try: + d = torch.load(os.path.join(args.result_dir, filename)) + model.load_state_dict(d[0]) + model.main_test_accuracy = d[1] + log_string(f"successfully loaded {filename}") + except FileNotFoundError: + log_string(f"cannot find {filename}") + pass + + try: + filename = "c_quizzes.pth" + quiz_machine.load_c_quizzes(os.path.join(args.result_dir, filename)) + log_string(f"successfully loaded {filename}") + except FileNotFoundError: + log_string(f"cannot find {filename}") + pass + + except: + log_string(f"error when loading {filename}.") + exit(1) + +###################################################################### nb_parameters = sum(p.numel() for p in models[0].parameters()) log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)") @@ -628,11 +577,14 @@ if args.max_percents_of_test_in_train >= 0: ###################################################################### -nb_new_c_quizzes_for_train = args.nb_train_samples // 50 -nb_new_c_quizzes_for_test = args.nb_test_samples // 50 +if args.nb_new_c_quizzes_for_train is None: + args.nb_new_c_quizzes_for_train = args.nb_train_samples // 50 + +if args.nb_new_c_quizzes_for_test is None: + args.nb_new_c_quizzes_for_test = args.nb_test_samples // 50 log_string( - f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}" + f"nb_new_c_quizzes_for_train {args.nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {args.nb_new_c_quizzes_for_test}" ) ###################################################################### @@ -640,8 +592,9 @@ log_string( if args.dirty_debug: args.accuracy_to_make_c_quizzes = 0.0 args.nb_gpts = 2 - nb_new_c_quizzes_for_train = 100 - nb_new_c_quizzes_for_test = 10 + args.nb_new_c_quizzes_for_train = 100 + args.nb_new_c_quizzes_for_test = 10 + ###################################################################### @@ -651,50 +604,63 @@ for n_epoch in range(args.nb_epochs): cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models]) log_string(f"current_test_accuracies {cta}") + ################################################## + # If all the models are good enough, generate new quizzes and + # re-compute the test errors + + if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes: + create_c_quizzes( + models, + quiz_machine, + nb_for_train=args.nb_new_c_quizzes_for_train, + nb_for_test=args.nb_new_c_quizzes_for_test, + ) + + filename = "c_quizzes.pth" + quiz_machine.save_c_quizzes(os.path.join(args.result_dir, filename)) + log_string(f"wrote {filename}") + + # Force one epoch of training + for model in models: + model.main_test_accuracy = 0.0 + ################################################## # Select, improve, and eval the worst model ranked_models = 
sorted(models, key=lambda m: float(m.main_test_accuracy)) - weakest_models = ranked_models[: args.nb_gpus] + weakest_models = ranked_models[: len(gpus)] - for gpu_id, model in enumerate(weakest_models): - model.TRAINING_LOCK.acquire() + threads = [] - log_string( - f"training model {model.id} main_test_accuracy {model.main_test_accuracy}" + for gpu, model in zip(gpus, weakest_models): + log_string(f"training model {model.id}") + + t = threading.Thread( + target=one_epoch, daemon=True, args=(model, quiz_machine, gpu) ) - threading.Thread( - target=one_epoch, daemon=True, args=(model, quiz_machine, f"cuda:{gpu_id}") - ).start() + threads.append(t) - for model in weakest_models: - model.TRAINING_LOCK.acquire() - model.TRAINING_LOCK.release() + t.start() - ################################################## - # Replace a fraction of the w_quizzes with fresh ones + for t in threads: + t.join() - log_string( - f"cache_w_quizzes contains {quiz_machine.problem.nb_cached_quizzes()} quizzes" - ) + # Save the models to disk + + for model in weakest_models: + filename = f"gpt_{model.id:03d}.pth" + torch.save( + (model.state_dict(), model.main_test_accuracy), + os.path.join(args.result_dir, filename), + ) + log_string(f"wrote {filename}") - # Renew entirely the train set + # Renew the training samples for model in weakest_models: quiz_machine.renew_w_quizzes(model, args.nb_train_samples) - ################################################## - # If all the models are good enough, generate new quizzes and - # re-compute the test errors - - if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes: - create_c_quizzes( - models, - quiz_machine, - nb_for_train=nb_new_c_quizzes_for_train, - nb_for_test=nb_new_c_quizzes_for_test, - ) ######################################################################
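
Below is a minimal, self-contained sketch (not part of the diff above, values hypothetical) of the selection rule that the new compute_valid_quizzes / extract_valid_quizzes_and_logprobas path implements: a candidate c-quiz is kept when exactly one model assigns its solution a sequence probability below --proba_not_understands while the second-weakest model is still above --proba_understands. In main.py the N x M sequence log-probabilities come from summing the N x M x T per-token log-probabilities returned by quiz_machine.solution_token_logprobas; here they are written down directly.

import math
import torch

proba_understands, proba_not_understands = 0.9, 0.5  # defaults of the new CLI flags

# Hypothetical per-model solving probabilities for 4 candidate quizzes and 3 models.
# In main.py the equivalent log-probabilities are token_logprobas.sum(dim=-1).
seq_logprobas = torch.tensor(
    [
        [0.95, 0.97, 0.30],  # exactly one model fails, the others are confident -> kept
        [0.95, 0.97, 0.96],  # every model succeeds -> rejected (quiz too easy)
        [0.20, 0.10, 0.96],  # two models fail -> rejected (quiz may be degenerate)
        [0.92, 0.40, 0.60],  # no clear separation between models -> rejected
    ]
).log()

l = seq_logprobas.sort(dim=-1).values  # ascending across models
keep = (l[:, 0] < math.log(proba_not_understands)) & (
    l[:, 1] > math.log(proba_understands)
)

print(keep)  # tensor([ True, False, False, False])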