X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=main.py;h=43241dd79a96da7d7a1f902f8be95541d0ab56c7;hb=cc241681730e50ad149a68c612e3a06f2d4a71be;hp=d63398c1246158e4e8b7519366a7a4bd95ccd08d;hpb=d283cd3d46a6323fec4c6a0970ac71e553e4a486;p=culture.git

diff --git a/main.py b/main.py
index d63398c..43241dd 100755
--- a/main.py
+++ b/main.py
@@ -13,7 +13,7 @@ from torch.nn import functional as F
 
 import ffutils
 import mygpt
-import sky, wireworld, quizz_machine
+import sky, reasoning, quizz_machine
 
 # world quizzes vs. culture quizzes
 
@@ -79,20 +79,26 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
-parser.add_argument("--both_directions", action="store_true", default=False)
-
 parser.add_argument("--problem", type=str, default="sky")
 
 parser.add_argument("--nb_gpts", type=int, default=5)
 
-parser.add_argument("--min_to_validate", type=int, default=4)
+parser.add_argument("--min_to_validate", type=int, default=None)
 
-parser.add_argument("--max_to_validate", type=int, default=4)
+parser.add_argument("--max_to_validate", type=int, default=None)
 
 parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
 
+parser.add_argument("--generation_temperature", type=float, default=2.0)
+
+parser.add_argument("--deterministic_validation", action="store_true", default=False)
+
+parser.add_argument("--bidirectional_validation", action="store_true", default=False)
+
 parser.add_argument("--dirty_debug", action="store_true", default=False)
 
+######################################################################
+
 parser.add_argument("--sky_height", type=int, default=6)
 
 parser.add_argument("--sky_width", type=int, default=8)
@@ -107,6 +113,12 @@ parser.add_argument("--sky_speed", type=int, default=3)
 
 args = parser.parse_args()
 
+if args.min_to_validate is None:
+    args.min_to_validate = args.nb_gpts - 1
+
+if args.max_to_validate is None:
+    args.max_to_validate = args.nb_gpts - 1
+
 if args.result_dir is None:
     args.result_dir = f"results_culture"
 
@@ -237,8 +249,10 @@ if args.problem == "sky":
         nb_iterations=args.sky_nb_iterations,
         speed=args.sky_speed,
     )
-elif args.problem == "wireworld":
-    problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
+    back_accuracy = False
+elif args.problem == "reasoning":
+    problem = reasoning.Reasoning(device=device)
+    back_accuracy = True
 else:
     raise ValueError
 
@@ -246,6 +260,7 @@ quizz_machine = quizz_machine.QuizzMachine(
     problem=problem,
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
+    back_accuracy=back_accuracy,
     batch_size=args.physical_batch_size,
     result_dir=args.result_dir,
     logger=log_string,
@@ -362,6 +377,10 @@ def run_tests(model, quizz_machine, deterministic_synthesis):
 
             nb_test_samples += input.size(0)
 
+        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+
+        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
+
         model.main_test_accuracy = quizz_machine.produce_results(
             n_epoch=n_epoch,
             model=model,
@@ -369,10 +388,6 @@ def run_tests(model, quizz_machine, deterministic_synthesis):
             deterministic_synthesis=deterministic_synthesis,
         )
 
-        test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
-
-        log_string(f"test_perplexity {n_epoch} {test_perplexity}")
-
 
 ######################################################################
 
@@ -401,33 +416,45 @@ def create_c_quizzes(
             nb_correct >= args.min_to_validate, nb_correct <= args.max_to_validate
         )
 
-    while valid_c_quizzes(recorded, standard_validity).size(0) < nb_to_create:
-        model_for_generation = models[torch.randint(len(models), (1,))]
+    file_name = os.path.join(args.result_dir, f"culture_c_quiz_{n_epoch:04d}_logp.dat")
+    with open(file_name, "w") as logp_file:
+        while valid_c_quizzes(recorded, standard_validity).size(0) < nb_to_create:
+            # Select a model at random to generate the new quizzes
 
-        c_quizzes, ave_seq_logproba = quizz_machine.generate_quizzes(
-            nb_to_create,
-            model_for_generation=model_for_generation,
-        )
+            model_for_generation = models[torch.randint(len(models), (1,))]
 
-        nb_correct = quizz_machine.compute_correctness(
-            c_quizzes, models, both_directions=args.both_directions
-        )
+            c_quizzes = quizz_machine.generate_quizzes(
+                nb_to_create,
+                model_for_generation=model_for_generation,
+                temperature=args.generation_temperature,
+            )
 
-        if args.dirty_debug:
-            nb_correct = torch.randint(
-                len(models) + 1, nb_correct.size(), device=c_quizzes.device
+            nb_correct, seq_logproba = quizz_machine.compute_correctness(
+                c_quizzes,
+                models,
+                bidirectional_validation=args.bidirectional_validation,
+                deterministic_validation=args.deterministic_validation,
             )
 
-        recorded.append((c_quizzes, nb_correct))
+            for n, l in zip(nb_correct, seq_logproba):
+                s = " ".join([str(x.item()) for x in l])
+                logp_file.write(f"{n} {s}\n")
 
-        nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
-        nv = " ".join([str(x.item()) for x in nv])
+            if args.dirty_debug:
+                nb_correct = torch.randint(
+                    len(models) + 1, nb_correct.size(), device=c_quizzes.device
+                )
 
-        nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)
+            recorded.append((c_quizzes, nb_correct))
 
-        log_string(
-            f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
-        )
+            nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
+            nv = " ".join([str(x.item()) for x in nv])
+
+            nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)
+
+            log_string(
+                f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
+            )
 
     # store the new c_quizzes which have been validated
 
@@ -484,6 +511,9 @@ log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 for n_epoch in range(args.nb_epochs):
     log_string(f"--- epoch {n_epoch} ----------------------------------------")
 
+    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
+    log_string(f"current_test_accuracies {cta}")
+
     # Select, improve, and eval the worst model
 
     weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))
@@ -504,9 +534,6 @@ for n_epoch in range(args.nb_epochs):
         f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
-    cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
-    log_string(f"current_test_accuracies {cta}")
-
     # Replace a fraction of the w_quizzes with fresh ones
 
     quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)