diff --git a/main.py b/main.py
index dace5f2..4a1207d 100755
--- a/main.py
+++ b/main.py
@@ -82,12 +82,6 @@ parser.add_argument("--dropout", type=float, default=0.1)
 
 parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
 
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
-
-parser.add_argument("--resume", action="store_true", default=False)
-
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
-
 ##############################
 # filetask
 
@@ -207,6 +201,12 @@ if args.result_dir is None:
 ######################################################################
 
 default_task_args = {
+    "world": {
+        "model": "37M",
+        "batch_size": 100,
+        "nb_train_samples": 250000,
+        "nb_test_samples": 10000,
+    },
     "file": {
         "model": "37M",
         "batch_size": 25,
@@ -463,6 +463,17 @@ elif args.task == "byheart":
     )
     args.max_percents_of_test_in_train = -1
 
+elif args.task == "world":
+    task = tasks.World(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.physical_batch_size,
+        result_dir=args.result_dir,
+        logger=log_string,
+        device=device,
+    )
+    args.max_percents_of_test_in_train = -1
+
 elif args.task == "learnop":
     task = tasks.SandBox(
         problem=problems.ProblemLearnOperator(),
@@ -660,64 +671,28 @@ log_string(f"vocabulary_size {vocabulary_size}")
 
 ##############################
 
-model = mygpt.MyGPT(
-    vocabulary_size=vocabulary_size,
-    dim_model=args.dim_model,
-    dim_keys=args.dim_keys,
-    dim_hidden=args.dim_hidden,
-    nb_heads=args.nb_heads,
-    nb_blocks=args.nb_blocks,
-    causal=True,
-    dropout=args.dropout,
-)
+models = []
+
+for k in range(2):
+    models.append(
+        mygpt.MyGPT(
+            vocabulary_size=vocabulary_size,
+            dim_model=args.dim_model,
+            dim_keys=args.dim_keys,
+            dim_hidden=args.dim_hidden,
+            nb_heads=args.nb_heads,
+            nb_blocks=args.nb_blocks,
+            causal=True,
+            dropout=args.dropout,
+        ).to(device)
+    )
 
-model.to(device)
 
-nb_parameters = sum(p.numel() for p in model.parameters())
+nb_parameters = sum(p.numel() for p in models[0].parameters())
 log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
 
 ######################################################################
 
-nb_epochs_finished = 0
-
-if args.no_checkpoint:
-    log_string(f"not trying to load checkpoint.")
-
-else:
-    try:
-        checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-        checkpoint = torch.load(checkpoint_name)
-        nb_epochs_finished = checkpoint["nb_epochs_finished"]
-        model.load_state_dict(checkpoint["model_state"])
-        torch.set_rng_state(checkpoint["rng_state"])
-        if torch.cuda.is_available():
-            torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
-
-        log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
-
-    except FileNotFoundError:
-        log_string("starting from scratch.")
-
-    except:
-        log_string("error when loading the checkpoint.")
-        exit(1)
-
-######################################################################
-
-if args.task == "expr" and args.expr_input_file is not None:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-        input_file=args.expr_input_file,
-    )
-
-    exit(0)
-
-######################################################################
-
 # Compute the entropy of the training tokens
 
 token_count = 0
@@ -786,22 +761,12 @@ else:
 
 log_string(f"learning_rate_schedule {learning_rate_schedule}")
 
-##############################
-
-if nb_epochs_finished >= args.nb_epochs:
-    task.produce_results(
-        n_epoch=nb_epochs_finished,
-        model=model,
-        result_dir=args.result_dir,
-        logger=log_string,
-        deterministic_synthesis=args.deterministic_synthesis,
-    )
-
 time_pred_result = None
 
-for n_epoch in range(nb_epochs_finished, args.nb_epochs):
-    learning_rate = learning_rate_schedule[n_epoch]
+######################################################################
+
 
+def one_epoch(model, task, learning_rate):
     log_string(f"learning_rate {learning_rate}")
 
     if args.optim == "sgd":
@@ -815,7 +780,7 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
 
     model.train()
 
-    nb_train_samples, acc_train_loss_ar, acc_train_loss_ae = 0, 0.0, 0.0
+    nb_train_samples, acc_train_loss = 0, 0.0
 
     for input in task.batches(split="train"):
         input = input.to(device)
@@ -823,95 +788,129 @@ for n_epoch in range(nb_epochs_finished, args.nb_epochs):
         if nb_train_samples % args.batch_size == 0:
             optimizer.zero_grad()
 
-        if args.autoencoder_weight > 0:
-            bs_ar, bs_ae = model(mygpt.BracketedSequence(input), autoencoder=True)
-            output_ar, output_ae = bs_ar.x, bs_ae.x
-            loss_ar = F.cross_entropy(output_ar.transpose(1, 2), input)
-            loss_ae = F.cross_entropy(output_ae[:, 1:].transpose(1, 2), input[:, :-1])
-        else:
-            output = model(mygpt.BracketedSequence(input)).x
-            loss_ar = F.cross_entropy(output.transpose(1, 2), input)
-            loss_ae = loss_ar.new_full((1,), 0.0)
-
-        acc_train_loss_ar += loss_ar.item() * input.size(0)
-        acc_train_loss_ae += loss_ae.item() * input.size(0)
+        output = model(mygpt.BracketedSequence(input)).x
+        loss = F.cross_entropy(output.transpose(1, 2), input)
+        acc_train_loss += loss.item() * input.size(0)
 
         nb_train_samples += input.size(0)
 
-        (loss_ar + args.autoencoder_weight * loss_ae).backward()
+        loss.backward()
 
         if nb_train_samples % args.batch_size == 0:
             optimizer.step()
 
+    train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+    log_string(f"train_perplexity {n_epoch} {train_perplexity}")
+
+
+######################################################################
+
+
+def run_tests(model, task, deterministic_synthesis):
     with torch.autograd.no_grad():
         model.eval()
 
-        nb_test_samples, acc_test_loss_ar, acc_test_loss_ae = 0, 0.0, 0.0
+        nb_test_samples, acc_test_loss = 0, 0.0
         nb_samples_accumulated = 0
 
        for input in task.batches(split="test"):
             input = input.to(device)
 
-            if args.autoencoder_weight > 0:
-                bs_ar, bs_ae = model(mygpt.BracketedSequence(input), autoencoder=True)
-                output_ar, output_ae = bs_ar.x, bs_ae.x
-                loss_ae = F.cross_entropy(
-                    output_ae[:, 1:].transpose(1, 2), input[:, :-1]
-                )
-                acc_test_loss_ae += loss_ae.item() * input.size(0)
-            else:
-                bs_ar = model(mygpt.BracketedSequence(input))
-                output_ar = bs_ar.x
+            bs = model(mygpt.BracketedSequence(input))
+            output = bs.x
 
-            loss_ar = F.cross_entropy(output_ar.transpose(1, 2), input)
+            loss = F.cross_entropy(output.transpose(1, 2), input)
 
-            acc_test_loss_ar += loss_ar.item() * input.size(0)
+            acc_test_loss += loss.item() * input.size(0)
 
             nb_test_samples += input.size(0)
 
-    train_ar_perplexity = math.exp(min(100, acc_train_loss_ar / nb_train_samples))
-    test_ar_perplexity = math.exp(min(100, acc_test_loss_ar / nb_test_samples))
-
-    log_string(
-        f"perplexity_ar {n_epoch} train_set {train_set_perplexity} train_prediction {train_ar_perplexity} test_prediction {test_ar_perplexity}"
-    )
+        main_test_accuracy = task.produce_results(
+            n_epoch=n_epoch,
+            model=model,
+            result_dir=args.result_dir,
+            logger=log_string,
+            deterministic_synthesis=deterministic_synthesis,
+        )
 
-    if args.autoencoder_weight > 0:
-        train_ae_perplexity = math.exp(
-            min(100, acc_train_loss_ae / nb_train_samples)
-        )
-        test_ae_perplexity = math.exp(min(100, acc_test_loss_ae / nb_test_samples))
+    test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
 
-        log_string(
-            f"perplexity_ae {n_epoch} train_set {train_set_perplexity} train_prediction {train_ae_perplexity} test_prediction {test_ae_perplexity}"
-        )
+    log_string(f"test_perplexity {n_epoch} {test_perplexity}")
+
+    return main_test_accuracy
+
+
+######################################################################
 
-        task.produce_results(
+
+def create_quizzes(
+    model,
+    other_models,
+    task,
+    nb_for_train=1000,
+    nb_for_test=100,
+    nb_runs=10,
+    nb_min_correct=9,
+    nb_max_correct=9,
+):
+    kept = []
+
+    while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
+        new_quizzes, nb_correct = task.create_new_quizzes(
             n_epoch=n_epoch,
-            model=model,
             result_dir=args.result_dir,
             logger=log_string,
-            deterministic_synthesis=args.deterministic_synthesis,
+            nb=4 * (nb_for_train + nb_for_test),
+            model=model,
+            other_models=other_models,
+            nb_runs=nb_runs,
         )
 
-        time_current_result = datetime.datetime.now()
-        if time_pred_result is not None:
-            log_string(
-                f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+        to_keep = new_quizzes[
+            torch.logical_and(
+                nb_correct >= nb_min_correct, nb_correct <= nb_max_correct
             )
-        time_pred_result = time_current_result
+        ]
+        log_string(f"keep {to_keep.size(0)} quizzes")
+        kept.append(to_keep)
 
-    checkpoint = {
-        "nb_epochs_finished": n_epoch + 1,
-        "model_state": model.state_dict(),
-        "rng_state": torch.get_rng_state(),
-    }
+    new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
 
-    if torch.cuda.is_available():
-        checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+    task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
+    task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)
+
+    task.save_image(
+        new_quizzes[:96],
+        args.result_dir,
+        f"world_new_{n_epoch:04d}.png",
+        log_string,
+    )
+
+
+######################################################################
+
+accuracy_to_make_quizzes = 0.95
+
+for n_epoch in range(args.nb_epochs):
+    learning_rate = learning_rate_schedule[n_epoch]
 
-    checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
-    torch.save(checkpoint, checkpoint_name)
-    log_string(f"saved checkpoint {checkpoint_name}")
+    for m in models:
+        one_epoch(m, task, learning_rate)
+        test_accuracy = run_tests(m, task, deterministic_synthesis=False)
+
+        if test_accuracy >= accuracy_to_make_quizzes:
+            other_models = models.copy()
+            other_models.remove(m)
+            create_quizzes(m, other_models, task)
+
+    # --------------------------------------------
+
+    time_current_result = datetime.datetime.now()
+    if time_pred_result is not None:
+        log_string(
+            f"next_result {time_current_result + (time_current_result - time_pred_result)}"
        )
+    time_pred_result = time_current_result
 
 ######################################################################
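
The quiz-selection loop in create_quizzes() above is easiest to see in isolation. Below is a minimal standalone sketch of that logic, with synthetic tensors standing in for task.create_new_quizzes(); the quiz shape and the uniform distribution of nb_correct are illustrative assumptions, not part of the patch.

import torch

# Defaults as in the patch's create_quizzes() signature.
nb_for_train, nb_for_test = 1000, 100
nb_runs, nb_min_correct, nb_max_correct = 10, 9, 9

kept = []

while sum(x.size(0) for x in kept) < nb_for_train + nb_for_test:
    # Stand-in for task.create_new_quizzes(): a batch of candidate quizzes
    # and, for each one, how many of the nb_runs evaluations the other
    # models solved (drawn uniformly here; the real counts come from the models).
    new_quizzes = torch.randint(0, 100, (4 * (nb_for_train + nb_for_test), 16))
    nb_correct = torch.randint(0, nb_runs + 1, (new_quizzes.size(0),))

    # Keep only the quizzes whose solve count falls in the target band.
    to_keep = new_quizzes[
        torch.logical_and(nb_correct >= nb_min_correct, nb_correct <= nb_max_correct)
    ]
    kept.append(to_keep)

new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
train_part = new_quizzes[:nb_for_train]  # would go to store_new_quizzes(for_train=True)
test_part = new_quizzes[nb_for_train:]   # would go to store_new_quizzes(for_train=False)

With nb_min_correct = nb_max_correct = 9 and nb_runs = 10, a quiz is kept only when the other models solve it in exactly 9 runs out of 10: often enough to be well-posed, rarely enough to still be informative.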
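
The training loop in one_epoch() above also relies on a gradient-accumulation pattern: task.batches() yields small physical batches, while optimizer.zero_grad() and optimizer.step() fire only on logical-batch boundaries, every args.batch_size samples, so gradients accumulate across the physical batches in between. A self-contained sketch of that pattern, with a dummy linear model and random data standing in for mygpt.MyGPT and task.batches(); the sizes are illustrative assumptions (the "world" defaults set batch_size to 100).

import torch

batch_size, physical_batch_size = 100, 25  # illustrative values

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

nb_train_samples = 0

for _ in range(8):  # stand-in for task.batches(split="train")
    input = torch.randn(physical_batch_size, 8)

    # A new accumulation window opens on logical-batch boundaries.
    if nb_train_samples % batch_size == 0:
        optimizer.zero_grad()

    loss = (model(input) - input).pow(2).mean()
    nb_train_samples += input.size(0)
    loss.backward()  # gradients accumulate across physical batches

    # Step once a full logical batch of samples has been processed.
    if nb_train_samples % batch_size == 0:
        optimizer.step()

This reproduces the update schedule of the patched code: with these sizes, one optimizer step per four physical batches.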