X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;ds=sidebyside;f=main.py;h=ee4e9e5b3aea5fb5f6dcd426bbecbd7687bfbb35;hb=b3392c295bdb75140916e2db70efc6fa50962f63;hp=e0588224f07ecafde8105b00c2b004e0b195e249;hpb=5c751aa1bbfbcf42654f4626f81905acfa946c15;p=culture.git diff --git a/main.py b/main.py index e058822..ee4e9e5 100755 --- a/main.py +++ b/main.py @@ -12,7 +12,7 @@ from torch import nn from torch.nn import functional as F import ffutils -import mygpt, tasks, problems +import mygpt, tasks ###################################################################### @@ -29,8 +29,6 @@ parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) -parser.add_argument("--task", type=str, default="world", help="world") - parser.add_argument("--log_filename", type=str, default="train.log", help=" ") parser.add_argument("--result_dir", type=str, default=None) @@ -82,23 +80,20 @@ parser.add_argument("--check", action="store_true", default=False) args = parser.parse_args() if args.result_dir is None: - args.result_dir = f"results_{args.task}" + args.result_dir = f"results_culture" ###################################################################### -default_task_args = { - "world": { - "model": "37M", - "batch_size": 100, - "nb_train_samples": 250000, - "nb_test_samples": 10000, - }, +default_args = { + "model": "37M", + "batch_size": 100, + "nb_train_samples": 250000, + "nb_test_samples": 10000, } -if args.task in default_task_args: - for k, v in default_task_args[args.task].items(): - if getattr(args, k) is None: - setattr(args, k, v) +for k, v in default_args.items(): + if getattr(args, k) is None: + setattr(args, k, v) ###################################################################### @@ -188,8 +183,8 @@ for n in vars(args): ###################################################################### if args.check: - args.nb_train_samples = 500 - args.nb_test_samples = 100 + args.nb_train_samples = 25000 + args.nb_test_samples = 1000 if args.physical_batch_size is None: args.physical_batch_size = args.batch_size @@ -199,229 +194,14 @@ else: assert args.nb_train_samples % args.batch_size == 0 assert args.nb_test_samples % args.batch_size == 0 -if args.task == "file": - assert ( - args.filetask_train_file is not None and args.filetask_test_file is not None - ), "You have to specify the task train and test files" - task = tasks.TaskFromFile( - args.filetask_train_file, - args.filetask_test_file, - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - shuffle=True, - device=device, - ) - args.max_percents_of_test_in_train = 0 - -elif args.task == "byheart": - task = tasks.SandBox( - problem=problems.ProblemByHeart(separation=args.byheart_separation), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - args.max_percents_of_test_in_train = -1 - -elif args.task == "world": - task = tasks.World( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - result_dir=args.result_dir, - logger=log_string, - device=device, - ) - args.max_percents_of_test_in_train = -1 - -elif args.task == "learnop": - task = tasks.SandBox( - problem=problems.ProblemLearnOperator(), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - 
device=device, - ) - - -elif args.task == "guessop": - task = tasks.SandBox( - problem=problems.ProblemGuessOperator(), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - - -elif args.task == "twotargets": - task = tasks.SandBox( - problem=problems.ProblemTwoTargets(), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - -elif args.task == "memory": - task = tasks.SandBox( - problem=problems.ProblemMemory(), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - -elif args.task == "mixing": - task = tasks.SandBox( - problem=problems.ProblemMixing( - hard=args.mixing_hard, random_start=not args.mixing_deterministic_start - ), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - -elif args.task == "addition": - task = tasks.SandBox( - problem=problems.ProblemAddition(), - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - device=device, - ) - -elif args.task == "picoclvr": - task = tasks.PicoCLVR( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - height=args.picoclvr_height, - width=args.picoclvr_width, - nb_colors=args.picoclvr_nb_colors, - logger=log_string, - device=device, - pruner_train=picoclvr_pruner_train, - pruner_eval=picoclvr_pruner_eval, - ) - -elif args.task == "mnist": - task = tasks.MNIST( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - device=device, - ) - -elif args.task == "maze": - task = tasks.Maze( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - height=args.maze_height, - width=args.maze_width, - nb_walls=args.maze_nb_walls, - device="cpu", - ) - -elif args.task == "snake": - task = tasks.Snake( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - height=args.snake_height, - width=args.snake_width, - nb_colors=args.snake_nb_colors, - length=args.snake_length, - prompt_length=args.snake_length // 2, - device=device, - ) - -elif args.task == "stack": - task = tasks.Stack( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - logger=log_string, - nb_steps=args.stack_nb_steps, - nb_stacks=args.stack_nb_stacks, - nb_digits=args.stack_nb_digits, - fraction_values_for_train=args.stack_fraction_values_for_train, - device=device, - ) - -elif args.task == "expr": - task = tasks.Expr( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - nb_variables=args.expr_nb_variables, - sequence_length=args.expr_sequence_length, - operand_max=args.expr_operand_max, - result_max=args.expr_result_max, - batch_size=args.physical_batch_size, - device=device, - ) - -elif args.task == "rpl": - task = tasks.RPL( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - nb_starting_values=args.rpl_nb_starting_values, - 
max_input=args.rpl_max_input, - prog_len=args.rpl_prog_len, - nb_runs=args.rpl_nb_runs, - no_prog=args.rpl_no_prog, - logger=log_string, - device=device, - ) - -elif args.task == "grid": - task = tasks.Grid( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - size=args.grid_size, - fraction_play=args.grid_fraction_play, - logger=log_string, - device=device, - ) - -elif args.task == "qmlp": - task = tasks.QMLP( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - result_dir=args.result_dir, - logger=log_string, - device=device, - ) - -elif args.task == "greed": - task = tasks.Greed( - nb_train_samples=args.nb_train_samples, - nb_test_samples=args.nb_test_samples, - batch_size=args.physical_batch_size, - height=args.greed_height, - width=args.greed_width, - T=args.greed_T, - nb_walls=args.greed_nb_walls, - nb_coins=args.greed_nb_coins, - logger=log_string, - device=device, - ) - -else: - raise ValueError(f"Unknown task {args.task}") +task = tasks.World( + nb_train_samples=args.nb_train_samples, + nb_test_samples=args.nb_test_samples, + batch_size=args.physical_batch_size, + result_dir=args.result_dir, + logger=log_string, + device=device, +) ###################################################################### @@ -555,21 +335,31 @@ def create_quizzes( task, nb_for_train=1000, nb_for_test=100, + desired_average_logits=None, ): kept = [] + sum_logits = 0 + while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test: - new_quizzes, nb_correct = task.create_new_quizzes( + nb_to_generate = 4 * (nb_for_train + nb_for_test) + + new_quizzes, nb_correct, _sum_logits = task.create_new_quizzes( n_epoch=n_epoch, result_dir=args.result_dir, logger=log_string, - nb=4 * (nb_for_train + nb_for_test), + nb=nb_to_generate, model=model, other_models=other_models, + desired_average_logits=desired_average_logits, ) + sum_logits += _sum_logits + to_keep = new_quizzes[nb_correct == len(other_models) - 1] - log_string(f"keep {to_keep.size(0)} quizzes") + log_string( + f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes ({to_keep.size(0)*100/new_quizzes.size(0):.02f}%)" + ) kept.append(to_keep) new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test] @@ -578,12 +368,14 @@ def create_quizzes( task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False) task.save_image( - new_quizzes[:96], + new_quizzes[:72], args.result_dir, f"world_quiz_{n_epoch:04d}_{model.id:02d}.png", log_string, ) + return sum_logits / new_quizzes.size(0) + ###################################################################### @@ -618,10 +410,18 @@ nb_new_quizzes_for_test = 100 if args.check: accuracy_to_make_quizzes = 0.0 - nb_new_quizzes_for_train = 10 + nb_new_quizzes_for_train = 100 nb_new_quizzes_for_test = 10 +desired_average_logits = None + for n_epoch in range(args.nb_epochs): + log_string(f"--- epoch {n_epoch} ----------------------------------------") + + a = [(model.id, float(model.main_test_accuracy)) for model in models] + a.sort(key=lambda p: p[0]) + log_string(f"current accuracies {a}") + # select the model with lowest accuracy models.sort(key=lambda model: model.main_test_accuracy) model = models[0] @@ -633,6 +433,8 @@ for n_epoch in range(args.nb_epochs): # improve it one_epoch(model, task) + task.renew_samples(args.nb_train_samples // args.nb_gpts) + log_string( f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}" ) 
@@ -640,17 +442,34 @@ for n_epoch in range(args.nb_epochs): # test it run_tests(model, task, deterministic_synthesis=False) - if model.main_test_accuracy >= accuracy_to_make_quizzes: + log_string( + f"test_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}" + ) + + if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_quizzes: other_models = models.copy() other_models.remove(model) - create_quizzes( + average_logits = create_quizzes( model, other_models, task, nb_for_train=nb_new_quizzes_for_train, nb_for_test=nb_new_quizzes_for_test, + desired_average_logits=desired_average_logits, ) + # We keep the first average logits as a reference + if desired_average_logits is None: + desired_average_logits = average_logits + else: + log_string( + f"desired_average_logits {desired_average_logits} average_logits {average_logits}" + ) + + # We update everyone + for model in models: + run_tests(model, task, deterministic_synthesis=False) + ######################################################################
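
Note on the new quiz-generation control flow: the hunks above make create_quizzes() accumulate the logits of the generated quizzes and return their average, which the training loop stores once as desired_average_logits and feeds back into every later call. Below is a minimal, self-contained sketch of that loop only. The real quiz generator lives in tasks.World and is not part of this diff, so the create_new_quizzes stub here, its signature, and the way desired_average_logits would be consumed are assumptions for illustration; only the surrounding filtering and feedback logic mirrors main.py.

# Minimal sketch of the quiz-selection loop this commit adds to main.py.
# "create_new_quizzes" below is a stand-in stub: the real implementation
# is in tasks.World and is not shown in this diff, so its signature and
# internals are illustrative assumptions.

import torch


def create_new_quizzes(nb, nb_other_models, desired_average_logits=None):
    # Stand-in generator: random "quizzes", a per-quiz count of how many of
    # the other models solved it, and a dummy sum of logits.
    new_quizzes = torch.randn(nb, 16)
    nb_correct = torch.randint(nb_other_models + 1, (nb,))
    sum_logits = new_quizzes.abs().sum().item()
    return new_quizzes, nb_correct, sum_logits


def create_quizzes(nb_other_models, nb_for_train=1000, nb_for_test=100,
                   desired_average_logits=None):
    kept, sum_logits = [], 0

    # Generate 4x more candidates than needed until enough survive the filter.
    while sum(x.size(0) for x in kept) < nb_for_train + nb_for_test:
        nb_to_generate = 4 * (nb_for_train + nb_for_test)
        new_quizzes, nb_correct, s = create_new_quizzes(
            nb_to_generate, nb_other_models, desired_average_logits
        )
        sum_logits += s

        # Keep a quiz when all but one of the other models solved it,
        # as in the `nb_correct == len(other_models) - 1` test above.
        to_keep = new_quizzes[nb_correct == nb_other_models - 1]
        print(f"keep {to_keep.size(0)}/{new_quizzes.size(0)} quizzes")
        kept.append(to_keep)

    new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]

    # Report the average logits so the caller can use it as a reference.
    return sum_logits / new_quizzes.size(0)


# Caller side, as in the new training loop: the first measured average
# becomes the reference passed to every later generation round, and later
# averages are logged against it.
desired_average_logits = None
average_logits = create_quizzes(nb_other_models=4,
                                desired_average_logits=desired_average_logits)
if desired_average_logits is None:
    desired_average_logits = average_logits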