diff --git a/main.py b/main.py
index 9a3d346..d961301 100755
--- a/main.py
+++ b/main.py
@@ -32,8 +32,8 @@ parser = argparse.ArgumentParser(
 parser.add_argument(
     "--task",
     type=str,
-    default="sandbox",
-    help="byheart, learnop, guessop, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl",
+    default="twotargets",
+    help="byheart, learnop, guessop, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
 )
 
 parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
@@ -46,7 +46,7 @@ parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
 
 ########################################
 
-parser.add_argument("--nb_epochs", type=int, default=None)
+parser.add_argument("--nb_epochs", type=int, default=25)
 
 parser.add_argument("--batch_size", type=int, default=None)
 
@@ -62,7 +62,7 @@ parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 
 
 ########################################
 
-parser.add_argument("--model", type=str, default="37M")
+parser.add_argument("--model", type=str, default=None)
 
 parser.add_argument("--dim_model", type=int, default=None)
 
@@ -89,16 +89,21 @@ parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
 ##############################
 # rpl options
 
-parser.add_argument("--rpl_nb_starting_values", type=int, default=5)
+parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
 
 parser.add_argument("--rpl_max_input", type=int, default=9)
 
-parser.add_argument("--rpl_prog_len", type=int, default=10)
+parser.add_argument("--rpl_prog_len", type=int, default=8)
 
-parser.add_argument("--rpl_nb_runs", type=int, default=8)
+parser.add_argument("--rpl_nb_runs", type=int, default=5)
 
 parser.add_argument("--rpl_no_prog", action="store_true", default=False)
 
+##############################
+# grid options
+
+parser.add_argument("--grid_size", type=int, default=6)
+
 ##############################
 # picoclvr options
 
@@ -113,18 +118,18 @@ parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
 ##############################
 # Maze options
 
-parser.add_argument("--maze_height", type=int, default=23)
+parser.add_argument("--maze_height", type=int, default=13)
 
-parser.add_argument("--maze_width", type=int, default=39)
+parser.add_argument("--maze_width", type=int, default=21)
 
-parser.add_argument("--maze_nb_walls", type=int, default=45)
+parser.add_argument("--maze_nb_walls", type=int, default=15)
 
 ##############################
 # Snake options
 
-parser.add_argument("--snake_height", type=int, default=6)
+parser.add_argument("--snake_height", type=int, default=9)
 
-parser.add_argument("--snake_width", type=int, default=8)
+parser.add_argument("--snake_width", type=int, default=12)
 
 parser.add_argument("--snake_nb_colors", type=int, default=5)
 
@@ -154,11 +159,6 @@ parser.add_argument("--expr_result_max", type=int, default=99)
 
 parser.add_argument("--expr_input_file", type=str, default=None)
 
-##############################
-# World options
-
-parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
-
 ######################################################################
 
 args = parser.parse_args()
@@ -171,60 +171,90 @@ if args.result_dir is None:
 ######################################################################
 
 default_task_args = {
-    "sandbox": {
-        "nb_epochs": 50,
+    "addition": {
+        "model": "352M",
         "batch_size": 25,
-        "nb_train_samples": 100000,
+        "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
-    "picoclvr": {
-        "nb_epochs": 25,
+    "byheart": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 50000,
+        "nb_test_samples": 10000,
+    },
+    "expr": {
+        "model": "352M",
+        "batch_size": 25,
+        "nb_train_samples": 2500000,
+        "nb_test_samples": 10000,
+    },
+    "grid": {
+        "model": "37M",
         "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
-    "mnist": {
-        "nb_epochs": 25,
+    "qmlp": {
+        "model": "37M",
         "batch_size": 10,
-        "nb_train_samples": 250000,
+        "nb_train_samples": 100000,
+        "nb_test_samples": 1000,
+    },
+    "guessop": {
+        "model": "352M",
+        "batch_size": 25,
+        "nb_train_samples": 1000000,
+        "nb_test_samples": 10000,
+    },
+    "learnop": {
+        "model": "37M",
+        "batch_size": 25,
+        "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "maze": {
-        "nb_epochs": 25,
+        "model": "37M",
         "batch_size": 5,
+        "nb_train_samples": 100000,
+        "nb_test_samples": 10000,
+    },
+    "picoclvr": {
+        "model": "37M",
+        "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
+    "rpl": {
+        "model": "352M",
+        "batch_size": 5,
+        "nb_train_samples": 2500000,
+        "nb_test_samples": 10000,
+    },
     "snake": {
-        "nb_epochs": 5,
+        "model": "37M",
         "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "stack": {
-        "nb_epochs": 5,
+        "model": "37M",
         "batch_size": 25,
         "nb_train_samples": 100000,
         "nb_test_samples": 1000,
     },
-    "expr": {
-        "nb_epochs": 40,
+    "twotargets": {
+        "model": "37M",
         "batch_size": 25,
-        "nb_train_samples": 1000000,
+        "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
-    "rpl": {
-        "nb_epochs": 40,
-        "batch_size": 25,
-        "nb_train_samples": 100000,
+    "mnist": {
+        "model": "37M",
+        "batch_size": 10,
+        "nb_train_samples": 60000,
         "nb_test_samples": 10000,
     },
-    "world": {
-        "nb_epochs": 10,
-        "batch_size": 25,
-        "nb_train_samples": 25000,
-        "nb_test_samples": 1000,
-    },
 }
 
 if args.task in default_task_args:
@@ -339,7 +369,7 @@ if args.task == "byheart":
         logger=log_string,
         device=device,
     )
-
+    args.max_percents_of_test_in_train = -1
 
 elif args.task == "learnop":
     task = tasks.SandBox(
@@ -468,12 +498,22 @@ elif args.task == "rpl":
         device=device,
     )
 
-elif args.task == "world":
-    task = tasks.World(
+elif args.task == "grid":
+    task = tasks.Grid(
         nb_train_samples=args.nb_train_samples,
         nb_test_samples=args.nb_test_samples,
         batch_size=args.batch_size,
-        vqae_nb_epochs=args.world_vqae_nb_epochs,
+        size=args.grid_size,
+        logger=log_string,
+        device=device,
+    )
+
+elif args.task == "qmlp":
+    task = tasks.QMLP(
+        nb_train_samples=args.nb_train_samples,
+        nb_test_samples=args.nb_test_samples,
+        batch_size=args.batch_size,
+        result_dir=args.result_dir,
         logger=log_string,
         device=device,
     )
@@ -563,33 +603,33 @@ train_set_perplexity = math.exp(entropy)
 ######################################################################
 # A bit of paranoia never hurts
 
+if args.max_percents_of_test_in_train >= 0:
+
+    def subsets_as_tuples(batches, cs):
+        s = set()
+        for batch in batches:
+            for x in batch:
+                s.add(tuple([v.item() for v in x]))
+                if len(s) == cs:
+                    yield s
+                    s = set()
+        yield s
+
+    nb_test, nb_in_train = 0, 0
+    for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
+        in_train = set()
+        for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
+            in_train.update(test_subset.intersection(train_subset))
+        nb_in_train += len(in_train)
+        nb_test += len(test_subset)
+
+    log_string(
+        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
+    )
 
-def subsets_as_tuples(batches, cs):
-    s = set()
-    for batch in batches:
-        for x in batch:
-            s.add(tuple([v.item() for v in x]))
-            if len(s) == cs:
-                yield s
-                s = set()
-    yield s
-
-
-nb_test, nb_in_train = 0, 0
-for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
-    in_train = set()
-    for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
-        in_train.update(test_subset.intersection(train_subset))
-    nb_in_train += len(in_train)
-    nb_test += len(test_subset)
-
-log_string(
-    f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
-)
-
-assert (
-    nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
-), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
+    assert (
+        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
 
 ##############################
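
Note on the final hunk: the train/test contamination check is now gated behind --max_percents_of_test_in_train, and the "byheart" task opts out by setting it to -1. Below is a minimal standalone sketch of the same set-intersection technique, runnable on its own; train_batches and test_batches are hypothetical stand-ins for task.batches(split=...), which yields batches of fixed-length integer tensors, and the chunking in subsets_as_tuples keeps the working set bounded on large corpora.

import torch

def subsets_as_tuples(batches, chunk_size):
    # Flatten batches of token sequences into sets of hashable tuples,
    # flushing every chunk_size sequences so memory stays bounded.
    s = set()
    for batch in batches:
        for x in batch:
            s.add(tuple(v.item() for v in x))
            if len(s) == chunk_size:
                yield s
                s = set()
    yield s

# Hypothetical toy data: batches of fixed-length integer sequences,
# with one test sequence deliberately copied from the train set.
train_batches = [torch.randint(10, (25, 8)) for _ in range(4)]
test_batches = [torch.randint(10, (25, 8)) for _ in range(2)]
test_batches[0][0] = train_batches[0][0]

nb_test, nb_in_train = 0, 0
for test_subset in subsets_as_tuples(test_batches, 25000):
    in_train = set()
    for train_subset in subsets_as_tuples(train_batches, 25000):
        in_train.update(test_subset.intersection(train_subset))
    nb_in_train += len(in_train)
    nb_test += len(test_subset)

print(
    f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
)

Hashing whole sequences as tuples makes the overlap test exact rather than approximate, at the cost of holding up to chunk_size sequences in memory at a time.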