From 2ec38da6ecc9a0f24c20b6b1e04e17d0eb23bd7e Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Fleuret?=
Date: Wed, 26 Jul 2023 14:38:47 -1000
Subject: [PATCH] Update.

---
 do_all.sh |  2 +-
 main.py   | 17 +++++++++++++++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/do_all.sh b/do_all.sh
index 76f1982..c5d16fc 100755
--- a/do_all.sh
+++ b/do_all.sh
@@ -13,7 +13,7 @@
 # set -o pipefail
 
 #prefix="--nb_train_samples=1000 --nb_test_samples=100 --batch_size=25 --nb_epochs=2 --max_percents_of_test_in_train=-1 --model=17K"
-prefix="--nb_epochs=2"
+prefix="--nb_epochs=25"
 
 for task in byheart learnop guessop twotargets addition picoclvr maze snake stack expr rpl
 do
diff --git a/main.py b/main.py
index 7b104bf..dbdf89d 100755
--- a/main.py
+++ b/main.py
@@ -62,7 +62,7 @@ parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30:
 
 ########################################
 
-parser.add_argument("--model", type=str, default="37M")
+parser.add_argument("--model", type=str, default=None)
 
 parser.add_argument("--dim_model", type=int, default=None)
 
@@ -172,78 +172,91 @@ if args.result_dir is None:
 
 default_task_args = {
     "byheart": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "learnop": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "guessop": {
+        "model": "122M",
         "nb_epochs": 5,
         "batch_size": 25,
-        "nb_train_samples": 50000,
+        "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "twotargets": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "addition": {
+        "model": "122M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "picoclvr": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 25,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "mnist": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 10,
         "nb_train_samples": 60000,
         "nb_test_samples": 10000,
     },
     "maze": {
+        "model": "37M",
         "nb_epochs": 25,
         "batch_size": 5,
         "nb_train_samples": 250000,
         "nb_test_samples": 10000,
     },
     "snake": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 50000,
         "nb_test_samples": 10000,
     },
     "stack": {
+        "model": "37M",
         "nb_epochs": 5,
         "batch_size": 25,
         "nb_train_samples": 100000,
         "nb_test_samples": 1000,
     },
     "expr": {
+        "model": "37M",
         "nb_epochs": 40,
         "batch_size": 25,
         "nb_train_samples": 1000000,
         "nb_test_samples": 10000,
     },
     "rpl": {
+        "model": "37M",
         "nb_epochs": 40,
         "batch_size": 25,
         "nb_train_samples": 100000,
         "nb_test_samples": 10000,
     },
     "world": {
+        "model": "37M",
         "nb_epochs": 10,
         "batch_size": 25,
         "nb_train_samples": 25000,
-- 
2.39.5
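
Note (not part of the patch): with --model now defaulting to None, the per-task "model" entries added to default_task_args can act as fallbacks that an explicit command-line value still overrides. The merge logic itself is outside these hunks, so the following is only a minimal, self-contained Python sketch of that pattern; the argument names mirror the patch, but the resolution loop is an assumption, not the repository's actual code.

    import argparse

    # Illustrative slice of per-task defaults, mirroring the values added by the patch.
    default_task_args = {
        "byheart": {"model": "37M", "nb_epochs": 5},
        "guessop": {"model": "122M", "nb_epochs": 5},
    }

    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="byheart")
    parser.add_argument("--model", type=str, default=None)
    parser.add_argument("--nb_epochs", type=int, default=None)
    args = parser.parse_args()

    # Any option left at None falls back to the default registered for the
    # selected task; a value given on the command line is kept as-is.
    for name, value in default_task_args.get(args.task, {}).items():
        if getattr(args, name) is None:
            setattr(args, name, value)

    print(args.task, args.model, args.nb_epochs)  # e.g. byheart 37M 5

Run without arguments this prints the byheart defaults; "--task guessop" would instead pick up the 122M model, while "--model 17K" would keep the user's choice for either task.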