"--task",
type=str,
default="twotargets",
- help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, escape",
+ help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
##############################
-# escape options
+# greed options
-parser.add_argument("--escape_height", type=int, default=4)
+parser.add_argument("--greed_height", type=int, default=5)
-parser.add_argument("--escape_width", type=int, default=6)
+parser.add_argument("--greed_width", type=int, default=7)
-parser.add_argument("--escape_T", type=int, default=20)
+parser.add_argument("--greed_T", type=int, default=25)
+
+parser.add_argument("--greed_nb_walls", type=int, default=5)
+
+parser.add_argument("--greed_nb_coins", type=int, default=2)
######################################################################
"nb_train_samples": 60000,
"nb_test_samples": 10000,
},
- "escape": {
+ "greed": {
"model": "37M",
"batch_size": 25,
"nb_train_samples": 25000,
device=device,
)
-elif args.task == "escape":
- task = tasks.Escape(
+elif args.task == "greed":
+ task = tasks.Greed(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
- height=args.escape_height,
- width=args.escape_width,
- T=args.escape_T,
+ height=args.greed_height,
+ width=args.greed_width,
+ T=args.greed_T,
+ nb_walls=args.greed_nb_walls,
+ nb_coins=args.greed_nb_coins,
logger=log_string,
device=device,
)
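# --- Editorial sketch, not part of the patch ---------------------------------
# Mirroring the call above, tasks.Greed can be constructed directly for a
# quick smoke test outside the CLI. The keyword signature is taken from this
# patch; the values echo the new argparse defaults, while nb_test_samples,
# logger, and device are chosen here purely for illustration:

import tasks

task = tasks.Greed(
    nb_train_samples=25000,
    nb_test_samples=1000,
    batch_size=25,
    height=5,
    width=7,
    T=25,
    nb_walls=5,
    nb_coins=2,
    logger=print,
    device="cpu",
)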
######################################################################
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
# Compute the entropy of the training tokens
token_count = 0
nb_samples_seen = 0
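# --- Editorial sketch, not part of the patch ---------------------------------
# The counters above feed an empirical entropy estimate of the training
# tokens. A minimal reconstruction, assuming token_count ends up as a
# per-token frequency tensor (the exact bookkeeping is outside this excerpt):

import torch

def token_entropy(token_count: torch.Tensor) -> float:
    counts = token_count.float()
    p = counts / counts.sum()  # normalize counts to a distribution
    p = p[p > 0]               # drop never-seen tokens so log() stays finite
    return -(p * p.log()).sum().item()  # H = -sum_t p(t) log p(t)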
-if nb_epochs_finished >= nb_epochs:
+if nb_epochs_finished >= args.nb_epochs:
task.produce_results(
n_epoch=nb_epochs_finished,
model=model,
time_pred_result = None
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")