# Written by Francois Fleuret <francois@fleuret.org>
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime
import torch, torchvision
from torch import nn
"--task",
type=str,
default="twotargets",
- help="byheart, learnop, guessop, degradation, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
+ help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, escape",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
+##############################
+# filetask
+
+parser.add_argument("--filetask_train_file", type=str, default=None)
+
+parser.add_argument("--filetask_test_file", type=str, default=None)
+
##############################
# rpl options
parser.add_argument("--grid_size", type=int, default=6)
+parser.add_argument("--grid_fraction_play", type=float, default=0)
+
##############################
# picoclvr options
parser.add_argument("--expr_input_file", type=str, default=None)
##############################
-# Misc
+# Mixing
+
+parser.add_argument("--mixing_hard", action="store_true", default=False)
-parser.add_argument("--degradation_hard", action="store_true", default=False)
+parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
+
+##############################
+# escape options
+
+parser.add_argument("--escape_height", type=int, default=5)
+
+parser.add_argument("--escape_width", type=int, default=7)
+
+parser.add_argument("--escape_T", type=int, default=25)
+
+parser.add_argument("--escape_nb_walls", type=int, default=5)
######################################################################
######################################################################
default_task_args = {
+ "file": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
"addition": {
"model": "352M",
"batch_size": 25,
"nb_train_samples": 50000,
"nb_test_samples": 10000,
},
- "degradation": {
+ "memory": {
+ "model": "37M",
+ "batch_size": 100,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 1000,
+ },
+ "mixing": {
"model": "37M",
"batch_size": 25,
"nb_train_samples": 250000,
"nb_train_samples": 60000,
"nb_test_samples": 10000,
},
+ "escape": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 10000,
+ },
}
if args.task in default_task_args:
"nb_heads": 2,
"nb_blocks": 2,
},
+ "4M": {
+ "dim_model": 256,
+ "dim_keys": 32,
+ "dim_hidden": 1024,
+ "nb_heads": 4,
+ "nb_blocks": 6,
+ },
"37M": {
"dim_model": 512,
"dim_keys": 64,
sys.stdout.flush()
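+# Log the full command line so the run can be reproduced from the log file alone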
+log_string(f"argv {' '.join(sys.argv)}")
+
for n in vars(args):
log_string(f"args.{n} {getattr(args, n)}")
######################################################################
-if args.task == "byheart":
+if args.task == "file":
+ assert (
+ args.filetask_train_file is not None and args.filetask_test_file is not None
+ ), "You have to specify the task train and test files"
+ task = tasks.TaskFromFile(
+ args.filetask_train_file,
+ args.filetask_test_file,
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ shuffle=True,
+ device=device,
+ )
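+ # Assumption: 0 means no overlap between the provided test and train files is tolerated by the later check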
+ args.max_percents_of_test_in_train = 0
+
+elif args.task == "byheart":
task = tasks.SandBox(
problem=problems.ProblemByHeart(),
nb_train_samples=args.nb_train_samples,
device=device,
)
-elif args.task == "degradation":
+elif args.task == "memory":
task = tasks.SandBox(
- problem=problems.ProblemDegradation(hard=args.degradation_hard),
+ problem=problems.ProblemMemory(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "mixing":
+ task = tasks.SandBox(
+ problem=problems.ProblemMixing(
+ hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
+ ),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
nb_test_samples=args.nb_test_samples,
batch_size=args.batch_size,
size=args.grid_size,
+ fraction_play=args.grid_fraction_play,
logger=log_string,
device=device,
)
device=device,
)
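+# Escape is a small procedurally generated world; height/width set the grid size,
+# nb_walls the number of obstacles, and T presumably the number of steps per episode.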
+elif args.task == "escape":
+ task = tasks.Escape(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ height=args.escape_height,
+ width=args.escape_width,
+ T=args.escape_T,
+ nb_walls=args.escape_nb_walls,
+ logger=log_string,
+ device=device,
+ )
+
else:
raise ValueError(f"Unknown task {args.task}")
deterministic_synthesis=args.deterministic_synthesis,
)
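+# Time at which the previous result was produced; used to extrapolate when the next one should arrive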
+time_pred_result = None
+
for n_epoch in range(nb_epochs_finished, nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
deterministic_synthesis=args.deterministic_synthesis,
)
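+ # Rough ETA for the next result, assuming the upcoming epoch takes about as long as the previous one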
+ time_current_result = datetime.datetime.now()
+ if time_pred_result is not None:
+ log_string(
+ f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+ )
+ time_pred_result = time_current_result
+
checkpoint = {
"nb_epochs_finished": n_epoch + 1,
"model_state": model.state_dict(),