from torch import nn
from torch.nn import functional as F
-import mygpt, tasks, tensorstack
+import ffutils
+import mygpt, tasks
######################################################################
parser.add_argument(
"--task",
type=str,
- default="picoclvr",
- help="picoclvr, mnist, maze, snake, stack, expr",
+ default="sandbox",
+ help="sandbox, picoclvr, mnist, maze, snake, stack, expr, world",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
parser.add_argument("--snake_length", type=int, default=200)
##############################
-# Snake options
+# Stack options
parser.add_argument("--stack_nb_steps", type=int, default=100)
parser.add_argument("--expr_nb_variables", type=int, default=5)
-parser.add_argument("--expr_sequence_length", type=int, default=30)
+parser.add_argument("--expr_sequence_length", type=int, default=40)
+
+parser.add_argument("--expr_operand_max", type=int, default=9)
+
+parser.add_argument("--expr_result_max", type=int, default=99)
+
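+# If set for the expr task, no training occurs: results are produced for the
+# sequences read from this file and the program exits (see the handling below)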
+parser.add_argument("--expr_input_file", type=str, default=None)
+
+##############################
+# World options
+
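+# Number of epochs used to train the VQ auto-encoder that encodes world
+# frames into token sequences (role assumed from the name)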
+parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
######################################################################
######################################################################
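# Per-task default hyper-parameters; options left unset on the command line
# are filled in from this table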
default_args = {
+ "sandbox": {
+ "nb_epochs": 10,
+ "batch_size": 25,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 10000,
+ },
"picoclvr": {
"nb_epochs": 25,
"batch_size": 25,
"nb_test_samples": 1000,
},
"expr": {
- "nb_epochs": 50,
+ "nb_epochs": 40,
"batch_size": 25,
- "nb_train_samples": 250000,
+ "nb_train_samples": 1000000,
"nb_test_samples": 10000,
},
+ "world": {
+ "nb_epochs": 10,
+ "batch_size": 25,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 1000,
+ },
}
if args.task in default_args:
    for k, v in default_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
######################################################################
-if args.task == "picoclvr":
+if args.task == "sandbox":
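+    # SandBox adapts a toy problem (here ProblemByheart) to the common task
+    # interface shared by the other tasks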
+ task = tasks.SandBox(
+ tasks.ProblemByheart(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "picoclvr":
task = tasks.PicoCLVR(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
        batch_size=args.batch_size,
        device=device,
    )

elif args.task == "expr":
    task = tasks.Expr(
        nb_train_samples=args.nb_train_samples,
        nb_test_samples=args.nb_test_samples,
nb_variables=args.expr_nb_variables,
sequence_length=args.expr_sequence_length,
+ operand_max=args.expr_operand_max,
+ result_max=args.expr_result_max,
batch_size=args.batch_size,
device=device,
)
+elif args.task == "world":
+ task = tasks.World(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.batch_size,
+ vqae_nb_epochs=args.world_vqae_nb_epochs,
+ logger=log_string,
+ device=device,
+ )
+
else:
raise ValueError(f"Unknown task {args.task}")
######################################################################
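+# For the expr task, an explicit input file bypasses training entirely:
+# produce the results for its sequences and exit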
+if args.task == "expr" and args.expr_input_file is not None:
+ task.produce_results(
+ nb_epochs_finished,
+ model,
+ args.result_dir,
+ log_string,
+ args.deterministic_synthesis,
+ args.expr_input_file,
+ )
+
+ exit(0)
+
+######################################################################
+
nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
# Check how much of the test set also appears verbatim in the train set
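# Train sequences are indexed by a cheap checksum (the sum of their token
# ids) and candidate matches are confirmed element-wise below. Distinct train
# sequences that share a checksum overwrite one another in the dict, so the
# overlap can only be under-estimated, never over-estimated.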
train_examples = {}
+
for input in task.batches(split="train"):
- assert input.dim()==2 and input.dtype==torch.int64
+ assert input.dim() == 2 and input.dtype == torch.int64
for x in input:
- train_examples[x.sum().item()]=x
+ train_examples[x.sum().item()] = x
+nb_total, nb_collisions = 0, 0
for input in task.batches(split="test"):
- assert input.dim()==2 and input.dtype==torch.int64
+ assert input.dim() == 2 and input.dtype == torch.int64
for x in input:
+ nb_total += 1
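        # retrieve the train sequence with the same checksum, if any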
y = train_examples.get(x.sum().item())
if y is not None:
- assert x.size() != y.size() or (x-y).abs().sum() > 0
+ if x.size() == y.size() and (x - y).abs().sum() == 0:
+ nb_collisions += 1
del train_examples
+log_string(
+ f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set"
+)
+
##############################
if args.learning_rate_schedule == "cos":