# Written by Francois Fleuret <francois@fleuret.org>
-# torch.backends.cuda.matmul.allow_tf32
-# torch.autocast(torch.bfloat16)
-
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime, warnings
import torch, torchvision
from torch import nn
from torch.nn import functional as F
-import mygpt, tasks
+import ffutils
+import mygpt, tasks, problems
######################################################################
parser.add_argument(
"--task",
type=str,
- default="picoclvr",
- help="picoclvr, mnist, maze, snake, stack, expr",
+ default="twotargets",
+ help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
parser.add_argument("--seed", type=int, default=0)
-parser.add_argument("--nb_epochs", type=int, default=None)
+parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
+
+########################################
+
+parser.add_argument("--nb_epochs", type=int, default=50)
parser.add_argument("--batch_size", type=int, default=None)
+parser.add_argument("--physical_batch_size", type=int, default=None)
+
parser.add_argument("--nb_train_samples", type=int, default=None)
parser.add_argument("--nb_test_samples", type=int, default=None)
parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
-parser.add_argument("--dim_model", type=int, default=512)
+########################################
-parser.add_argument("--dim_keys", type=int, default=64)
+parser.add_argument("--model", type=str, default=None)
-parser.add_argument("--dim_hidden", type=int, default=2048)
+parser.add_argument("--dim_model", type=int, default=None)
-parser.add_argument("--nb_heads", type=int, default=8)
+parser.add_argument("--dim_keys", type=int, default=None)
-parser.add_argument("--nb_blocks", type=int, default=12)
+parser.add_argument("--dim_hidden", type=int, default=None)
+
+parser.add_argument("--nb_heads", type=int, default=None)
+
+parser.add_argument("--nb_blocks", type=int, default=None)
parser.add_argument("--dropout", type=float, default=0.1)
+########################################
+
parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
parser.add_argument("--no_checkpoint", action="store_true", default=False)
-parser.add_argument("--overwrite_results", action="store_true", default=False)
+parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
+##############################
+# filetask
+
+parser.add_argument("--filetask_train_file", type=str, default=None)
+
+parser.add_argument("--filetask_test_file", type=str, default=None)
+
+##############################
+# rpl options
+
+parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
+
+parser.add_argument("--rpl_max_input", type=int, default=9)
+
+parser.add_argument("--rpl_prog_len", type=int, default=8)
+
+parser.add_argument("--rpl_nb_runs", type=int, default=5)
+
+parser.add_argument("--rpl_no_prog", action="store_true", default=False)
+
+##############################
+# grid options
+
+parser.add_argument("--grid_size", type=int, default=6)
+
+parser.add_argument("--grid_fraction_play", type=float, default=0)
+
##############################
# picoclvr options
##############################
# Maze options
-parser.add_argument("--maze_height", type=int, default=23)
+parser.add_argument("--maze_height", type=int, default=13)
-parser.add_argument("--maze_width", type=int, default=39)
+parser.add_argument("--maze_width", type=int, default=21)
-parser.add_argument("--maze_nb_walls", type=int, default=45)
+parser.add_argument("--maze_nb_walls", type=int, default=15)
##############################
# Snake options
-parser.add_argument("--snake_height", type=int, default=6)
+parser.add_argument("--snake_height", type=int, default=9)
-parser.add_argument("--snake_width", type=int, default=8)
+parser.add_argument("--snake_width", type=int, default=12)
parser.add_argument("--snake_nb_colors", type=int, default=5)
parser.add_argument("--snake_length", type=int, default=200)
##############################
-# Snake options
+# ByHeart options
+
+parser.add_argument("--byheart_separation", type=int, default=1)
+
+##############################
+# Stack options
parser.add_argument("--stack_nb_steps", type=int, default=100)
parser.add_argument("--stack_nb_digits", type=int, default=3)
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
+parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
##############################
# Expr options
parser.add_argument("--expr_nb_variables", type=int, default=5)
-parser.add_argument("--expr_sequence_length", type=int, default=30)
+parser.add_argument("--expr_sequence_length", type=int, default=40)
+
+parser.add_argument("--expr_operand_max", type=int, default=9)
+
+parser.add_argument("--expr_result_max", type=int, default=99)
+
+parser.add_argument("--expr_input_file", type=str, default=None)
+
+##############################
+# Mixing
+
+parser.add_argument("--mixing_hard", action="store_true", default=False)
+
+parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
+
+##############################
+# greed options
+
+parser.add_argument("--greed_height", type=int, default=5)
+
+parser.add_argument("--greed_width", type=int, default=7)
+
+parser.add_argument("--greed_T", type=int, default=25)
+
+parser.add_argument("--greed_nb_walls", type=int, default=5)
+
+parser.add_argument("--greed_nb_coins", type=int, default=2)
######################################################################
######################################################################
-default_args = {
- "picoclvr": {
- "nb_epochs": 25,
+default_task_args = {
+ "file": {
+ "model": "37M",
"batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
- "mnist": {
- "nb_epochs": 25,
- "batch_size": 10,
+ "addition": {
+ "model": "352M",
+ "batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
+ "world": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 50000,
+ "nb_test_samples": 10000,
+ },
+ "byheart": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 50000,
+ "nb_test_samples": 10000,
+ },
+ "expr": {
+ "model": "352M",
+ "batch_size": 25,
+ "nb_train_samples": 2500000,
+ "nb_test_samples": 10000,
+ },
+ "grid": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
+ "qmlp": {
+ "model": "37M",
+ "batch_size": 10,
+ "nb_train_samples": 100000,
+ "nb_test_samples": 1000,
+ },
+ "guessop": {
+ "model": "352M",
+ "batch_size": 25,
+ "nb_train_samples": 1000000,
+ "nb_test_samples": 10000,
+ },
+ "learnop": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 50000,
+ "nb_test_samples": 10000,
+ },
"maze": {
- "nb_epochs": 25,
+ "model": "37M",
"batch_size": 5,
+ "nb_train_samples": 100000,
+ "nb_test_samples": 10000,
+ },
+ "picoclvr": {
+ "model": "37M",
+ "batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
+ "rpl": {
+ "model": "352M",
+ "batch_size": 5,
+ "nb_train_samples": 2500000,
+ "nb_test_samples": 10000,
+ },
"snake": {
- "nb_epochs": 5,
+ "model": "37M",
"batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
"stack": {
- "nb_epochs": 5,
+ "model": "37M",
"batch_size": 25,
"nb_train_samples": 100000,
"nb_test_samples": 1000,
},
- "expr": {
- "nb_epochs": 50,
+ "twotargets": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 50000,
+ "nb_test_samples": 10000,
+ },
+ "memory": {
+ "model": "37M",
+ "batch_size": 100,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 1000,
+ },
+ "mixing": {
+ "model": "37M",
"batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
+ "mnist": {
+ "model": "37M",
+ "batch_size": 10,
+ "nb_train_samples": 60000,
+ "nb_test_samples": 10000,
+ },
+ "greed": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 10000,
+ },
+}
+
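+# Fill in task-specific defaults; only arguments still at None are set,
+# so values given explicitly on the command line take precedence.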
+if args.task in default_task_args:
+ for k, v in default_task_args[args.task].items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
+
+######################################################################
+
+default_model_args = {
+ "17K": {
+ "dim_model": 32,
+ "dim_keys": 32,
+ "dim_hidden": 32,
+ "nb_heads": 2,
+ "nb_blocks": 2,
+ },
+ "4M": {
+ "dim_model": 256,
+ "dim_keys": 32,
+ "dim_hidden": 1024,
+ "nb_heads": 4,
+ "nb_blocks": 6,
+ },
+ "37M": {
+ "dim_model": 512,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 12,
+ },
+ "122M": {
+ "dim_model": 768,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 24,
+ },
+ "352M": {
+ "dim_model": 1024,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 48,
+ },
}
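+# Each configuration is named after the approximate parameter count of
+# the resulting transformer.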
-if args.task in default_args:
- for k, v in default_args[args.task].items():
+if args.model in default_model_args:
+ for k, v in default_model_args[args.model].items():
if getattr(args, k) is None:
setattr(args, k, v)
+else:
+ raise ValueError(f"Unknown model {args.model}")
######################################################################
try:
os.mkdir(args.result_dir)
except FileExistsError:
- if not args.overwrite_results:
+ if not args.resume:
print(f"result directory {args.result_dir} already exists")
exit(1)
sys.stdout.flush()
+log_string(f"argv {' '.join(sys.argv)}")
+
for n in vars(args):
log_string(f"args.{n} {getattr(args, n)}")
######################################################################
-if args.task == "picoclvr":
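+# The physical batch size is what actually goes through the model at
+# once; when it is smaller than --batch_size, gradients are accumulated
+# over several forward/backward passes (see the training loop below).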
+if args.physical_batch_size is None:
+ args.physical_batch_size = args.batch_size
+else:
+ assert args.batch_size % args.physical_batch_size == 0
+
+assert args.nb_train_samples % args.batch_size == 0
+assert args.nb_test_samples % args.batch_size == 0
+
+if args.task == "file":
+ assert (
+ args.filetask_train_file is not None and args.filetask_test_file is not None
+ ), "You have to specify the task train and test files"
+ task = tasks.TaskFromFile(
+ args.filetask_train_file,
+ args.filetask_test_file,
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ shuffle=True,
+ device=device,
+ )
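+    # The train and test files are user-provided, so no overlap between
+    # the two splits is tolerated.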
+ args.max_percents_of_test_in_train = 0
+
+elif args.task == "byheart":
+ task = tasks.SandBox(
+ problem=problems.ProblemByHeart(separation=args.byheart_separation),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
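+    # A negative value disables the train/test overlap check performed
+    # below.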
+ args.max_percents_of_test_in_train = -1
+
+elif args.task == "world":
+ task = tasks.World(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+ args.max_percents_of_test_in_train = -1
+
+elif args.task == "learnop":
+ task = tasks.SandBox(
+ problem=problems.ProblemLearnOperator(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+
+elif args.task == "guessop":
+ task = tasks.SandBox(
+ problem=problems.ProblemGuessOperator(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+
+elif args.task == "twotargets":
+ task = tasks.SandBox(
+ problem=problems.ProblemTwoTargets(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "memory":
+ task = tasks.SandBox(
+ problem=problems.ProblemMemory(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "mixing":
+ task = tasks.SandBox(
+ problem=problems.ProblemMixing(
+ hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
+ ),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "addition":
+ task = tasks.SandBox(
+ problem=problems.ProblemAddition(),
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "picoclvr":
task = tasks.PicoCLVR(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.picoclvr_height,
width=args.picoclvr_width,
nb_colors=args.picoclvr_nb_colors,
task = tasks.MNIST(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
device=device,
)
task = tasks.Maze(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.maze_height,
width=args.maze_width,
nb_walls=args.maze_nb_walls,
- device=device,
+ device="cpu",
)
elif args.task == "snake":
task = tasks.Snake(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.snake_height,
width=args.snake_width,
nb_colors=args.snake_nb_colors,
task = tasks.Stack(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
nb_steps=args.stack_nb_steps,
nb_stacks=args.stack_nb_stacks,
nb_test_samples=args.nb_test_samples,
nb_variables=args.expr_nb_variables,
sequence_length=args.expr_sequence_length,
- batch_size=args.batch_size,
+ operand_max=args.expr_operand_max,
+ result_max=args.expr_result_max,
+ batch_size=args.physical_batch_size,
+ device=device,
+ )
+
+elif args.task == "rpl":
+ task = tasks.RPL(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ nb_starting_values=args.rpl_nb_starting_values,
+ max_input=args.rpl_max_input,
+ prog_len=args.rpl_prog_len,
+ nb_runs=args.rpl_nb_runs,
+ no_prog=args.rpl_no_prog,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "grid":
+ task = tasks.Grid(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ size=args.grid_size,
+ fraction_play=args.grid_fraction_play,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "qmlp":
+ task = tasks.QMLP(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ result_dir=args.result_dir,
+ logger=log_string,
+ device=device,
+ )
+
+elif args.task == "greed":
+ task = tasks.Greed(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ height=args.greed_height,
+ width=args.greed_width,
+ T=args.greed_T,
+ nb_walls=args.greed_nb_walls,
+ nb_coins=args.greed_nb_coins,
+ logger=log_string,
device=device,
)
######################################################################
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
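+# For the expr task, a user-provided input file bypasses training: the
+# model is evaluated on it and the script exits.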
+if args.task == "expr" and args.expr_input_file is not None:
+ task.produce_results(
+ n_epoch=nb_epochs_finished,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
+ input_file=args.expr_input_file,
+ )
+
+ exit(0)
+
+######################################################################
# Compute the entropy of the training tokens
token_count = 0
-for input in task.batches(split="train"):
+for input in task.batches(split="train", desc="train-entropy"):
token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
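+# train_set_perplexity is the perplexity of the empirical (unigram)
+# token distribution of the training set.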
-##############################
-
+######################################################################
# A bit of paranoia never hurts
-train_examples = {}
-
-
-for input in task.batches(split="train"):
- assert input.dim() == 2 and input.dtype == torch.int64
- for x in input:
- train_examples[x.sum().item()] = x
-
-nb_total, nb_collisions = 0, 0
-for input in task.batches(split="test"):
- assert input.dim() == 2 and input.dtype == torch.int64
- for x in input:
- nb_total += 1
- y = train_examples.get(x.sum().item())
- if y is not None:
- if x.size() == y.size() and (x - y).abs().sum() == 0:
- nb_collisions += 1
-
-del train_examples
+if args.max_percents_of_test_in_train >= 0:
+
+ def subsets_as_tuples(batches, cs):
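+        # Yield the samples as sets of tuples, cs samples at a time, so
+        # the overlap check never holds more than cs samples per split
+        # in memory.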
+ s = set()
+ for batch in batches:
+ for x in batch:
+ s.add(tuple([v.item() for v in x]))
+ if len(s) == cs:
+ yield s
+ s = set()
+ yield s
+
+ nb_test, nb_in_train = 0, 0
+ for test_subset in subsets_as_tuples(
+ task.batches(split="test", desc="test-check"), 25000
+ ):
+ in_train = set()
+ for train_subset in subsets_as_tuples(
+ task.batches(split="train", desc="train-check"), 25000
+ ):
+ in_train.update(test_subset.intersection(train_subset))
+ nb_in_train += len(in_train)
+ nb_test += len(test_subset)
+
-log_string(
-    f"data_check {nb_collisions*100/nb_total:.02f}% ({nb_collisions}/{nb_total}) of test samples are in the train set"
-)
+    log_string(
+        f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
+    )
+
+    assert (
+        nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+    ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
##############################
##############################
-nb_samples_seen = 0
-
-if nb_epochs_finished >= nb_epochs:
+if nb_epochs_finished >= args.nb_epochs:
task.produce_results(
- nb_epochs_finished,
- model,
- args.result_dir,
- log_string,
- args.deterministic_synthesis,
+ n_epoch=nb_epochs_finished,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
)
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+time_pred_result = None
+
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")
for input in task.batches(split="train"):
input = input.to(device)
+
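+        # Start of a fresh logical batch: reset the accumulated gradients.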
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.zero_grad()
+
output = model(mygpt.BracketedSequence(input)).x
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
+
nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
- optimizer.zero_grad()
loss.backward()
- optimizer.step()
+
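+        # A full logical batch has been accumulated: update the weights.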
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.step()
with torch.autograd.no_grad():
model.eval()
nb_test_samples, acc_test_loss = 0, 0.0
+ nb_samples_accumulated = 0
for input in task.batches(split="test"):
input = input.to(device)
- output = model(mygpt.BracketedSequence(input)).x
+ bs = model(mygpt.BracketedSequence(input))
+ output = bs.x
+
loss = F.cross_entropy(output.transpose(1, 2), input)
+
acc_test_loss += loss.item() * input.size(0)
+
nb_test_samples += input.size(0)
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
)
task.produce_results(
- n_epoch, model, args.result_dir, log_string, args.deterministic_synthesis
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=args.deterministic_synthesis,
)
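+    # Predict when the next results will be ready, assuming the coming
+    # epoch takes as long as the one that just finished.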
+ time_current_result = datetime.datetime.now()
+ if time_pred_result is not None:
+ log_string(
+ f"next_result {time_current_result + (time_current_result - time_pred_result)}"
+ )
+ time_pred_result = time_current_result
+
checkpoint = {
"nb_epochs_finished": n_epoch + 1,
"model_state": model.state_dict(),