# Written by Francois Fleuret <francois@fleuret.org>
-import math, sys, argparse, time, tqdm, os, datetime
+import math, sys, argparse, time, tqdm, os, datetime, warnings
import torch, torchvision
from torch import nn
from torch.nn import functional as F

import mygpt, tasks, problems
"--task",
type=str,
default="twotargets",
- help="byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp",
+ help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
)
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
########################################
-parser.add_argument("--nb_epochs", type=int, default=25)
+parser.add_argument("--nb_epochs", type=int, default=50)
parser.add_argument("--batch_size", type=int, default=None)
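+# Number of samples per forward/backward pass; when smaller than batch_size,
+# gradients are accumulated over several passes before each optimizer step.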
+parser.add_argument("--physical_batch_size", type=int, default=None)
+
parser.add_argument("--nb_train_samples", type=int, default=None)
parser.add_argument("--nb_test_samples", type=int, default=None)
parser.add_argument("--no_checkpoint", action="store_true", default=False)
-parser.add_argument("--overwrite_results", action="store_true", default=False)
+parser.add_argument("--resume", action="store_true", default=False)
parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
+##############################
+# filetask
+
+parser.add_argument("--filetask_train_file", type=str, default=None)
+
+parser.add_argument("--filetask_test_file", type=str, default=None)
+
##############################
# grid options
parser.add_argument("--grid_size", type=int, default=6)
+parser.add_argument("--grid_fraction_play", type=float, default=0)
+
##############################
# snake options
parser.add_argument("--snake_length", type=int, default=200)
+##############################
+# ByHeart options
+
+parser.add_argument("--byheart_separation", type=int, default=1)
+
##############################
# Stack options
parser.add_argument("--stack_nb_digits", type=int, default=3)
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
+parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
##############################
# Mixing options
parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
+##############################
+# greed options
+
+parser.add_argument("--greed_height", type=int, default=5)
+
+parser.add_argument("--greed_width", type=int, default=7)
+
+parser.add_argument("--greed_T", type=int, default=25)
+
+parser.add_argument("--greed_nb_walls", type=int, default=5)
+
+parser.add_argument("--greed_nb_coins", type=int, default=2)
+
######################################################################
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################
default_task_args = {
+ "file": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+ },
"addition": {
"model": "352M",
"batch_size": 25,
"nb_train_samples": 60000,
"nb_test_samples": 10000,
},
+ "greed": {
+ "model": "37M",
+ "batch_size": 25,
+ "nb_train_samples": 25000,
+ "nb_test_samples": 10000,
+ },
}
if args.task in default_task_args:
    # Fill in any argument left at None with the task's defaults.
    for k, v in default_task_args[args.task].items():
        if getattr(args, k) is None:
            setattr(args, k, v)
try:
os.mkdir(args.result_dir)
except FileExistsError:
- if not args.overwrite_results:
+ if not args.resume:
print(f"result directory {args.result_dir} already exists")
exit(1)
######################################################################
-if args.task == "byheart":
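+# Gradient accumulation: each forward/backward pass processes
+# physical_batch_size samples and the optimizer steps once every batch_size
+# samples, so batch_size must be a multiple of physical_batch_size.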
+if args.physical_batch_size is None:
+ args.physical_batch_size = args.batch_size
+else:
+ assert args.batch_size % args.physical_batch_size == 0
+
+assert args.nb_train_samples % args.batch_size == 0
+assert args.nb_test_samples % args.batch_size == 0
+
+if args.task == "file":
+ assert (
+ args.filetask_train_file is not None and args.filetask_test_file is not None
+ ), "You have to specify the task train and test files"
+ task = tasks.TaskFromFile(
+ args.filetask_train_file,
+ args.filetask_test_file,
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ shuffle=True,
+ device=device,
+ )
+ args.max_percents_of_test_in_train = 0
+
+elif args.task == "byheart":
task = tasks.SandBox(
- problem=problems.ProblemByHeart(),
+ problem=problems.ProblemByHeart(separation=args.byheart_separation),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
problem=problems.ProblemLearnOperator(),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
problem=problems.ProblemGuessOperator(),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
problem=problems.ProblemTwoTargets(),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
problem=problems.ProblemMemory(),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
problem=problems.ProblemAddition(),
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
device=device,
)
task = tasks.PicoCLVR(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.picoclvr_height,
width=args.picoclvr_width,
nb_colors=args.picoclvr_nb_colors,
task = tasks.MNIST(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
device=device,
)
task = tasks.Maze(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.maze_height,
width=args.maze_width,
nb_walls=args.maze_nb_walls,
- device=device,
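+        # Keep maze batches on the CPU; the train and test loops move each
+        # batch to the compute device themselves (input.to(device)).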
+ device="cpu",
)
elif args.task == "snake":
task = tasks.Snake(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
height=args.snake_height,
width=args.snake_width,
nb_colors=args.snake_nb_colors,
task = tasks.Stack(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
logger=log_string,
nb_steps=args.stack_nb_steps,
nb_stacks=args.stack_nb_stacks,
sequence_length=args.expr_sequence_length,
operand_max=args.expr_operand_max,
result_max=args.expr_result_max,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
device=device,
)
task = tasks.RPL(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
nb_starting_values=args.rpl_nb_starting_values,
max_input=args.rpl_max_input,
prog_len=args.rpl_prog_len,
task = tasks.Grid(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
size=args.grid_size,
+ fraction_play=args.grid_fraction_play,
logger=log_string,
device=device,
)
task = tasks.QMLP(
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
+ batch_size=args.physical_batch_size,
result_dir=args.result_dir,
logger=log_string,
device=device,
)
+elif args.task == "greed":
+ task = tasks.Greed(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ height=args.greed_height,
+ width=args.greed_width,
+ T=args.greed_T,
+ nb_walls=args.greed_nb_walls,
+ nb_coins=args.greed_nb_coins,
+ logger=log_string,
+ device=device,
+ )
+
else:
raise ValueError(f"Unknown task {args.task}")
######################################################################
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
-
# Compute the entropy of the training tokens
token_count = 0
-for input in task.batches(split="train"):
+for input in task.batches(split="train", desc="train-entropy"):
token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
token_probas = token_count / token_count.sum()
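# -sum p log p over the empirical token distribution: the best per-token
# loss achievable by a predictor that ignores context, a useful baseline.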
entropy = -torch.xlogy(token_probas, token_probas).sum()
yield s
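
    # Measure train/test contamination: count the test sequences that also
    # appear verbatim in the train set.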
nb_test, nb_in_train = 0, 0
- for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
+ for test_subset in subsets_as_tuples(
+ task.batches(split="test", desc="test-check"), 25000
+ ):
in_train = set()
- for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
+ for train_subset in subsets_as_tuples(
+ task.batches(split="train", desc="train-check"), 25000
+ ):
in_train.update(test_subset.intersection(train_subset))
nb_in_train += len(in_train)
nb_test += len(test_subset)
##############################
-nb_samples_seen = 0
-
-if nb_epochs_finished >= nb_epochs:
+if nb_epochs_finished >= args.nb_epochs:
task.produce_results(
n_epoch=nb_epochs_finished,
model=model,
time_pred_result = None
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+for n_epoch in range(nb_epochs_finished, args.nb_epochs):
learning_rate = learning_rate_schedule[n_epoch]
log_string(f"learning_rate {learning_rate}")
for input in task.batches(split="train"):
input = input.to(device)
+
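+        # First physical batch of a new effective batch: reset gradients.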
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.zero_grad()
+
output = model(mygpt.BracketedSequence(input)).x
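        # F.cross_entropy expects logits shaped (N, classes, T), hence the transpose.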
loss = F.cross_entropy(output.transpose(1, 2), input)
acc_train_loss += loss.item() * input.size(0)
+
nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
- optimizer.zero_grad()
loss.backward()
- optimizer.step()
+
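+        # Effective batch complete: apply the accumulated gradients.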
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.step()
with torch.autograd.no_grad():
model.eval()
nb_test_samples, acc_test_loss = 0, 0.0
+ nb_samples_accumulated = 0
for input in task.batches(split="test"):
input = input.to(device)
- output = model(mygpt.BracketedSequence(input)).x
+ bs = model(mygpt.BracketedSequence(input))
+ output = bs.x
+
loss = F.cross_entropy(output.transpose(1, 2), input)
+
acc_test_loss += loss.item() * input.size(0)
+
nb_test_samples += input.size(0)
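
        # Clamp the exponent at 100 so the reported perplexity stays finite.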
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))