formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
-parser.add_argument(
- "--task",
- type=str,
- default="twotargets",
- help="file, byheart, learnop, guessop, mixing, memory, twotargets, addition, picoclvr, mnist, maze, snake, stack, expr, rpl, grid, qmlp, greed",
-)
+parser.add_argument("--task", type=str, default="world", help="world")
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
########################################
-parser.add_argument("--nb_epochs", type=int, default=50)
+parser.add_argument("--nb_epochs", type=int, default=10000)
parser.add_argument("--batch_size", type=int, default=None)
parser.add_argument("--nb_test_samples", type=int, default=None)
-parser.add_argument("--optim", type=str, default="adam")
-
parser.add_argument("--learning_rate", type=float, default=1e-4)
-parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
-
########################################
parser.add_argument("--model", type=str, default=None)
parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
-
-parser.add_argument("--resume", action="store_true", default=False)
-
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
-
-##############################
-# filetask
-
-parser.add_argument("--filetask_train_file", type=str, default=None)
-
-parser.add_argument("--filetask_test_file", type=str, default=None)
-
-##############################
-# rpl options
-
-parser.add_argument("--rpl_nb_starting_values", type=int, default=3)
-
-parser.add_argument("--rpl_max_input", type=int, default=9)
-
-parser.add_argument("--rpl_prog_len", type=int, default=8)
-
-parser.add_argument("--rpl_nb_runs", type=int, default=5)
-
-parser.add_argument("--rpl_no_prog", action="store_true", default=False)
-
-##############################
-# grid options
-
-parser.add_argument("--grid_size", type=int, default=6)
-
-parser.add_argument("--grid_fraction_play", type=float, default=0)
-
-##############################
-# picoclvr options
-
-parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
-
-parser.add_argument("--picoclvr_height", type=int, default=12)
-
-parser.add_argument("--picoclvr_width", type=int, default=16)
-
-parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
-
-##############################
-# Maze options
-
-parser.add_argument("--maze_height", type=int, default=13)
-
-parser.add_argument("--maze_width", type=int, default=21)
-
-parser.add_argument("--maze_nb_walls", type=int, default=15)
-
-##############################
-# Snake options
-
-parser.add_argument("--snake_height", type=int, default=9)
+parser.add_argument("--nb_gpts", type=int, default=5)
-parser.add_argument("--snake_width", type=int, default=12)
-
-parser.add_argument("--snake_nb_colors", type=int, default=5)
-
-parser.add_argument("--snake_length", type=int, default=200)
-
-##############################
-# ByHeart options
-
-parser.add_argument("--byheart_separation", type=int, default=1)
-
-##############################
-# Stack options
-
-parser.add_argument("--stack_nb_steps", type=int, default=100)
-
-parser.add_argument("--stack_nb_stacks", type=int, default=3)
-
-parser.add_argument("--stack_nb_digits", type=int, default=3)
-
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=None)
-
-##############################
-# Expr options
-
-parser.add_argument("--expr_nb_variables", type=int, default=5)
-
-parser.add_argument("--expr_sequence_length", type=int, default=40)
-
-parser.add_argument("--expr_operand_max", type=int, default=9)
-
-parser.add_argument("--expr_result_max", type=int, default=99)
-
-parser.add_argument("--expr_input_file", type=str, default=None)
-
-##############################
-# Mixing
-
-parser.add_argument("--mixing_hard", action="store_true", default=False)
-
-parser.add_argument("--mixing_deterministic_start", action="store_true", default=False)
-
-##############################
-# greed options
-
-parser.add_argument("--greed_height", type=int, default=5)
-
-parser.add_argument("--greed_width", type=int, default=7)
-
-parser.add_argument("--greed_T", type=int, default=25)
-
-parser.add_argument("--greed_nb_walls", type=int, default=5)
-
-parser.add_argument("--greed_nb_coins", type=int, default=2)
+parser.add_argument("--check", action="store_true", default=False)
######################################################################
args = parser.parse_args()
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
-
if args.result_dir is None:
args.result_dir = f"results_{args.task}"
######################################################################
default_task_args = {
- "file": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "addition": {
- "model": "352M",
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "byheart": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 50000,
- "nb_test_samples": 10000,
- },
- "expr": {
- "model": "352M",
- "batch_size": 25,
- "nb_train_samples": 2500000,
- "nb_test_samples": 10000,
- },
- "grid": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "qmlp": {
- "model": "37M",
- "batch_size": 10,
- "nb_train_samples": 100000,
- "nb_test_samples": 1000,
- },
- "guessop": {
- "model": "352M",
- "batch_size": 25,
- "nb_train_samples": 1000000,
- "nb_test_samples": 10000,
- },
- "learnop": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 50000,
- "nb_test_samples": 10000,
- },
- "maze": {
- "model": "37M",
- "batch_size": 5,
- "nb_train_samples": 100000,
- "nb_test_samples": 10000,
- },
- "picoclvr": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "rpl": {
- "model": "352M",
- "batch_size": 5,
- "nb_train_samples": 2500000,
- "nb_test_samples": 10000,
- },
- "snake": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "stack": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 100000,
- "nb_test_samples": 1000,
- },
- "twotargets": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 50000,
- "nb_test_samples": 10000,
- },
- "memory": {
+ "world": {
"model": "37M",
"batch_size": 100,
- "nb_train_samples": 25000,
- "nb_test_samples": 1000,
- },
- "mixing": {
- "model": "37M",
- "batch_size": 25,
"nb_train_samples": 250000,
"nb_test_samples": 10000,
},
- "mnist": {
- "model": "37M",
- "batch_size": 10,
- "nb_train_samples": 60000,
- "nb_test_samples": 10000,
- },
- "greed": {
- "model": "37M",
- "batch_size": 25,
- "nb_train_samples": 25000,
- "nb_test_samples": 10000,
- },
}
if args.task in default_task_args:
try:
os.mkdir(args.result_dir)
except FileExistsError:
- if not args.resume:
- print(f"result directory {args.result_dir} already exists")
- exit(1)
+ print(f"result directory {args.result_dir} already exists")
+ exit(1)
log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
######################################################################
-
-def picoclvr_pruner_horizontal_green(p):
- return not ("green" in p and ("left" in p or "right" in p))
-
-
-picoclvr_pruner_train = (
- picoclvr_pruner_horizontal_green
- if args.picocvlr_prune_properties in {"train+eval"}
- else None
-)
-
-picoclvr_pruner_eval = (
- (lambda p: not picoclvr_pruner_horizontal_green(p))
- if args.picocvlr_prune_properties in {"train+eval", "eval"}
- else None
-)
-
-######################################################################
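+# with --check, shrink the datasets so a full run finishes quickly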
+if args.check:
+ args.nb_train_samples = 500
+ args.nb_test_samples = 100
if args.physical_batch_size is None:
args.physical_batch_size = args.batch_size
)
args.max_percents_of_test_in_train = -1
+elif args.task == "world":
+ task = tasks.World(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ result_dir=args.result_dir,
+ logger=log_string,
+ device=device,
+ )
+ args.max_percents_of_test_in_train = -1
+
elif args.task == "learnop":
task = tasks.SandBox(
problem=problems.ProblemLearnOperator(),
log_string(f"vocabulary_size {vocabulary_size}")
-##############################
-
-model = mygpt.MyGPT(
- vocabulary_size=vocabulary_size,
- dim_model=args.dim_model,
- dim_keys=args.dim_keys,
- dim_hidden=args.dim_hidden,
- nb_heads=args.nb_heads,
- nb_blocks=args.nb_blocks,
- causal=True,
- dropout=args.dropout,
-)
-
-model.to(device)
-
-nb_parameters = sum(p.numel() for p in model.parameters())
-log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
-
-######################################################################
-
-nb_epochs_finished = 0
-
-if args.no_checkpoint:
- log_string(f"not trying to load checkpoint.")
-
-else:
- try:
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- checkpoint = torch.load(checkpoint_name)
- nb_epochs_finished = checkpoint["nb_epochs_finished"]
- model.load_state_dict(checkpoint["model_state"])
- torch.set_rng_state(checkpoint["rng_state"])
- if torch.cuda.is_available():
- torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
-
- log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
-
- except FileNotFoundError:
- log_string("starting from scratch.")
-
- except:
- log_string("error when loading the checkpoint.")
- exit(1)
-
-######################################################################
-
-if args.task == "expr" and args.expr_input_file is not None:
- task.produce_results(
- n_epoch=nb_epochs_finished,
- model=model,
- result_dir=args.result_dir,
- logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
- input_file=args.expr_input_file,
- )
-
- exit(0)
-
######################################################################
# Compute the entropy of the training tokens
##############################
-if args.learning_rate_schedule == "cos":
- learning_rate_schedule = {}
- for n_epoch in range(args.nb_epochs):
- u = n_epoch / args.nb_epochs * math.pi
- learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
- u = {
- int(k): float(v)
- for k, v in [
- tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
- ]
- }
-
- learning_rate_schedule = {}
- learning_rate = args.learning_rate
- for n_epoch in range(args.nb_epochs):
- if n_epoch in u:
- learning_rate = u[n_epoch]
- learning_rate_schedule[n_epoch] = learning_rate
-
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
-
-##############################
-
-if nb_epochs_finished >= args.nb_epochs:
- task.produce_results(
- n_epoch=nb_epochs_finished,
- model=model,
- result_dir=args.result_dir,
- logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
- )
-
-time_pred_result = None
-for n_epoch in range(nb_epochs_finished, args.nb_epochs):
- learning_rate = learning_rate_schedule[n_epoch]
-
- log_string(f"learning_rate {learning_rate}")
-
- if args.optim == "sgd":
- optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
- elif args.optim == "adam":
- optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
- elif args.optim == "adamw":
- optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
- else:
- raise ValueError(f"Unknown optimizer {args.optim}.")
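+# Train `model` for a single pass over the task's training set. Note that a
+# fresh Adam optimizer is created at every call, so optimizer state (moment
+# estimates) is not carried over between epochs.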
+def one_epoch(model, task):
+ optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
model.train()
if nb_train_samples % args.batch_size == 0:
optimizer.step()
+ train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+ log_string(f"train_perplexity {n_epoch} {train_perplexity}")
+
+
+######################################################################
+
+
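+# Evaluate `model` on the test set: compute the test perplexity and have the
+# task measure the main test accuracy, which is stored on the model and later
+# drives both the choice of which model to train and quiz creation.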
+def run_tests(model, task, deterministic_synthesis):
with torch.autograd.no_grad():
model.eval()
input = input.to(device)
bs = model(mygpt.BracketedSequence(input))
- output_ar = bs.x
+ output = bs.x
loss = F.cross_entropy(output.transpose(1, 2), input)
nb_test_samples += input.size(0)
- train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+ main_test_accuracy = task.produce_results(
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=deterministic_synthesis,
+ )
+
test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
- log_string(
- f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
- )
+ log_string(f"test_perplexity {n_epoch} {test_perplexity}")
+
+ model.main_test_accuracy = main_test_accuracy
+
+
+######################################################################
+
+
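+# Have `model` generate candidate quizzes in chunks of four times the number
+# needed, keep those that all but one of the other models answer correctly,
+# and stop once enough are collected for the new train and test sets.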
+def create_quizzes(
+ model,
+ other_models,
+ task,
+ nb_for_train=1000,
+ nb_for_test=100,
+):
+ kept = []
- task.produce_results(
+ while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
+ new_quizzes, nb_correct = task.create_new_quizzes(
n_epoch=n_epoch,
- model=model,
result_dir=args.result_dir,
logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
+ nb=4 * (nb_for_train + nb_for_test),
+ model=model,
+ other_models=other_models,
)
- time_current_result = datetime.datetime.now()
- if time_pred_result is not None:
- log_string(
- f"next_result {time_current_result + (time_current_result - time_pred_result)}"
- )
- time_pred_result = time_current_result
+        log_string(f"nb_correct {nb_correct}")
- checkpoint = {
- "nb_epochs_finished": n_epoch + 1,
- "model_state": model.state_dict(),
- "rng_state": torch.get_rng_state(),
- }
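+        # keep the quizzes that exactly all but one of the other models
+        # solve (presumably: consistent enough to be valid, hard enough
+        # to be informative)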
+        to_keep = new_quizzes[nb_correct == len(other_models) - 1]
+        log_string(f"keep {to_keep.size(0)} quizzes")
+        kept.append(to_keep)
- if torch.cuda.is_available():
- checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+ new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+
+ task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
+ task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)
+
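+    # save an image of a sample of the new quizzes for visual inspection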
+ task.save_image(
+ new_quizzes[:96],
+ args.result_dir,
+ f"world_quiz_{n_epoch:04d}_{model.id:02d}.png",
+ log_string,
+ )
+
+
+######################################################################
+
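+# instantiate the population of GPTs: identical architectures, each with an
+# id and a tracked main_test_accuracy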
+models = []
+
+for k in range(args.nb_gpts):
+ model = mygpt.MyGPT(
+ vocabulary_size=vocabulary_size,
+ dim_model=args.dim_model,
+ dim_keys=args.dim_keys,
+ dim_hidden=args.dim_hidden,
+ nb_heads=args.nb_heads,
+ nb_blocks=args.nb_blocks,
+ causal=True,
+ dropout=args.dropout,
+ ).to(device)
+
+ model.main_test_accuracy = 0.0
+ model.id = k
+
+ models.append(model)
+
+
+nb_parameters = sum(p.numel() for p in models[0].parameters())
+log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
+
+######################################################################
+
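+# a model is allowed to create new quizzes only once its main test accuracy
+# reaches this threshold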
+accuracy_to_make_quizzes = 0.975
+nb_new_quizzes_for_train = 1000
+nb_new_quizzes_for_test = 100
+
+if args.check:
+ accuracy_to_make_quizzes = 0.0
+ nb_new_quizzes_for_train = 10
+ nb_new_quizzes_for_test = 10
+
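+# main loop: train the currently weakest model for one epoch, re-test it,
+# and when it is accurate enough have it create quizzes for the others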
+for n_epoch in range(args.nb_epochs):
+    # select the model with the lowest accuracy
+ models.sort(key=lambda model: model.main_test_accuracy)
+ model = models[0]
+
+ log_string(
+ f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+ )
+
+ # improve it
+ one_epoch(model, task)
+
+ log_string(
+ f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+ )
+
+ # test it
+ run_tests(model, task, deterministic_synthesis=False)
+
+ if model.main_test_accuracy >= accuracy_to_make_quizzes:
+ other_models = models.copy()
+ other_models.remove(model)
+
+ create_quizzes(
+ model,
+ other_models,
+ task,
+ nb_for_train=nb_new_quizzes_for_train,
+ nb_for_test=nb_new_quizzes_for_test,
+ )
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- torch.save(checkpoint, checkpoint_name)
- log_string(f"saved checkpoint {checkpoint_name}")
######################################################################