from torch.nn import functional as F
import ffutils
-import mygpt, tasks, problems
+import mygpt, quizz_machine
+
+# world ("w") quizzes come from the world generator; culture ("c") quizzes are
+# created by the models themselves and validated by the other models
+
+######################################################################
+
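+# c-quiz generation is gated on the test accuracy of the worst model; the two
+# counts below set how many validated c-quizzes are added to the train and
+# test sets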
+accuracy_to_make_c_quizzes = 0.975
+nb_new_c_quizzes_for_train = 1000
+nb_new_c_quizzes_for_test = 100
######################################################################
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
-parser.add_argument("--task", type=str, default="world", help="world")
-
parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
parser.add_argument("--result_dir", type=str, default=None)
parser.add_argument("--nb_gpts", type=int, default=5)
-parser.add_argument("--check", action="store_true", default=False)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
######################################################################
args = parser.parse_args()
if args.result_dir is None:
- args.result_dir = f"results_{args.task}"
+    args.result_dir = "results_culture"
######################################################################
-default_task_args = {
- "world": {
- "model": "37M",
- "batch_size": 100,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
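+# In debug mode, drop the accuracy gate and use tiny quiz counts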
+if args.dirty_debug:
+ accuracy_to_make_c_quizzes = 0.0
+ nb_new_c_quizzes_for_train = 100
+ nb_new_c_quizzes_for_test = 10
+
+######################################################################
+
+default_args = {
+ "model": "37M",
+ "batch_size": 100,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
}
-if args.task in default_task_args:
- for k, v in default_task_args[args.task].items():
- if getattr(args, k) is None:
- setattr(args, k, v)
+for k, v in default_args.items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
######################################################################
######################################################################
-if args.check:
- args.nb_train_samples = 500
+if args.dirty_debug:
+ args.nb_train_samples = 2500
args.nb_test_samples = 100
if args.physical_batch_size is None:
assert args.nb_train_samples % args.batch_size == 0
assert args.nb_test_samples % args.batch_size == 0
-if args.task == "file":
- assert (
- args.filetask_train_file is not None and args.filetask_test_file is not None
- ), "You have to specify the task train and test files"
- task = tasks.TaskFromFile(
- args.filetask_train_file,
- args.filetask_test_file,
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- shuffle=True,
- device=device,
- )
- args.max_percents_of_test_in_train = 0
-
-elif args.task == "byheart":
- task = tasks.SandBox(
- problem=problems.ProblemByHeart(separation=args.byheart_separation),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
- args.max_percents_of_test_in_train = -1
-
-elif args.task == "world":
- task = tasks.World(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- result_dir=args.result_dir,
- logger=log_string,
- device=device,
- )
- args.max_percents_of_test_in_train = -1
-
-elif args.task == "learnop":
- task = tasks.SandBox(
- problem=problems.ProblemLearnOperator(),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-
-elif args.task == "guessop":
- task = tasks.SandBox(
- problem=problems.ProblemGuessOperator(),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-
-elif args.task == "twotargets":
- task = tasks.SandBox(
- problem=problems.ProblemTwoTargets(),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "memory":
- task = tasks.SandBox(
- problem=problems.ProblemMemory(),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "mixing":
- task = tasks.SandBox(
- problem=problems.ProblemMixing(
- hard=args.mixing_hard, random_start=not args.mixing_deterministic_start
- ),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "addition":
- task = tasks.SandBox(
- problem=problems.ProblemAddition(),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "picoclvr":
- task = tasks.PicoCLVR(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- height=args.picoclvr_height,
- width=args.picoclvr_width,
- nb_colors=args.picoclvr_nb_colors,
- logger=log_string,
- device=device,
- pruner_train=picoclvr_pruner_train,
- pruner_eval=picoclvr_pruner_eval,
- )
-
-elif args.task == "mnist":
- task = tasks.MNIST(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- device=device,
- )
-
-elif args.task == "maze":
- task = tasks.Maze(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- height=args.maze_height,
- width=args.maze_width,
- nb_walls=args.maze_nb_walls,
- device="cpu",
- )
-
-elif args.task == "snake":
- task = tasks.Snake(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- height=args.snake_height,
- width=args.snake_width,
- nb_colors=args.snake_nb_colors,
- length=args.snake_length,
- prompt_length=args.snake_length // 2,
- device=device,
- )
-
-elif args.task == "stack":
- task = tasks.Stack(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- logger=log_string,
- nb_steps=args.stack_nb_steps,
- nb_stacks=args.stack_nb_stacks,
- nb_digits=args.stack_nb_digits,
- fraction_values_for_train=args.stack_fraction_values_for_train,
- device=device,
- )
-
-elif args.task == "expr":
- task = tasks.Expr(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- nb_variables=args.expr_nb_variables,
- sequence_length=args.expr_sequence_length,
- operand_max=args.expr_operand_max,
- result_max=args.expr_result_max,
- batch_size=args.physical_batch_size,
- device=device,
- )
-
-elif args.task == "rpl":
- task = tasks.RPL(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- nb_starting_values=args.rpl_nb_starting_values,
- max_input=args.rpl_max_input,
- prog_len=args.rpl_prog_len,
- nb_runs=args.rpl_nb_runs,
- no_prog=args.rpl_no_prog,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "grid":
- task = tasks.Grid(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- size=args.grid_size,
- fraction_play=args.grid_fraction_play,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "qmlp":
- task = tasks.QMLP(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- result_dir=args.result_dir,
- logger=log_string,
- device=device,
- )
-
-elif args.task == "greed":
- task = tasks.Greed(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.physical_batch_size,
- height=args.greed_height,
- width=args.greed_width,
- T=args.greed_T,
- nb_walls=args.greed_nb_walls,
- nb_coins=args.greed_nb_coins,
- logger=log_string,
- device=device,
- )
-
-else:
- raise ValueError(f"Unknown task {args.task}")
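+# The quizz machine generates the quizzes, serves them as batches, and stores
+# the validated c-quizzes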
+quizz_machine = quizz_machine.QuizzMachine(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ result_dir=args.result_dir,
+ logger=log_string,
+ device=device,
+)
######################################################################
log_string(f"device {device}")
-vocabulary_size = task.vocabulary_size()
+vocabulary_size = quizz_machine.vocabulary_size()
log_string(f"vocabulary_size {vocabulary_size}")
# Compute the entropy of the training tokens
token_count = 0
-for input in task.batches(split="train", desc="train-entropy"):
- token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+ token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+ (0, 1)
+ )
token_probas = token_count / token_count.sum()
entropy = -torch.xlogy(token_probas, token_probas).sum()
train_set_perplexity = math.exp(entropy)
nb_test, nb_in_train = 0, 0
for test_subset in subsets_as_tuples(
- task.batches(split="test", desc="test-check"), 25000
+ quizz_machine.batches(split="test", desc="test-check"), 25000
):
in_train = set()
for train_subset in subsets_as_tuples(
- task.batches(split="train", desc="train-check"), 25000
+ quizz_machine.batches(split="train", desc="train-check"), 25000
):
in_train.update(test_subset.intersection(train_subset))
nb_in_train += len(in_train)
##############################
-def one_epoch(model, task):
+def one_epoch(model, quizz_machine):
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
model.train()
nb_train_samples, acc_train_loss = 0, 0.0
- for input in task.batches(split="train"):
+ for input in quizz_machine.batches(split="train"):
input = input.to(device)
if nb_train_samples % args.batch_size == 0:
######################################################################
-def run_tests(model, task, deterministic_synthesis):
+def run_tests(model, quizz_machine, deterministic_synthesis):
with torch.autograd.no_grad():
model.eval()
nb_test_samples, acc_test_loss = 0, 0.0
nb_samples_accumulated = 0
- for input in task.batches(split="test"):
+ for input in quizz_machine.batches(split="test"):
input = input.to(device)
bs = model(mygpt.BracketedSequence(input))
nb_test_samples += input.size(0)
- main_test_accuracy = task.produce_results(
+ main_test_accuracy = quizz_machine.produce_results(
n_epoch=n_epoch,
model=model,
result_dir=args.result_dir,
######################################################################
-def create_quizzes(
+def create_c_quizzes(
model,
other_models,
- task,
+ quizz_machine,
nb_for_train=1000,
nb_for_test=100,
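+    # reference average sequence logproba, forwarded to the quizz machine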
+ min_ave_seq_logproba=None,
):
kept = []
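+    # running totals used to return the average sequence logproba of the
+    # generated c-quizzes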
+ sum_logits, sum_nb_c_quizzes = 0, 0
+
while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
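+    # over-generate candidates; only those validated by the other models are kept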
- new_quizzes, nb_correct = task.create_new_quizzes(
+ nb_to_generate = 4 * (nb_for_train + nb_for_test)
+
+ new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
n_epoch=n_epoch,
result_dir=args.result_dir,
logger=log_string,
- nb=4 * (nb_for_train + nb_for_test),
+ nb=nb_to_generate,
model=model,
other_models=other_models,
+ min_ave_seq_logproba=min_ave_seq_logproba,
)
-    print(nb_correct)
-    to_keep = new_quizzes[nb_correct == len(other_models) - 1]
-    log_string(f"keep {to_keep.size(0)} quizzes")
+    sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
+    sum_nb_c_quizzes += new_c_quizzes.size(0)
+
+    # keep the quizzes that exactly all but one of the other models answer correctly
+    to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
+
+    if args.dirty_debug:
+        to_keep = new_c_quizzes
+
+    log_string(
+        f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+    )
kept.append(to_keep)
- new_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
+ new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
- task.store_new_quizzes(new_quizzes[:nb_for_train], for_train=True)
- task.store_new_quizzes(new_quizzes[nb_for_train:], for_train=False)
+ quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+ quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
- task.save_image(
- new_quizzes[:96],
+ quizz_machine.save_quizzes(
+ new_c_quizzes[:72],
args.result_dir,
- f"world_quiz_{n_epoch:04d}_{model.id:02d}.png",
+ f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
log_string,
)
+ return sum_logits / sum_nb_c_quizzes
+
######################################################################
######################################################################
-accuracy_to_make_quizzes = 0.975
-nb_new_quizzes_for_train = 1000
-nb_new_quizzes_for_test = 100
-
-if args.check:
- accuracy_to_make_quizzes = 0.0
- nb_new_quizzes_for_train = 10
- nb_new_quizzes_for_test = 10
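+# reference average sequence logproba, set from the first batch of c-quizzes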
+min_ave_seq_logproba = None
for n_epoch in range(args.nb_epochs):
+ log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
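+    # log the current test accuracy of every model, sorted by model id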
+ a = [(model.id, float(model.main_test_accuracy)) for model in models]
+ a.sort(key=lambda p: p[0])
+ log_string(f"current accuracies {a}")
+
# select the model with lowest accuracy
models.sort(key=lambda model: model.main_test_accuracy)
model = models[0]
)
# improve it
- one_epoch(model, task)
+ one_epoch(model, quizz_machine)
+
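+    # renew this model's share of the world quizzes in the training set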
+ quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
log_string(
- f"train_set_composition world {task.nb_batch_samples_world} quizzes {task.nb_batch_samples_quizzes}"
+ f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
)
# test it
- run_tests(model, task, deterministic_synthesis=False)
+ run_tests(model, quizz_machine, deterministic_synthesis=False)
- if model.main_test_accuracy >= accuracy_to_make_quizzes:
+ log_string(
+ f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
+ )
+
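+    # Once every model is accurate enough, the freshly trained model generates
+    # new c-quizzes, which must be validated by the other models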
+ if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
other_models = models.copy()
other_models.remove(model)
- create_quizzes(
+ ave_seq_logproba = create_c_quizzes(
model,
other_models,
- task,
- nb_for_train=nb_new_quizzes_for_train,
- nb_for_test=nb_new_quizzes_for_test,
+ quizz_machine,
+ nb_for_train=nb_new_c_quizzes_for_train,
+ nb_for_test=nb_new_c_quizzes_for_test,
+ min_ave_seq_logproba=min_ave_seq_logproba,
)
+        # We keep the first average sequence logproba as a reference
+        if min_ave_seq_logproba is None:
+            min_ave_seq_logproba = ave_seq_logproba
+        else:
+            log_string(
+                f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+            )
+
+        # We re-evaluate every model, since the test set now contains new c-quizzes
+        for model in models:
+            run_tests(model, quizz_machine, deterministic_synthesis=False)
+
######################################################################