# Written by Francois Fleuret <francois@fleuret.org>
-import math, sys, argparse, time, tqdm, os
+import math, sys, argparse, time, tqdm, os, datetime, warnings
import torch, torchvision
from torch import nn
from torch.nn import functional as F
import ffutils
-import mygpt, tasks, problems
+import mygpt
+import sky, wireworld, quizz_machine
+
+# world quizzes (w_quizzes) are generated by the problem itself, culture
+# quizzes (c_quizzes) are generated by the models (see create_c_quizzes)
+
+######################################################################
+
+nb_new_c_quizzes_for_train = 1000
+nb_new_c_quizzes_for_test = 100
######################################################################
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
-parser.add_argument(
- "--task",
- type=str,
- default="sandbox",
- help="sandbox, picoclvr, mnist, maze, snake, stack, expr, rpl, world",
-)
-
-parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
+parser.add_argument("--log_filename", type=str, default="train.log")
parser.add_argument("--result_dir", type=str, default=None)
########################################
-parser.add_argument("--nb_epochs", type=int, default=None)
+parser.add_argument("--nb_epochs", type=int, default=10000)
parser.add_argument("--batch_size", type=int, default=None)
+parser.add_argument("--physical_batch_size", type=int, default=None)
+
parser.add_argument("--nb_train_samples", type=int, default=None)
parser.add_argument("--nb_test_samples", type=int, default=None)
-parser.add_argument("--optim", type=str, default="adam")
-
-parser.add_argument("--learning_rate", type=float, default=1e-4)
-
-parser.add_argument("--learning_rate_schedule", type=str, default="10: 2e-5,30: 4e-6")
+parser.add_argument("--learning_rate", type=float, default=1e-3)
########################################
-parser.add_argument("--model", type=str, default="37M")
+parser.add_argument("--model", type=str, default=None)
parser.add_argument("--dim_model", type=int, default=None)
parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
-
-parser.add_argument("--overwrite_results", action="store_true", default=False)
-
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
-
-##############################
-# rpl options
-
-parser.add_argument("--rpl_nb_starting_values", type=int, default=5)
-
-parser.add_argument("--rpl_max_input", type=int, default=9)
-
-parser.add_argument("--rpl_prog_len", type=int, default=10)
-
-parser.add_argument("--rpl_nb_runs", type=int, default=8)
-
-parser.add_argument("--rpl_no_prog", action="store_true", default=False)
-
-##############################
-# sandbox options
-
-parser.add_argument("--sandbox_level", type=int, default=0)
-
-parser.add_argument("--sandbox_levels_nb_items", type=int, default=25)
-
-parser.add_argument("--sandbox_levels_len_source", type=int, default=6)
-
-parser.add_argument("--sandbox_levels_len_result", type=int, default=8)
-
-##############################
-# picoclvr options
-
-parser.add_argument("--picoclvr_nb_colors", type=int, default=5)
-
-parser.add_argument("--picoclvr_height", type=int, default=12)
-
-parser.add_argument("--picoclvr_width", type=int, default=16)
-
-parser.add_argument("--picocvlr_prune_properties", type=str, default="none")
-
-##############################
-# Maze options
-
-parser.add_argument("--maze_height", type=int, default=23)
-
-parser.add_argument("--maze_width", type=int, default=39)
-
-parser.add_argument("--maze_nb_walls", type=int, default=45)
-
-##############################
-# Snake options
-
-parser.add_argument("--snake_height", type=int, default=6)
-
-parser.add_argument("--snake_width", type=int, default=8)
-
-parser.add_argument("--snake_nb_colors", type=int, default=5)
-
-parser.add_argument("--snake_length", type=int, default=200)
-
-##############################
-# Stack options
-
-parser.add_argument("--stack_nb_steps", type=int, default=100)
-
-parser.add_argument("--stack_nb_stacks", type=int, default=3)
+parser.add_argument("--problem", type=str, default="sky")
-parser.add_argument("--stack_nb_digits", type=int, default=3)
+parser.add_argument("--nb_gpts", type=int, default=5)
-parser.add_argument("--stack_fraction_values_for_train", type=float, default=0.75)
+parser.add_argument("--nb_models_for_generation", type=int, default=1)
-##############################
-# Expr options
-
-parser.add_argument("--expr_nb_variables", type=int, default=5)
-
-parser.add_argument("--expr_sequence_length", type=int, default=40)
+parser.add_argument("--generation_mode", type=str, default="groupthink")
-parser.add_argument("--expr_operand_max", type=int, default=9)
+parser.add_argument("--min_to_validate", type=int, default=4)
-parser.add_argument("--expr_result_max", type=int, default=99)
+parser.add_argument("--max_to_validate", type=int, default=4)
-parser.add_argument("--expr_input_file", type=str, default=None)
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
-##############################
-# World options
-
-parser.add_argument("--world_vqae_nb_epochs", type=int, default=25)
+parser.add_argument("--dirty_debug", action="store_true", default=False)
######################################################################
args = parser.parse_args()
-assert args.picocvlr_prune_properties in {"none", "train+eval", "eval"}
-
if args.result_dir is None:
- args.result_dir = f"results_{args.task}"
+    args.result_dir = "results_culture"
######################################################################
-default_task_args = {
- "sandbox": {
- "nb_epochs": 50,
- "batch_size": 25,
- "nb_train_samples": 100000,
- "nb_test_samples": 10000,
- },
- "picoclvr": {
- "nb_epochs": 25,
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "mnist": {
- "nb_epochs": 25,
- "batch_size": 10,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "maze": {
- "nb_epochs": 25,
- "batch_size": 5,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "snake": {
- "nb_epochs": 5,
- "batch_size": 25,
- "nb_train_samples": 250000,
- "nb_test_samples": 10000,
- },
- "stack": {
- "nb_epochs": 5,
- "batch_size": 25,
- "nb_train_samples": 100000,
- "nb_test_samples": 1000,
- },
- "expr": {
- "nb_epochs": 40,
- "batch_size": 25,
- "nb_train_samples": 1000000,
- "nb_test_samples": 10000,
- },
- "rpl": {
- "nb_epochs": 40,
- "batch_size": 25,
- "nb_train_samples": 100000,
- "nb_test_samples": 10000,
- },
- "world": {
- "nb_epochs": 10,
- "batch_size": 25,
- "nb_train_samples": 25000,
- "nb_test_samples": 1000,
- },
+if args.dirty_debug:
+ args.accuracy_to_make_c_quizzes = 0.0
+ nb_new_c_quizzes_for_train = 100
+ nb_new_c_quizzes_for_test = 10
+
+######################################################################
+
+default_args = {
+ "model": "37M",
+ "batch_size": 100,
+ "nb_train_samples": 100000,
+ "nb_test_samples": 10000,
}
-if args.task in default_task_args:
- for k, v in default_task_args[args.task].items():
- if getattr(args, k) is None:
- setattr(args, k, v)
+for k, v in default_args.items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
######################################################################
"nb_heads": 2,
"nb_blocks": 2,
},
+ "4M": {
+ "dim_model": 256,
+ "dim_keys": 32,
+ "dim_hidden": 1024,
+ "nb_heads": 4,
+ "nb_blocks": 6,
+ },
"37M": {
"dim_model": 512,
"dim_keys": 64,
try:
os.mkdir(args.result_dir)
except FileExistsError:
- if not args.overwrite_results:
- print(f"result directory {args.result_dir} already exists")
- exit(1)
+ print(f"result directory {args.result_dir} already exists")
+ exit(1)
log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
sys.stdout.flush()
+log_string(f"argv {' '.join(sys.argv)}")
+
for n in vars(args):
log_string(f"args.{n} {getattr(args, n)}")
######################################################################
+if args.dirty_debug:
+ args.nb_train_samples = 2500
+ args.nb_test_samples = 100
-def picoclvr_pruner_horizontal_green(p):
- return not ("green" in p and ("left" in p or "right" in p))
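+# the physical batch size is the one actually sent through the model;
+# gradients are accumulated over physical batches until a full logical
+# batch has been processed (see one_epoch)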
+if args.physical_batch_size is None:
+ args.physical_batch_size = args.batch_size
+else:
+ assert args.batch_size % args.physical_batch_size == 0
+assert args.nb_train_samples % args.batch_size == 0
+assert args.nb_test_samples % args.batch_size == 0
-picoclvr_pruner_train = (
- picoclvr_pruner_horizontal_green
- if args.picocvlr_prune_properties in {"train+eval"}
- else None
+if args.problem == "sky":
+ problem = sky.Sky(height=6, width=8, nb_birds=3, nb_iterations=2, speed=3)
+elif args.problem == "wireworld":
+ problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
+else:
+    raise ValueError(f"Unknown problem {args.problem}")
+
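+# from here on the name quizz_machine refers to the QuizzMachine
+# instance, shadowing the module imported above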
+quizz_machine = quizz_machine.QuizzMachine(
+ problem=problem,
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ result_dir=args.result_dir,
+ logger=log_string,
+ device=device,
)
-picoclvr_pruner_eval = (
- (lambda p: not picoclvr_pruner_horizontal_green(p))
- if args.picocvlr_prune_properties in {"train+eval", "eval"}
- else None
-)
+######################################################################
+
+log_string(f"device {device}")
+
+vocabulary_size = quizz_machine.vocabulary_size()
+
+log_string(f"vocabulary_size {vocabulary_size}")
######################################################################
-if args.task == "sandbox":
- if args.sandbox_level == 0:
- problem = problems.ProblemLevel0(
- nb_sentences=args.sandbox_levels_nb_items,
- len_prompt=args.sandbox_levels_len_source,
- len_result=args.sandbox_levels_len_result,
- )
- elif args.sandbox_level == 1:
- problem = problems.ProblemLevel1(
- nb_operators=args.sandbox_levels_nb_items,
- len_source=args.sandbox_levels_len_source,
- len_result=args.sandbox_levels_len_result,
- )
- elif args.sandbox_level == 2:
- problem = problems.ProblemLevel2(
- len_source=args.sandbox_levels_len_source,
- len_result=args.sandbox_levels_len_result,
- )
- else:
- raise ValueError(f"Unknown sandbox level {args.sandbox_level}")
-
- task = tasks.SandBox(
- # problem,
- # problems.ProblemAddition(zero_padded=False, inverted_result=False),
- problems.ProblemLenId(len_max=args.sandbox_levels_len_source),
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- logger=log_string,
- device=device,
- )
+# Compute the entropy of the training tokens
-elif args.task == "picoclvr":
- task = tasks.PicoCLVR(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- height=args.picoclvr_height,
- width=args.picoclvr_width,
- nb_colors=args.picoclvr_nb_colors,
- logger=log_string,
- device=device,
- pruner_train=picoclvr_pruner_train,
- pruner_eval=picoclvr_pruner_eval,
+token_count = 0
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+ token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+ (0, 1)
)
+token_probas = token_count / token_count.sum()
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
-elif args.task == "mnist":
- task = tasks.MNIST(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- device=device,
- )
+######################################################################
+# A bit of paranoia never hurts
-elif args.task == "maze":
- task = tasks.Maze(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- height=args.maze_height,
- width=args.maze_width,
- nb_walls=args.maze_nb_walls,
- device=device,
+if args.max_percents_of_test_in_train >= 0:
+
+ def subsets_as_tuples(batches, cs):
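+        # yield the samples as sets of (at most) cs tuples, so the
+        # train/test overlap check works on bounded chunks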
+ s = set()
+ for batch in batches:
+ for x in batch:
+ s.add(tuple([v.item() for v in x]))
+ if len(s) == cs:
+ yield s
+ s = set()
+ yield s
+
+ nb_test, nb_in_train = 0, 0
+ for test_subset in subsets_as_tuples(
+ quizz_machine.batches(split="test", desc="test-check"), 25000
+ ):
+ in_train = set()
+ for train_subset in subsets_as_tuples(
+ quizz_machine.batches(split="train", desc="train-check"), 25000
+ ):
+ in_train.update(test_subset.intersection(train_subset))
+ nb_in_train += len(in_train)
+ nb_test += len(test_subset)
+
+ log_string(
+ f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
)
-elif args.task == "snake":
- task = tasks.Snake(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- height=args.snake_height,
- width=args.snake_width,
- nb_colors=args.snake_nb_colors,
- length=args.snake_length,
- prompt_length=args.snake_length // 2,
- device=device,
- )
+ assert (
+ nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+ ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
-elif args.task == "stack":
- task = tasks.Stack(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- logger=log_string,
- nb_steps=args.stack_nb_steps,
- nb_stacks=args.stack_nb_stacks,
- nb_digits=args.stack_nb_digits,
- fraction_values_for_train=args.stack_fraction_values_for_train,
- device=device,
- )
+##############################
-elif args.task == "expr":
- task = tasks.Expr(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- nb_variables=args.expr_nb_variables,
- sequence_length=args.expr_sequence_length,
- operand_max=args.expr_operand_max,
- result_max=args.expr_result_max,
- batch_size=args.batch_size,
- device=device,
- )
-elif args.task == "rpl":
- task = tasks.RPL(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- nb_starting_values=args.rpl_nb_starting_values,
- max_input=args.rpl_max_input,
- prog_len=args.rpl_prog_len,
- nb_runs=args.rpl_nb_runs,
- no_prog=args.rpl_no_prog,
- logger=log_string,
- device=device,
- )
+def one_epoch(model, quizz_machine):
+ optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
-elif args.task == "world":
- task = tasks.World(
- nb_train_samples=args.nb_train_samples,
- nb_test_samples=args.nb_test_samples,
- batch_size=args.batch_size,
- vqae_nb_epochs=args.world_vqae_nb_epochs,
- logger=log_string,
- device=device,
- )
+ model.train()
-else:
- raise ValueError(f"Unknown task {args.task}")
+ nb_train_samples, acc_train_loss = 0, 0.0
-######################################################################
+ for input in quizz_machine.batches(split="train"):
+ input = input.to(device)
-log_string(f"device {device}")
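+        # gradients are accumulated over physical batches; reset them
+        # at each logical batch boundary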
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.zero_grad()
-vocabulary_size = task.vocabulary_size()
+ output = model(mygpt.BracketedSequence(input)).x
+ loss = F.cross_entropy(output.transpose(1, 2), input)
+ acc_train_loss += loss.item() * input.size(0)
-log_string(f"vocabulary_size {vocabulary_size}")
+ nb_train_samples += input.size(0)
-##############################
+ loss.backward()
-model = mygpt.MyGPT(
- vocabulary_size=vocabulary_size,
- dim_model=args.dim_model,
- dim_keys=args.dim_keys,
- dim_hidden=args.dim_hidden,
- nb_heads=args.nb_heads,
- nb_blocks=args.nb_blocks,
- causal=True,
- dropout=args.dropout,
-)
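+        # take an optimizer step only once a full logical batch has
+        # been accumulated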
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.step()
-model.to(device)
+ train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
+
+ log_string(f"train_perplexity {n_epoch} {train_perplexity}")
-nb_parameters = sum(p.numel() for p in model.parameters())
-log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################
-nb_epochs_finished = 0
-if args.no_checkpoint:
- log_string(f"not trying to load checkpoint.")
+def run_tests(model, quizz_machine, deterministic_synthesis):
+ with torch.autograd.no_grad():
+ model.eval()
-else:
- try:
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- checkpoint = torch.load(checkpoint_name)
- nb_epochs_finished = checkpoint["nb_epochs_finished"]
- model.load_state_dict(checkpoint["model_state"])
- torch.set_rng_state(checkpoint["rng_state"])
- if torch.cuda.is_available():
- torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
+ nb_test_samples, acc_test_loss = 0, 0.0
+ nb_samples_accumulated = 0
- log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
+ for input in quizz_machine.batches(split="test"):
+ input = input.to(device)
- except FileNotFoundError:
- log_string("starting from scratch.")
+ bs = model(mygpt.BracketedSequence(input))
+ output = bs.x
- except:
- log_string("error when loading the checkpoint.")
- exit(1)
+ loss = F.cross_entropy(output.transpose(1, 2), input)
-######################################################################
+ acc_test_loss += loss.item() * input.size(0)
-if args.task == "expr" and args.expr_input_file is not None:
- task.produce_results(
- n_epoch=nb_epochs_finished,
- model=model,
- result_dir=args.result_dir,
- logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
- input_file=args.expr_input_file,
- )
+ nb_test_samples += input.size(0)
- exit(0)
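+        # the returned main test accuracy is used later to select the
+        # weakest model for training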
+ main_test_accuracy = quizz_machine.produce_results(
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ deterministic_synthesis=deterministic_synthesis,
+ )
-######################################################################
+ test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
+ log_string(f"test_perplexity {n_epoch} {test_perplexity}")
-# Compute the entropy of the training tokens
+ model.main_test_accuracy = main_test_accuracy
-token_count = 0
-for input in task.batches(split="train"):
- token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
-token_probas = token_count / token_count.sum()
-entropy = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(entropy)
######################################################################
-# A bit of paranoia never hurts
-def subsets_as_tuples(batches, cs):
- s = set()
- for batch in batches:
- for x in batch:
- s.add(tuple([v.item() for v in x]))
- if len(s) == cs:
- yield s
- s = set()
- yield s
+def create_c_quizzes(
+ models,
+ quizz_machine,
+ nb_for_train=1000,
+ nb_for_test=100,
+ min_ave_seq_logproba=None,
+):
+    # We will store the generated quizzes keyed by the number of
+    # models that predicted them correctly
+ recorded = dict([(n, []) for n in range(len(models) + 1)])
+
+ model_indexes = []
+ sum_logits, sum_nb_c_quizzes = 0, 0
+
+ def nb_generated():
+ return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])
+
+ def nb_validated():
+ return sum(
+ [
+ sum([x.size(0) for x in recorded[n]])
+ for n in range(args.min_to_validate, args.max_to_validate + 1)
+ ]
+ )
+
+ nb_to_create = nb_for_train + nb_for_test
+
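+    # generate until enough quizzes are "validated", i.e. their number of
+    # correct predictions falls in [min_to_validate, max_to_validate]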
+ while nb_validated() < nb_to_create:
+ (
+ new_c_quizzes,
+ nb_correct,
+ ave_seq_logproba,
+ ) = quizz_machine.gang_create_c_quizzes(
+ nb=nb_to_create,
+ nb_models_for_generation=args.nb_models_for_generation,
+ models=models,
+ mode=args.generation_mode,
+ min_ave_seq_logproba=min_ave_seq_logproba,
+ n_epoch=n_epoch,
+ result_dir=args.result_dir,
+ )
+ sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
+ sum_nb_c_quizzes += new_c_quizzes.size(0)
-nb_test, nb_in_train = 0, 0
-for test_subset in subsets_as_tuples(task.batches(split="test"), 25000):
- in_train = set()
- for train_subset in subsets_as_tuples(task.batches(split="train"), 25000):
- in_train.update(test_subset.intersection(train_subset))
- nb_in_train += len(in_train)
- nb_test += len(test_subset)
+ if args.dirty_debug:
+ nb_correct = torch.randint(
+ len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
+ )
-log_string(
- f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
-)
+ for n in range(nb_correct.max() + 1):
+ recorded[n].append(new_c_quizzes[nb_correct == n].clone())
-assert (
- nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
-), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
+ log_string(
+ f"keep c_quizzes {nb_validated()*100/nb_generated():.02f}% kept total {nb_validated()} / {nb_to_create}"
+ )
-##############################
+    # concatenate and shuffle; iterate over a copy of the keys since
+    # empty entries are deleted along the way
+    for n in list(recorded.keys()):
+ if len(recorded[n]) > 0:
+ q = torch.cat(recorded[n], dim=0)
+ q = q[torch.randperm(q.size(0), device=q.device)]
+ recorded[n] = q
+ else:
+ del recorded[n]
+
+    new_c_quizzes = torch.cat(
+        [
+            recorded[n]
+            for n in range(args.min_to_validate, args.max_to_validate + 1)
+            if n in recorded
+        ],
+        dim=0,
+    )
-if args.learning_rate_schedule == "cos":
- learning_rate_schedule = {}
- for n_epoch in range(args.nb_epochs):
- u = n_epoch / args.nb_epochs * math.pi
- learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
- u = {
- int(k): float(v)
- for k, v in [
- tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
+ new_c_quizzes = new_c_quizzes[
+ torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
+ : nb_for_train + nb_for_test
]
- }
+ ]
- learning_rate_schedule = {}
- learning_rate = args.learning_rate
- for n_epoch in range(args.nb_epochs):
- if n_epoch in u:
- learning_rate = u[n_epoch]
- learning_rate_schedule[n_epoch] = learning_rate
+ quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+ quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
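+    # save a sample of quizzes for each count of correct predictions,
+    # tagging the validated ones in the file name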
+ for n in recorded.keys():
+ s = (
+ "_validated"
+ if n >= args.min_to_validate and n <= args.max_to_validate
+ else ""
+ )
+ quizz_machine.problem.save_quizzes(
+ recorded[n][:72],
+ args.result_dir,
+ f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
+ )
-##############################
+ return sum_logits / sum_nb_c_quizzes
-nb_samples_seen = 0
-if nb_epochs_finished >= nb_epochs:
- task.produce_results(
- n_epoch=nb_epochs_finished,
- model=model,
- result_dir=args.result_dir,
- logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
- )
+######################################################################
-for n_epoch in range(nb_epochs_finished, nb_epochs):
- learning_rate = learning_rate_schedule[n_epoch]
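+# create the gang of GPTs; they share the same architecture but are
+# trained and evaluated independently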
+models = []
- log_string(f"learning_rate {learning_rate}")
+for k in range(args.nb_gpts):
+ model = mygpt.MyGPT(
+ vocabulary_size=vocabulary_size,
+ dim_model=args.dim_model,
+ dim_keys=args.dim_keys,
+ dim_hidden=args.dim_hidden,
+ nb_heads=args.nb_heads,
+ nb_blocks=args.nb_blocks,
+ causal=True,
+ dropout=args.dropout,
+ ).to(device)
- if args.optim == "sgd":
- optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
- elif args.optim == "adam":
- optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
- elif args.optim == "adamw":
- optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
- else:
- raise ValueError(f"Unknown optimizer {args.optim}.")
+ model.main_test_accuracy = 0.0
+ model.id = k
- model.train()
+ models.append(model)
- nb_train_samples, acc_train_loss = 0, 0.0
- for input in task.batches(split="train"):
- input = input.to(device)
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_train_loss += loss.item() * input.size(0)
- nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
+nb_parameters = sum(p.numel() for p in models[0].parameters())
+log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
+######################################################################
- with torch.autograd.no_grad():
- model.eval()
+min_ave_seq_logproba = None
- nb_test_samples, acc_test_loss = 0, 0.0
+for n_epoch in range(args.nb_epochs):
+ log_string(f"--- epoch {n_epoch} ----------------------------------------")
- for input in task.batches(split="test"):
- input = input.to(device)
+ a = [(model.id, float(model.main_test_accuracy)) for model in models]
+ a.sort(key=lambda p: p[0])
+ s = " ".join([f"{p[1]*100:.02f}%" for p in a])
+ log_string(f"current accuracies {s}")
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_test_loss += loss.item() * input.size(0)
- nb_test_samples += input.size(0)
+    # select the model with the lowest accuracy
+ models.sort(key=lambda model: model.main_test_accuracy)
+ model = models[0]
- train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+ log_string(
+ f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+ )
- log_string(
- f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
- )
+ # improve it
+ one_epoch(model, quizz_machine)
- task.produce_results(
- n_epoch=n_epoch,
- model=model,
- result_dir=args.result_dir,
- logger=log_string,
- deterministic_synthesis=args.deterministic_synthesis,
+ quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+
+ log_string(
+ f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
+ )
+
+ # test it
+ run_tests(model, quizz_machine, deterministic_synthesis=False)
+
+ log_string(
+ f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
+ )
+
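+    # once all the models reach the accuracy threshold, create a new
+    # batch of culture quizzes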
+ if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
+ ave_seq_logproba = create_c_quizzes(
+ models,
+ quizz_machine,
+ nb_for_train=nb_new_c_quizzes_for_train,
+ nb_for_test=nb_new_c_quizzes_for_test,
+ min_ave_seq_logproba=min_ave_seq_logproba,
)
- checkpoint = {
- "nb_epochs_finished": n_epoch + 1,
- "model_state": model.state_dict(),
- "rng_state": torch.get_rng_state(),
- }
+ # We keep the first average logits as a reference
+ # if min_ave_seq_logproba is None:
+ # min_ave_seq_logproba = ave_seq_logproba
+ # else:
+ # log_string(
+ # f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+ # )
- if torch.cuda.is_available():
- checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+ # We update everyone
+ for model in models:
+ run_tests(model, quizz_machine, deterministic_synthesis=False)
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- torch.save(checkpoint, checkpoint_name)
- log_string(f"saved checkpoint {checkpoint_name}")
######################################################################