from torch.nn import functional as F
import ffutils
-import mygpt
-import sky, wireworld, quizz_machine
-
-# world quizzes vs. culture quizzes
-
-######################################################################
-nb_new_c_quizzes_for_train = 1000
-nb_new_c_quizzes_for_test = 100
+import mygpt
+import sky, grids, quiz_machine
-######################################################################
+import threading
-if torch.cuda.is_available():
- device = torch.device("cuda")
- torch.backends.cuda.matmul.allow_tf32 = True
-else:
- device = torch.device("cpu")
+import torch.multiprocessing as mp
######################################################################
parser = argparse.ArgumentParser(
- description="An implementation of GPT with cache.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--seed", type=int, default=0)
-parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
+parser.add_argument("--resume", action="store_true", default=False)
+
+parser.add_argument("--max_percents_of_test_in_train", type=int, default=-1)
########################################
parser.add_argument("--nb_test_samples", type=int, default=None)
-parser.add_argument("--learning_rate", type=float, default=1e-3)
+parser.add_argument("--learning_rate", type=float, default=5e-4)
########################################
parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--reverse_cleanup", action="store_true", default=False)
+parser.add_argument("--problem", type=str, default="grids")
-parser.add_argument("--problem", type=str, default="sky")
+parser.add_argument("--nb_threads", type=int, default=1)
-parser.add_argument("--nb_gpts", type=int, default=5)
+parser.add_argument("--gpus", type=str, default="all")
-parser.add_argument("--nb_models_for_generation", type=int, default=1)
+parser.add_argument("--nb_gpts", type=int, default=5)
-parser.add_argument("--generation_mode", type=str, default="groupthink")
+parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
-parser.add_argument("--min_to_validate", type=int, default=4)
+parser.add_argument("--proba_understands", type=float, default=0.99)
-parser.add_argument("--max_to_validate", type=int, default=4)
+parser.add_argument("--proba_not_understands", type=float, default=0.5)
-parser.add_argument("--accuracy_to_make_c_quizzes", type=float, default=0.975)
+parser.add_argument("--generation_temperature", type=float, default=2.0)
parser.add_argument("--dirty_debug", action="store_true", default=False)
+######################################################################
+
+grids_tasks = ", ".join(
+ [x.__name__.removeprefix("task_") for x in grids.Grids().all_tasks]
+)
+
+parser.add_argument(
+ "--grids_tasks",
+ type=str,
+ default=None,
+ help="A comma-separated subset of: " + grids_tasks + ", or None for all.",
+)
+
+######################################################################
+
parser.add_argument("--sky_height", type=int, default=6)
parser.add_argument("--sky_width", type=int, default=8)
######################################################################
-if args.dirty_debug:
- args.accuracy_to_make_c_quizzes = 0.0
- nb_new_c_quizzes_for_train = 100
- nb_new_c_quizzes_for_test = 10
-
-######################################################################
-
default_args = {
"model": "37M",
- "batch_size": 100,
+ "batch_size": 25,
"nb_train_samples": 100000,
"nb_test_samples": 10000,
}
######################################################################
-try:
- os.mkdir(args.result_dir)
-except FileExistsError:
- print(f"result directory {args.result_dir} already exists")
- exit(1)
+if args.resume:
+ assert os.path.isdir(args.result_dir)
+
+else:
+ try:
+ os.mkdir(args.result_dir)
+ except FileExistsError:
+ print(f"result directory {args.result_dir} already exists")
+ exit(1)
log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
sys.stdout.flush()
+now = time.strftime("%Y%m%d-%H%M%S", time.localtime())
+
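+# Keep a timestamped archive of the source files used for this run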
+os.system(f"tar zcvf {args.result_dir}/src-{now}.tgz *.py")
+
log_string(f"argv {' '.join(sys.argv)}")
for n in vars(args):
######################################################################
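+# Build the list of devices to use: every visible CUDA device, or the
+# comma-separated indices given with --gpus. The first one is the main device,
+# the others are used to train additional models in parallel (one thread per
+# device).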
+if args.gpus == "all":
+ gpus_idx = range(torch.cuda.device_count())
+else:
+ gpus_idx = [int(k) for k in args.gpus.split(",")]
+
+gpus = [torch.device(f"cuda:{n}") for n in gpus_idx]
+
+if torch.cuda.is_available():
+ main_device = gpus[0]
+else:
+    assert len(gpus) == 0, "--gpus is set but CUDA is not available"
+ main_device = torch.device("cpu")
+
if args.dirty_debug:
args.nb_train_samples = 2500
args.nb_test_samples = 100
nb_birds=args.sky_nb_birds,
nb_iterations=args.sky_nb_iterations,
speed=args.sky_speed,
+ max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+ chunk_size=100,
+ nb_threads=args.nb_threads,
+ )
+ back_accuracy = False
+elif args.problem == "grids":
+ problem = grids.Grids(
+ max_nb_cached_chunks=len(gpus) * args.nb_train_samples // 100,
+ chunk_size=100,
+ nb_threads=args.nb_threads,
+ tasks=args.grids_tasks,
)
-elif args.problem == "wireworld":
- problem = wireworld.Wireworld(height=8, width=10, nb_iterations=2, speed=5)
+ back_accuracy = True
else:
raise ValueError
-quizz_machine = quizz_machine.QuizzMachine(
+problem.save_some_examples(args.result_dir)
+
+quiz_machine = quiz_machine.QuizMachine(
problem=problem,
nb_train_samples=args.nb_train_samples,
nb_test_samples=args.nb_test_samples,
+ back_accuracy=back_accuracy,
batch_size=args.physical_batch_size,
result_dir=args.result_dir,
logger=log_string,
- device=device,
+ device=main_device,
)
######################################################################
-log_string(f"device {device}")
+log_string(f"main_device {main_device} gpus {[ str(g) for g in gpus]}")
-vocabulary_size = quizz_machine.vocabulary_size()
+vocabulary_size = quiz_machine.vocabulary_size()
log_string(f"vocabulary_size {vocabulary_size}")
######################################################################
-# Compute the entropy of the training tokens
-token_count = 0
-for input in quizz_machine.batches(split="train", desc="train-entropy"):
- token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
- (0, 1)
- )
-token_probas = token_count / token_count.sum()
-entropy = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(entropy)
+def run_tests(model, quiz_machine, deterministic_synthesis, local_device=main_device):
+ with torch.autograd.no_grad():
+ model.eval().to(local_device)
-######################################################################
-# A bit of paranoia never hurts
+ nb_test_samples, acc_test_loss = 0, 0.0
+ nb_samples_accumulated = 0
-if args.max_percents_of_test_in_train >= 0:
+ for input in quiz_machine.batches(model, split="test"):
+ input = input.to(local_device)
- def subsets_as_tuples(batches, cs):
- s = set()
- for batch in batches:
- for x in batch:
- s.add(tuple([v.item() for v in x]))
- if len(s) == cs:
- yield s
- s = set()
- yield s
+ bs = model(mygpt.BracketedSequence(input))
+ output = bs.x
- nb_test, nb_in_train = 0, 0
- for test_subset in subsets_as_tuples(
- quizz_machine.batches(split="test", desc="test-check"), 25000
- ):
- in_train = set()
- for train_subset in subsets_as_tuples(
- quizz_machine.batches(split="train", desc="train-check"), 25000
- ):
- in_train.update(test_subset.intersection(train_subset))
- nb_in_train += len(in_train)
- nb_test += len(test_subset)
+ loss = F.cross_entropy(output.transpose(1, 2), input)
- log_string(
- f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
- )
+ acc_test_loss += loss.item() * input.size(0)
- assert (
- nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
- ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
+ nb_test_samples += input.size(0)
-##############################
+ test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+ log_string(f"test_perplexity {n_epoch} model {model.id} {test_perplexity}")
-def one_epoch(model, quizz_machine):
- optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
+ model.main_test_accuracy = quiz_machine.produce_results(
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ deterministic_synthesis=deterministic_synthesis,
+ )
- model.train()
+
+def one_epoch(model, quiz_machine, local_device=main_device):
+ model.to(local_device).train()
+
+ optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
nb_train_samples, acc_train_loss = 0, 0.0
- for input in quizz_machine.batches(split="train"):
- input = input.to(device)
+ for input in quiz_machine.batches(model, split="train"):
+ input = input.to(local_device)
if nb_train_samples % args.batch_size == 0:
optimizer.zero_grad()
train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- log_string(f"train_perplexity {n_epoch} {train_perplexity}")
+ log_string(f"train_perplexity {n_epoch} model {model.id} {train_perplexity}")
+    run_tests(
+        model, quiz_machine, deterministic_synthesis=False, local_device=local_device
+    )
-######################################################################
-
+ model.to(main_device)
-def run_tests(model, quizz_machine, deterministic_synthesis):
- with torch.autograd.no_grad():
- model.eval()
- nb_test_samples, acc_test_loss = 0, 0.0
- nb_samples_accumulated = 0
+######################################################################
- for input in quizz_machine.batches(split="test"):
- input = input.to(device)
- bs = model(mygpt.BracketedSequence(input))
- output = bs.x
-
- loss = F.cross_entropy(output.transpose(1, 2), input)
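+# Each row of logproba holds the log-probabilities that the different models
+# assign to the solution of one candidate quiz (as computed by
+# logproba_of_solutions). A quiz is validated when, after sorting, the weakest
+# model is below log(proba_not_understands) while the second weakest, hence
+# every other model, is above log(proba_understands): at least one model fails
+# it and all the others solve it confidently. E.g. with the default thresholds
+# 0.5 and 0.99, the row [log 0.3, log 0.995, log 0.999] is validated.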
+def standard_validity(logproba):
+ l = logproba.sort(dim=-1).values
+ return (l[:, 0] < math.log(args.proba_not_understands)) & (
+ l[:, 1] > math.log(args.proba_understands)
+ )
- acc_test_loss += loss.item() * input.size(0)
- nb_test_samples += input.size(0)
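+# Filter the recorded (quizzes, logprobas) pairs with the given criteria and
+# concatenate whatever passes, or return (None, None) if nothing does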
+def valid_quizzes_and_logprobas(recorded, criteria):
+ validated_quizzes, validated_logprobas = [], []
+ for q, lp in recorded:
+ validated_indices = criteria(lp)
+ validated_quizzes.append(q[validated_indices])
+ validated_logprobas.append(lp[validated_indices])
- main_test_accuracy = quizz_machine.produce_results(
- n_epoch=n_epoch,
- model=model,
- result_dir=args.result_dir,
- deterministic_synthesis=deterministic_synthesis,
+ if len(validated_quizzes) > 0:
+ return torch.cat(validated_quizzes, dim=0), torch.cat(
+ validated_logprobas, dim=0
)
-
- test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
-
- log_string(f"test_perplexity {n_epoch} {test_perplexity}")
-
- model.main_test_accuracy = main_test_accuracy
+ else:
+ return None, None
######################################################################
-def create_c_quizzes(
- models,
- quizz_machine,
- nb_for_train=1000,
- nb_for_test=100,
- min_ave_seq_logproba=None,
-):
- # We will store the generated quizzes for each number of
- # correct prediction
- recorded = dict([(n, []) for n in range(len(models) + 1)])
-
- model_indexes = []
- sum_logits, sum_nb_c_quizzes = 0, 0
-
- def nb_generated():
- return sum([sum([x.size(0) for x in recorded[n]]) for n in recorded.keys()])
-
- def nb_validated():
- return sum(
- [
- sum([x.size(0) for x in recorded[n]])
- for n in range(args.min_to_validate, args.max_to_validate + 1)
- ]
- )
-
+def create_c_quizzes(models, quiz_machine, nb_for_train=1000, nb_for_test=100):
nb_to_create = nb_for_train + nb_for_test
- warnings.warn(
- f"{args.nb_gpts=} {args.nb_models_for_generation=} {args.min_to_validate=} {args.max_to_validate=}"
- )
+ recorded_quizzes_logprobas = []
- while nb_validated() < nb_to_create:
- (
- new_c_quizzes,
- nb_correct,
- ave_seq_logproba,
- ) = quizz_machine.gang_create_c_quizzes(
- nb=nb_to_create,
- nb_models_for_generation=args.nb_models_for_generation,
- models=models,
- mode=args.generation_mode,
- reverse_cleanup=args.reverse_cleanup,
- min_ave_seq_logproba=min_ave_seq_logproba,
- n_epoch=n_epoch,
- result_dir=args.result_dir,
+ nb_validated = 0
+
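+    # Generation loop: pick one of the models at random, let it generate
+    # candidate quizzes, discard the trivial ones, record the log-probabilities
+    # that every model assigns to their solutions, and repeat until enough
+    # candidates pass the validation criterion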
+ while nb_validated < nb_to_create:
+        model_for_generation = models[torch.randint(len(models), (1,)).item()]
+
+ c_quizzes = quiz_machine.generate_quizzes(
+ nb_to_create,
+ model_for_generation=model_for_generation,
+ temperature=args.generation_temperature,
)
- sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
- sum_nb_c_quizzes += new_c_quizzes.size(0)
+ c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]
- if args.dirty_debug:
- nb_correct = torch.randint(
- len(models) + 1, nb_correct.size(), device=new_c_quizzes.device
+ if c_quizzes.size(0) > 0:
+ logproba = quiz_machine.logproba_of_solutions(models, c_quizzes)
+ recorded_quizzes_logprobas.append((c_quizzes, logproba))
+
+ validated_quizzes, validated_logprobas = valid_quizzes_and_logprobas(
+ recorded_quizzes_logprobas, standard_validity
)
- for n in range(nb_correct.max() + 1):
- recorded[n].append(new_c_quizzes[nb_correct == n].clone())
+ if validated_quizzes is not None:
+ nb_validated = validated_quizzes.size(0)
- nv = F.one_hot(nb_correct, num_classes=len(models) + 1).sum(0)
- nv = " ".join([str(x.item()) for x in nv])
+ log_string(
+ f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
+ )
- log_string(f"keep c_quizzes kept {nv} total {nb_validated()} / {nb_to_create}")
+ # store the new c_quizzes which have been validated
- # concatenate and shuffle
- for n in recorded.keys():
- if len(recorded[n]) > 0:
- q = torch.cat(recorded[n], dim=0)
- q = q[torch.randperm(q.size(0), device=q.device)]
- recorded[n] = q
- else:
- del recorded[n]
+ quiz_machine.reverse_random_half_in_place(validated_quizzes)
+ quiz_machine.store_c_quizzes(validated_quizzes[:nb_for_train], for_train=True)
+ quiz_machine.store_c_quizzes(
+ validated_quizzes[nb_for_train:nb_to_create], for_train=False
+ )
+
+ ######################################################################
+ # save the log probas
- new_c_quizzes = torch.cat(
- [recorded[n] for n in range(args.min_to_validate, args.max_to_validate + 1)],
- dim=0,
+ file_name = os.path.join(
+ args.result_dir, f"culture_c_quiz_all_{n_epoch:04d}_logp.dat"
)
- new_c_quizzes = new_c_quizzes[
- torch.randperm(new_c_quizzes.size(0), device=new_c_quizzes.device)[
- : nb_for_train + nb_for_test
- ]
- ]
+ with open(file_name, "w") as logp_file:
+ for _, ll in recorded_quizzes_logprobas:
+ for l in ll:
+ s = " ".join([str(x.item()) for x in l])
+ logp_file.write(s + "\n")
- quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
- quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+ ######################################################################
+ # save images with their logprobas
- for n in recorded.keys():
- s = (
- "_validated"
- if n >= args.min_to_validate and n <= args.max_to_validate
- else ""
- )
- quizz_machine.problem.save_quizzes(
- recorded[n][:72],
- args.result_dir,
- f"culture_c_quiz_{n_epoch:04d}_N{n}{s}",
- )
+ vq = validated_quizzes[:72]
+ vl = validated_logprobas[:72]
+
+ if vq.size(0) > 0:
+ prefix = f"culture_c_quiz_{n_epoch:04d}"
+
+ file_name = os.path.join(args.result_dir, prefix + "_logp.dat")
+ with open(file_name, "w") as logp_file:
+ for l in vl:
+ s = " ".join([str(x.item()) for x in l])
+ logp_file.write(s + "\n")
- return sum_logits / sum_nb_c_quizzes
+ quiz_machine.save_quiz_illustrations(args.result_dir, prefix, vq)
######################################################################
models = []
for k in range(args.nb_gpts):
+ log_string(f"creating model {k} and its w_quizzes")
model = mygpt.MyGPT(
vocabulary_size=vocabulary_size,
dim_model=args.dim_model,
nb_blocks=args.nb_blocks,
causal=True,
dropout=args.dropout,
- ).to(device)
+ ).to(main_device)
model.main_test_accuracy = 0.0
model.id = k
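+    # Each model gets its own train / test world quizzes, with a random half
+    # of them reversed in place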
+ model.train_w_quizzes = quiz_machine.generate_token_sequences(args.nb_train_samples)
+ quiz_machine.reverse_random_half_in_place(model.train_w_quizzes)
+ model.test_w_quizzes = quiz_machine.generate_token_sequences(args.nb_test_samples)
+ quiz_machine.reverse_random_half_in_place(model.test_w_quizzes)
+
models.append(model)
+######################################################################
+
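+# On --resume, reload each model's weights and its recorded test accuracy, as
+# well as the current set of c_quizzes, from the result directory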
+if args.resume:
+ try:
+ for model in models:
+ filename = f"gpt_{model.id:03d}.pth"
+
+ try:
+ d = torch.load(os.path.join(args.result_dir, filename))
+ model.load_state_dict(d[0])
+ model.main_test_accuracy = d[1]
+ log_string(f"successfully loaded {filename}")
+ except FileNotFoundError:
+ log_string(f"cannot find {filename}")
+
+ try:
+ filename = "c_quizzes.pth"
+ quiz_machine.load_c_quizzes(os.path.join(args.result_dir, filename))
+ log_string(f"successfully loaded {filename}")
+ except FileNotFoundError:
+ log_string(f"cannot find {filename}")
+
+    except Exception:
+ log_string(f"error when loading {filename}.")
+ exit(1)
+
+######################################################################
nb_parameters = sum(p.numel() for p in models[0].parameters())
log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
######################################################################
-min_ave_seq_logproba = None
+# Compute the entropy of the training tokens
-for n_epoch in range(args.nb_epochs):
- log_string(f"--- epoch {n_epoch} ----------------------------------------")
+token_count = 0
+for input in quiz_machine.batches(models[0], split="train", desc="train-entropy"):
+ token_count += F.one_hot(input, num_classes=quiz_machine.vocabulary_size()).sum(
+ (0, 1)
+ )
+token_probas = token_count / token_count.sum()
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
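+# i.e. the perplexity of a model that predicts every token from its marginal
+# frequency in the training set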
+
+######################################################################
+# A bit of paranoia never hurts
+
+if args.max_percents_of_test_in_train >= 0:
- a = [(model.id, float(model.main_test_accuracy)) for model in models]
- a.sort(key=lambda p: p[0])
- s = " ".join([f"{p[1]*100:.02f}%" for p in a])
- log_string(f"current accuracies {s}")
+ def subsets_as_tuples(batches, cs):
+ s = set()
+ for batch in batches:
+ for x in batch:
+ s.add(tuple([v.item() for v in x]))
+ if len(s) == cs:
+ yield s
+ s = set()
+ yield s
- # select the model with lowest accuracy
- models.sort(key=lambda model: model.main_test_accuracy)
- model = models[0]
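+    # Stream both splits as sets of sample tuples, in chunks of 25000, and
+    # count how many test samples also occur in the train set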
+ nb_test, nb_in_train = 0, 0
+ for test_subset in subsets_as_tuples(
+ quiz_machine.batches(models[0], split="test", desc="test-check"), 25000
+ ):
+ in_train = set()
+ for train_subset in subsets_as_tuples(
+ quiz_machine.batches(models[0], split="train", desc="train-check"), 25000
+ ):
+ in_train.update(test_subset.intersection(train_subset))
+ nb_in_train += len(in_train)
+ nb_test += len(test_subset)
log_string(
- f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+ f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
)
- # improve it
- one_epoch(model, quizz_machine)
+ assert (
+ nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+ ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
+
+######################################################################
- quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
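+# Numbers of new c_quizzes to add to the train and test sets each time they
+# are generated (2% of the corresponding set sizes)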
+nb_new_c_quizzes_for_train = args.nb_train_samples // 50
+nb_new_c_quizzes_for_test = args.nb_test_samples // 50
- log_string(
- f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
- )
+log_string(
+ f"nb_new_c_quizzes_for_train {nb_new_c_quizzes_for_train} nb_new_c_quizzes_for_test {nb_new_c_quizzes_for_test}"
+)
- # test it
- run_tests(model, quizz_machine, deterministic_synthesis=False)
+######################################################################
- log_string(
- f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
- )
+if args.dirty_debug:
+ args.accuracy_to_make_c_quizzes = 0.0
+ args.nb_gpts = 2
+ nb_new_c_quizzes_for_train = 100
+ nb_new_c_quizzes_for_test = 10
+
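+    # Relaxed criterion for debugging: keep a quiz as soon as the weakest
+    # model gives its solution a probability below 0.5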
+ def standard_validity(logproba):
+ l = logproba.sort(dim=-1).values
+ return l[:, 0] < math.log(0.5)
+
+
+######################################################################
+
+for n_epoch in range(args.nb_epochs):
+ log_string(f"--- epoch {n_epoch} ----------------------------------------")
+
+ cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
+ log_string(f"current_test_accuracies {cta}")
+
+ ##################################################
+    # Select the weakest models (one per GPU), improve them, and evaluate them
+
+ ranked_models = sorted(models, key=lambda m: float(m.main_test_accuracy))
+
+ weakest_models = ranked_models[: len(gpus)]
+
+ threads = []
+
+ for gpu, model in zip(gpus, weakest_models):
+ log_string(f"training model {model.id}")
+
+ t = threading.Thread(
+ target=one_epoch, daemon=True, args=(model, quiz_machine, gpu)
+ )
+
+ threads.append(t)
+
+ t.start()
+
+ for t in threads:
+ t.join()
+
+ # Save the models to disk
+
+ for model in weakest_models:
+ filename = f"gpt_{model.id:03d}.pth"
+ torch.save(
+ (model.state_dict(), model.main_test_accuracy),
+ os.path.join(args.result_dir, filename),
+ )
+ log_string(f"wrote {filename}")
+
+ # Renew the training samples
+
+ for model in weakest_models:
+ quiz_machine.renew_w_quizzes(model, args.nb_train_samples)
+
+ ##################################################
+    # If all the models are good enough, generate new c_quizzes, store them
+    # in the quiz machine, and save them to disk
if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
- ave_seq_logproba = create_c_quizzes(
+ create_c_quizzes(
models,
- quizz_machine,
+ quiz_machine,
nb_for_train=nb_new_c_quizzes_for_train,
nb_for_test=nb_new_c_quizzes_for_test,
- min_ave_seq_logproba=min_ave_seq_logproba,
)
- # We keep the first average logits as a reference
- # if min_ave_seq_logproba is None:
- # min_ave_seq_logproba = ave_seq_logproba
- # else:
- # log_string(
- # f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
- # )
-
- # We update everyone
- for model in models:
- run_tests(model, quizz_machine, deterministic_synthesis=False)
-
+ filename = "c_quizzes.pth"
+ quiz_machine.save_c_quizzes(os.path.join(args.result_dir, filename))
+ log_string(f"wrote {filename}")
######################################################################