# Written by Francois Fleuret <francois@fleuret.org>
-# torch.backends.cuda.matmul.allow_tf32
-# torch.autocast(torch.bfloat16)
-
-import math, sys, argparse, time, tqdm, itertools, os
+import math, sys, argparse, time, tqdm, os, datetime, warnings
import torch, torchvision
from torch import nn
from torch.nn import functional as F
-import mygpt, tensorstack
+import ffutils
+import mygpt, quizz_machine
+
+# world quizzes vs. culture quizzes
+
+######################################################################
+
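+# Every model must reach this test accuracy before the "culture" quizzes
+# (c_quizzes) are generated; each generation round then adds the given numbers
+# of new c_quizzes to the train and test sets.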
+accuracy_to_make_c_quizzes = 0.975
+nb_new_c_quizzes_for_train = 1000
+nb_new_c_quizzes_for_test = 100
######################################################################
######################################################################
parser = argparse.ArgumentParser(
- description="An implementation of GPT with cache to solve a toy geometric reasoning task."
+ description="An implementation of GPT with cache.",
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
-parser.add_argument("--log_filename", type=str, default="train.log")
+parser.add_argument("--log_filename", type=str, default="train.log", help=" ")
-parser.add_argument("--result_dir", type=str, default="results_default")
+parser.add_argument("--result_dir", type=str, default=None)
parser.add_argument("--seed", type=int, default=0)
-parser.add_argument("--nb_epochs", type=int, default=25)
+parser.add_argument("--max_percents_of_test_in_train", type=int, default=1)
-parser.add_argument("--batch_size", type=int, default=100)
+########################################
-parser.add_argument("--data_size", type=int, default=-1)
+parser.add_argument("--nb_epochs", type=int, default=10000)
-parser.add_argument("--optim", type=str, default="adam")
+parser.add_argument("--batch_size", type=int, default=None)
-parser.add_argument("--learning_rate", type=float, default=1e-3)
+parser.add_argument("--physical_batch_size", type=int, default=None)
-parser.add_argument(
- "--learning_rate_schedule", type=str, default="10: 2e-4,20: 4e-5,30: 8e-6"
-)
+parser.add_argument("--nb_train_samples", type=int, default=None)
+
+parser.add_argument("--nb_test_samples", type=int, default=None)
+
+parser.add_argument("--learning_rate", type=float, default=1e-4)
-parser.add_argument("--dim_model", type=int, default=512)
+########################################
-parser.add_argument("--dim_keys", type=int, default=64)
+parser.add_argument("--model", type=str, default=None)
-parser.add_argument("--dim_hidden", type=int, default=2048)
+parser.add_argument("--dim_model", type=int, default=None)
-parser.add_argument("--nb_heads", type=int, default=8)
+parser.add_argument("--dim_keys", type=int, default=None)
-parser.add_argument("--nb_blocks", type=int, default=12)
+parser.add_argument("--dim_hidden", type=int, default=None)
+
+parser.add_argument("--nb_heads", type=int, default=None)
+
+parser.add_argument("--nb_blocks", type=int, default=None)
parser.add_argument("--dropout", type=float, default=0.1)
+########################################
+
parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--no_checkpoint", action="store_true", default=False)
+parser.add_argument("--nb_gpts", type=int, default=5)
+
+parser.add_argument("--dirty_debug", action="store_true", default=False)
-parser.add_argument("--overwrite_results", action="store_true", default=False)
+######################################################################
+
+args = parser.parse_args()
-parser.add_argument("--checkpoint_name", type=str, default="checkpoint.pth")
+if args.result_dir is None:
+    args.result_dir = "results_culture"
-##############################
-# picoclvr options
+######################################################################
-parser.add_argument("--nb_colors", type=int, default=5)
+if args.dirty_debug:
+ accuracy_to_make_c_quizzes = 0.0
+ nb_new_c_quizzes_for_train = 100
+ nb_new_c_quizzes_for_test = 10
-parser.add_argument("--height", type=int, default=12)
+######################################################################
-parser.add_argument("--width", type=int, default=16)
+default_args = {
+ "model": "37M",
+ "batch_size": 100,
+ "nb_train_samples": 250000,
+ "nb_test_samples": 10000,
+}
-parser.add_argument("--prune_properties", type=str, default="none")
+for k, v in default_args.items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
######################################################################
-args = parser.parse_args()
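+# Architecture presets, keyed by approximate parameter count; --model selects
+# one, and any architecture option left at None is filled in from that preset.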
+default_model_args = {
+ "17K": {
+ "dim_model": 32,
+ "dim_keys": 32,
+ "dim_hidden": 32,
+ "nb_heads": 2,
+ "nb_blocks": 2,
+ },
+ "4M": {
+ "dim_model": 256,
+ "dim_keys": 32,
+ "dim_hidden": 1024,
+ "nb_heads": 4,
+ "nb_blocks": 6,
+ },
+ "37M": {
+ "dim_model": 512,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 12,
+ },
+ "122M": {
+ "dim_model": 768,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 24,
+ },
+ "352M": {
+ "dim_model": 1024,
+ "dim_keys": 64,
+ "dim_hidden": 2048,
+ "nb_heads": 8,
+ "nb_blocks": 48,
+ },
+}
+
+if args.model in default_model_args:
+ for k, v in default_model_args[args.model].items():
+ if getattr(args, k) is None:
+ setattr(args, k, v)
+else:
+ raise ValueError(f"Unknown model {args.model}")
-assert args.prune_properties in {"none", "train+eval", "eval"}
+######################################################################
try:
os.mkdir(args.result_dir)
except FileExistsError:
- if not args.overwrite_results:
- print(f"result directory {args.result_dir} already exists")
- exit(1)
+ print(f"result directory {args.result_dir} already exists")
+ exit(1)
-log_file = open(os.path.join(args.result_dir, args.log_filename), "w")
+log_file = open(os.path.join(args.result_dir, args.log_filename), "a")
if args.seed >= 0:
# torch.backends.cudnn.deterministic = True
sys.stdout.flush()
+log_string(f"argv {' '.join(sys.argv)}")
+
for n in vars(args):
log_string(f"args.{n} {getattr(args, n)}")
+
######################################################################
+if args.dirty_debug:
+ args.nb_train_samples = 2500
+ args.nb_test_samples = 100
-def masked_inplace_autoregression(
- model, batch_size, input, ar_mask, forbidden_tokens=None, device=torch.device("cpu")
-):
+if args.physical_batch_size is None:
+ args.physical_batch_size = args.batch_size
+else:
+ assert args.batch_size % args.physical_batch_size == 0
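+
+# A logical batch of args.batch_size samples may be split into smaller physical
+# batches to fit in memory; gradients are accumulated over them (see one_epoch
+# below).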
- for input, ar_mask in zip(input.split(batch_size), ar_mask.split(batch_size)):
- i = (ar_mask.sum(0) > 0).nonzero()
- if i.min() > 0:
- model(
- mygpt.BracketedSequence(input, 0, i.min())
- ) # Needed to initialize the model's cache
- for s in range(i.min(), i.max() + 1):
- output = model(mygpt.BracketedSequence(input, s, 1)).x
- logits = output[:, s]
- if forbidden_tokens is not None:
- logits = logits.masked_fill(forbidden_tokens, float("-inf"))
- if args.deterministic_synthesis:
- t_next = logits.argmax(1)
- else:
- dist = torch.distributions.categorical.Categorical(logits=logits)
- t_next = dist.sample()
- input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
+assert args.nb_train_samples % args.batch_size == 0
+assert args.nb_test_samples % args.batch_size == 0
+quizz_machine = quizz_machine.QuizzMachine(
+ nb_train_samples=args.nb_train_samples,
+ nb_test_samples=args.nb_test_samples,
+ batch_size=args.physical_batch_size,
+ result_dir=args.result_dir,
+ logger=log_string,
+ device=device,
+)
######################################################################
+log_string(f"device {device}")
-class Task:
- def batches(self, split="train"):
- pass
-
- def vocabulary_size(self):
- pass
-
- def produce_results(self, n_epoch, model):
- pass
+vocabulary_size = quizz_machine.vocabulary_size()
+log_string(f"vocabulary_size {vocabulary_size}")
######################################################################
-import picoclvr
-
-
-class TaskPicoCLVR(Task):
-
- # Make a tensor from a list of strings
- def tensorize(self, descr):
- token_descr = [s.strip().split(" ") for s in descr]
- l = max([len(s) for s in token_descr])
- token_descr = [s + ["<nul>"] * (l - len(s)) for s in token_descr]
- id_descr = [[self.token2id[u] for u in s] for s in token_descr]
- return torch.tensor(id_descr, device=self.device)
-
- # Make a list of strings from a tensor
- def detensorize(self, x):
- return [" ".join([self.id2token[t.item()] for t in r]) for r in x]
-
- # trim all the tensors in the tuple z to remove as much token from
- # left and right in the first tensor. If z is a tuple, all its
- # elements are trimed according to the triming for the first
- def trim(self, z, token="<nul>"):
- n = self.token2id[token]
- if type(z) == tuple:
- x = z[0]
- i = (1 - (F.pad(x, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
- a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
- return tuple([t[:, a:b] for t in z])
- else:
- i = (1 - (F.pad(z, (1, 1), value=n) == n).min(0).values.long()).cumsum(0)
- a, b = (i == 0).nonzero().max(), (i == i.max()).nonzero().min()
- return z[:, a:b]
-
- ######################
- # Not the cleanest part of the code
-
- # Extract the last image of each sequence, from the last <img>
- # included, and set to <nul> all the tokens from the beginning of
- # that image to the end
- def excise_last_image(self, input):
- t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
- nb_img_tokens = self.height * self.width + 1
-
- input = input.clone()
- t = (input == t_img).long()
- tail_masks = (t.cumsum(dim=1) == t.sum(dim=1, keepdim=True)).long()
- i = (t * tail_masks).nonzero(as_tuple=True)
- j = (
- i[0][:, None],
- i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
- )
- images = self.trim(input[j])
- input[j] = t_nul
- loss_masks = 1 - tail_masks
- input, loss_masks = self.trim((input, loss_masks))
- return input, loss_masks, images
-
- def add_true_image(self, input, images, loss_masks):
- t_nul = self.token2id["<nul>"]
- nb_img_tokens = self.height * self.width + 1
- input = F.pad(input, (0, nb_img_tokens), value=t_nul)
- loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
- t = (input == t_nul).long()
- i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
- j = (
- i[0][:, None],
- i[1][:, None] + torch.arange(nb_img_tokens, device=input.device)[None, :],
- )
- input[j] = images
- loss_masks[j] = 1
- input, loss_masks = self.trim((input, loss_masks))
- return input, loss_masks
-
- def add_generated_image(self, input, loss_masks, model):
- t_img, t_nul = self.token2id["<img>"], self.token2id["<nul>"]
- nb_img_tokens = self.height * self.width + 1
-
- input = F.pad(input, (0, nb_img_tokens), value=t_nul)
- loss_masks = F.pad(loss_masks, (0, nb_img_tokens), value=0)
- t = (input == t_nul).long()
- i = (t.cumsum(dim=1) == 1).nonzero(as_tuple=True)
- input[i] = t_img
-
- j = (
- i[0][:, None],
- i[1][:, None]
- + 1
- + torch.arange(nb_img_tokens - 1, device=input.device)[None, :],
- )
- ar_masks = input.new_zeros(input.size(), dtype=torch.int64)
- ar_masks[j] = 1
- forbidden_tokens = (
- torch.arange(self.vocabulary_size(), device=input.device) == t_nul
- )
- with torch.autograd.no_grad():
- t = model.training
- model.eval()
- masked_inplace_autoregression(
- model,
- self.batch_size,
- input,
- ar_masks,
- forbidden_tokens,
- device=self.device,
- )
- model.train(t)
+# Compute the entropy of the training tokens
- input, loss_masks = self.trim((input, loss_masks))
+token_count = 0
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+ token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+ (0, 1)
+ )
+token_probas = token_count / token_count.sum()
+entropy = -torch.xlogy(token_probas, token_probas).sum()
+train_set_perplexity = math.exp(entropy)
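+# train_set_perplexity = exp(H), where H is the entropy of the marginal token
+# distribution, i.e. the perplexity of a model that only knows token frequencies.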
- return input, loss_masks
+######################################################################
+# A bit of paranoia never hurts
+
+if args.max_percents_of_test_in_train >= 0:
+
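+    # Yield the quiz sequences as sets of token tuples, in chunks of at most
+    # cs distinct sequences, so the test/train overlap check below proceeds
+    # chunk by chunk.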
+ def subsets_as_tuples(batches, cs):
+ s = set()
+ for batch in batches:
+ for x in batch:
+ s.add(tuple([v.item() for v in x]))
+ if len(s) == cs:
+ yield s
+ s = set()
+ yield s
+
+ nb_test, nb_in_train = 0, 0
+ for test_subset in subsets_as_tuples(
+ quizz_machine.batches(split="test", desc="test-check"), 25000
+ ):
+ in_train = set()
+ for train_subset in subsets_as_tuples(
+ quizz_machine.batches(split="train", desc="train-check"), 25000
+ ):
+ in_train.update(test_subset.intersection(train_subset))
+ nb_in_train += len(in_train)
+ nb_test += len(test_subset)
- ######################
+ log_string(
+ f"data_check {nb_in_train*100/nb_test:.02f}% ({nb_in_train}/{nb_test}) of test samples are in the train set"
+ )
- def __init__(
- self,
- batch_size,
- height,
- width,
- nb_colors=5,
- device=torch.device("cpu"),
- pruner_train=None,
- pruner_eval=None,
- ):
- def generate_descr(nb, cache_suffix, pruner):
- return picoclvr.generate(
- nb,
- height=self.height,
- width=self.width,
- nb_colors=nb_colors,
- pruner=pruner,
- )
+ assert (
+ nb_in_train <= args.max_percents_of_test_in_train * nb_test / 100
+ ), f"More than {args.max_percents_of_test_in_train}% of test samples are in the train set"
- self.height = height
- self.width = width
- self.batch_size = batch_size
- self.device = device
- nb = args.data_size if args.data_size > 0 else 250000
- self.pruner_train = pruner_train
- self.pruner_eval = pruner_eval
-
- param = {
- "nb": nb,
- "height": height,
- "width": width,
- "nb_colors": nb_colors,
- "batch_size": batch_size,
- "rng_state": list(torch.get_rng_state()),
- }
-
- log_string(f"generating {nb} samples (can take some time)")
- self.train_descr = generate_descr(
- (nb * 4) // 5, "train", pruner=self.pruner_train
- )
- self.test_descr = generate_descr((nb * 1) // 5, "test", pruner=None)
-
- # Build the tokenizer
- tokens = {"<nul>", "<img>"}
- for d in [self.train_descr, self.test_descr]:
- for s in d:
- for t in s.strip().split(" "):
- tokens.add(t)
- # make this set a sorted list to get the same tensors given
- # the same descr
- tokens = list(tokens)
- tokens.sort()
- self.token2id = dict([(t, n) for n, t in enumerate(tokens)])
- self.id2token = dict([(n, t) for n, t in enumerate(tokens)])
-
- # Tokenize the train and test sets
- self.train_input = self.tensorize(self.train_descr)
- self.test_input = self.tensorize(self.test_descr)
-
- def batches(self, split="train"):
- assert split in {"train", "test"}
- input = self.train_input if split == "train" else self.test_input
- for batch in tqdm.tqdm(
- input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
- ):
- yield self.trim(batch)
+##############################
- def vocabulary_size(self):
- return len(self.token2id)
- def compute_missing_properties(self, n_epoch, model, pruner=None):
+def one_epoch(model, quizz_machine):
+ optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
- acc_nb_requested_properties = []
- acc_nb_missing_properties = []
- acc_nb_results = 0
+ model.train()
- for input in tqdm.tqdm(
- self.test_input.split(self.batch_size),
- dynamic_ncols=True,
- desc=f"test-properties",
- ):
- tape, loss_masks, _ = self.excise_last_image(input)
- tape, loss_masks = self.add_generated_image(tape, loss_masks, model)
- result_descr = self.detensorize(tape)
- np = picoclvr.nb_properties(
- result_descr,
- height=self.height,
- width=self.width,
- pruner=pruner,
- )
- nb_requested_properties, _, nb_missing_properties = zip(*np)
- acc_nb_requested_properties += nb_requested_properties
- acc_nb_missing_properties += nb_missing_properties
- acc_nb_results += len(result_descr)
+ nb_train_samples, acc_train_loss = 0, 0.0
- nb_requested_properties = sum(acc_nb_requested_properties)
- nb_missing_properties = sum(acc_nb_missing_properties)
+ for input in quizz_machine.batches(split="train"):
+ input = input.to(device)
- prefix = "" if pruner is None else "pruned_"
- log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
- log_string(
- f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
- )
- log_string(
- f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
- )
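+        # Gradient accumulation over physical batches: the gradient is zeroed
+        # at the start of each logical batch of args.batch_size samples, and
+        # the optimizer steps once that many samples have been accumulated.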
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.zero_grad()
- ######################################################################
+ output = model(mygpt.BracketedSequence(input)).x
+ loss = F.cross_entropy(output.transpose(1, 2), input)
+ acc_train_loss += loss.item() * input.size(0)
+
+ nb_train_samples += input.size(0)
- def produce_results(self, n_epoch, model):
+ loss.backward()
- self.compute_missing_properties(n_epoch, model)
+ if nb_train_samples % args.batch_size == 0:
+ optimizer.step()
- if self.pruner_eval is not None:
- self.compute_missing_properties(n_epoch, model, self.pruner_eval)
+ train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- nb_tokens_to_generate = self.height * self.width + 3
- result_descr = []
- nb_per_primer = 8
- primer = []
+ log_string(f"train_perplexity {n_epoch} {train_perplexity}")
- for primer_descr in [
- "red above green <sep> green top <sep> blue right of red",
- "there is red <sep> there is yellow <sep> there is blue",
- "red below yellow <sep> yellow below green <sep> green below blue <sep> red right <sep> yellow left <sep> green right <sep> blue left",
- "green bottom <sep> yellow bottom <sep> green left of blue <sep> yellow right of blue <sep> blue top",
- ]:
- primer += [primer_descr] * nb_per_primer
- tape = self.tensorize(primer)
- loss_masks = 1 - (tape == self.token2id["<nul>"]).long()
- tape, loss_masks = self.add_generated_image(tape, loss_masks, model)
- result_descr = self.detensorize(tape)
+######################################################################
- np = picoclvr.nb_properties(result_descr, height=self.height, width=self.width)
- acc_nb_requested_properties, _, acc_nb_missing_properties = zip(*np)
- acc_nb_results = len(result_descr)
+def run_tests(model, quizz_machine, deterministic_synthesis):
+ with torch.autograd.no_grad():
+ model.eval()
- nb_requested_properties = sum(acc_nb_requested_properties)
- nb_missing_properties = sum(acc_nb_missing_properties)
+ nb_test_samples, acc_test_loss = 0, 0.0
+ nb_samples_accumulated = 0
- prefix = "demo_"
- log_string(f"nb_{prefix}samples {n_epoch} {acc_nb_results}")
- log_string(
- f"property_{prefix}nb {n_epoch} requested {sum(acc_nb_requested_properties)} missing {sum(acc_nb_missing_properties)}"
- )
- log_string(
- f"property_{prefix}miss {n_epoch} {100*nb_missing_properties/nb_requested_properties:.02f}%"
- )
+ for input in quizz_machine.batches(split="test"):
+ input = input.to(device)
- img = picoclvr.descr2img(result_descr, height=self.height, width=self.width)
-
- if img.dim() == 5:
- if img.size(1) == 1:
- img = F.pad(img.squeeze(1), pad=(1, 1, 1, 1), value=64)
- else:
- img = torch.cat(
- [
- torchvision.utils.make_grid(x, padding=1, pad_value=64)[None]
- for x in img
- ],
- 0,
- )
-
- image_name = os.path.join(args.result_dir, f"result_{n_epoch:04d}.png")
- torchvision.utils.save_image(
- img / 255.0, image_name, nrow=nb_per_primer, padding=1, pad_value=1.0
- )
- log_string(f"wrote {image_name}")
+ bs = model(mygpt.BracketedSequence(input))
+ output = bs.x
+ loss = F.cross_entropy(output.transpose(1, 2), input)
-######################################################################
+ acc_test_loss += loss.item() * input.size(0)
-log_string(f"device {device}")
+ nb_test_samples += input.size(0)
+ main_test_accuracy = quizz_machine.produce_results(
+ n_epoch=n_epoch,
+ model=model,
+ result_dir=args.result_dir,
+ logger=log_string,
+ deterministic_synthesis=deterministic_synthesis,
+ )
-def pruner_horizontal_green(p):
- return not ("green" in p and ("left" in p or "right" in p))
+ test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+ log_string(f"test_perplexity {n_epoch} {test_perplexity}")
-task = TaskPicoCLVR(
- batch_size=args.batch_size,
- height=args.height,
- width=args.width,
- nb_colors=args.nb_colors,
- device=device,
- pruner_train=pruner_horizontal_green
- if args.prune_properties in {"train+eval"}
- else None,
- pruner_eval=(lambda p: not pruner_horizontal_green(p))
- if args.prune_properties in {"train+eval", "eval"}
- else None,
-)
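+    # The stored accuracy drives the schedule below: the weakest model is
+    # trained next, and c_quizzes are only created once every model is above
+    # the accuracy threshold.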
+ model.main_test_accuracy = main_test_accuracy
-vocabulary_size = task.vocabulary_size()
-log_string(f"vocabulary_size {vocabulary_size}")
+######################################################################
-##############################
-model = mygpt.MyGPT(
- vocabulary_size=vocabulary_size,
- dim_model=args.dim_model,
- dim_keys=args.dim_keys,
- dim_hidden=args.dim_hidden,
- nb_heads=args.nb_heads,
- nb_blocks=args.nb_blocks,
- causal=True,
- dropout=args.dropout,
-)
+def create_c_quizzes(
+ model,
+ other_models,
+ quizz_machine,
+ nb_for_train=1000,
+ nb_for_test=100,
+ min_ave_seq_logproba=None,
+):
+ kept = []
-model.to(device)
+ sum_logits, sum_nb_c_quizzes = 0, 0
-nb_parameters = sum(p.numel() for p in model.parameters())
-log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
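+    # Generate candidates in rounds of four times the number requested,
+    # keeping only those validated by the other models (see below), until
+    # enough have been collected for both the train and test sets.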
+ while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
+ nb_to_generate = 4 * (nb_for_train + nb_for_test)
-######################################################################
+ new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
+ n_epoch=n_epoch,
+ result_dir=args.result_dir,
+ logger=log_string,
+ nb=nb_to_generate,
+ model=model,
+ other_models=other_models,
+ min_ave_seq_logproba=min_ave_seq_logproba,
+ )
-nb_epochs_finished = 0
+ sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
+ sum_nb_c_quizzes += new_c_quizzes.size(0)
-if args.no_checkpoint:
- log_string(f"not trying to load checkpoint.")
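+        # Keep a candidate only if nb_correct == len(other_models) - 1, i.e.
+        # all but one of the other models answer it correctly.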
+ to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
-else:
- try:
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- checkpoint = torch.load(checkpoint_name)
- nb_epochs_finished = checkpoint["nb_epochs_finished"]
- model.load_state_dict(checkpoint["model_state"])
- torch.set_rng_state(checkpoint["rng_state"])
- if torch.cuda.is_available():
- torch.cuda.set_rng_state(checkpoint["cuda_rng_state"])
+ if args.dirty_debug:
+ to_keep = new_c_quizzes
- log_string(f"checkpoint loaded with {nb_epochs_finished} epochs finished.")
+ log_string(
+ f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+ )
- except FileNotFoundError:
- log_string("starting from scratch.")
+ kept.append(to_keep)
- except:
- log_string("error when loading the checkpoint.")
- exit(1)
+ new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
-######################################################################
+ quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+ quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
-nb_epochs = args.nb_epochs if args.nb_epochs > 0 else nb_epochs_default
+ quizz_machine.save_quizzes(
+ new_c_quizzes[:72],
+ args.result_dir,
+ f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
+ log_string,
+ )
-token_count = 0
-for input in task.batches(split="train"):
- token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
-token_probas = token_count / token_count.sum()
-entropy = -torch.xlogy(token_probas, token_probas).sum()
-train_set_perplexity = math.exp(entropy)
+ return sum_logits / sum_nb_c_quizzes
-##############################
-if args.learning_rate_schedule == "cos":
- learning_rate_schedule = {}
- for n_epoch in range(args.nb_epochs):
- u = n_epoch / args.nb_epochs * math.pi
- learning_rate_schedule[n_epoch] = args.learning_rate * 0.5 * (1 + math.cos(u))
-else:
- u = {
- int(k): float(v)
- for k, v in [
- tuple(x.split(":")) for x in args.learning_rate_schedule.split(",")
- ]
- }
-
- learning_rate_schedule = {}
- learning_rate = args.learning_rate
- for n_epoch in range(args.nb_epochs):
- if n_epoch in u:
- learning_rate = u[n_epoch]
- learning_rate_schedule[n_epoch] = learning_rate
-
-log_string(f"learning_rate_schedule {learning_rate_schedule}")
+######################################################################
-##############################
+models = []
-nb_samples_seen = 0
+for k in range(args.nb_gpts):
+ model = mygpt.MyGPT(
+ vocabulary_size=vocabulary_size,
+ dim_model=args.dim_model,
+ dim_keys=args.dim_keys,
+ dim_hidden=args.dim_hidden,
+ nb_heads=args.nb_heads,
+ nb_blocks=args.nb_blocks,
+ causal=True,
+ dropout=args.dropout,
+ ).to(device)
-if nb_epochs_finished >= nb_epochs:
- task.produce_results(nb_epochs_finished, model)
+ model.main_test_accuracy = 0.0
+ model.id = k
-for n_epoch in range(nb_epochs_finished, nb_epochs):
+ models.append(model)
- learning_rate = learning_rate_schedule[n_epoch]
- log_string(f"learning_rate {learning_rate}")
+nb_parameters = sum(p.numel() for p in models[0].parameters())
+log_string(f"nb_parameters {nb_parameters} ({int(nb_parameters/1e6)}M)")
- if args.optim == "sgd":
- optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
- elif args.optim == "adam":
- optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
- elif args.optim == "adamw":
- optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
- else:
- raise ValueError(f"Unknown optimizer {args.optim}.")
+######################################################################
- model.train()
+min_ave_seq_logproba = None
- nb_train_samples, acc_train_loss = 0, 0.0
+for n_epoch in range(args.nb_epochs):
+ log_string(f"--- epoch {n_epoch} ----------------------------------------")
- for input in task.batches(split="train"):
- input = input.to(device)
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_train_loss += loss.item() * input.size(0)
- nb_train_samples += input.size(0)
- nb_samples_seen += input.size(0)
+ a = [(model.id, float(model.main_test_accuracy)) for model in models]
+ a.sort(key=lambda p: p[0])
+ log_string(f"current accuracies {a}")
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
+ # select the model with lowest accuracy
+ models.sort(key=lambda model: model.main_test_accuracy)
+ model = models[0]
- with torch.autograd.no_grad():
+ log_string(
+ f"training model {model.id} main_test_accuracy {model.main_test_accuracy}"
+ )
- model.eval()
+ # improve it
+ one_epoch(model, quizz_machine)
- nb_test_samples, acc_test_loss = 0, 0.0
+ quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
- for input in task.batches(split="test"):
- input = input.to(device)
+ log_string(
+ f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
+ )
- # input, loss_masks, true_images = task.excise_last_image(input)
- # input, loss_masks = task.add_true_image(input, true_images, loss_masks)
+ # test it
+ run_tests(model, quizz_machine, deterministic_synthesis=False)
- output = model(mygpt.BracketedSequence(input)).x
- loss = F.cross_entropy(output.transpose(1, 2), input)
- acc_test_loss += loss.item() * input.size(0)
- nb_test_samples += input.size(0)
+ log_string(
+ f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
+ )
- train_perplexity = math.exp(min(100, acc_train_loss / nb_train_samples))
- test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
+ if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
+ other_models = models.copy()
+ other_models.remove(model)
- log_string(
- f"perplexity {n_epoch} train_set {train_set_perplexity} train_prediction {train_perplexity} test_prediction {test_perplexity}"
+ ave_seq_logproba = create_c_quizzes(
+ model,
+ other_models,
+ quizz_machine,
+ nb_for_train=nb_new_c_quizzes_for_train,
+ nb_for_test=nb_new_c_quizzes_for_test,
+ min_ave_seq_logproba=min_ave_seq_logproba,
)
- task.produce_results(n_epoch, model)
-
- checkpoint = {
- "nb_epochs_finished": n_epoch + 1,
- "model_state": model.state_dict(),
- "rng_state": torch.get_rng_state(),
- }
+    # We keep the first average sequence log-proba as a reference
+ if min_ave_seq_logproba is None:
+ min_ave_seq_logproba = ave_seq_logproba
+ else:
+ log_string(
+ f"min_ave_seq_logproba {min_ave_seq_logproba} ave_seq_logproba {ave_seq_logproba}"
+ )
- if torch.cuda.is_available():
- checkpoint["cuda_rng_state"] = torch.cuda.get_rng_state()
+    # Re-evaluate every model now that new c_quizzes have been added
+ for model in models:
+ run_tests(model, quizz_machine, deterministic_synthesis=False)
- checkpoint_name = os.path.join(args.result_dir, args.checkpoint_name)
- torch.save(checkpoint, checkpoint_name)
- log_string(f"saved checkpoint {checkpoint_name}")
######################################################################