From: François Fleuret
Date: Tue, 25 Jun 2024 11:52:20 +0000 (+0200)
Subject: Update.
X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=commitdiff_plain;h=336130cc923761658029a0af9d5862d59405d47a;p=culture.git

Update.
---

diff --git a/main.py b/main.py
index 2c759ec..05c3557 100755
--- a/main.py
+++ b/main.py
@@ -12,7 +12,7 @@ from torch import nn
 from torch.nn import functional as F
 
 import ffutils
-import mygpt, tasks
+import mygpt, quizz_machine
 
 # world quizzes vs. culture quizzes
 
@@ -209,7 +209,7 @@ else:
 assert args.nb_train_samples % args.batch_size == 0
 assert args.nb_test_samples % args.batch_size == 0
 
-task = tasks.World(
+quizz_machine = quizz_machine.QuizzMachine(
     nb_train_samples=args.nb_train_samples,
     nb_test_samples=args.nb_test_samples,
     batch_size=args.physical_batch_size,
@@ -222,7 +222,7 @@ task = tasks.World(
 
 log_string(f"device {device}")
 
-vocabulary_size = task.vocabulary_size()
+vocabulary_size = quizz_machine.vocabulary_size()
 
 log_string(f"vocabulary_size {vocabulary_size}")
 
@@ -231,8 +231,10 @@ log_string(f"vocabulary_size {vocabulary_size}")
 # Compute the entropy of the training tokens
 
 token_count = 0
-for input in task.batches(split="train", desc="train-entropy"):
-    token_count += F.one_hot(input, num_classes=task.vocabulary_size()).sum((0, 1))
+for input in quizz_machine.batches(split="train", desc="train-entropy"):
+    token_count += F.one_hot(input, num_classes=quizz_machine.vocabulary_size()).sum(
+        (0, 1)
+    )
 token_probas = token_count / token_count.sum()
 entropy = -torch.xlogy(token_probas, token_probas).sum()
 train_set_perplexity = math.exp(entropy)
@@ -254,11 +256,11 @@ if args.max_percents_of_test_in_train >= 0:
 
     nb_test, nb_in_train = 0, 0
     for test_subset in subsets_as_tuples(
-        task.batches(split="test", desc="test-check"), 25000
+        quizz_machine.batches(split="test", desc="test-check"), 25000
     ):
         in_train = set()
         for train_subset in subsets_as_tuples(
-            task.batches(split="train", desc="train-check"), 25000
+            quizz_machine.batches(split="train", desc="train-check"), 25000
         ):
             in_train.update(test_subset.intersection(train_subset))
         nb_in_train += len(in_train)
@@ -275,14 +277,14 @@ if args.max_percents_of_test_in_train >= 0:
 ##############################
 
 
-def one_epoch(model, task):
+def one_epoch(model, quizz_machine):
     optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
 
     model.train()
 
     nb_train_samples, acc_train_loss = 0, 0.0
 
-    for input in task.batches(split="train"):
+    for input in quizz_machine.batches(split="train"):
         input = input.to(device)
 
         if nb_train_samples % args.batch_size == 0:
@@ -307,14 +309,14 @@
 ######################################################################
 
 
-def run_tests(model, task, deterministic_synthesis):
+def run_tests(model, quizz_machine, deterministic_synthesis):
     with torch.autograd.no_grad():
         model.eval()
 
         nb_test_samples, acc_test_loss = 0, 0.0
         nb_samples_accumulated = 0
 
-        for input in task.batches(split="test"):
+        for input in quizz_machine.batches(split="test"):
             input = input.to(device)
 
             bs = model(mygpt.BracketedSequence(input))
@@ -326,7 +328,7 @@
 
             nb_test_samples += input.size(0)
 
-        main_test_accuracy = task.produce_results(
+        main_test_accuracy = quizz_machine.produce_results(
            n_epoch=n_epoch,
            model=model,
            result_dir=args.result_dir,
@@ -347,7 +349,7 @@
 def create_c_quizzes(
     model,
     other_models,
-    task,
+    quizz_machine,
     nb_for_train=1000,
     nb_for_test=100,
     min_ave_seq_logproba=None,
@@ -359,7 +361,7 @@ def create_c_quizzes(
     while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
         nb_to_generate = 4 * (nb_for_train + nb_for_test)
 
-        new_c_quizzes, nb_correct, ave_seq_logproba = task.create_c_quizzes(
+        new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
             n_epoch=n_epoch,
             result_dir=args.result_dir,
             logger=log_string,
@@ -385,10 +387,10 @@ def create_c_quizzes(
 
     new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
 
-    task.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
-    task.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
+    quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
+    quizz_machine.store_c_quizzes(new_c_quizzes[nb_for_train:], for_train=False)
 
-    task.save_quizzes(
+    quizz_machine.save_quizzes(
         new_c_quizzes[:72],
         args.result_dir,
         f"culture_c_quiz_{n_epoch:04d}_{model.id:02d}",
@@ -443,19 +445,19 @@ for n_epoch in range(args.nb_epochs):
     )
 
     # improve it
-    one_epoch(model, task)
+    one_epoch(model, quizz_machine)
 
-    task.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+    quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
 
     log_string(
-        f"train_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
     # test it
-    run_tests(model, task, deterministic_synthesis=False)
+    run_tests(model, quizz_machine, deterministic_synthesis=False)
 
     log_string(
-        f"test_set_composition w_quizzes {task.nb_batch_w_quizzes} c_quizzes {task.nb_batch_c_quizzes}"
+        f"test_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
     )
 
     if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
@@ -465,7 +467,7 @@ for n_epoch in range(args.nb_epochs):
         ave_seq_logproba = create_c_quizzes(
             model,
             other_models,
-            task,
+            quizz_machine,
             nb_for_train=nb_new_c_quizzes_for_train,
             nb_for_test=nb_new_c_quizzes_for_test,
             min_ave_seq_logproba=min_ave_seq_logproba,
@@ -481,7 +483,7 @@ for n_epoch in range(args.nb_epochs):
 
     # We update everyone
     for model in models:
-        run_tests(model, task, deterministic_synthesis=False)
+        run_tests(model, quizz_machine, deterministic_synthesis=False)
 
 
 ######################################################################
diff --git a/mygpt.py b/mygpt.py
index 809f790..7047849 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -310,9 +310,8 @@ class MyGPT(nn.Module):
             dist = torch.distributions.categorical.Categorical(logits=logits)
             t_next = dist.sample()
 
-            if seq_logproba is not None:
-                all_t = torch.arange(t_next.size(0))
-                seq_logproba += logits[all_t, t_next].sum(dim=-1)
+            all_n = torch.arange(t_next.size(0))
+            seq_logproba += logits[all_n, t_next].sum(dim=-1)
 
             input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]
 
diff --git a/tasks.py b/tasks.py
index a522728..50ded2c 100755
--- a/tasks.py
+++ b/tasks.py
@@ -32,7 +32,11 @@ def masked_inplace_autoregression(
 ):
     assert input.size() == ar_mask.size()
 
-    batches = zip(input.split(batch_size), ar_mask.split(batch_size))
+    batches = zip(
+        input.split(batch_size),
+        ar_mask.split(batch_size),
+        seq_logproba.split(batch_size),
+    )
 
     if progress_bar_desc is not None:
         batches = tqdm.tqdm(
@@ -46,7 +50,7 @@ def masked_inplace_autoregression(
     t = model.training
     model.eval()
 
-    for input, ar_mask in batches:
+    for input, ar_mask, seq_logproba in batches:
         model.masked_inplace_autoregression(
             input=input,
             ar_mask=ar_mask,
@@ -81,7 +85,7 @@ class Task:
 import world
 
 
-class World(Task):
+class QuizzMachine(Task):
     def save_image(self, input, result_dir, filename, logger):
         img = world.seq2img(input.to("cpu"), self.height, self.width)
         image_name = os.path.join(result_dir, filename)
@@ -178,13 +182,14 @@ class World(Task):
             input = input[:nmax]
             ar_mask = self.make_ar_mask(input)
             result = input.clone() * (1 - ar_mask)
+            seq_logproba = torch.empty(input.size(0), device=self.device)
 
             masked_inplace_autoregression(
                 model=model,
                 batch_size=self.batch_size,
                 input=result,
                 ar_mask=ar_mask,
-                seq_logproba=None,
+                seq_logproba=seq_logproba,
                 temperature=1.0,
                 deterministic_synthesis=deterministic_synthesis,
                 progress_bar_desc=None,
@@ -218,13 +223,14 @@ class World(Task):
         input = self.test_w_quizzes[:96]
         ar_mask = self.make_ar_mask(input)
         result = input.clone() * (1 - ar_mask)
+        seq_logproba = torch.empty(input.size(0), device=self.device)
 
         masked_inplace_autoregression(
             model=model,
             batch_size=self.batch_size,
             input=result,
             ar_mask=ar_mask,
-            seq_logproba=None,
+            seq_logproba=seq_logproba,
             temperature=1.0,
             deterministic_synthesis=deterministic_synthesis,
             progress_bar_desc=None,
@@ -262,7 +268,7 @@ class World(Task):
         nb,
         model,
         other_models,
-        min_ave_seq_logproba=None,
+        min_ave_seq_logproba,
     ):
         ###############################################################
         # Generate quizzes with model
@@ -272,7 +278,7 @@ class World(Task):
         )
         ar_mask = torch.full(c_quizzes.size(), 1, device=self.device)
-        seq_logproba = torch.empty(nb, device=self.device)
+        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
 
         temperature = 1
         d_temperature = 1
 
@@ -302,11 +308,11 @@ class World(Task):
             # Oh man that's ugly
             if ave_seq_logproba < min_ave_seq_logproba * 1.1:
                 if d_temperature > 0:
-                    d_temperature *= -0.5
+                    d_temperature *= -1 / 3
                 temperature += d_temperature
             elif ave_seq_logproba > min_ave_seq_logproba:
                 if d_temperature < 0:
-                    d_temperature *= -0.5
+                    d_temperature *= -1 / 3
                 temperature += d_temperature
             else:
                 break
@@ -326,6 +332,7 @@ class World(Task):
         )
 
         ar_mask = self.make_ar_mask(c_quizzes)
+        seq_logproba = torch.empty(ar_mask.size(0), device=self.device)
 
         ###############################################################
         # Check how many of the other models can solve them in both
@@ -341,7 +348,7 @@ class World(Task):
             batch_size=self.batch_size,
             input=result,
             ar_mask=ar_mask,
-            seq_logproba=None,
+            seq_logproba=seq_logproba,
             temperature=1.0,
             deterministic_synthesis=True,
             progress_bar_desc="solving c_quizzes",
@@ -357,7 +364,7 @@ class World(Task):
             batch_size=self.batch_size,
             input=reverse_result,
             ar_mask=ar_mask,
-            seq_logproba=None,
+            seq_logproba=seq_logproba,
             temperature=1.0,
             deterministic_synthesis=True,
             progress_bar_desc="solving reversed c_quizzes",
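
Note on the mygpt.py change above: the "if seq_logproba is not None" guard is
gone, so callers of masked_inplace_autoregression() must now pass a
seq_logproba tensor with one entry per sequence, and the sampling loop always
accumulates the log-probability of each sampled token into it. A minimal
standalone sketch of that accumulation, not part of the patch: the function
name sample_step is hypothetical, and logits is assumed to hold
log-softmax-normalized scores as in MyGPT.

    import torch

    def sample_step(logits, input, ar_mask, s, seq_logproba):
        # logits: (N, vocab_size) log-probabilities for position s
        dist = torch.distributions.categorical.Categorical(logits=logits)
        t_next = dist.sample()  # one sampled token per sequence

        # Accumulate each sequence's log-probability in place; after this
        # commit the tensor is mandatory, there is no None guard anymore.
        all_n = torch.arange(t_next.size(0))
        seq_logproba += logits[all_n, t_next]

        # Overwrite only the positions selected by the autoregression mask.
        input[:, s] = ar_mask[:, s] * t_next + (1 - ar_mask[:, s]) * input[:, s]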
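
Note on the temperature schedule in tasks.py: the step is now shrunk by a
factor of 3 instead of 2 each time ave_seq_logproba crosses the target band,
so the sampling temperature oscillates toward the band with progressively
smaller corrections. A sketch of the search loop, not part of the patch:
generate_and_score is a hypothetical stand-in for generating c_quizzes at the
given temperature and returning their average sequence log-probability.

    temperature, d_temperature = 1.0, 1.0
    while True:
        ave_seq_logproba = generate_and_score(temperature)

        # Log-probabilities are negative, so min_ave_seq_logproba * 1.1 is
        # the lower edge of the target band.
        if ave_seq_logproba < min_ave_seq_logproba * 1.1:
            # quizzes too improbable: if the temperature was being raised,
            # reverse direction and shrink the step by a factor of 3
            if d_temperature > 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        elif ave_seq_logproba > min_ave_seq_logproba:
            # quizzes too probable: if it was being lowered, reverse and shrink
            if d_temperature < 0:
                d_temperature *= -1 / 3
            temperature += d_temperature
        else:
            break  # inside [min_ave_seq_logproba * 1.1, min_ave_seq_logproba]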