def create_c_quizzes(
- model,
- other_models,
+ models,
quizz_machine,
nb_for_train=1000,
nb_for_test=100,
min_ave_seq_logproba=None,
):
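+ # Generate c_quizzes with randomly drawn models, keep the validated ones, and stop once there are enough for both the train and test stores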
kept = []
-
+ model_indexes = []
sum_logits, sum_nb_c_quizzes = 0, 0
while sum([x.size(0) for x in kept]) < nb_for_train + nb_for_test:
- nb_to_generate = 4 * (nb_for_train + nb_for_test)
+ nb_to_generate = nb_for_train + nb_for_test
+
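+ # When the pool of generator indexes is empty, refill it with a fresh random permutation so each model takes a turn at generating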
+ if len(model_indexes) == 0:
+ model_indexes = [i.item() for i in torch.randperm(len(models))]
+
+ model = models[model_indexes.pop()]
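+ # The drawn model generates a batch of candidate quizzes, which all the models then attempt to solve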
new_c_quizzes, nb_correct, ave_seq_logproba = quizz_machine.create_c_quizzes(
+ nb=nb_to_generate,
+ model_for_generation=model,
+ models_for_validation=models,
+ min_ave_seq_logproba=min_ave_seq_logproba,
n_epoch=n_epoch,
result_dir=args.result_dir,
logger=log_string,
- nb=nb_to_generate,
- model=model,
- other_models=other_models,
- min_ave_seq_logproba=min_ave_seq_logproba,
)
sum_logits += new_c_quizzes.size(0) * ave_seq_logproba
sum_nb_c_quizzes += new_c_quizzes.size(0)
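+ # Keep the quizzes solved correctly by exactly all models but one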
- to_keep = new_c_quizzes[nb_correct == len(other_models) - 1]
+ to_keep = new_c_quizzes[nb_correct == len(models) - 1]
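+ # In dirty_debug mode, bypass the correctness filter and keep a random ~1/3 of the quizzes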
if args.dirty_debug:
- to_keep = new_c_quizzes
+ to_keep = new_c_quizzes[
+ torch.randint(3, (new_c_quizzes.size(0),), device=new_c_quizzes.device)
+ == 0
+ ]
+
+ kept.append(to_keep)
log_string(
- f"keep {to_keep.size(0)}/{new_c_quizzes.size(0)} c_quizzes ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%)"
+ f"keep c_quizzes {to_keep.size(0)}/{new_c_quizzes.size(0)} ({to_keep.size(0)*100/new_c_quizzes.size(0):.02f}%) total {sum([ x.size(0) for x in kept])}/{nb_to_generate}"
)
- kept.append(to_keep)
-
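+ # Concatenate everything that was kept, truncate to the requested count, and store the train split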
new_c_quizzes = torch.cat(kept, dim=0)[: nb_for_train + nb_for_test]
quizz_machine.store_c_quizzes(new_c_quizzes[:nb_for_train], for_train=True)
)
if min([m.main_test_accuracy for m in models]) >= accuracy_to_make_c_quizzes:
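+ # All the models have reached the required test accuracy, so generate a fresh batch of c_quizzes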
- other_models = models.copy()
- other_models.remove(model)
-
ave_seq_logproba = create_c_quizzes(
- model,
- other_models,
+ models,
quizz_machine,
nb_for_train=nb_new_c_quizzes_for_train,
nb_for_test=nb_new_c_quizzes_for_test,
deterministic_synthesis,
forbidden_tokens=None,
logit_biases=None,
- progress_bar_desc="autoregression",
+ progress_bar_desc=None,
device=torch.device("cpu"),
):
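+ # input and ar_mask must have the same shape; ar_mask flags the positions to regenerate autoregressively in place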
assert input.size() == ar_mask.size()
def create_c_quizzes(
self,
+ nb,
+ model_for_generation,
+ models_for_validation,
+ min_ave_seq_logproba,
n_epoch,
result_dir,
logger,
- nb,
- model,
- other_models,
- min_ave_seq_logproba,
):
###############################################################
# Generate quizzes with model
seq_logproba[...] = 0
masked_inplace_autoregression(
- model=model,
+ model=model_for_generation,
batch_size=self.batch_size,
input=c_quizzes,
ar_mask=ar_mask,
seq_logproba=seq_logproba,
temperature=temperature,
deterministic_synthesis=False,
- progress_bar_desc="sampling c_quizzes",
+ # progress_bar_desc="sampling c_quizzes",
device=self.device,
)
else:
break
- logger(f"chaging temperature to {temperature}")
+ logger(f"changing temperature to {temperature}")
###############################################################
# Create the reverse quizzes
nb_correct = []
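+ # Each model, including the generator, tries to solve both the quiz and its reversed version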
- for m in other_models:
+ for model in models_for_validation:
result = c_quizzes.clone()
masked_inplace_autoregression(
- model=m,
+ model=model,
batch_size=self.batch_size,
input=result,
ar_mask=ar_mask,
seq_logproba=seq_logproba,
temperature=1.0,
deterministic_synthesis=True,
- progress_bar_desc="solving c_quizzes",
+ # progress_bar_desc="solving c_quizzes",
device=self.device,
)
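+ # Same attempt on the reversed version of the quiz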
reverse_result = reverse_c_quizzes.clone()
masked_inplace_autoregression(
- model=m,
+ model=model,
batch_size=self.batch_size,
input=reverse_result,
ar_mask=ar_mask,
seq_logproba=seq_logproba,
temperature=1.0,
deterministic_synthesis=True,
- progress_bar_desc="solving reversed c_quizzes",
+ # progress_bar_desc="solving reversed c_quizzes",
device=self.device,
)