# Generate new "culture quizzes" (c_quizzes) with randomly chosen models until
# enough of them pass validation, then store the validated ones and save a
# sample of them as illustrations together with their token log-probabilities.
#
# NOTE(review): this chunk arrived as raw diff residue with its indentation
# stripped; the nesting below (accumulation inside the non-empty guard, the
# storage/illustration section after the loop) is reconstructed from the
# data flow — confirm against the upstream file.
while nb_validated < nb_to_create:
    # Pick a generator uniformly at random. A one-element tensor supports
    # __index__, so it can be used directly as a sequence index.
    model_for_generation = models[torch.randint(len(models), (1,))]

    c_quizzes = quiz_machine.generate_quizzes(
        nb_to_create,
        model_for_generation=model_for_generation,
        temperature=args.generation_temperature,
    )

    # Drop degenerate quizzes before spending compute on scoring them.
    c_quizzes = c_quizzes[quiz_machine.non_trivial(c_quizzes)]

    if c_quizzes.size(0) > 0:
        # Score every candidate with all models and accumulate the
        # (quizzes, log-probas) pairs across iterations of the loop.
        token_logproba = quiz_machine.solution_token_logprobas(models, c_quizzes)
        recorded_quizzes_logprobas.append((c_quizzes, token_logproba))

        (
            validated_quizzes,
            validated_logprobas,
        ) = extract_valid_quizzes_and_logprobas(recorded_quizzes_logprobas)

        # extract_valid_quizzes_and_logprobas returns None when nothing
        # has been validated yet, hence the guard before size(0).
        if validated_quizzes is not None:
            nb_validated = validated_quizzes.size(0)

    log_string(
        f"keep c_quizzes model {model_for_generation.id} nb_accumulated {nb_validated} / {nb_to_create}"
    )

# store the new c_quizzes which have been validated

quiz_machine.reverse_random_half_in_place(validated_quizzes)
quiz_machine.store_c_quizzes(validated_quizzes[:nb_for_train], for_train=True)
quiz_machine.store_c_quizzes(
    validated_quizzes[nb_for_train:nb_to_create], for_train=False
)

######################################################################
# save images with their logprobas

# Keep only the first 72 quizzes for illustration (grid-sized sample).
vq = validated_quizzes[:72]
vl = validated_logprobas[:72]

if vq.size(0) > 0:
    prefix = f"culture_c_quiz_{n_epoch:04d}"
    filename = os.path.join(args.result_dir, prefix + "_logp.pth")
    # Persist the log-probas as a tensor; the previous plain-text dump
    # (dead code referencing a stale `file_name` variable) was removed.
    torch.save(vl, filename)

    quiz_machine.save_quiz_illustrations(args.result_dir, prefix, vq)