parser.add_argument("--deterministic_synthesis", action="store_true", default=False)
-parser.add_argument("--reverse_cleanup", action="store_true", default=True)
-
-parser.add_argument("--validation_forward_only", action="store_true", default=False)
+parser.add_argument("--both_directions", action="store_true", default=False)
parser.add_argument("--problem", type=str, default="sky")
c_quizzes, ave_seq_logproba = quizz_machine.generate_quizzes(
nb_to_create,
model_for_generation=model_for_generation,
- reverse_cleanup=args.reverse_cleanup,
)
nb_correct = quizz_machine.compute_correctness(
- c_quizzes, models, both_directions=not args.validation_forward_only
+ c_quizzes, models, both_directions=args.both_directions
)
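# Hypothetical sketch, not the repository's actual standard_validity: the idea
# is to keep a candidate c_quiz only if enough of the validation models answer
# it correctly. The threshold below is an assumption for illustration.
def standard_validity_sketch(nb_correct, nb_models, min_fraction=0.75):
    # nb_correct: number of validation models that solved the quiz
    return nb_correct >= int(min_fraction * nb_models)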
if args.dirty_debug:
nb_validated = valid_c_quizzes(recorded, standard_validity).size(0)
log_string(
- f"keep c_quizzes kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
+ f"keep c_quizzes model {model_for_generation.id} kept {nv} nb_accumulated {nb_validated} / {nb_to_create}"
)
# store the new c_quizzes which have been validated
for n_epoch in range(args.nb_epochs):
log_string(f"--- epoch {n_epoch} ----------------------------------------")
+ # Select, improve, and evaluate the worst model
+
weakest_model = min(models, key=lambda m: float(m.main_test_accuracy))
log_string(
f"training model {weakest_model.id} main_test_accuracy {weakest_model.main_test_accuracy}"
)
- # improve it
one_epoch(weakest_model, quizz_machine)
log_string(
f"train_set_composition w_quizzes {quizz_machine.nb_batch_w_quizzes} c_quizzes {quizz_machine.nb_batch_c_quizzes}"
)
- # test it
run_tests(weakest_model, quizz_machine, deterministic_synthesis=False)
cta = " ".join([f"{float(m.main_test_accuracy):.04f}" for m in models])
log_string(f"current_test_accuracies {cta}")
- # replace a fraction of the w_quizzes with fresh ones
+ # Replace a fraction of the w_quizzes with fresh ones
+
quizz_machine.renew_w_quizzes(args.nb_train_samples // args.nb_gpts)
+ # If all the models are good enough, generate new quizzes and
+ # re-compute the test errors
+
if min([m.main_test_accuracy for m in models]) >= args.accuracy_to_make_c_quizzes:
create_c_quizzes(
models,
nb_for_test=nb_new_c_quizzes_for_test,
)
- # We update everyone
for model in models:
run_tests(model, quizz_machine, deterministic_synthesis=False)
def compute_correctness(
- self, c_quizzes, models_for_validation, both_directions=True
+ self, c_quizzes, models_for_validation, both_directions=False
):
reversed_c_quizzes = self.reverse_time(c_quizzes)
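# Hedged sketch of the time reversal behind both_directions validation, assuming
# each quiz is a flat token sequence whose two halves are prompt and answer; the
# real reverse_time may also handle explicit direction-marker tokens.
import torch

def reverse_halves(quizzes):
    # quizzes: (nb_quizzes, seq_len) with seq_len assumed even
    h = quizzes.size(1) // 2
    return torch.cat([quizzes[:, h:], quizzes[:, :h]], dim=1)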
###############################################################
- def generate_quizzes(self, nb, model_for_generation, reverse_cleanup=False):
+ def generate_quizzes(self, nb, model_for_generation):
c_quizzes = torch.empty(
nb, self.train_w_quizzes.size(1), device=self.device, dtype=torch.int64
)
seq_logproba = torch.empty(ar_mask_first.size(0), device=self.device)
- if reverse_cleanup:
- temperature = 10.0
- else:
- temperature = 1.0
+ temperature = 10.0
# First, we generate the answer at high temperature
input=c_quizzes,
ar_mask=ar_mask_second,
seq_logproba=seq_logproba,
- temperature=temperature,
+ temperature=1.0,
deterministic_synthesis=True,
device=self.device,
)
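# Hedged reminder of the standard temperature-sampling scheme, which
# masked_inplace_autoregression (defined elsewhere) is assumed to follow:
# logits are divided by the temperature before the softmax, so 10.0 flattens
# the distribution for diverse answers and 1.0 leaves it unchanged.
import torch

def sample_next_token(logits, temperature=1.0):
    # logits: (batch, vocab_size) unnormalized next-token scores
    probs = torch.softmax(logits / temperature, dim=-1)
    return torch.multinomial(probs, num_samples=1)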
######################################################################
def frame2img(self, x, scale=15):
- x = x.reshape(-1, self.height, self.width)
+ x = x.reshape(x.size(0), self.height, -1)
m = torch.logical_and(
x >= 0, x < self.first_bird_token + self.nb_bird_tokens
).long()
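# Hedged illustration of the reshape change above: keeping the batch dimension
# and inferring the last one lets a sample carry more than height*width tokens,
# e.g. several frames rendered side by side. Shapes here are illustrative only.
import torch

x = torch.zeros(4, 6 * 8 * 2, dtype=torch.int64)  # 4 samples, two 6x8 frames each
print(x.reshape(x.size(0), 6, -1).shape)           # torch.Size([4, 6, 16])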
if __name__ == "__main__":
import time
- sky = Sky(height=6, width=8, speed=4, nb_iterations=2)
+ sky = Sky(height=6, width=8, speed=1, nb_iterations=4)
prompts, answers = sky.generate_prompts_and_answers(4)
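# Hedged usage sketch, not from the patch: the "import time" above suggests a
# throughput check; the same call, timed, could look like this.
start_time = time.perf_counter()
prompts, answers = sky.generate_prompts_and_answers(4)
print(f"generated 4 prompt/answer pairs in {time.perf_counter() - start_time:.02f}s")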