# Build the bidirectional vocabulary: token -> id and id -> token.
# (Resolved leftover diff markers: the current token set uses the bare
# "true"/"false" tokens, not the older "<true>"/"<false>" forms.)
self.token2id = {t: n for n, t in enumerate(tokens)}
self.id2token = {n: t for n, t in enumerate(tokens)}
# Cache the ids of the special tokens used throughout the task:
# "#" is the null/padding token, "true"/"false" are the answer tokens.
self.t_nul = self.token2id["#"]
self.t_true = self.token2id["true"]
self.t_false = self.token2id["false"]
# Tokenize the train and test sets
self.train_input = self.tensorize(self.train_descr)
# Count, over the positions selected by ar_mask, how many generated
# tokens match the ground truth, then report per-epoch accuracy.
# (Resolved leftover diff markers: keep the newer log lines that tag
# each record with the epoch number so runs can be plotted over time.)
nb_total = ar_mask.sum().item()
nb_correct = ((correct == result).long() * ar_mask).sum().item()
logger(f"test_performance {n_epoch} {nb_total=} {nb_correct=}")
logger(f"main_test_accuracy {n_epoch} {nb_correct / nb_total}")
######################################################################