# NOTE(review): this entire span is unapplied patch residue — the bare "-"
# lines and the "-"/"+"-prefixed lines below are unified-diff hunk markers,
# so the text is not valid Python as it stands. Restore the file from
# version control rather than hand-repairing it here.
# Reviewer reading of the post-patch intent (unverified): an evaluation loop
# that moves each batch to `local_device`, runs the model, accumulates
# cross-entropy weighted by batch size, and converts the mean loss into a
# perplexity. Original indentation was lost in the mangling.
for input in tqdm.tqdm(src, dynamic_ncols=True, desc="test"):
# NOTE(review): `input` shadows the `input` builtin — appears to be the
# file's existing naming convention; flag for a later rename.
input = input.to(local_device)
-
- bs = model(mygpt.BracketedSequence(input))
- output = bs.x
-
+ output = model(mygpt.BracketedSequence(input)).x
loss = F.cross_entropy(output.transpose(1, 2), input)
-
acc_test_loss += loss.item() * input.size(0)
-
nb_test_samples += input.size(0)
# NOTE(review): min(100, ...) bounds the exponent so math.exp cannot overflow.
test_perplexity = math.exp(min(100, acc_test_loss / nb_test_samples))
# NOTE(review): stray closing parenthesis — presumably the tail of a call
# (e.g. a logging statement) whose opening lines were dropped with the hunk
# context; confirm against the original file.
)
+######################################################################
+
+
# NOTE(review): patch residue — the "+"-prefixed separator above is an added
# hunk line, and the lines below splice two distant regions of the file:
# the `one_epoch` header with its first statement, followed by the tail of
# an unrelated list of (struct, quad, model) tuples. Interior lines were
# dropped between hunks; body indentation is lost. Do not execute as-is —
# restore from version control.
def one_epoch(model, quiz_machine, local_device=main_device):
model.to(local_device).train()
# NOTE(review): this tuple and the closing bracket belong to a different
# definition (a configuration list); their enclosing statement is not
# visible in this chunk.
(("A", "f_A", "B", "f_B"), (0, 0, 0, 1), model_transformer_cold),
]
+######################################################################
+
# NOTE(review): patch residue — this hunk splices the module-level
# `save_additional_results` header (and its loop) together with two
# statements that reference `self` and therefore must come from a method of
# some class elsewhere in the file (the `return ... make_ar_mask(...)` line
# suggests a `make_ar_mask` wrapper); the intervening lines were dropped.
# Restore from version control rather than repairing by hand.
def save_additional_results(models, science_w_quizzes):
for model in models:
assert struct in self.train_struct
return self.problem.make_ar_mask(quizzes, struct=struct, mask=mask)
+ ######################################################################
+
# NOTE(review): fragment of the `predict` method — interior lines between
# the masking step and the return were dropped by the diff mangling:
# `correct` is never assigned in what is visible here, and the generation /
# comparison logic that presumably produces it is missing. Indentation lost.
def predict(self, model, quizzes, struct, mask):
ar_mask = self.make_ar_mask(quizzes=quizzes, struct=struct, mask=mask)
# NOTE(review): zeroes out the autoregressive-mask positions, keeping only
# the conditioning part of each quiz — TODO confirm mask polarity against
# the original file.
result = quizzes * (1 - ar_mask)
return result, correct
# NOTE(review): patch residue — the "-" lines are the old multi-line
# signature of `produce_results` and the "+" lines are its replacement,
# collapsed onto one line. Only the first three body statements survived;
# the method runs past the end of this chunk. Restore from version control.
- def produce_results(
- self,
- n_epoch,
- model,
- input,
- result_dir,
- ):
+ ######################################################################
+
+ def produce_results(self, n_epoch, model, input, result_dir):
input = input.to(self.device)
# NOTE(review): `Tensor.new(...)` allocates uninitialized tensors with the
# same dtype/device as `input` — presumably filled in by the dropped
# remainder of the method; verify against the original file.
result = input.new(input.size())
correct = input.new(input.size(0))