# Written by Francois Fleuret <francois@fleuret.org>
-import math, os, tqdm, warnings
+import math, os, tqdm, warnings, sys
import torch, torchvision
import threading
+######################################################################
+# if output is log(P(X=y)) and target is Y, returns -log P(X=Y) + H(X
+# | X != Y)
+
+
+# output is NxCxT and target is NxT
+def confusion(output, target, reduction="mean"):
+    """Per-token confusion loss: -log P(X=Y) + H(X | X != Y).
+
+    Cross-entropy of the target plus the entropy of the predicted
+    distribution renormalized over the non-target classes.
+
+    output: NxCxT tensor of per-class scores; a log_softmax is applied
+        below, so raw logits are accepted as well as log-probabilities.
+    target: NxT tensor of class indices.
+    reduction: "none" | "mean" | "sum", mirroring the torch loss
+        convention; "none" returns an NxT tensor.
+    """
+    N, C, T = output.shape
+    # Flatten to (N*T, C) so each (sample, time-step) pair is one row,
+    # addressable by a single index in all_t.
+    output = output.permute(0, 2, 1).reshape(-1, C)
+    target = target.flatten()
+    all_t = torch.arange(N * T, device=output.device)
+    output = output.log_softmax(dim=-1)
+    # Cross-entropy term: -log P(X = Y) per token.
+    result = -output[all_t, target]
+
+    # Entropy term: mask the target class out and renormalize over the
+    # remaining classes (log_softmax ignores the -inf entry).
+    output[all_t, target] = float("-inf")
+    output = output.log_softmax(dim=-1)
+    e = output.exp()
+    # At the target class e == exp(-inf) == 0 but output == -inf, and
+    # -inf * 0 is NaN; zeroing the log term first yields a clean 0 * 0.
+    output[all_t, target] = 0
+    # H = -(p * log p).sum(), so subtracting (output * e).sum() ADDS the
+    # conditional entropy to the cross-entropy term.
+    result = result - (output * e).sum(-1)
+
+    if reduction == "none":
+        return result.reshape(N, T)
+    elif reduction == "mean":
+        return result.reshape(N, T).mean()
+    elif reduction == "sum":
+        return result.reshape(N, T).sum()
+    else:
+        raise ValueError(f"unknown reduction '{reduction}'.")
+
+
######################################################################
# ar_mask is a tensor with 0s and 1s, of same shape as input, with
return result, correct
- compute_accuracy(model.train_w_quizzes[:nmax], log_prefix="train")
+ # compute_accuracy(model.train_w_quizzes[:nmax], log_prefix="train")
test_result, test_correct = compute_accuracy(
model.test_w_quizzes[:nmax], log_prefix="test"
######################################################################
- def logproba_of_solutions(self, models, c_quizzes):
+ def solution_token_logprobas(self, models, c_quizzes):
logproba = c_quizzes.new_zeros(
- c_quizzes.size(0), len(models), device=self.device, dtype=torch.float32
+ c_quizzes.size(0),
+ len(models),
+ c_quizzes.size(1),
+ device=self.device,
+ dtype=torch.float32,
)
for model in models:
input = input.to(self.device)
ar_mask = self.make_ar_mask(input)
output = model(mygpt.BracketedSequence(input)).x
- ce = (
- F.cross_entropy(output.transpose(1, 2), input, reduction="none")
+ l[:, model.id] = (
+ -F.cross_entropy(
+ output.transpose(1, 2), input, reduction="none"
+ )
* ar_mask
)
- l[:, model.id] = -ce.sum(dim=-1)
model.train(t)