##############################
+for input in task.batches(split="train", desc="calibrate"):
+    input = input.to(device)
+    output = model(mygpt.BracketedSequence(input)).x
+
+for n, m in model.named_modules():
+    for a in dir(m):
+        x = getattr(m, a)
+        if isinstance(x, mygpt.Calibrator):
+            print(f"####### {n} | {a} ########################")
+            mean, std = x.moments()
+            print("mean\n", mean, "\n")
+            print("std\n", std, "\n")
+            print("############################################\n\n")
+
+exit(0)
+
+##############################
+
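For reference, the moments printed above are recovered from plain sum and sum-of-squares accumulators (the Calibrator class below). A minimal standalone sanity check, not part of the patch, that this scheme matches torch's population statistics:

```python
import torch

X = torch.randn(1000, 8)
s, s_sq, n = 0, 0, 0
for chunk in X.split(128):  # same accumulation as Calibrator.update
    s += chunk.sum(dim=0)
    s_sq += chunk.pow(2).sum(dim=0)
    n += chunk.size(0)
mean = s / n
std = (s_sq / n - mean * mean).sqrt()  # population (biased) std
assert torch.allclose(mean, X.mean(dim=0), atol=1e-5)
assert torch.allclose(std, X.std(dim=0, unbiased=False), atol=1e-5)
```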
nb_samples_seen = 0
if nb_epochs_finished >= nb_epochs:
##############################
+class Calibrator:
+    def __init__(self, w=None, b=None):
+        self.w = w
+        self.b = b
+        self.s, self.s_sq, self.n = 0, 0, 0
+        self.mean, self.std = 0, 0
+
+    def update(self, X):
+        X = X.detach()
+        self.s += X.sum(dim=0)
+        self.s_sq += X.pow(2).sum(dim=0)
+        self.n += X.size(0)
+
+    def moments(self):
+        mean = self.s / self.n
+        std = (self.s_sq / self.n - mean * mean).sqrt()
+        return mean, std
+
+    def normalize(self):
+        mean, std = self.moments()
+        if self.b is not None:
+            self.b.sub_(mean)
+        if self.w is not None:
+            self.w.div_(std)
+        result = mean - self.mean, std - self.std
+        self.mean, self.std = mean, std
+        self.s, self.s_sq, self.n = 0, 0, 0
+        return result
+
+
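A hypothetical usage sketch of the class above (parameter shapes, batch count, and data are made up): tie a Calibrator to a multiplicative/additive parameter pair, accumulate statistics over a calibration pass, then fold them back in. normalize() subtracts the observed mean from b, divides w by the observed std, resets the accumulators, and returns how far the moments drifted since the previous call, which can serve as a stopping signal when calibrating repeatedly.

```python
import torch

w = torch.ones(8)   # multiplicative parameter, e.g. a gating weight scale
b = torch.zeros(8)  # additive parameter, e.g. a gating bias
calib = Calibrator(w=w, b=b)

for _ in range(10):  # one calibration pass over made-up batches
    calib.update(torch.randn(64, 8) * 3.0 + 1.5)

d_mean, d_std = calib.normalize()  # folds moments into b and w, resets sums
print(d_mean.abs().max(), d_std.abs().max())  # drift since last normalize()
```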
class Caterpillar(nn.Module):
    def __init__(
        self,
        dim_v,
    ):
+        self.calibrator_G = Calibrator()
+        self.calibrator_rec_V = Calibrator()
+        self.calibrator_rec_K = Calibrator()
+
    def reset_inner_loss(self):
        self.acc_attention = 0
        self.acc_nb = 0
        self.rec_K = X.new_zeros(N, R, T, DK)
        # We start the recurrent sequences with optimizable
        # initial values. No idea if it helps.
-        self.rec_V[:, :, t0 - L : t0] = self.init_V_rec[None, :, :, :]
-        self.rec_K[:, :, t0 - L : t0] = self.init_K_rec[None, :, :, :]
+        self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+        self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
        self.cache_Y = X.new_zeros(N, T, DM)
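The slicing change above is cosmetic: for a 4-D tensor, omitting the trailing dimension and writing it out as `:` address the same elements, the explicit form merely making the rank visible. A quick equivalence check with made-up sizes:

```python
import torch

t = torch.zeros(2, 3, 10, 5)
u = torch.zeros(2, 3, 10, 5)
v = torch.ones(1, 3, 4, 5)  # broadcasts over batch, like init_V_rec[None]
t[:, :, 2:6] = v     # old form
u[:, :, 2:6, :] = v  # new form
assert torch.equal(t, u)
```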
torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
+ self.calibrator_G.update(G.reshape(-1, G.size(-1)))
+
# warnings.warn("softmax gating", RuntimeWarning)
# G = (
        next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
        next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
-        self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
-        self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+        next_V = next_V.flatten(2, 3)
+        next_K = next_K.flatten(2, 3)
+
+        self.calibrator_rec_V.update(
+            next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
+        )
+        self.calibrator_rec_K.update(
+            next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
+        )
+
+        self.rec_V[:, :, t0:t1] = next_V
+        self.rec_K[:, :, t0:t1] = next_K
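The reshapes feeding the calibrators decide what counts as a sample: G is flattened so each (batch, head, row) gating trace becomes one row and the statistics come out per time step, while next_V/next_K are permuted so each (batch, row, channel) trajectory becomes one row and the statistics come out per position within the recurrent chunk. A shape walkthrough with made-up sizes:

```python
import torch

N, H, R, T = 2, 4, 3, 16           # batch, heads, caterpillar rows, time
G = torch.rand(N, H, R, T)
print(G.reshape(-1, G.size(-1)).shape)  # (N*H*R, T): moments per time step

L, DV = 4, 8                       # chunk length t1 - t0, value dim
next_V = torch.randn(N, R, L, DV)  # shape after flatten(2, 3)
rows = next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
print(rows.shape)                  # (N*R*DV, L): moments per chunk position
```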
        ######################################################################
        # compute the readout
        self.train_input = self.str2tensor(self.train_descr)
        self.test_input = self.str2tensor(self.test_descr)
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
+        if desc is None:
+            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
-            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield self.trim(batch)
        self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
        assert split in {"train", "test"}
        input = self.train_input if split == "train" else self.test_input
+        if desc is None:
+            desc = f"epoch-{split}"
        for batch in tqdm.tqdm(
-            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
        ):
            yield batch
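The desc parameter added to both batches() methods keeps the previous progress-bar label by default while letting callers relabel special passes, as the calibration loop at the top of this patch does (sketch; task construction elided):

```python
for input in task.batches(split="train"):                    # shows "epoch-train"
    ...
for input in task.batches(split="train", desc="calibrate"):  # shows "calibrate"
    ...
```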