From e56873a0cb64555cbd47e44cdca0ce991765a5fc Mon Sep 17 00:00:00 2001
From: François Fleuret
Date: Tue, 16 Jan 2024 08:13:01 +0100
Subject: [PATCH] Update.

---
 main.py  | 18 ++++++++++++++++++
 mygpt.py | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++----
 tasks.py | 12 ++++++++----
 3 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/main.py b/main.py
index 3e67a73..04e5652 100755
--- a/main.py
+++ b/main.py
@@ -835,6 +835,24 @@ if args.max_percents_of_test_in_train >= 0:
 
 ##############################
 
+for input in task.batches(split="train", desc="calibrate"):
+    input = input.to(device)
+    output = model(mygpt.BracketedSequence(input)).x
+
+for n, m in model.named_modules():
+    for a in dir(m):
+        x = getattr(m, a)
+        if isinstance(x, mygpt.Calibrator):
+            print(f"####### {n} | {a} ########################")
+            mean, std = x.moments()
+            print("mean\n", mean, "\n")
+            print("std\n", std, "\n")
+            print("############################################\n\n")
+
+exit(0)
+
+##############################
+
 nb_samples_seen = 0
 
 if nb_epochs_finished >= nb_epochs:
diff --git a/mygpt.py b/mygpt.py
index 3a48cdb..aded796 100755
--- a/mygpt.py
+++ b/mygpt.py
@@ -464,6 +464,36 @@ def moving_window(x, dim, win_dim, win_size):
 
 ##############################
 
+class Calibrator:
+    def __init__(self, w=None, b=None):
+        self.w = w
+        self.b = b
+        self.s, self.s_sq, self.n = 0, 0, 0
+        self.mean, self.std = 0, 0
+
+    def update(self, X):
+        X = X.detach()
+        self.s += X.sum(dim=0)
+        self.s_sq += X.pow(2).sum(dim=0)
+        self.n += X.size(0)
+
+    def moments(self):
+        mean = self.s / self.n
+        std = (self.s_sq / self.n - mean * mean).sqrt()
+        return mean, std
+
+    def normalize(self):
+        mean, std = self.moments()
+        if self.b is not None:
+            self.b.sub_(mean)
+        if self.w is not None:
+            self.w.div_(std)
+        result = mean - self.mean, std - self.std
+        self.mean, self.std = mean, std
+        self.s, self.s_sq, self.n = 0, 0, 0
+        return result
+
+
 class Caterpillar(nn.Module):
     def __init__(
         self,
@@ -531,6 +561,10 @@ class Caterpillar(nn.Module):
             dim_v,
         )
 
+        self.calibrator_G = Calibrator()
+        self.calibrator_rec_V = Calibrator()
+        self.calibrator_rec_K = Calibrator()
+
     def reset_inner_loss(self):
         self.acc_attention = 0
         self.acc_nb = 0
@@ -565,8 +599,8 @@ class Caterpillar(nn.Module):
         self.rec_K = X.new_zeros(N, R, T, DK)
         # We start the recurrent sequences with optimizable
         # initial values. No idea if it helps.
-        self.rec_V[:, :, t0 - L : t0] = self.init_V_rec[None, :, :, :]
-        self.rec_K[:, :, t0 - L : t0] = self.init_K_rec[None, :, :, :]
+        self.rec_V[:, :, t0 - L : t0, :] = self.init_V_rec[None, :, :, :]
+        self.rec_K[:, :, t0 - L : t0, :] = self.init_K_rec[None, :, :, :]
 
         self.cache_Y = X.new_zeros(N, T, DM)
 
@@ -586,6 +620,8 @@ class Caterpillar(nn.Module):
             torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
         ).sigmoid()
 
+        self.calibrator_G.update(G.reshape(-1, G.size(-1)))
+
         # warnings.warn("softmax gating", RuntimeWarning)
 
         # G = (
@@ -659,8 +695,18 @@ class Caterpillar(nn.Module):
         next_V = pscan_dim(A, gated_V, init_rec_V, dim=2)
         next_K = pscan_dim(A, gated_K, init_rec_K, dim=2)
 
-        self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
-        self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+        next_V = next_V.flatten(2, 3)
+        next_K = next_K.flatten(2, 3)
+
+        self.calibrator_rec_V.update(
+            next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
+        )
+        self.calibrator_rec_K.update(
+            next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
+        )
+
+        self.rec_V[:, :, t0:t1] = next_V
+        self.rec_K[:, :, t0:t1] = next_K
 
         ######################################################################
         # compute the readout
diff --git a/tasks.py b/tasks.py
index afad8af..4777a11 100755
--- a/tasks.py
+++ b/tasks.py
@@ -1515,11 +1515,13 @@ class Grid(Task):
         self.train_input = self.str2tensor(self.train_descr)
         self.test_input = self.str2tensor(self.test_descr)
 
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
+        if desc is None:
+            desc = f"epoch-{split}"
         for batch in tqdm.tqdm(
-            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
             yield self.trim(batch)
 
@@ -1618,11 +1620,13 @@ class QMLP(Task):
 
         self.nb_codes = max(self.train_input.max(), self.test_input.max()) + 1
 
-    def batches(self, split="train"):
+    def batches(self, split="train", desc=None):
         assert split in {"train", "test"}
         input = self.train_input if split == "train" else self.test_input
+        if desc is None:
+            desc = f"epoch-{split}"
         for batch in tqdm.tqdm(
-            input.split(self.batch_size), dynamic_ncols=True, desc=f"epoch-{split}"
+            input.split(self.batch_size), dynamic_ncols=True, desc=desc
         ):
             yield batch
-- 
2.20.1
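
Note, not part of the patch: the new Calibrator is a plain running-moments
accumulator and can be exercised on its own. The sketch below is hypothetical;
the toy nn.Linear, the batch shapes, and the loop counts are assumptions, and
only the update() / moments() / normalize() API comes from mygpt.py above.

    import torch
    import torch.nn as nn

    from mygpt import Calibrator

    torch.manual_seed(0)

    # Stand-in layer; only its bias is handed to the calibrator, since
    # normalize() subtracts the observed mean from b in place.
    lin = nn.Linear(8, 4)
    calib = Calibrator(b=lin.bias.data)

    # Calibration pass, in the spirit of the temporary loop added to
    # main.py: update() accumulates the sum and the sum of squares of
    # the activations over dim=0.
    with torch.no_grad():
        for _ in range(100):
            calib.update(lin(torch.randn(32, 8)))

    # moments() returns the per-unit mean and the population std, i.e.
    # what X.mean(0) and X.std(0, unbiased=False) would give.
    mean, std = calib.moments()
    print("mean before:", mean)

    # Fold the mean into the bias and reset the accumulators. The
    # returned pair is the drift of (mean, std) since the previous
    # call, which can be monitored if calibration is iterated.
    calib.normalize()

    with torch.no_grad():
        for _ in range(100):
            calib.update(lin(torch.randn(32, 8)))

    print("mean after:", calib.moments()[0])  # now approximately zero

Since w is None here, normalize() leaves weights untouched; Calibrator(w=...)
would additionally divide the given weight tensor by the std, provided the
shapes broadcast against the per-unit statistics.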