# insert_flash_back(self.rec_V, V, self.rec_K, K, t0, t1, CL, proba=self.proba_flashback / CL)
+
+######################################################################
+
+2024 Jan 09 14:24:42 (from mygpt.py)
+
+    # This piece of code makes the assumption that there is
+    # nothing informative before t0, otherwise we'd have to
+    # implement a cache for V and K too. This should not be
+    # too much of a problem since this is used only during
+    # training, where full sequences are available.
+
+ # n = torch.arange(N, device=X.device)[:, None, None, None]
+ # t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+ # dv = torch.arange(DV, device=X.device)[None, None, None, :]
+ # dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+ # u = (
+ # torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+ # ) * CL
+
+ # src_time = t - u - t0
+ # src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+ # mask = (
+ # torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+ # ).long()
+
+ # self.rec_V[:, :, t0:t1] = (
+ # mask * V[n, src_head, src_time, dv]
+ # + (1 - mask) * self.rec_V[:, :, t0:t1]
+ # )
+
+ # self.rec_K[:, :, t0:t1] = (
+ # mask * K[n, src_head, src_time, dk]
+ # + (1 - mask) * self.rec_K[:, :, t0:t1]
+ # )
+
+######################################################################
+
+2024 Jan 10 08:10:39 (from mygpt.py)
+
+ # That was a bad idea
+ # G = F.dropout(G, self.attention_dropout, self.training)
+
+
+######################################################################
+
+2024 Jan 10 08:46:13 (from mygpt.py)
+
+ #################################################################
+ # Flashbacks. This version sucks, about to replace it
+ if self.training and self.proba_flashback > 0.0:
+ warnings.warn("flash back", RuntimeWarning)
+        # This piece of code makes the assumption that there is
+        # nothing informative before t0, otherwise we'd have to
+        # implement a cache for V and K too. This should not be
+        # too much of a problem since this is used only during
+        # training, where full sequences are available.
+
+ n = torch.arange(N, device=X.device)[:, None, None, None]
+ t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+ dv = torch.arange(DV, device=X.device)[None, None, None, :]
+ dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+ u = (
+ torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+ ) * CL
+
+ src_time = t - u - t0
+ src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+        # Per-component coin flips. Nota bene: this DV-component mask is
+        # reused for K below, which implicitly assumes DK == DV.
+        mask = (
+            torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+        ).long()
+
+ self.rec_V[:, :, t0:t1] = (
+ mask * V[n, src_head, src_time, dv]
+ + (1 - mask) * self.rec_V[:, :, t0:t1]
+ )
+
+ self.rec_K[:, :, t0:t1] = (
+ mask * K[n, src_head, src_time, dk]
+ + (1 - mask) * self.rec_K[:, :, t0:t1]
+ )
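+
+# A self-contained sketch of the same flashback trick with made-up toy
+# sizes (N, CH, H, DV, DK, CL, t1 and proba_flashback below are
+# hypothetical; in the module they come from the configuration). It
+# checks that u is a multiple of CL drawn below t, so src_time always
+# points at a valid earlier time step.
+
+import torch
+
+N, CH, H, DV, DK, CL = 2, 3, 3, 4, 4, 5
+t0, t1 = 0, 4 * CL  # t0 = 0: full sequences, as assumed during training
+proba_flashback = 0.1
+
+V = torch.randn(N, H, t1, DV)
+K = torch.randn(N, H, t1, DK)
+rec_V = torch.randn(N, CH, t1, DV)
+rec_K = torch.randn(N, CH, t1, DK)
+
+n = torch.arange(N)[:, None, None, None]
+t = torch.arange(t0, t1)[None, None, :, None]
+dv = torch.arange(DV)[None, None, None, :]
+dk = torch.arange(DK)[None, None, None, :]
+
+u = (torch.rand(N, CH, t1 - t0, 1).mul(t).long() // CL) * CL
+src_time = t - u - t0  # same intra-chunk offset, some earlier chunk
+src_head = torch.randint(H, (N, CH, t1 - t0, 1))
+assert src_time.min() >= 0 and src_time.max() < t1
+
+# per-component coin flips; reusing the DV-sized mask for K assumes DK == DV
+mask = (torch.rand(N, CH, t1 - t0, DV) <= proba_flashback).long()
+
+rec_V[:, :, t0:t1] = mask * V[n, src_head, src_time, dv] + (1 - mask) * rec_V[:, :, t0:t1]
+rec_K[:, :, t0:t1] = mask * K[n, src_head, src_time, dk] + (1 - mask) * rec_K[:, :, t0:t1]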
+
+
+######################################################################
+
+2024 Jan 13 13:38:31 (from mygpt.py)
+
+    g = F.sigmoid(self.b_G)
+    a = 1 - g
+
+ print(f"\n\nSANITY {a**T}\n")
+ exit(0)
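+
+# Context for the check above: with a leaky-integrator recurrence of the
+# form rec[t] = a * rec[t-1] + g * v[t], a token T steps in the past is
+# scaled by a**T, which is what the SANITY print inspects. A standalone
+# numeric sketch (the init of b_G and the value of T are made up):
+
+import torch
+
+T = 400
+b_G = torch.full((4,), -2.0)  # hypothetical gate-bias initialization
+g = torch.sigmoid(b_G)
+a = 1 - g
+print(f"SANITY {a**T}")  # ~ 0.88**400 ~ 1e-22: tokens that far back vanish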
+
+
+######################################################################
+
+2024 Jan 14 13:39:37 (from mygpt.py)
+
+ epsilon = 0.5
+
+    # One-hot mask at a uniformly drawn time step for each (n, h) row
+    # (the "head"), and a mask of every position strictly after it (the
+    # "tail").
+    dropout_head = (
+        (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
+        .expand_as(G)
+        .float()
+    )
+
+    dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+
+ dropout_active = (
+ torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
+ ).long()
+
+ dropout_head *= dropout_active
+ dropout_tail *= dropout_active
+
+ G = (
+ G
+ + dropout_head * (1 - epsilon - G.detach())
+ - dropout_tail * G.detach()
+ )
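+
+# What the detach() arithmetic above implements: in the forward pass the
+# gate is forced to 1 - epsilon at the sampled head time step of each
+# (n, h) slice and to 0 at every later step, while in the backward pass
+# the whole expression has derivative 1 w.r.t. G (a straight-through
+# estimator). Standalone check with made-up sizes, and with the dropout
+# applied unconditionally (no dropout_active draw):
+
+import torch
+
+N, H, R, T, epsilon = 1, 2, 2, 6, 0.5
+G = torch.rand(N, H, R, T, requires_grad=True)
+
+head = (torch.rand(N, H, 1, T).sort(dim=3).indices == 0).expand(N, H, R, T).float()
+tail = head.cumsum(dim=3) - head  # ones strictly after the head position
+
+G2 = G + head * (1 - epsilon - G.detach()) - tail * G.detach()
+
+G2.sum().backward()
+assert torch.all(G.grad == 1)  # the gradient passes through unchanged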
+
+######################################################################
+
+2024 Jan 18 07:39:29 (from mygpt.py)
+
+class Calibrator:
+    # Running first and second moments of activations, which normalize()
+    # folds back, in place, into a weight w and a bias b.
+    def __init__(self, w=None, b=None):
+ self.w = w
+ self.b = b
+ self.s, self.s_sq, self.n = 0, 0, 0
+ self.mean, self.std = 0, 0
+
+ def update(self, X):
+ X = X.detach()
+ self.s += X.sum(dim=0)
+ self.s_sq += X.pow(2).sum(dim=0)
+ self.n += X.size(0)
+
+ def moments(self):
+ mean = self.s / self.n
+ std = (self.s_sq / self.n - mean * mean).sqrt()
+ return mean, std
+
+ def normalize(self):
+ mean, std = self.moments()
+ if self.b is not None:
+ self.b.sub_(mean)
+ if self.w is not None:
+ self.w.div_(std)
+ result = mean - self.mean, std - self.std
+ self.mean, self.std = mean, std
+ self.s, self.s_sq, self.n = 0, 0, 0
+ return result
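+
+# Hypothetical usage sketch (the wiring below is illustrative, not taken
+# from mygpt.py): attach a Calibrator to a linear layer, accumulate the
+# running output moments, then fold them back into the parameters. The
+# weight is passed transposed so the per-unit std broadcasts against its
+# last dimension; .t() is a view, so the layer is updated in place.
+
+import torch
+import torch.nn as nn
+
+layer = nn.Linear(16, 8)
+calib = Calibrator(w=layer.weight.data.t(), b=layer.bias.data)
+
+for _ in range(10):
+    X = torch.randn(32, 16)
+    calib.update(layer(X))  # sums and squared sums per output unit
+
+d_mean, d_std = calib.normalize()  # b -= mean, w /= std; returns the drift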
+
+
+
+######################################################################
+
+2024 Jan 18 07:39:34 (from mygpt.py)
+
+ # self.calibrator_G = Calibrator()
+ # self.calibrator_rec_V = Calibrator()
+ # self.calibrator_rec_K = Calibrator()
+
+
+######################################################################
+
+2024 Jan 18 07:39:37 (from mygpt.py)
+
+ # self.calibrator_G.update(G.reshape(-1, G.size(-1)))
+
+
+######################################################################
+
+2024 Jan 18 07:39:42 (from mygpt.py)
+
+ # self.calibrator_rec_V.update(
+ # next_V.permute(0, 1, 3, 2).reshape(-1, next_V.size(2))
+ # )
+ # self.calibrator_rec_K.update(
+ # next_K.permute(0, 1, 3, 2).reshape(-1, next_K.size(2))
+ # )
+
+
+######################################################################
+
+2024 Jan 18 07:47:12 (from mygpt.py)
+
+ ######################################################################
+ # Roll the gating indexes
+
+ # warnings.warn("rotating barrel", RuntimeWarning)
+
+ # r_barrel = torch.arange(R, device=G.device)[None, None, :, None]
+ # t_barrel = torch.arange(t1 - t0, device=G.device)[None, None, None, :]
+ # r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+ # G = G.gather(dim=2, index=r_barrel.expand_as(G))
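+
+# What the commented-out rotation would do: at time t every gating row r
+# takes the gate of row (r + (t + t0) // L) % R, i.e. the R recurrent
+# slots take turns every L steps. Tiny standalone demo (sizes made up):
+
+import torch
+
+R, L, t0, t1 = 3, 2, 0, 6
+G = torch.arange(R)[None, None, :, None].expand(1, 1, R, t1 - t0).float()
+
+r_barrel = torch.arange(R)[None, None, :, None]
+t_barrel = torch.arange(t1 - t0)[None, None, None, :]
+r_barrel = (r_barrel + (t_barrel + t0) // L) % R
+G = G.gather(dim=2, index=r_barrel.expand_as(G))
+
+print(G[0, 0])  # row r holds gate (r + t // L) % R at time step t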
+
+
+######################################################################
+
+2024 Jan 18 07:47:25 (from mygpt.py)
+
+ # warnings.warn("harmonic recurrence", RuntimeWarning)
+    # har = torch.arange(t0, t1, device=G.device).float() + 1
+ # A = har / (har + 1)
+ # G = G / har
+
+
+######################################################################
+
+2024 Jan 18 08:46:18 (from mygpt.py)
+
+ # warnings.warn("softmax gating", RuntimeWarning)
+
+ # G = (
+ # torch.einsum("ntc,hrc->nhrt", X, self.w_G) + self.b_G[None, :, :, None]
+ # ).softmax(dim=2)
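+
+# Shape sketch for this softmax variant (toy sizes; X, w_G, b_G below are
+# standalone stand-ins for the module's tensors): the einsum yields one
+# logit per (head, row, time) and the softmax makes the R rows compete.
+
+import torch
+
+N, T, C, H, R = 2, 5, 8, 3, 4
+X = torch.randn(N, T, C)
+w_G = torch.randn(H, R, C)
+b_G = torch.randn(H, R)
+
+G = (torch.einsum("ntc,hrc->nhrt", X, w_G) + b_G[None, :, :, None]).softmax(dim=2)
+
+assert G.shape == (N, H, R, T)
+assert torch.allclose(G.sum(dim=2), torch.ones(N, H, T))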
+
+######################################################################
+
+2024 Jan 21 16:55:24 (from main.py)
+
+    with open("test.dat", "a") as f:
+        # assumption: the filter targets nn.Linear modules
+        for m in filter(lambda m: isinstance(m, nn.Linear), model.modules()):
+            for p in m.parameters():
+                ...
+
+    for m in model.modules():
+        if isinstance(m, mygpt.Caterpillar):
+            ...