X-Git-Url: https://fleuret.org/cgi-bin/gitweb/gitweb.cgi?a=blobdiff_plain;f=fridge;h=f87c1df50ed1bb6ba34e7ae3ded2bcf10c1a597a;hb=3dd98b99909b2bca323673263874e2abb39ac10c;hp=ac7f86c5bc9d5159aca0da267eb8dbcb6673eb94;hpb=f3f490def0be8a3ea2b9a0ac60f5bb33c5c45fb5;p=mygptrnn.git

diff --git a/fridge b/fridge
index ac7f86c..f87c1df 100644
--- a/fridge
+++ b/fridge
@@ -81,3 +81,126 @@ def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
 #         insert_flash_back(self.rec_V,V,self.rec_K,K,t0,t1,CL,proba=self.proba_flashback / CL,)
+
+######################################################################
+
+2024 Jan 09 14:24:42 (from mygpt.py)
+
+        # This piece of code makes the assumption that there is
+        # nothing informative before t0, otherwise we'd have to
+        # implement a cache for V and K too. This should not be
+        # too much of a problem since this is used only during
+        # training, where full sequences are available.
+
+        # n = torch.arange(N, device=X.device)[:, None, None, None]
+        # t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+        # dv = torch.arange(DV, device=X.device)[None, None, None, :]
+        # dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+        # u = (
+        #     torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+        # ) * CL
+
+        # src_time = t - u - t0
+        # src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+        # mask = (
+        #     torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+        # ).long()
+
+        # self.rec_V[:, :, t0:t1] = (
+        #     mask * V[n, src_head, src_time, dv]
+        #     + (1 - mask) * self.rec_V[:, :, t0:t1]
+        # )
+
+        # self.rec_K[:, :, t0:t1] = (
+        #     mask * K[n, src_head, src_time, dk]
+        #     + (1 - mask) * self.rec_K[:, :, t0:t1]
+        # )
+
+######################################################################
+
+2024 Jan 10 08:10:39 (from mygpt.py)
+
+        # That was a bad idea
+        # G = F.dropout(G, self.attention_dropout, self.training)
+
+
+######################################################################
+
+2024 Jan 10 08:46:13 (from mygpt.py)
+
+        #################################################################
+        # Flashbacks. This version sucks, about to replace it
+        if self.training and self.proba_flashback > 0.0:
+            warnings.warn("flash back", RuntimeWarning)
+            # This piece of code makes the assumption that there is
+            # nothing informative before t0, otherwise we'd have to
+            # implement a cache for V and K too. This should not be
+            # too much of a problem since this is used only during
+            # training, where full sequences are available.
+
+            n = torch.arange(N, device=X.device)[:, None, None, None]
+            t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+            dv = torch.arange(DV, device=X.device)[None, None, None, :]
+            dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+            u = (
+                torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+            ) * CL
+
+            src_time = t - u - t0
+            src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+            mask = (
+                torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+            ).long()
+
+            self.rec_V[:, :, t0:t1] = (
+                mask * V[n, src_head, src_time, dv]
+                + (1 - mask) * self.rec_V[:, :, t0:t1]
+            )
+
+            self.rec_K[:, :, t0:t1] = (
+                mask * K[n, src_head, src_time, dk]
+                + (1 - mask) * self.rec_K[:, :, t0:t1]
+            )
+
+
+######################################################################
+
+2024 Jan 13 13:38:31 (from mygpt.py)
+
+        g = F.sigmoid(self.b_G)
+        a = 1 - g
+
+        print(f"\n\nSANITY {a**T}\n")
+        exit(0)
+
+
+######################################################################
+
+2024 Jan 14 13:39:37 (from mygpt.py)
+
+        epsilon = 0.5
+
+        dropout_head = (
+            (torch.rand(N, H, 1, t1 - t0, device=G.device).sort(dim=3).indices == 0)
+            .expand_as(G)
+            .float()
+        )
+
+        dropout_tail = dropout_head.cumsum(dim=3) - dropout_head
+
+        dropout_active = (
+            torch.rand(N, 1, 1, 1, device=G.device) < self.proba_gate_dropout
+        ).long()
+
+        dropout_head *= dropout_active
+        dropout_tail *= dropout_active
+
+        G = (
+            G
+            + dropout_head * (1 - epsilon - G.detach())
+            - dropout_tail * G.detach()
+        )
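
######################################################################

The two flash-back fragments above rely on a chunk-aligned random offset: u is a
multiple of CL drawn below t, so src_time = t - u - t0 always points a whole number
of chunks back in time. A small self-contained illustration of that indexing, not
from mygpt.py: the chunk length CL, the sequence length T and the batch size are
made up, and t0 = 0 as during training.

import torch

CL, T = 4, 16                                    # made-up chunk length and sequence length
t = torch.arange(T)[None, :]                     # current positions, with t0 = 0
u = (torch.rand(3, T).mul(t).long() // CL) * CL  # random chunk-aligned offsets below t
src_time = t - u                                 # positions the flash-back copies from

print((u % CL == 0).all())     # tensor(True): offsets are whole chunks
print((src_time <= t).all())   # tensor(True): sources are never in the future
print((src_time >= 0).all())   # tensor(True): valid indices when t0 = 0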
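
######################################################################

The 2024 Jan 14 fragment is a form of gate dropout: for each sample in a random
subset (proba_gate_dropout) and each head, one time step is picked uniformly at
random and the gate G is set to 1 - epsilon there and to 0 at every later step,
while the .detach() calls keep the perturbation itself out of the gradient. Below
is a minimal standalone sketch of that mechanism, not the repository's code; the
shapes, the gate tensor and the hyperparameter values are made up.

import torch

def gate_dropout_sketch(G, proba_gate_dropout=0.25, epsilon=0.5):
    # G: gates in (0, 1), of shape (N, H, R, T)
    N, H, R, T = G.size()

    # one-hot along time at a uniformly random position, per (sample, head)
    dropout_head = (
        (torch.rand(N, H, 1, T, device=G.device).sort(dim=3).indices == 0)
        .expand_as(G)
        .float()
    )

    # 1 strictly after the chosen position, 0 at or before it
    dropout_tail = dropout_head.cumsum(dim=3) - dropout_head

    # perturb only a random fraction of the samples
    dropout_active = (
        torch.rand(N, 1, 1, 1, device=G.device) < proba_gate_dropout
    ).float()
    dropout_head = dropout_head * dropout_active
    dropout_tail = dropout_tail * dropout_active

    # forward values become 1 - epsilon at the head and 0 on the tail, but the
    # added terms are detached, so gradients still flow through the original G
    return (
        G
        + dropout_head * (1 - epsilon - G.detach())
        - dropout_tail * G.detach()
    )

G = torch.rand(2, 3, 1, 8)             # made-up gate tensor
print(gate_dropout_sketch(G).size())   # torch.Size([2, 3, 1, 8])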