# insert_flash_back(self.rec_V, V, self.rec_K, K, t0, t1, CL, proba=self.proba_flashback / CL)
+
+######################################################################
+
+2024 Jan 09 14:24:42 (from mygpt.py)
+
+ # This piece of code makes the assumption that there is
+ # nothing informative before t0, otherwise we'd have to
+ # implement a cache for V and K too. This should not be
+ # too much of a problem since this is used only during
+ # training, where full sequences are available
+
+ # n = torch.arange(N, device=X.device)[:, None, None, None]
+ # t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+ # dv = torch.arange(DV, device=X.device)[None, None, None, :]
+ # dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+ # u = (
+ # torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+ # ) * CL
+
+ # src_time = t - u - t0
+ # src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+ # mask = (
+ # torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+ # ).long()
+
+ # self.rec_V[:, :, t0:t1] = (
+ # mask * V[n, src_head, src_time, dv]
+ # + (1 - mask) * self.rec_V[:, :, t0:t1]
+ # )
+
+ # self.rec_K[:, :, t0:t1] = (
+ # mask * K[n, src_head, src_time, dk]
+ # + (1 - mask) * self.rec_K[:, :, t0:t1]
+ # )
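
# The commented-out call at the top of this fridge entry packages the in-place
# code kept above into a helper. A minimal sketch of what such a helper could
# look like, reconstructed from that code: the name and signature are taken
# from the call, the body is an assumption and may differ from the real
# implementation (in particular it reuses the original's single mask for both
# V and K, which supposes DV == DK).

import torch


def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
    N, H, _, DV = V.size()
    DK = K.size(3)
    CH = rec_V.size(1)  # caterpillar height, i.e. number of recurrent slots

    n = torch.arange(N, device=V.device)[:, None, None, None]
    t = torch.arange(t0, t1, device=V.device)[None, None, :, None]
    dv = torch.arange(DV, device=V.device)[None, None, None, :]
    dk = torch.arange(DK, device=V.device)[None, None, None, :]

    # For every (sample, slot, time step), draw a chunk-aligned offset u in [0, t)
    u = (torch.rand(N, CH, t1 - t0, 1, device=V.device).mul(t).long() // CL) * CL
    src_time = t - u - t0
    src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=V.device)

    # Overwrite a random subset of the recurrent state components with values
    # taken from an earlier time step and a random head
    mask = (torch.rand(N, CH, t1 - t0, DV, device=V.device) <= proba).long()

    rec_V[:, :, t0:t1] = (
        mask * V[n, src_head, src_time, dv] + (1 - mask) * rec_V[:, :, t0:t1]
    )
    rec_K[:, :, t0:t1] = (
        mask * K[n, src_head, src_time, dk] + (1 - mask) * rec_K[:, :, t0:t1]
    )
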
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
- warnings.warn("flash back", RuntimeWarning)
- self.proba_flashback = 1e-2
+ self.proba_flashback = 0.0
+ self.proba_gate_dropout = 0.0
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
self.b_G = nn.Parameter(
torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
- # That bas a bad idea
+ if self.training and self.proba_gate_dropout > 0.0:
+ warnings.warn("gate dropout", RuntimeWarning)
+ epsilon = 0.5
+
+ # That was a bad idea
# G = F.dropout(G, self.attention_dropout, self.training)
V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
# We prepare the arguments for the parallel scan
+ # Clip the gating: rescale so that the gates of a recurrent slot sum to at
+ # most 1 over the heads, which keeps A = 1 - G.sum(1) non-negative
+ warnings.warn("gating clipping", RuntimeWarning)
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
+
A = 1 - G.sum(1)
gated_V = torch.einsum("nhet,nhtd->netd", G, V)
gated_K = torch.einsum("nhet,nhtd->netd", G, K)
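
# For reference, a naive sequential version of the first-order recurrence that
# such arguments usually feed (the actual parallel-scan call is outside this
# excerpt, so the exact recurrence is an assumption). Note that after the
# clipping above G.sum(1) <= 1, so A = 1 - G.sum(1) stays in [0, 1] and acts
# as a forgetting coefficient.


def naive_rec_scan(A, gated_V, init_rec):
    # A: [N, E, T], gated_V: [N, E, T, D], init_rec: [N, E, D]
    N, E, T, D = gated_V.size()
    out = gated_V.new_empty(N, E, T, D)
    rec = init_rec
    for t in range(T):
        rec = A[:, :, t, None] * rec + gated_V[:, :, t]
        out[:, :, t] = rec
    return out
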
self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
if self.training and self.proba_flashback > 0.0:
+ warnings.warn("flash back", RuntimeWarning)
# This piece of code makes the assumption that there is
# nothing informative before t0, otherwise we'd have to
# implement a cache for V and K too. This should not be