##############################
+# This is one order of magnitude more complicated than I expected: not
+# elegant, slow, and hopefully not buggy
+
+
+def flash_back_time_src(N, H, t0, t1, CL, CH, proba, device):
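+ # Returns, for every (sample, caterpillar slot, time step in [t0, t1)),
+ # the source time (relative to t0) and the source head of the chunk to
+ # copy from, with the time set to -1 wherever no flash back takes place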
+ # flash back starting positions
+ fb_start = (torch.rand(N, CH, t1 - t0, device=device) <= proba).long()
+ fb_start[:, :, -CL:] = 0
+ fb_start[:, :, :CL] = 0
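+ # presumably: no flash back may start in the first CL steps (there is
+ # no earlier chunk inside [t0, t1) to copy from) nor in the last CL
+ # (the copied chunk has to fit before t1)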
+
+ # Remove series longer than CL
+ fb_body = fb_start.clone()
+ fb_body[:, :, CL + 1 :] -= fb_start[:, :, : -(CL + 1)]
+ fb_body = fb_body.cumsum(dim=2)
+ fb_start = fb_start * (fb_body == 1)
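+ # fb_body[t] counts the starts in the trailing window [t - CL, t], so
+ # keeping only the positions where that count is 1 drops any start that
+ # falls within CL steps of a previous one, and the windows cannot overlap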
+
+ # Set an origin source time (the starting time of the chunk to copy
+ # here). We set it to the current time minus a multiple of CL to be
+ # consistent with the "rolling" caterpillar
+ t = torch.arange(fb_start.size(2), device=fb_start.device)[None, None, :]
+ src_time = fb_start * (
+     t
+     - CL
+     * (
+         1
+         + (
+             torch.rand(fb_start.size(), device=fb_start.device) * (t // CL - 1)
+         ).long()
+     )
+ )
+ src_time[:, :, CL:] -= src_time.clone()[:, :, :-CL]
+ src_time = src_time.cumsum(dim=2)
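+ # subtracting the copy shifted by CL and taking the cumsum spreads the
+ # value written at each start position over a window of CL positions,
+ # then cancels it; the same trick is reused for src_head below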
+
+ src_head = fb_start * torch.randint(H, fb_start.size(), device=fb_start.device)
+ src_head[:, :, CL:] -= src_head.clone()[:, :, :-CL]
+ src_head = src_head.cumsum(dim=2)
+
+ # combine
+ src_delta = fb_start.clone()
+ src_delta[:, :, CL:] -= fb_start[:, :, :-CL]
+ src_delta = src_delta.cumsum(dim=2)
+ src_delta[:, :, CL:] -= CL * fb_start[:, :, :-CL]
+ src_time += src_delta.cumsum(dim=2) - 1
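+ # src_time is now the source time index inside each flash-back window,
+ # advancing in lock-step with the destination, and -1 everywhere else;
+ # insert_flash_back relies on that -1 to leave such positions untouched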
+
+ return src_time, src_head
+
+
+def insert_flash_back(rec_V, V, rec_K, K, t0, t1, CL, proba):
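+ # Overwrites, in place, random CL-long chunks of rec_V / rec_K over
+ # [t0, t1) with chunks copied from a multiple of CL earlier in V / K
+ # and from a random head; each position starts such a copy with
+ # probability proba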
+ N, H, CH = V.size(0), V.size(1), rec_V.size(1)
+
+ fbt, fbh = flash_back_time_src(N, H, t0, t1, CL, CH, proba, rec_V.device)
+
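+ # gather the source values with advanced indexing; fbt is clamped to 0
+ # for the gather, and positions where fbt < 0 (no flash back) keep
+ # their original recurrent value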
+ fbt_V = fbt[:, :, :, None]
+ fbh_V = fbh[:, :, :, None]
+ t = fbt_V.clamp(min=0)
+ n = torch.arange(V.size(0), device=V.device)[:, None, None, None]
+ d = torch.arange(V.size(3), device=V.device)[None, None, None, :]
+ q = V[:, :, t0:t1][n, fbh_V, t, d]
+ rec_V[:, :, t0:t1] = q * (fbt_V >= 0) + rec_V[:, :, t0:t1] * (fbt_V < 0)
+
+ fbt_K = fbt[:, :, :, None]
+ fbh_K = fbh[:, :, :, None]
+ t = fbt_K.clamp(min=0)
+ n = torch.arange(K.size(0), device=K.device)[:, None, None, None]
+ d = torch.arange(K.size(3), device=K.device)[None, None, None, :]
+ q = K[:, :, t0:t1][n, fbh_K, t, d]
+ rec_K[:, :, t0:t1] = q * (fbt_K >= 0) + rec_K[:, :, t0:t1] * (fbt_K < 0)
+
+
+######################################################################
+
class Caterpillar(nn.Module):
def __init__(
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
+ warnings.warn("flash back", RuntimeWarning)
+ self.proba_flashback = 0.1
+
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
self.b_G = nn.Parameter(
torch.full(
N = bs.x.size(0)
T = bs.x.size(1)
+ H = self.w_V.size(0)
DV = self.w_V.size(1)
DK = self.w_K.size(1)
- Dout = self.w_O.size(1)
+ DM = self.w_O.size(1)
CH = self.caterpillar_height
CL = self.caterpillar_length
t0 >= CL and (t1 - t0) % CL == 0
), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
+ # We cache values to deal efficiently with auto-regression
+
if bs.init_cache:
self.rec_V = X.new_zeros(N, CH, T, DV)
self.rec_K = X.new_zeros(N, CH, T, DK)
self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
- self.cache_Y = X.new_zeros(N, T, Dout)
+ self.cache_Y = X.new_zeros(N, T, DM)
######################################################################
# Compute the recurrent state
# This is the Gating sequence that modulates the storing of
# the new key and value in the CH pairs of the current
# stack. The CH gating values are independent, which means
- # that the current K/V could be stored in all the pairs of the
+ # that the current K/V could be stored in multiple pairs of the
# recurrent state, or not at all.
G = (
torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
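+ # one gating value per head, per slot of the recurrent state, and per
+ # time step (G has the "nhet" layout, i.e. (N, H, CH, time))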
- G = F.dropout(G, self.attention_dropout, self.training)
+ # That was a bad idea
+ # G = F.dropout(G, self.attention_dropout, self.training)
V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
# by updating that at time t-L, the parallel scan operates
# with a period of L. To do so we split the time indexing in
# two axes, the second of size CL, and run the parallel scan
- # using the other alone as the sequence index.
+ # using the other as the sequence index.
A = A.unflatten(2, (-1, CL))
gated_V = gated_V.unflatten(2, (-1, CL))
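+ # dim 2 (time) is now split into (time // CL, CL); the scan below runs
+ # along the first of these two axes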
self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+ if self.training and self.proba_flashback:
+ # insert_flash_back(
+ # self.rec_V,
+ # V,
+ # self.rec_K,
+ # K,
+ # t0,
+ # t1,
+ # CL,
+ # proba=self.proba_flashback / CL,
+ # )
+
+ n = torch.arange(N, device=X.device)[:, None, None, None]
+ t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+ dv = torch.arange(DV, device=X.device)[None, None, None, :]
+ dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+ u = (
+     torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+ ) * CL
+
+ src_time = t - u - t0
+ src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
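+ # u is a random, CL-aligned offset; src_time and src_head pick, for
+ # every position, the time step and the head of V / K to copy from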
+
+ mask_V = (torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback).long()
+ self.rec_V[:, :, t0:t1] = (
+ mask_V * V[n, src_head, src_time, dv]
+ + (1 - mask_V) * self.rec_V[:, :, t0:t1]
+ )
+
+ mask_K = (torch.rand(N, CH, t1 - t0, DK, device=X.device) <= self.proba_flashback).long()
+ self.rec_K[:, :, t0:t1] = (
+ mask_K * K[n, src_head, src_time, dk]
+ + (1 - mask_K) * self.rec_K[:, :, t0:t1]
+ )
+
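+ # presumably a temporary stop to inspect the flash-back insertion
+ # while developing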
+ exit(0)
+
######################################################################
# compute the readout