# with a caching mechanism for keys and values to avoid an O(N^3) cost
# for auto-regression.
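+# (without the cache, generating token t re-runs attention over the whole
+# length-t prefix, i.e. O(t^2) work per step and O(N^3) overall; caching K
+# and V makes each step O(t), hence O(N^2) for a full sequence)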
+# This implementation is equipped with RNN layers to replace the MHA
+
import math, warnings

import torch, einops

from torch import nn
from torch.nn import functional as F
# 1 for the successive tokens.
#
# Modules able to process brackets may implement a cache that is
-# resetted when the input bracket starts at t=0
+# reset when init_cache is True
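+# (intended protocol, as used below: a module consumes
+# bs.x[:, bs.first : bs.first + bs.nb] and re-initializes its internal
+# cache whenever bs.init_cache is True)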
class BracketedSequence:
self.caterpillar_height = caterpillar_height
self.attention_dropout = attention_dropout
+ self.proba_flashback = 0.0
+ self.proba_gate_dropout = 0.0
+
self.w_G = randw(nb_heads, caterpillar_height, dim_model)
self.b_G = nn.Parameter(
torch.full(
N = bs.x.size(0)
T = bs.x.size(1)
+ H = self.w_V.size(0)
DV = self.w_V.size(1)
DK = self.w_K.size(1)
- Dout = self.w_O.size(1)
+ DM = self.w_O.size(1)
CH = self.caterpillar_height
CL = self.caterpillar_length
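+        # (notation: N batch size, T total sequence length, H heads,
+        # DV / DK value / key width, DM model width, CH / CL caterpillar
+        # height / length)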
t0 >= CL and (t1 - t0) % CL == 0
), f"bs.first should be greater than caterpillar_length, and bs.nb should be a multiple of caterpillar_length"
+ # We cache values to deal efficiently with auto-regression
+
if bs.init_cache:
self.rec_V = X.new_zeros(N, CH, T, DV)
self.rec_K = X.new_zeros(N, CH, T, DK)
self.rec_V[:, :, t0 - CL : t0] = self.init_V_rec[None, :, :, :]
self.rec_K[:, :, t0 - CL : t0] = self.init_K_rec[None, :, :, :]
- self.cache_Y = X.new_zeros(N, T, Dout)
+ self.cache_Y = X.new_zeros(N, T, DM)
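+            # rec_V / rec_K hold the time-indexed recurrent state, one
+            # (T, D) slab per caterpillar row; the CL columns before t0
+            # carry the learned initial state, and cache_Y accumulates the
+            # per-position readout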
######################################################################
# Compute the recurrent state
# This is the Gating sequence that modulates the storing of
# the new key and value in the CH pairs of the current
# stack. The CH gating values are independent, which means
- # that the current K/V could be stored in all the pairs of the
+ # that the current K/V could be stored in multiple pairs of the
# recurrent state, or not at all.
G = (
torch.einsum("ntc,hec->nhet", X, self.w_G) + self.b_G[None, :, :, None]
).sigmoid()
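+        # (shape note, assuming X is the bracketed slice of the input: X is
+        # (N, t1 - t0, D), w_G is (H, CH, D), so G is (N, H, CH, t1 - t0)
+        # with G[n, h, e, t] = sigmoid(w_G[h, e] . X[n, t] + b_G[h, e]))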
+ if self.training and self.proba_gate_dropout > 0.0:
+            warnings.warn("gate dropout", RuntimeWarning)
+ epsilon = 0.5
+
+ # That was a bad idea
+ # G = F.dropout(G, self.attention_dropout, self.training)
+
V = torch.einsum("ntc,hdc->nhtd", X, self.w_V)
K = torch.einsum("ntc,hdc->nhtd", X, self.w_K)
# We prepare the arguments for the parallel scan
+ # Clip the gating
+ warnings.warn("gating clipping", RuntimeWarning)
+ G = G / G.sum(1, keepdim=True).clamp(min=1)
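+        # after this rescaling sum_h G[n, h, e, t] <= 1, so the decay
+        # A = 1 - G.sum(1) below lies in [0, 1] and A + G.sum(1) == 1: each
+        # update is a convex combination of the previous state and the
+        # per-head writes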
+
A = 1 - G.sum(1)
gated_V = torch.einsum("nhet,nhtd->netd", G, V)
gated_K = torch.einsum("nhet,nhtd->netd", G, K)
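+        # The scan realizes, for each caterpillar row e and t in [t0, t1),
+        #
+        #   rec[n, e, t] = A[n, e, t - t0] * rec[n, e, t - CL] + gated[n, e, t - t0]
+        #
+        # A sequential sketch of what it computes (for intuition only, not
+        # the actual parallel implementation):
+        #
+        #   for i in range(0, t1 - t0, CL):
+        #       self.rec_V[:, :, t0 + i : t0 + i + CL] = (
+        #           A[:, :, i : i + CL, None]
+        #           * self.rec_V[:, :, t0 + i - CL : t0 + i]
+        #           + gated_V[:, :, i : i + CL]
+        #       )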
# by updating that at time t-L, the parallel scan operates
# with a period of L. To do so we split the time indexing in
# two axes, the second of size CL, and run the parallel scan
- # using the other alone as the sequence index.
+ # using the other as the sequence index.
A = A.unflatten(2, (-1, CL))
gated_V = gated_V.unflatten(2, (-1, CL))
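+        # e.g. with t1 - t0 == 128 and CL == 32, A goes from (N, CH, 128)
+        # to (N, CH, 4, 32): the scan advances along the axis of size 4,
+        # moving all 32 within-segment offsets in lock-step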
self.rec_V[:, :, t0:t1] = next_V.flatten(2, 3)
self.rec_K[:, :, t0:t1] = next_K.flatten(2, 3)
+ if self.training and self.proba_flashback > 0.0:
+ warnings.warn("flash back", RuntimeWarning)
+ # This piece of code makes the assumption that there is
+ # nothing informative before t0, otherwise we'd have to
+ # implement a cache for V and K too. This should not be
+ # too much of a problem since this is used only during
+            # training, where full sequences are available.
+
+ n = torch.arange(N, device=X.device)[:, None, None, None]
+ t = torch.arange(t0, t1, device=X.device)[None, None, :, None]
+ dv = torch.arange(DV, device=X.device)[None, None, None, :]
+ dk = torch.arange(DK, device=X.device)[None, None, None, :]
+
+ u = (
+ torch.rand(N, CH, t1 - t0, 1, device=X.device).mul(t).long() // CL
+ ) * CL
+
+ src_time = t - u - t0
+ src_head = torch.randint(H, (N, CH, t1 - t0, 1), device=X.device)
+
+ mask = (
+ torch.rand(N, CH, t1 - t0, DV, device=X.device) <= self.proba_flashback
+ ).long()
+
+ self.rec_V[:, :, t0:t1] = (
+ mask * V[n, src_head, src_time, dv]
+ + (1 - mask) * self.rec_V[:, :, t0:t1]
+ )
+
+ self.rec_K[:, :, t0:t1] = (
+ mask * K[n, src_head, src_time, dk]
+ + (1 - mask) * self.rec_K[:, :, t0:t1]
+ )
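+            # Net effect: each scalar of the recurrent state is, with
+            # probability proba_flashback, overwritten by a raw V (resp. K)
+            # taken at a random earlier time with the same offset modulo CL
+            # and at a random head, so training also exposes the readout to
+            # unprocessed past tokens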
+
######################################################################
# compute the readout
):
super().__init__()
- assert attention_layer in {"mha", "dumbrec", "kvrec", "caterpillar"}
+ assert attention_layer in {
+ "mha",
+ "dumbrec",
+ "kvrec",
+ "caterpillar",
+ }, f"Unknown attention operator {attention_layer}."
if attention_layer == "caterpillar":
assert nb_lines % caterpillar_height == 0
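+            # e.g. nb_lines == 128 and caterpillar_height == 4 would give a
+            # caterpillar_length of 32, assuming caterpillar_length is
+            # nb_lines // caterpillar_height (the recurrent state being a
+            # height x length grid of K / V lines)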